commit 1d05938b25

cmd/resync.go (new file, 101 lines)
@@ -0,0 +1,101 @@
+// Copyright © 2020 Vulcanize, Inc
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package cmd
+
+import (
+    log "github.com/sirupsen/logrus"
+    "github.com/spf13/cobra"
+    "github.com/spf13/viper"
+
+    "github.com/vulcanize/vulcanizedb/pkg/ipfs"
+    "github.com/vulcanize/vulcanizedb/pkg/super_node/resync"
+)
+
+// resyncCmd represents the resync command
+var resyncCmd = &cobra.Command{
+    Use:   "resync",
+    Short: "Resync historical data",
+    Long:  `Use this command to fill in sections of missing data in the super node`,
+    Run: func(cmd *cobra.Command, args []string) {
+        subCommand = cmd.CalledAs()
+        logWithCommand = *log.WithField("SubCommand", subCommand)
+        rsyncCmdCommand()
+    },
+}
+
+func rsyncCmdCommand() {
+    rConfig, err := resync.NewReSyncConfig()
+    if err != nil {
+        logWithCommand.Fatal(err)
+    }
+    if err := ipfs.InitIPFSPlugins(); err != nil {
+        logWithCommand.Fatal(err)
+    }
+    rService, err := resync.NewResyncService(rConfig)
+    if err != nil {
+        logWithCommand.Fatal(err)
+    }
+    if err := rService.Resync(); err != nil {
+        logWithCommand.Fatal(err)
+    }
+    logWithCommand.Infof("%s %s resync finished", rConfig.Chain.String(), rConfig.ResyncType.String())
+}
+
+func init() {
+    rootCmd.AddCommand(resyncCmd)
+
+    // flags
+    resyncCmd.PersistentFlags().String("ipfs-path", "", "ipfs repository path")
+
+    resyncCmd.PersistentFlags().String("resync-chain", "", "which chain to support, options are currently Ethereum or Bitcoin.")
+    resyncCmd.PersistentFlags().String("resync-type", "", "which type of data to resync")
+    resyncCmd.PersistentFlags().Int("resync-start", 0, "block height to start resync")
+    resyncCmd.PersistentFlags().Int("resync-stop", 0, "block height to stop resync")
+    resyncCmd.PersistentFlags().Int("resync-batch-size", 0, "data fetching batch size")
+    resyncCmd.PersistentFlags().Int("resync-batch-number", 0, "how many goroutines to fetch data concurrently")
+    resyncCmd.PersistentFlags().Bool("resync-clear-old-cache", false, "if true, clear out old data of the provided type within the resync range before resyncing")
+
+    resyncCmd.PersistentFlags().String("btc-http-path", "", "http url for bitcoin node")
+    resyncCmd.PersistentFlags().String("btc-password", "", "password for btc node")
+    resyncCmd.PersistentFlags().String("btc-username", "", "username for btc node")
+    resyncCmd.PersistentFlags().String("btc-node-id", "", "btc node id")
+    resyncCmd.PersistentFlags().String("btc-client-name", "", "btc client name")
+    resyncCmd.PersistentFlags().String("btc-genesis-block", "", "btc genesis block hash")
+    resyncCmd.PersistentFlags().String("btc-network-id", "", "btc network id")
+
+    resyncCmd.PersistentFlags().String("eth-http-path", "", "http url for ethereum node")
+
+    // and their bindings
+    viper.BindPFlag("ipfs.path", resyncCmd.PersistentFlags().Lookup("ipfs-path"))
+
+    viper.BindPFlag("resync.chain", resyncCmd.PersistentFlags().Lookup("resync-chain"))
+    viper.BindPFlag("resync.type", resyncCmd.PersistentFlags().Lookup("resync-type"))
+    viper.BindPFlag("resync.start", resyncCmd.PersistentFlags().Lookup("resync-start"))
+    viper.BindPFlag("resync.stop", resyncCmd.PersistentFlags().Lookup("resync-stop"))
+    viper.BindPFlag("resync.batchSize", resyncCmd.PersistentFlags().Lookup("resync-batch-size"))
+    viper.BindPFlag("resync.batchNumber", resyncCmd.PersistentFlags().Lookup("resync-batch-number"))
+    viper.BindPFlag("resync.clearOldCache", resyncCmd.PersistentFlags().Lookup("resync-clear-old-cache"))
+
+    viper.BindPFlag("bitcoin.httpPath", resyncCmd.PersistentFlags().Lookup("btc-http-path"))
+    viper.BindPFlag("bitcoin.pass", resyncCmd.PersistentFlags().Lookup("btc-password"))
+    viper.BindPFlag("bitcoin.user", resyncCmd.PersistentFlags().Lookup("btc-username"))
+    viper.BindPFlag("bitcoin.nodeID", resyncCmd.PersistentFlags().Lookup("btc-node-id"))
+    viper.BindPFlag("bitcoin.clientName", resyncCmd.PersistentFlags().Lookup("btc-client-name"))
+    viper.BindPFlag("bitcoin.genesisBlock", resyncCmd.PersistentFlags().Lookup("btc-genesis-block"))
+    viper.BindPFlag("bitcoin.networkID", resyncCmd.PersistentFlags().Lookup("btc-network-id"))
+
+    viper.BindPFlag("ethereum.httpPath", resyncCmd.PersistentFlags().Lookup("eth-http-path"))
+}
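Note on the wiring above: each cobra flag is bound to a namespaced viper key (`resync-start` maps to `resync.start`), so the same value can arrive from the command line, a TOML config file, or the environment. A minimal, self-contained sketch of this flag-to-viper pattern (the `demo` command is hypothetical, not part of this commit):

    package main

    import (
        "fmt"

        "github.com/spf13/cobra"
        "github.com/spf13/viper"
    )

    func main() {
        // Hypothetical command illustrating the BindPFlag pattern used by resyncCmd.
        demoCmd := &cobra.Command{
            Use: "demo",
            Run: func(cmd *cobra.Command, args []string) {
                // The viper key resolves from the flag, a config file, or an env var.
                fmt.Println("resync.start =", viper.GetInt("resync.start"))
            },
        }
        demoCmd.PersistentFlags().Int("resync-start", 0, "block height to start resync")
        _ = viper.BindPFlag("resync.start", demoCmd.PersistentFlags().Lookup("resync-start"))
        if err := demoCmd.Execute(); err != nil {
            fmt.Println(err)
        }
    }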
@@ -41,12 +41,9 @@ var (
 	databaseConfig       config.Database
 	genConfig            config.Plugin
 	ipc                  string
-	levelDbPath          string
 	queueRecheckInterval time.Duration
 	startingBlockNumber  int64
 	storageDiffsPath     string
-	syncAll              bool
-	endingBlockNumber    int64
 	recheckHeadersArg    bool
 	subCommand           string
 	logWithCommand       log.Entry
@@ -81,7 +78,6 @@ func initFuncs(cmd *cobra.Command, args []string) {

 func setViperConfigs() {
 	ipc = viper.GetString("client.ipcpath")
-	levelDbPath = viper.GetString("client.leveldbpath")
 	storageDiffsPath = viper.GetString("filesystem.storageDiffsPath")
 	storageDiffsSource = viper.GetString("storageDiffs.source")
 	databaseConfig = config.Database{
@@ -114,6 +110,7 @@ func init() {
 	viper.AutomaticEnv()

 	rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file location")
+	rootCmd.PersistentFlags().String("logfile", "", "file path for logging")
 	rootCmd.PersistentFlags().String("database-name", "vulcanize_public", "database name")
 	rootCmd.PersistentFlags().Int("database-port", 5432, "database port")
 	rootCmd.PersistentFlags().String("database-hostname", "localhost", "database hostname")
@@ -126,6 +123,7 @@ func init() {
 	rootCmd.PersistentFlags().String("exporter-name", "exporter", "name of exporter plugin")
 	rootCmd.PersistentFlags().String("log-level", log.InfoLevel.String(), "Log level (trace, debug, info, warn, error, fatal, panic")

+	viper.BindPFlag("logfile", rootCmd.PersistentFlags().Lookup("logfile"))
 	viper.BindPFlag("database.name", rootCmd.PersistentFlags().Lookup("database-name"))
 	viper.BindPFlag("database.port", rootCmd.PersistentFlags().Lookup("database-port"))
 	viper.BindPFlag("database.hostname", rootCmd.PersistentFlags().Lookup("database-hostname"))
@@ -67,7 +67,11 @@ func streamEthSubscription() {
 	payloadChan := make(chan super_node.SubscriptionPayload, 20000)

 	// Subscribe to the super node service with the given config/filter parameters
-	sub, err := str.Stream(payloadChan, ethSubConfig)
+	rlpParams, err := rlp.EncodeToBytes(ethSubConfig)
+	if err != nil {
+		logWithCommand.Fatal(err)
+	}
+	sub, err := str.Stream(payloadChan, rlpParams)
 	if err != nil {
 		logWithCommand.Fatal(err)
 	}
@@ -80,24 +84,22 @@ func streamEthSubscription() {
 			logWithCommand.Error(payload.Err)
 			continue
 		}
-		data, ok := payload.Data.(eth.StreamResponse)
-		if !ok {
-			logWithCommand.Warnf("payload data expected type %T got %T", eth.StreamResponse{}, payload.Data)
+		var ethData eth.IPLDs
+		if err := rlp.DecodeBytes(payload.Data, &ethData); err != nil {
+			logWithCommand.Error(err)
 			continue
 		}
-		for _, headerRlp := range data.HeadersRlp {
 		var header types.Header
-		err = rlp.Decode(bytes.NewBuffer(headerRlp), &header)
+		err = rlp.Decode(bytes.NewBuffer(ethData.Header.Data), &header)
 		if err != nil {
 			logWithCommand.Error(err)
 			continue
 		}
 		fmt.Printf("Header number %d, hash %s\n", header.Number.Int64(), header.Hash().Hex())
 		fmt.Printf("header: %v\n", header)
-		}
-		for _, trxRlp := range data.TransactionsRlp {
+		for _, trxRlp := range ethData.Transactions {
 			var trx types.Transaction
-			buff := bytes.NewBuffer(trxRlp)
+			buff := bytes.NewBuffer(trxRlp.Data)
 			stream := rlp.NewStream(buff, 0)
 			err := trx.DecodeRLP(stream)
 			if err != nil {
@@ -107,9 +109,9 @@ func streamEthSubscription() {
 			fmt.Printf("Transaction with hash %s\n", trx.Hash().Hex())
 			fmt.Printf("trx: %v\n", trx)
 		}
-		for _, rctRlp := range data.ReceiptsRlp {
-			var rct types.ReceiptForStorage
-			buff := bytes.NewBuffer(rctRlp)
+		for _, rctRlp := range ethData.Receipts {
+			var rct types.Receipt
+			buff := bytes.NewBuffer(rctRlp.Data)
 			stream := rlp.NewStream(buff, 0)
 			err = rct.DecodeRLP(stream)
 			if err != nil {
@@ -129,40 +131,34 @@ func streamEthSubscription() {
 			}
 		}
 		// This assumes leafs only
-		for key, stateRlp := range data.StateNodesRlp {
+		for _, stateNode := range ethData.StateNodes {
 			var acct state.Account
-			err = rlp.Decode(bytes.NewBuffer(stateRlp), &acct)
+			err = rlp.DecodeBytes(stateNode.IPLD.Data, &acct)
 			if err != nil {
 				logWithCommand.Error(err)
 				continue
 			}
-			fmt.Printf("Account for key %s, and root %s, with balance %d\n",
-				key.Hex(), acct.Root.Hex(), acct.Balance.Int64())
-			fmt.Printf("state account: %v\n", acct)
+			fmt.Printf("Account for key %s, and root %s, with balance %s\n",
+				stateNode.StateLeafKey.Hex(), acct.Root.Hex(), acct.Balance.String())
+			fmt.Printf("state account: %+v\n", acct)
 		}
-		for stateKey, mappedRlp := range data.StorageNodesRlp {
-			fmt.Printf("Storage for state key %s ", stateKey.Hex())
-			for storageKey, storageRlp := range mappedRlp {
-				fmt.Printf("with storage key %s\n", storageKey.Hex())
+		for _, storageNode := range ethData.StorageNodes {
+			fmt.Printf("Storage for state key %s ", storageNode.StateLeafKey.Hex())
+			fmt.Printf("with storage key %s\n", storageNode.StorageLeafKey.Hex())
 			var i []interface{}
-			err := rlp.DecodeBytes(storageRlp, &i)
+			err := rlp.DecodeBytes(storageNode.IPLD.Data, &i)
 			if err != nil {
 				logWithCommand.Error(err)
 				continue
 			}
-			// if a leaf node
-			if len(i) == 2 {
-				keyBytes, ok := i[0].([]byte)
-				if !ok {
-					continue
-				}
-				valueBytes, ok := i[1].([]byte)
+			// if a value node
+			if len(i) == 1 {
+				valueBytes, ok := i[0].([]byte)
 				if !ok {
 					continue
 				}
 				fmt.Printf("Storage leaf key: %s, and value hash: %s\n",
-					common.BytesToHash(keyBytes).Hex(), common.BytesToHash(valueBytes).Hex())
+					storageNode.StorageLeafKey.Hex(), common.BytesToHash(valueBytes).Hex())
 			}
-			}
 		}
 	}
 case err = <-sub.Err():
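The change above replaces typed subscription payloads with opaque RLP bytes: the parameters are serialized with rlp.EncodeToBytes before str.Stream, and each payload's Data field is deserialized with rlp.DecodeBytes into an eth.IPLDs value. A minimal round-trip sketch of that encode/decode pattern (the params struct here is a stand-in, not the actual eth.IPLDs or subscription config):

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/rlp"
    )

    // params stands in for the subscription settings sent over the wire.
    type params struct {
        Start uint64
        Stop  uint64
    }

    func main() {
        // Sender side: serialize the struct to RLP bytes.
        raw, err := rlp.EncodeToBytes(params{Start: 100, Stop: 200})
        if err != nil {
            panic(err)
        }
        // Receiver side: decode the opaque bytes back into the struct.
        var out params
        if err := rlp.DecodeBytes(raw, &out); err != nil {
            panic(err)
        }
        fmt.Printf("round-trip: %+v\n", out)
    }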
@@ -18,12 +18,13 @@ package cmd
 import (
 	"sync"

-	"github.com/vulcanize/vulcanizedb/pkg/ipfs"
+	"github.com/spf13/viper"

 	"github.com/ethereum/go-ethereum/rpc"
 	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"

+	"github.com/vulcanize/vulcanizedb/pkg/ipfs"
 	"github.com/vulcanize/vulcanizedb/pkg/super_node"
 	"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
 )
@@ -50,12 +51,8 @@ and fill in gaps in the data
 	},
 }

-func init() {
-	rootCmd.AddCommand(superNodeCmd)
-}
-
 func superNode() {
-	superNodeConfigs, err := shared.NewSuperNodeConfigs()
+	superNodeConfig, err := super_node.NewSuperNodeConfig()
 	if err != nil {
 		logWithCommand.Fatal(err)
 	}
@@ -63,21 +60,20 @@ func superNode() {
 		logWithCommand.Fatal(err)
 	}
 	wg := &sync.WaitGroup{}
-	for _, superNodeConfig := range superNodeConfigs {
 	superNode, err := super_node.NewSuperNode(superNodeConfig)
 	if err != nil {
 		logWithCommand.Fatal(err)
 	}
-	var forwardPayloadChan chan shared.StreamedIPLDs
+	var forwardPayloadChan chan shared.ConvertedData
 	if superNodeConfig.Serve {
-		forwardPayloadChan = make(chan shared.StreamedIPLDs, super_node.PayloadChanBufferSize)
-		superNode.ScreenAndServe(wg, forwardPayloadChan)
+		forwardPayloadChan = make(chan shared.ConvertedData, super_node.PayloadChanBufferSize)
+		superNode.FilterAndServe(wg, forwardPayloadChan)
 		if err := startServers(superNode, superNodeConfig); err != nil {
 			logWithCommand.Fatal(err)
 		}
 	}
 	if superNodeConfig.Sync {
-		if err := superNode.SyncAndPublish(wg, forwardPayloadChan); err != nil {
+		if err := superNode.ProcessData(wg, forwardPayloadChan); err != nil {
 			logWithCommand.Fatal(err)
 		}
 	}
@@ -86,13 +82,12 @@ func superNode() {
 		if err != nil {
 			logWithCommand.Fatal(err)
 		}
-		backFiller.FillGaps(wg)
-		}
+		backFiller.FillGapsInSuperNode(wg)
 	}
 	wg.Wait()
 }

-func startServers(superNode super_node.SuperNode, settings *shared.SuperNodeConfig) error {
+func startServers(superNode super_node.SuperNode, settings *super_node.Config) error {
 	_, _, err := rpc.StartIPCEndpoint(settings.IPCEndpoint, superNode.APIs())
 	if err != nil {
 		return err
@@ -104,3 +99,61 @@ func startServers(superNode super_node.SuperNode, settings *shared.SuperNodeConf
 	_, _, err = rpc.StartHTTPEndpoint(settings.HTTPEndpoint, superNode.APIs(), []string{settings.Chain.API()}, nil, nil, rpc.HTTPTimeouts{})
 	return err
 }
+
+func init() {
+	rootCmd.AddCommand(superNodeCmd)
+
+	// flags
+	superNodeCmd.PersistentFlags().String("ipfs-path", "", "ipfs repository path")
+
+	superNodeCmd.PersistentFlags().String("supernode-chain", "", "which chain to support, options are currently Ethereum or Bitcoin.")
+	superNodeCmd.PersistentFlags().Bool("supernode-server", false, "turn vdb server on or off")
+	superNodeCmd.PersistentFlags().String("supernode-ws-path", "", "vdb server ws path")
+	superNodeCmd.PersistentFlags().String("supernode-http-path", "", "vdb server http path")
+	superNodeCmd.PersistentFlags().String("supernode-ipc-path", "", "vdb server ipc path")
+	superNodeCmd.PersistentFlags().Bool("supernode-sync", false, "turn vdb sync on or off")
+	superNodeCmd.PersistentFlags().Int("supernode-workers", 0, "how many worker goroutines to publish and index data")
+	superNodeCmd.PersistentFlags().Bool("supernode-back-fill", false, "turn vdb backfill on or off")
+	superNodeCmd.PersistentFlags().Int("supernode-frequency", 0, "how often (in seconds) the backfill process checks for gaps")
+	superNodeCmd.PersistentFlags().Int("supernode-batch-size", 0, "data fetching batch size")
+	superNodeCmd.PersistentFlags().Int("supernode-batch-number", 0, "how many goroutines to fetch data concurrently")
+
+	superNodeCmd.PersistentFlags().String("btc-ws-path", "", "ws url for bitcoin node")
+	superNodeCmd.PersistentFlags().String("btc-http-path", "", "http url for bitcoin node")
+	superNodeCmd.PersistentFlags().String("btc-password", "", "password for btc node")
+	superNodeCmd.PersistentFlags().String("btc-username", "", "username for btc node")
+	superNodeCmd.PersistentFlags().String("btc-node-id", "", "btc node id")
+	superNodeCmd.PersistentFlags().String("btc-client-name", "", "btc client name")
+	superNodeCmd.PersistentFlags().String("btc-genesis-block", "", "btc genesis block hash")
+	superNodeCmd.PersistentFlags().String("btc-network-id", "", "btc network id")
+
+	superNodeCmd.PersistentFlags().String("eth-ws-path", "", "ws url for ethereum node")
+	superNodeCmd.PersistentFlags().String("eth-http-path", "", "http url for ethereum node")
+
+	// and their bindings
+	viper.BindPFlag("ipfs.path", superNodeCmd.PersistentFlags().Lookup("ipfs-path"))
+
+	viper.BindPFlag("superNode.chain", superNodeCmd.PersistentFlags().Lookup("supernode-chain"))
+	viper.BindPFlag("superNode.server", superNodeCmd.PersistentFlags().Lookup("supernode-server"))
+	viper.BindPFlag("superNode.wsPath", superNodeCmd.PersistentFlags().Lookup("supernode-ws-path"))
+	viper.BindPFlag("superNode.httpPath", superNodeCmd.PersistentFlags().Lookup("supernode-http-path"))
+	viper.BindPFlag("superNode.ipcPath", superNodeCmd.PersistentFlags().Lookup("supernode-ipc-path"))
+	viper.BindPFlag("superNode.sync", superNodeCmd.PersistentFlags().Lookup("supernode-sync"))
+	viper.BindPFlag("superNode.workers", superNodeCmd.PersistentFlags().Lookup("supernode-workers"))
+	viper.BindPFlag("superNode.backFill", superNodeCmd.PersistentFlags().Lookup("supernode-back-fill"))
+	viper.BindPFlag("superNode.frequency", superNodeCmd.PersistentFlags().Lookup("supernode-frequency"))
+	viper.BindPFlag("superNode.batchSize", superNodeCmd.PersistentFlags().Lookup("supernode-batch-size"))
+	viper.BindPFlag("superNode.batchNumber", superNodeCmd.PersistentFlags().Lookup("supernode-batch-number"))
+
+	viper.BindPFlag("bitcoin.wsPath", superNodeCmd.PersistentFlags().Lookup("btc-ws-path"))
+	viper.BindPFlag("bitcoin.httpPath", superNodeCmd.PersistentFlags().Lookup("btc-http-path"))
+	viper.BindPFlag("bitcoin.pass", superNodeCmd.PersistentFlags().Lookup("btc-password"))
+	viper.BindPFlag("bitcoin.user", superNodeCmd.PersistentFlags().Lookup("btc-username"))
+	viper.BindPFlag("bitcoin.nodeID", superNodeCmd.PersistentFlags().Lookup("btc-node-id"))
+	viper.BindPFlag("bitcoin.clientName", superNodeCmd.PersistentFlags().Lookup("btc-client-name"))
+	viper.BindPFlag("bitcoin.genesisBlock", superNodeCmd.PersistentFlags().Lookup("btc-genesis-block"))
+	viper.BindPFlag("bitcoin.networkID", superNodeCmd.PersistentFlags().Lookup("btc-network-id"))
+
+	viper.BindPFlag("ethereum.wsPath", superNodeCmd.PersistentFlags().Lookup("eth-ws-path"))
+	viper.BindPFlag("ethereum.httpPath", superNodeCmd.PersistentFlags().Lookup("eth-http-path"))
+}
cmd/watch.go (new file, 43 lines)
@@ -0,0 +1,43 @@
+// Copyright © 2020 Vulcanize, Inc
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package cmd
+
+import (
+    "fmt"
+
+    "github.com/spf13/cobra"
+)
+
+// watchCmd represents the watch command
+var watchCmd = &cobra.Command{
+    Use:   "watch",
+    Short: "Watch and transform data from a chain source",
+    Long: `This command allows one to configure a set of wasm functions and SQL trigger functions
+that call them to watch and transform data from the specified chain source.
+
+A watcher is composed of four parts:
+1) Go execution engine- this command- which fetches raw chain data and adds it to the Postgres queued ready data tables
+2) TOML config file which specifies what subset of chain data to fetch and from where and contains references to the below
+3) Set of WASM binaries which are loaded into Postgres and used by
+4) Set of PostgreSQL trigger functions which automatically act on data as it is inserted into the queued ready data tables`,
+    Run: func(cmd *cobra.Command, args []string) {
+        fmt.Println("watch called")
+    },
+}
+
+func init() {
+    rootCmd.AddCommand(watchCmd)
+}
@@ -7,6 +7,7 @@ CREATE TABLE eth.header_cids (
   cid TEXT NOT NULL,
   td NUMERIC NOT NULL,
   node_id INTEGER NOT NULL REFERENCES nodes (id) ON DELETE CASCADE,
+  reward NUMERIC NOT NULL,
   UNIQUE (block_number, block_hash)
 );

@@ -5,6 +5,7 @@ CREATE TABLE eth.uncle_cids (
   block_hash VARCHAR(66) NOT NULL,
   parent_hash VARCHAR(66) NOT NULL,
   cid TEXT NOT NULL,
+  reward NUMERIC NOT NULL,
   UNIQUE (header_id, block_hash)
 );

db/migrations/00024_create_eth_queued_data_table.sql (new file, 9 lines)
@@ -0,0 +1,9 @@
+-- +goose Up
+CREATE TABLE eth.queue_data (
+  id      SERIAL PRIMARY KEY,
+  data    BYTEA NOT NULL,
+  height  BIGINT UNIQUE NOT NULL
+);
+
+-- +goose Down
+DROP TABLE eth.queue_data;
db/migrations/00025_create_btc_queued_data_table.sql (new file, 9 lines)
@@ -0,0 +1,9 @@
+-- +goose Up
+CREATE TABLE btc.queue_data (
+  id      SERIAL PRIMARY KEY,
+  data    BYTEA NOT NULL,
+  height  BIGINT UNIQUE NOT NULL
+);
+
+-- +goose Down
+DROP TABLE btc.queue_data;
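Both queue tables key raw payloads by a UNIQUE height, so the processing side can treat them as an ordered, deduplicated queue. A sketch of how a writer might enqueue a block payload from Go, relying on that constraint to make re-queuing the same height a no-op (assumed usage with the lib/pq driver; not code from this commit):

    package main

    import (
        "database/sql"
        "log"

        _ "github.com/lib/pq" // assumed postgres driver
    )

    // queueData inserts a raw payload at the given height; the UNIQUE
    // constraint on height makes inserting the same block twice a no-op.
    func queueData(db *sql.DB, height int64, data []byte) error {
        _, err := db.Exec(
            `INSERT INTO eth.queue_data (data, height) VALUES ($1, $2)
             ON CONFLICT (height) DO NOTHING`,
            data, height,
        )
        return err
    }

    func main() {
        db, err := sql.Open("postgres",
            "postgres://vdbm:password@localhost:5432/vulcanize_public?sslmode=disable")
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()
        if err := queueData(db, 1000000, []byte{0x01}); err != nil {
            log.Fatal(err)
        }
    }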
db/migrations/00026_create_postgraphile_comments.sql (new file, 13 lines)
@@ -0,0 +1,13 @@
+-- +goose Up
+COMMENT ON TABLE public.nodes IS E'@name NodeInfo';
+COMMENT ON TABLE btc.header_cids IS E'@name BtcHeaderCids';
+COMMENT ON TABLE btc.transaction_cids IS E'@name BtcTransactionCids';
+COMMENT ON TABLE btc.queue_data IS E'@name BtcQueueData';
+COMMENT ON TABLE eth.transaction_cids IS E'@name EthTransactionCids';
+COMMENT ON TABLE eth.header_cids IS E'@name EthHeaderCids';
+COMMENT ON TABLE eth.queue_data IS E'@name EthQueueData';
+COMMENT ON TABLE public.headers IS E'@name EthHeaders';
+COMMENT ON COLUMN public.headers.node_id IS E'@name EthNodeID';
+COMMENT ON COLUMN public.nodes.node_id IS E'@name ChainNodeID';
+COMMENT ON COLUMN eth.header_cids.node_id IS E'@name EthNodeID';
+COMMENT ON COLUMN btc.header_cids.node_id IS E'@name BtcNodeID';
db/migrations/00027_update_state_cids.sql (new file, 37 lines)
@@ -0,0 +1,37 @@
+-- +goose Up
+ALTER TABLE eth.state_cids
+  ADD COLUMN state_path BYTEA;
+
+ALTER TABLE eth.state_cids
+  DROP COLUMN leaf;
+
+ALTER TABLE eth.state_cids
+  ADD COLUMN node_type INTEGER;
+
+ALTER TABLE eth.state_cids
+  ALTER COLUMN state_key DROP NOT NULL;
+
+ALTER TABLE eth.state_cids
+  DROP CONSTRAINT state_cids_header_id_state_key_key;
+
+ALTER TABLE eth.state_cids
+  ADD CONSTRAINT state_cids_header_id_state_path_key UNIQUE (header_id, state_path);
+
+-- +goose Down
+ALTER TABLE eth.state_cids
+  ADD CONSTRAINT state_cids_header_id_state_key_key UNIQUE (header_id, state_key);
+
+ALTER TABLE eth.state_cids
+  DROP CONSTRAINT state_cids_header_id_state_path_key;
+
+ALTER TABLE eth.state_cids
+  ALTER COLUMN state_key SET NOT NULL;
+
+ALTER TABLE eth.state_cids
+  DROP COLUMN node_type;
+
+ALTER TABLE eth.state_cids
+  ADD COLUMN leaf BOOLEAN NOT NULL;
+
+ALTER TABLE eth.state_cids
+  DROP COLUMN state_path;
db/migrations/00028_update_storage_cids.sql (new file, 37 lines)
@@ -0,0 +1,37 @@
+-- +goose Up
+ALTER TABLE eth.storage_cids
+  ADD COLUMN storage_path BYTEA;
+
+ALTER TABLE eth.storage_cids
+  DROP COLUMN leaf;
+
+ALTER TABLE eth.storage_cids
+  ADD COLUMN node_type INTEGER;
+
+ALTER TABLE eth.storage_cids
+  ALTER COLUMN storage_key DROP NOT NULL;
+
+ALTER TABLE eth.storage_cids
+  DROP CONSTRAINT storage_cids_state_id_storage_key_key;
+
+ALTER TABLE eth.storage_cids
+  ADD CONSTRAINT storage_cids_state_id_storage_path_key UNIQUE (state_id, storage_path);
+
+-- +goose Down
+ALTER TABLE eth.storage_cids
+  DROP CONSTRAINT storage_cids_state_id_storage_path_key;
+
+ALTER TABLE eth.storage_cids
+  ADD CONSTRAINT storage_cids_state_id_storage_key_key UNIQUE (state_id, storage_key);
+
+ALTER TABLE eth.storage_cids
+  ALTER COLUMN storage_key SET NOT NULL;
+
+ALTER TABLE eth.storage_cids
+  DROP COLUMN node_type;
+
+ALTER TABLE eth.storage_cids
+  ADD COLUMN leaf BOOLEAN NOT NULL;
+
+ALTER TABLE eth.storage_cids
+  DROP COLUMN storage_path;
db/migrations/00029_update_header_cids.sql (new file, 37 lines)
@@ -0,0 +1,37 @@
+-- +goose Up
+ALTER TABLE eth.header_cids
+  ADD COLUMN state_root VARCHAR(66);
+
+ALTER TABLE eth.header_cids
+  ADD COLUMN tx_root VARCHAR(66);
+
+ALTER TABLE eth.header_cids
+  ADD COLUMN receipt_root VARCHAR(66);
+
+ALTER TABLE eth.header_cids
+  ADD COLUMN uncle_root VARCHAR(66);
+
+ALTER TABLE eth.header_cids
+  ADD COLUMN bloom BYTEA;
+
+ALTER TABLE eth.header_cids
+  ADD COLUMN timestamp NUMERIC;
+
+-- +goose Down
+ALTER TABLE eth.header_cids
+  DROP COLUMN timestamp;
+
+ALTER TABLE eth.header_cids
+  DROP COLUMN bloom;
+
+ALTER TABLE eth.header_cids
+  DROP COLUMN uncle_root;
+
+ALTER TABLE eth.header_cids
+  DROP COLUMN receipt_root;
+
+ALTER TABLE eth.header_cids
+  DROP COLUMN tx_root;
+
+ALTER TABLE eth.header_cids
+  DROP COLUMN state_root;
db/migrations/00030_create_eth_state_accouts_table.sql (new file, 13 lines)
@@ -0,0 +1,13 @@
+-- +goose Up
+CREATE TABLE eth.state_accounts (
+  id            SERIAL PRIMARY KEY,
+  state_id      INTEGER NOT NULL REFERENCES eth.state_cids (id) ON DELETE CASCADE,
+  balance       NUMERIC NOT NULL,
+  nonce         INTEGER NOT NULL,
+  code_hash     BYTEA NOT NULL,
+  storage_root  VARCHAR(66) NOT NULL,
+  UNIQUE (state_id)
+);
+
+-- +goose Down
+DROP TABLE eth.state_accounts;
db/migrations/00031_rename_to_leaf_key.sql (new file, 13 lines)
@@ -0,0 +1,13 @@
+-- +goose Up
+ALTER TABLE eth.state_cids
+  RENAME COLUMN state_key TO state_leaf_key;
+
+ALTER TABLE eth.storage_cids
+  RENAME COLUMN storage_key TO storage_leaf_key;
+
+-- +goose Down
+ALTER TABLE eth.storage_cids
+  RENAME COLUMN storage_leaf_key TO storage_key;
+
+ALTER TABLE eth.state_cids
+  RENAME COLUMN state_leaf_key TO state_key;
(new file)
@@ -0,0 +1 @@
+package maaaybe
db/schema.sql (309 lines changed)
@@ -3,7 +3,7 @@
 --

 -- Dumped from database version 10.10
--- Dumped by pg_dump version 12.1
+-- Dumped by pg_dump version 10.10

 SET statement_timeout = 0;
 SET lock_timeout = 0;
@@ -30,8 +30,24 @@ CREATE SCHEMA btc;
 CREATE SCHEMA eth;


+--
+-- Name: plpgsql; Type: EXTENSION; Schema: -; Owner: -
+--
+
+CREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog;
+
+
+--
+-- Name: EXTENSION plpgsql; Type: COMMENT; Schema: -; Owner: -
+--
+
+COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language';
+
+
 SET default_tablespace = '';

+SET default_with_oids = false;
+
 --
 -- Name: header_cids; Type: TABLE; Schema: btc; Owner: -
 --
@@ -48,6 +64,20 @@ CREATE TABLE btc.header_cids (
 );


+--
+-- Name: TABLE header_cids; Type: COMMENT; Schema: btc; Owner: -
+--
+
+COMMENT ON TABLE btc.header_cids IS '@name BtcHeaderCids';
+
+
+--
+-- Name: COLUMN header_cids.node_id; Type: COMMENT; Schema: btc; Owner: -
+--
+
+COMMENT ON COLUMN btc.header_cids.node_id IS '@name BtcNodeID';
+
+
 --
 -- Name: header_cids_id_seq; Type: SEQUENCE; Schema: btc; Owner: -
 --
@@ -68,6 +98,44 @@ CREATE SEQUENCE btc.header_cids_id_seq
 ALTER SEQUENCE btc.header_cids_id_seq OWNED BY btc.header_cids.id;


+--
+-- Name: queue_data; Type: TABLE; Schema: btc; Owner: -
+--
+
+CREATE TABLE btc.queue_data (
+    id integer NOT NULL,
+    data bytea NOT NULL,
+    height bigint NOT NULL
+);
+
+
+--
+-- Name: TABLE queue_data; Type: COMMENT; Schema: btc; Owner: -
+--
+
+COMMENT ON TABLE btc.queue_data IS '@name BtcQueueData';
+
+
+--
+-- Name: queue_data_id_seq; Type: SEQUENCE; Schema: btc; Owner: -
+--
+
+CREATE SEQUENCE btc.queue_data_id_seq
+    AS integer
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: queue_data_id_seq; Type: SEQUENCE OWNED BY; Schema: btc; Owner: -
+--
+
+ALTER SEQUENCE btc.queue_data_id_seq OWNED BY btc.queue_data.id;
+
+
 --
 -- Name: transaction_cids; Type: TABLE; Schema: btc; Owner: -
 --
@@ -83,6 +151,13 @@ CREATE TABLE btc.transaction_cids (
 );


+--
+-- Name: TABLE transaction_cids; Type: COMMENT; Schema: btc; Owner: -
+--
+
+COMMENT ON TABLE btc.transaction_cids IS '@name BtcTransactionCids';
+
+
 --
 -- Name: transaction_cids_id_seq; Type: SEQUENCE; Schema: btc; Owner: -
 --
@@ -185,10 +260,31 @@ CREATE TABLE eth.header_cids (
     parent_hash character varying(66) NOT NULL,
     cid text NOT NULL,
     td numeric NOT NULL,
-    node_id integer NOT NULL
+    node_id integer NOT NULL,
+    reward numeric NOT NULL,
+    state_root character varying(66),
+    tx_root character varying(66),
+    receipt_root character varying(66),
+    uncle_root character varying(66),
+    bloom bytea,
+    "timestamp" numeric
 );


+--
+-- Name: TABLE header_cids; Type: COMMENT; Schema: eth; Owner: -
+--
+
+COMMENT ON TABLE eth.header_cids IS '@name EthHeaderCids';
+
+
+--
+-- Name: COLUMN header_cids.node_id; Type: COMMENT; Schema: eth; Owner: -
+--
+
+COMMENT ON COLUMN eth.header_cids.node_id IS '@name EthNodeID';
+
+
 --
 -- Name: header_cids_id_seq; Type: SEQUENCE; Schema: eth; Owner: -
 --
@@ -209,6 +305,44 @@ CREATE SEQUENCE eth.header_cids_id_seq
 ALTER SEQUENCE eth.header_cids_id_seq OWNED BY eth.header_cids.id;


+--
+-- Name: queue_data; Type: TABLE; Schema: eth; Owner: -
+--
+
+CREATE TABLE eth.queue_data (
+    id integer NOT NULL,
+    data bytea NOT NULL,
+    height bigint NOT NULL
+);
+
+
+--
+-- Name: TABLE queue_data; Type: COMMENT; Schema: eth; Owner: -
+--
+
+COMMENT ON TABLE eth.queue_data IS '@name EthQueueData';
+
+
+--
+-- Name: queue_data_id_seq; Type: SEQUENCE; Schema: eth; Owner: -
+--
+
+CREATE SEQUENCE eth.queue_data_id_seq
+    AS integer
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: queue_data_id_seq; Type: SEQUENCE OWNED BY; Schema: eth; Owner: -
+--
+
+ALTER SEQUENCE eth.queue_data_id_seq OWNED BY eth.queue_data.id;
+
+
 --
 -- Name: receipt_cids; Type: TABLE; Schema: eth; Owner: -
 --
@@ -245,6 +379,40 @@ CREATE SEQUENCE eth.receipt_cids_id_seq
 ALTER SEQUENCE eth.receipt_cids_id_seq OWNED BY eth.receipt_cids.id;


+--
+-- Name: state_accounts; Type: TABLE; Schema: eth; Owner: -
+--
+
+CREATE TABLE eth.state_accounts (
+    id integer NOT NULL,
+    state_id integer NOT NULL,
+    balance numeric NOT NULL,
+    nonce integer NOT NULL,
+    code_hash bytea NOT NULL,
+    storage_root character varying(66) NOT NULL
+);
+
+
+--
+-- Name: state_accounts_id_seq; Type: SEQUENCE; Schema: eth; Owner: -
+--
+
+CREATE SEQUENCE eth.state_accounts_id_seq
+    AS integer
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: state_accounts_id_seq; Type: SEQUENCE OWNED BY; Schema: eth; Owner: -
+--
+
+ALTER SEQUENCE eth.state_accounts_id_seq OWNED BY eth.state_accounts.id;
+
+
 --
 -- Name: state_cids; Type: TABLE; Schema: eth; Owner: -
 --
@@ -252,9 +420,10 @@ ALTER SEQUENCE eth.receipt_cids_id_seq OWNED BY eth.receipt_cids.id;
 CREATE TABLE eth.state_cids (
     id integer NOT NULL,
     header_id integer NOT NULL,
-    state_key character varying(66) NOT NULL,
-    leaf boolean NOT NULL,
-    cid text NOT NULL
+    state_leaf_key character varying(66),
+    cid text NOT NULL,
+    state_path bytea,
+    node_type integer
 );

@@ -285,9 +454,10 @@ ALTER SEQUENCE eth.state_cids_id_seq OWNED BY eth.state_cids.id;
 CREATE TABLE eth.storage_cids (
     id integer NOT NULL,
     state_id integer NOT NULL,
-    storage_key character varying(66) NOT NULL,
-    leaf boolean NOT NULL,
-    cid text NOT NULL
+    storage_leaf_key character varying(66),
+    cid text NOT NULL,
+    storage_path bytea,
+    node_type integer
 );

@@ -326,6 +496,13 @@ CREATE TABLE eth.transaction_cids (
 );


+--
+-- Name: TABLE transaction_cids; Type: COMMENT; Schema: eth; Owner: -
+--
+
+COMMENT ON TABLE eth.transaction_cids IS '@name EthTransactionCids';
+
+
 --
 -- Name: transaction_cids_id_seq; Type: SEQUENCE; Schema: eth; Owner: -
 --
@@ -355,7 +532,8 @@ CREATE TABLE eth.uncle_cids (
     header_id integer NOT NULL,
     block_hash character varying(66) NOT NULL,
     parent_hash character varying(66) NOT NULL,
-    cid text NOT NULL
+    cid text NOT NULL,
+    reward numeric NOT NULL
 );

@@ -616,6 +794,20 @@ CREATE TABLE public.headers (
 );


+--
+-- Name: TABLE headers; Type: COMMENT; Schema: public; Owner: -
+--
+
+COMMENT ON TABLE public.headers IS '@name EthHeaders';
+
+
+--
+-- Name: COLUMN headers.node_id; Type: COMMENT; Schema: public; Owner: -
+--
+
+COMMENT ON COLUMN public.headers.node_id IS '@name EthNodeID';
+
+
 --
 -- Name: headers_id_seq; Type: SEQUENCE; Schema: public; Owner: -
 --
@@ -649,6 +841,20 @@ CREATE TABLE public.nodes (
 );


+--
+-- Name: TABLE nodes; Type: COMMENT; Schema: public; Owner: -
+--
+
+COMMENT ON TABLE public.nodes IS '@name NodeInfo';
+
+
+--
+-- Name: COLUMN nodes.node_id; Type: COMMENT; Schema: public; Owner: -
+--
+
+COMMENT ON COLUMN public.nodes.node_id IS '@name ChainNodeID';
+
+
 --
 -- Name: nodes_id_seq; Type: SEQUENCE; Schema: public; Owner: -
 --
@@ -771,6 +977,13 @@ ALTER SEQUENCE public.watched_logs_id_seq OWNED BY public.watched_logs.id;
 ALTER TABLE ONLY btc.header_cids ALTER COLUMN id SET DEFAULT nextval('btc.header_cids_id_seq'::regclass);


+--
+-- Name: queue_data id; Type: DEFAULT; Schema: btc; Owner: -
+--
+
+ALTER TABLE ONLY btc.queue_data ALTER COLUMN id SET DEFAULT nextval('btc.queue_data_id_seq'::regclass);
+
+
 --
 -- Name: transaction_cids id; Type: DEFAULT; Schema: btc; Owner: -
 --
@@ -799,6 +1012,13 @@ ALTER TABLE ONLY btc.tx_outputs ALTER COLUMN id SET DEFAULT nextval('btc.tx_outp
 ALTER TABLE ONLY eth.header_cids ALTER COLUMN id SET DEFAULT nextval('eth.header_cids_id_seq'::regclass);


+--
+-- Name: queue_data id; Type: DEFAULT; Schema: eth; Owner: -
+--
+
+ALTER TABLE ONLY eth.queue_data ALTER COLUMN id SET DEFAULT nextval('eth.queue_data_id_seq'::regclass);
+
+
 --
 -- Name: receipt_cids id; Type: DEFAULT; Schema: eth; Owner: -
 --
@@ -806,6 +1026,13 @@ ALTER TABLE ONLY eth.header_cids ALTER COLUMN id SET DEFAULT nextval('eth.header
 ALTER TABLE ONLY eth.receipt_cids ALTER COLUMN id SET DEFAULT nextval('eth.receipt_cids_id_seq'::regclass);


+--
+-- Name: state_accounts id; Type: DEFAULT; Schema: eth; Owner: -
+--
+
+ALTER TABLE ONLY eth.state_accounts ALTER COLUMN id SET DEFAULT nextval('eth.state_accounts_id_seq'::regclass);
+
+
 --
 -- Name: state_cids id; Type: DEFAULT; Schema: eth; Owner: -
 --
@@ -927,6 +1154,22 @@ ALTER TABLE ONLY btc.header_cids
     ADD CONSTRAINT header_cids_pkey PRIMARY KEY (id);


+--
+-- Name: queue_data queue_data_height_key; Type: CONSTRAINT; Schema: btc; Owner: -
+--
+
+ALTER TABLE ONLY btc.queue_data
+    ADD CONSTRAINT queue_data_height_key UNIQUE (height);
+
+
+--
+-- Name: queue_data queue_data_pkey; Type: CONSTRAINT; Schema: btc; Owner: -
+--
+
+ALTER TABLE ONLY btc.queue_data
+    ADD CONSTRAINT queue_data_pkey PRIMARY KEY (id);
+
+
 --
 -- Name: transaction_cids transaction_cids_pkey; Type: CONSTRAINT; Schema: btc; Owner: -
 --
@@ -991,6 +1234,22 @@ ALTER TABLE ONLY eth.header_cids
     ADD CONSTRAINT header_cids_pkey PRIMARY KEY (id);


+--
+-- Name: queue_data queue_data_height_key; Type: CONSTRAINT; Schema: eth; Owner: -
+--
+
+ALTER TABLE ONLY eth.queue_data
+    ADD CONSTRAINT queue_data_height_key UNIQUE (height);
+
+
+--
+-- Name: queue_data queue_data_pkey; Type: CONSTRAINT; Schema: eth; Owner: -
+--
+
+ALTER TABLE ONLY eth.queue_data
+    ADD CONSTRAINT queue_data_pkey PRIMARY KEY (id);
+
+
 --
 -- Name: receipt_cids receipt_cids_pkey; Type: CONSTRAINT; Schema: eth; Owner: -
 --
@@ -1000,11 +1259,27 @@ ALTER TABLE ONLY eth.receipt_cids


 --
--- Name: state_cids state_cids_header_id_state_key_key; Type: CONSTRAINT; Schema: eth; Owner: -
+-- Name: state_accounts state_accounts_pkey; Type: CONSTRAINT; Schema: eth; Owner: -
+--
+
+ALTER TABLE ONLY eth.state_accounts
+    ADD CONSTRAINT state_accounts_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: state_accounts state_accounts_state_id_key; Type: CONSTRAINT; Schema: eth; Owner: -
+--
+
+ALTER TABLE ONLY eth.state_accounts
+    ADD CONSTRAINT state_accounts_state_id_key UNIQUE (state_id);
+
+
+--
+-- Name: state_cids state_cids_header_id_state_path_key; Type: CONSTRAINT; Schema: eth; Owner: -
 --

 ALTER TABLE ONLY eth.state_cids
-    ADD CONSTRAINT state_cids_header_id_state_key_key UNIQUE (header_id, state_key);
+    ADD CONSTRAINT state_cids_header_id_state_path_key UNIQUE (header_id, state_path);


 --
@@ -1024,11 +1299,11 @@ ALTER TABLE ONLY eth.storage_cids


 --
--- Name: storage_cids storage_cids_state_id_storage_key_key; Type: CONSTRAINT; Schema: eth; Owner: -
+-- Name: storage_cids storage_cids_state_id_storage_path_key; Type: CONSTRAINT; Schema: eth; Owner: -
 --

 ALTER TABLE ONLY eth.storage_cids
-    ADD CONSTRAINT storage_cids_state_id_storage_key_key UNIQUE (state_id, storage_key);
+    ADD CONSTRAINT storage_cids_state_id_storage_path_key UNIQUE (state_id, storage_path);


 --
@@ -1321,6 +1596,14 @@ ALTER TABLE ONLY eth.receipt_cids
     ADD CONSTRAINT receipt_cids_tx_id_fkey FOREIGN KEY (tx_id) REFERENCES eth.transaction_cids(id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;


+--
+-- Name: state_accounts state_accounts_state_id_fkey; Type: FK CONSTRAINT; Schema: eth; Owner: -
+--
+
+ALTER TABLE ONLY eth.state_accounts
+    ADD CONSTRAINT state_accounts_state_id_fkey FOREIGN KEY (state_id) REFERENCES eth.state_cids(id) ON DELETE CASCADE;
+
+
 --
 -- Name: state_cids state_cids_header_id_fkey; Type: FK CONSTRAINT; Schema: eth; Owner: -
 --
dockerfiles/migrations/Dockerfile (new file, 40 lines)
@@ -0,0 +1,40 @@
+FROM golang:alpine
+
+RUN apk --update --no-cache add make git g++ linux-headers
+# DEBUG
+RUN apk add busybox-extras
+
+# this is probably a noob move, but I want apk from alpine for the above but need to avoid Go 1.13 below as this error still occurs https://github.com/ipfs/go-ipfs/issues/6603
+FROM golang:1.12.4 as builder
+
+# Get and build vulcanizedb
+ADD . /go/src/github.com/vulcanize/vulcanizedb
+
+# Build migration tool
+RUN go get -u -d github.com/pressly/goose/cmd/goose
+WORKDIR /go/src/github.com/pressly/goose/cmd/goose
+RUN GCO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflags "-static"' -tags='no_mysql no_sqlite' -o goose .
+
+WORKDIR /go/src/github.com/vulcanize/vulcanizedb
+
+# app container
+FROM alpine
+
+ARG USER
+
+RUN adduser -Du 5000 $USER
+WORKDIR /app
+RUN chown $USER /app
+USER $USER
+
+# chown first so dir is writable
+# note: using $USER is merged, but not in the stable release yet
+COPY --chown=5000:5000 --from=builder /go/src/github.com/vulcanize/vulcanizedb/dockerfiles/migrations/startup_script.sh .
+
+
+# keep binaries immutable
+COPY --from=builder /go/src/github.com/pressly/goose/cmd/goose/goose goose
+COPY --from=builder /go/src/github.com/vulcanize/vulcanizedb/db/migrations migrations/vulcanizedb
+# XXX dir is already writeable RUN touch vulcanizedb.log
+
+CMD ["./startup_script.sh"]
dockerfiles/migrations/startup_script.sh (new executable file, 32 lines)
@@ -0,0 +1,32 @@
+#!/bin/sh
+# Runs the db migrations and starts the super node services
+
+# Exit if the variable tests fail
+set -e
+set +x
+
+# Check the database variables are set
+test $DATABASE_HOSTNAME
+test $DATABASE_NAME
+test $DATABASE_PORT
+test $DATABASE_USER
+test $DATABASE_PASSWORD
+set +e
+
+# Construct the connection string for postgres
+VDB_PG_CONNECT=postgresql://$DATABASE_USER:$DATABASE_PASSWORD@$DATABASE_HOSTNAME:$DATABASE_PORT/$DATABASE_NAME?sslmode=disable
+
+# Run the DB migrations
+echo "Connecting with: $VDB_PG_CONNECT"
+echo "Running database migrations"
+./goose -dir migrations/vulcanizedb postgres "$VDB_PG_CONNECT" up
+
+
+# If the db migrations ran without err
+if [[ $? -eq 0 ]]; then
+    echo "Migrations ran successfully"
+    exit 0
+else
+    echo "Could not run migrations. Are the database details correct?"
+    exit 1
+fi
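The script's VDB_PG_CONNECT is a standard libpq-style URL built from the container's DATABASE_* variables. If a Go service needed to share those settings, the same DSN could be assembled like this (a sketch under that assumption, not code from this commit):

    package main

    import (
        "fmt"
        "os"
    )

    // pgConnect mirrors the startup script's VDB_PG_CONNECT construction.
    func pgConnect() string {
        return fmt.Sprintf(
            "postgresql://%s:%s@%s:%s/%s?sslmode=disable",
            os.Getenv("DATABASE_USER"),
            os.Getenv("DATABASE_PASSWORD"),
            os.Getenv("DATABASE_HOSTNAME"),
            os.Getenv("DATABASE_PORT"),
            os.Getenv("DATABASE_NAME"),
        )
    }

    func main() {
        fmt.Println(pgConnect())
    }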
dockerfiles/postgraphile/Dockerfile (new file, 8 lines)
@@ -0,0 +1,8 @@
+FROM node:alpine
+
+RUN npm install -g postgraphile
+RUN npm install -g postgraphile-plugin-connection-filter
+RUN npm install -g @graphile/pg-pubsub
+
+EXPOSE 5000
+ENTRYPOINT ["postgraphile"]
dockerfiles/postgraphile/docker-compose.yml (new file, 60 lines)
@@ -0,0 +1,60 @@
+version: '3.2'
+
+services:
+  db:
+    restart: always
+    image: postgres:10.12-alpine
+    environment:
+      POSTGRES_USER: "vdbm"
+      POSTGRES_DB: "vulcanize_public"
+      POSTGRES_PASSWORD: "password"
+    volumes:
+      - vulcanizedb_db_data:/var/lib/postgresql/data
+    expose:
+      - "5432"
+    ports:
+      - "127.0.0.1:8079:5432"
+
+  migrations:
+    restart: on-failure
+    depends_on:
+      - db
+    build:
+      context: ./../../
+      cache_from:
+        - alpine:latest
+      dockerfile: ./dockerfiles/migrations/Dockerfile
+      args:
+        USER: "vdbm"
+    environment:
+      DATABASE_NAME: "vulcanize_public"
+      DATABASE_HOSTNAME: "db"
+      DATABASE_PORT: 5432
+      DATABASE_USER: "vdbm"
+      DATABASE_PASSWORD: "password"
+
+  graphql:
+    restart: always
+    depends_on:
+      - db
+      - migrations
+    build:
+      context: ./../../
+      cache_from:
+        - node:alpine
+      dockerfile: ./dockerfiles/postgraphile/Dockerfile
+    expose:
+      - "5000"
+    ports:
+      - "127.0.0.1:5000:5000"
+    command: ["--plugins", "@graphile/pg-pubsub",
+              "--subscriptions",
+              "--simple-subscriptions",
+              "--connection", "postgres://vdbm:password@db:5432/vulcanize_public",
+              "--port", "5000",
+              "-n", "0.0.0.0",
+              "--schema", "public,btc,eth",
+              "--append-plugins", "postgraphile-plugin-connection-filter"]
+
+volumes:
+  vulcanizedb_db_data:
@@ -1,9 +0,0 @@
FROM golang:1.10.3-alpine3.7

RUN apk add --no-cache make gcc musl-dev

ADD . /go/src/github.com/vulcanize/vulcanizedb
WORKDIR /go/src/github.com/vulcanize/vulcanizedb
RUN go build -o /app main.go

ENTRYPOINT ["/app"]
@@ -1,9 +0,0 @@
[database]
name = "vulcanizedb"
hostname = "postgres"
port = 5432
user = "postgres"
password = "postgres"

[client]
ipcPath = "/geth/geth.ipc"
@@ -1,63 +0,0 @@
version: '2.2'

services:

  vulcanizedb:
    build:
      context: ./../../
      dockerfile: dockerfiles/rinkeby/Dockerfile
    container_name: rinkeby_vulcanizedb
    command: "sync --starting-block-number 0 --config /config.toml"
    volumes:
      - "./config.toml:/config.toml"
      - "vulcanizedb_geth_data:/geth"
    networks:
      vulcanizedb_net:

  migrations:
    image: migrate/migrate:v3.3.0
    container_name: rinkeby_vulcanizedb_migrations
    depends_on:
      postgres:
        condition: service_healthy
    command: -database postgresql://postgres:postgres@postgres:5432/vulcanizedb?sslmode=disable -path /migrations up
    volumes:
      - ./../../db/migrations:/migrations
    networks:
      vulcanizedb_net:

  postgres:
    image: postgres:9.6.5-alpine
    container_name: rinkeby_vulcanizedb_postgres
    environment:
      POSTGRES_USER: postgres
      POSTGRES_DB: vulcanizedb
      POSTGRES_PASSWORD: postgres
    volumes:
      - "vulcanizedb_db_data:/var/lib/postgresql/data"
    networks:
      vulcanizedb_net:
    healthcheck:
      test: ["CMD", "pg_isready"]
      interval: 5s
      timeout: 5s
      retries: 30

  geth:
    image: ethereum/client-go:v1.8.11
    container_name: rinkeby_vulcanizedb_geth
    cpus: 0.3
    hostname: eth
    command: '--rinkeby --rpc --rpcaddr="0.0.0.0" --rpcvhosts="geth"'
    volumes:
      - "vulcanizedb_geth_data:/root/.ethereum/rinkeby"
    networks:
      vulcanizedb_net:

volumes:
  vulcanizedb_geth_data:
  vulcanizedb_db_data:

networks:
  vulcanizedb_net:
    driver: bridge
@@ -7,8 +7,6 @@ RUN apk add busybox-extras
 # this is probably a noob move, but I want apk from alpine for the above but need to avoid Go 1.13 below as this error still occurs https://github.com/ipfs/go-ipfs/issues/6603
 FROM golang:1.12.4 as builder
 
-RUN yum install -y libusb1-devel systemd-devel
-
 # Get and build vulcanizedb
 ADD . /go/src/github.com/vulcanize/vulcanizedb
 WORKDIR /go/src/github.com/vulcanize/vulcanizedb
@@ -19,7 +17,7 @@ RUN go get -u -d github.com/ipfs/go-ipfs
 WORKDIR /go/src/github.com/ipfs/go-ipfs
 RUN git remote add vulcanize https://github.com/vulcanize/go-ipfs.git
 RUN git fetch vulcanize
-RUN git checkout -b pg_ipfs v0.4.22-alpha
+RUN git checkout -b pg_ipfs vulcanize/postgres_update
 RUN GO111MODULE=on CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflags "-static"' -o ipfs ./cmd/ipfs
 
 # Build migration tool
@@ -31,32 +29,32 @@ WORKDIR /go/src/github.com/vulcanize/vulcanizedb
 
 # app container
 FROM alpine
-WORKDIR /app
 
 ARG USER
 ARG CONFIG_FILE
 ARG EXPOSE_PORT_1
 ARG EXPOSE_PORT_2
-ARG EXPOSE_PORT_3
-ARG EXPOSE_PORT_4
 
-RUN adduser -D 5000 $USER
+RUN adduser -Du 5000 $USER
+WORKDIR /app
+RUN chown $USER /app
 USER $USER
 
 # chown first so dir is writable
 # note: using $USER is merged, but not in the stable release yet
 COPY --chown=5000:5000 --from=builder /go/src/github.com/vulcanize/vulcanizedb/$CONFIG_FILE config.toml
 COPY --chown=5000:5000 --from=builder /go/src/github.com/vulcanize/vulcanizedb/dockerfiles/super_node/startup_script.sh .
+COPY --chown=5000:5000 --from=builder /go/src/github.com/vulcanize/vulcanizedb/dockerfiles/super_node/entrypoint.sh .
 
 # keep binaries immutable
 COPY --from=builder /go/src/github.com/vulcanize/vulcanizedb/vulcanizedb vulcanizedb
 COPY --from=builder /go/src/github.com/pressly/goose/cmd/goose/goose goose
 COPY --from=builder /go/src/github.com/vulcanize/vulcanizedb/db/migrations migrations/vulcanizedb
+COPY --from=builder /go/src/github.com/vulcanize/vulcanizedb/environments environments
 COPY --from=builder /go/src/github.com/ipfs/go-ipfs/ipfs ipfs
 
 EXPOSE $EXPOSE_PORT_1
 EXPOSE $EXPOSE_PORT_2
-EXPOSE $EXPOSE_PORT_3
-EXPOSE $EXPOSE_PORT_4
 
-CMD ["./startup_script.sh"]
+ENTRYPOINT ["/app/entrypoint.sh"]
91
dockerfiles/super_node/docker-compose.yml
Normal file
@@ -0,0 +1,91 @@
version: '3.2'

services:
  db:
    restart: always
    image: postgres:10.12-alpine
    environment:
      POSTGRES_USER: "vdbm"
      POSTGRES_DB: "vulcanize_public"
      POSTGRES_PASSWORD: "password"
    volumes:
      - vulcanizedb_db_data:/var/lib/postgresql/data
    expose:
      - "5432"
    ports:
      - "127.0.0.1:8079:5432"

  btc:
    depends_on:
      - db
    build:
      context: ./../../
      cache_from:
        - alpine:latest
        - golang:1.12.4
      dockerfile: ./dockerfiles/super_node/Dockerfile
      args:
        USER: "vdbm"
        CONFIG_FILE: ./environments/superNodeBTC.toml
    environment:
      VDB_COMMAND: "superNode"
      IPFS_INIT: "true"
      IPFS_PATH: "/root/.btc/.ipfs"
      DATABASE_NAME: "vulcanize_public"
      DATABASE_HOSTNAME: "db"
      DATABASE_PORT: 5432
      DATABASE_USER: "vdbm"
      DATABASE_PASSWORD: "password"
    ports:
      - "127.0.0.1:8082:8082"
      - "127.0.0.1:8083:8083"

  eth:
    depends_on:
      - db
    build:
      context: ./../../
      cache_from:
        - alpine:latest
        - golang:1.12.4
      dockerfile: ./dockerfiles/super_node/Dockerfile
      args:
        USER: "vdbm"
        CONFIG_FILE: ./environments/superNodeETH.toml
    environment:
      VDB_COMMAND: "superNode"
      IPFS_INIT: "true"
      IPFS_PATH: "/root/.eth/.ipfs"
      DATABASE_NAME: "vulcanize_public"
      DATABASE_HOSTNAME: "db"
      DATABASE_PORT: 5432
      DATABASE_USER: "vdbm"
      DATABASE_PASSWORD: "password"
    ports:
      - "127.0.0.1:8080:8080"
      - "127.0.0.1:8081:8081"

  graphql:
    restart: always
    depends_on:
      - db
    build:
      context: ./../../
      cache_from:
        - node:alpine
      dockerfile: ./dockerfiles/postgraphile/Dockerfile
    expose:
      - "5000"
    ports:
      - "127.0.0.1:5000:5000"
    command: ["--plugins", "@graphile/pg-pubsub",
              "--subscriptions",
              "--simple-subscriptions",
              "--connection", "postgres://vdbm:password@db:5432/vulcanize_public",
              "--port", "5000",
              "-n", "0.0.0.0",
              "--schema", "public,btc,eth",
              "--append-plugins", "postgraphile-plugin-connection-filter"]

volumes:
  vulcanizedb_db_data:
68
dockerfiles/super_node/entrypoint.sh
Executable file
@@ -0,0 +1,68 @@
#!/bin/sh
# Runs the db migrations and starts the super node services

# Exit if the variable tests fail
set -e
set +x

# Check the database variables are set
# XXX set defaults, don't silently fail
#test $DATABASE_HOSTNAME
#test $DATABASE_NAME
#test $DATABASE_PORT
#test $DATABASE_USER
#test $DATABASE_PASSWORD
#test $IPFS_INIT
#test $IPFS_PATH
VDB_COMMAND=${VDB_COMMAND:-superNode}
set +e

# Construct the connection string for postgres
VDB_PG_CONNECT=postgresql://$DATABASE_USER:$DATABASE_PASSWORD@$DATABASE_HOSTNAME:$DATABASE_PORT/$DATABASE_NAME?sslmode=disable

# Run the DB migrations
echo "Connecting with: $VDB_PG_CONNECT"
echo "Running database migrations"
./goose -dir migrations/vulcanizedb postgres "$VDB_PG_CONNECT" up
rv=$?

if [ $rv != 0 ]; then
    echo "Could not run migrations. Are the database details correct?"
    exit 1
fi

# Export our database variables so that the IPFS Postgres plugin can use them
export IPFS_PGHOST=$DATABASE_HOSTNAME
export IPFS_PGUSER=$DATABASE_USER
export IPFS_PGDATABASE=$DATABASE_NAME
export IPFS_PGPORT=$DATABASE_PORT
export IPFS_PGPASSWORD=$DATABASE_PASSWORD

if [ ! -d "$HOME/.ipfs" ]; then
    # initialize PG-IPFS
    echo "Initializing Postgres-IPFS profile"
    ./ipfs init --profile=postgresds

    rv=$?
    if [ $rv != 0 ]; then
        echo "Could not initialize ipfs"
        exit 1
    fi
fi

echo "Beginning the vulcanizedb process"
VDB_CONFIG_FILE=${VDB_CONFIG_FILE:-config.toml}
DEFAULT_OPTIONS="--config=$VDB_CONFIG_FILE"
VDB_FULL_CL=${VDB_FULL_CL:-$VDB_COMMAND $DEFAULT_OPTIONS}
echo running: ./vulcanizedb $VDB_FULL_CL $@

# XXX need to lose the env vars
./vulcanizedb $@
rv=$?

if [ $rv != 0 ]; then
    echo "VulcanizeDB startup failed"
    exit 1
fi
@@ -3,24 +3,28 @@
 
 # Exit if the variable tests fail
 set -e
+set +x
 
 # Check the database variables are set
-test $VDB_PG_NAME
-test $VDB_PG_HOSTNAME
-test $VDB_PG_PORT
-test $VDB_PG_USER
+test $DATABASE_HOSTNAME
+test $DATABASE_NAME
+test $DATABASE_PORT
+test $DATABASE_USER
+test $DATABASE_PASSWORD
 test $IPFS_INIT
+test $IPFS_PATH
+test $VDB_COMMAND
 set +e
 
 # Export our database variables so that the IPFS Postgres plugin can use them
-export IPFS_PGHOST=$VDB_PG_HOSTNAME
-export IPFS_PGUSER=$VDB_PG_USER
-export IPFS_PGDATABASE=$VDB_PG_NAME
-export IPFS_PGPORT=$VDB_PG_PORT
-export IPFS_PGPASSWORD=$VDB_PG_PASSWORD
+export IPFS_PGHOST=$DATABASE_HOSTNAME
+export IPFS_PGUSER=$DATABASE_USER
+export IPFS_PGDATABASE=$DATABASE_NAME
+export IPFS_PGPORT=$DATABASE_PORT
+export IPFS_PGPASSWORD=$DATABASE_PASSWORD
 
 # Construct the connection string for postgres
-VDB_PG_CONNECT=postgresql://$VDB_PG_USER:$VDB_PG_PASSWORD@$VDB_PG_HOSTNAME:$VDB_PG_PORT/$VDB_PG_NAME?sslmode=disable
+VDB_PG_CONNECT=postgresql://$DATABASE_USER:$DATABASE_PASSWORD@$DATABASE_HOSTNAME:$DATABASE_PORT/$DATABASE_NAME?sslmode=disable
 
 # Run the DB migrations
 echo "Connecting with: $VDB_PG_CONNECT"
@@ -40,28 +44,22 @@ if [[ $? -eq 0 ]]; then
 	fi
 else
 	echo "Could not run migrations. Are the database details correct?"
-	exit
+	exit 1
 fi
 
 # If IPFS initialization was successful
 if [[ $? -eq 0 ]]; then
-	# Wait until block synchronisation has begun
-	echo "Waiting for block synchronization to begin"
-	( tail -f -n0 log.txt & ) | grep -q "Block synchronisation started" # this blocks til we see "Block synchronisation started"
-	# And then spin up the syncPublishScreenAndServe Vulcanizedb service
-	echo "Beginning the syncPublishScreenAndServe vulcanizedb process"
-	./vulcanizedb superNode --config=config.toml 2>&1 | tee -a log.txt &
+	echo "Running the VulcanizeDB process"
+	./vulcanizedb ${VDB_COMMAND} --config=config.toml
 else
-	echo "Could not initialize state-diffing Geth."
-	exit
+	echo "Could not initialize IPFS."
+	exit 1
 fi
 
-# If Vulcanizedb startup was successful
+# If VulcanizeDB process was successful
 if [ $? -eq 0 ]; then
-	echo "Seed node successfully booted"
+	echo "VulcanizeDB process ran successfully"
 else
-	echo "Could not start vulcanizedb syncPublishScreenAndServe process. Is the config file correct?"
-	exit
+	echo "Could not start VulcanizeDB process. Is the config file correct?"
+	exit 1
 fi
-
-wait
16
documentation/super_node/architecture.md
Normal file
@@ -0,0 +1,16 @@
These are the components of a VulcanizeDB Watcher:
* Data Fetcher/Streamer sources:
    * go-ethereum
    * bitcoind
    * btcd
    * IPFS
* Transformers contain:
    * converter
    * publisher
    * indexer
* Endpoints contain:
    * api
    * backend
    * filterer
    * retriever
    * ipld_server
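The list above maps naturally onto small interfaces. A rough Go illustration of how the pieces compose; every name below is an assumption made for the sketch, not the package's actual API:

```go
package architecture

// Illustrative shapes for the watcher components listed above.

// RawPayload and Model stand in for chain-specific data types.
type RawPayload []byte
type Model struct{ Data []byte }

// Streamer sources raw chain data (go-ethereum, bitcoind, btcd).
type Streamer interface {
	Stream(out chan<- RawPayload) error
}

// A transformer is a converter, a publisher, and an indexer:
// convert raw payloads, publish the results to IPFS, then
// record the returned CIDs in Postgres.
type Converter interface {
	Convert(payload RawPayload) (Model, error)
}

type Publisher interface {
	Publish(m Model) (cid string, err error)
}

type Indexer interface {
	Index(cid string, m Model) error
}
```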
@@ -17,7 +17,7 @@ The config for `streamEthSubscribe` has a set of parameters to fill the [EthSubs
 ```toml
 [superNode]
 [superNode.ethSubscription]
-historicalData = true
+historicalData = false
 historicalDataOnly = false
 startingBlock = 0
 endingBlock = 0
@@ -27,26 +27,18 @@ The config for `streamEthSubscribe` has a set of parameters to fill the [EthSubs
 uncles = false
 [superNode.ethSubscription.txFilter]
 off = false
-src = [
-    "0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe",
-]
-dst = [
-    "0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe",
-]
+src = []
+dst = []
 [superNode.ethSubscription.receiptFilter]
 off = false
 contracts = []
-topics = [
-    [
-        "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef",
-        "0x930a61a57a70a73c2a503615b87e2e54fe5b9cdeacda518270b852296ab1a377"
-    ]
-]
+topic0s = []
+topic1s = []
+topic2s = []
+topic3s = []
 [superNode.ethSubscription.stateFilter]
 off = false
-addresses = [
-    "0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe"
-]
+addresses = []
 intermediateNodes = false
 [superNode.ethSubscription.storageFilter]
 off = true
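For context on the receipt filter shape: topic0 is the keccak256 hash of the canonical event signature, so the first hash in the removed `topics` example is the ERC-20 Transfer topic. It can be recomputed with go-ethereum (a small, purely illustrative sketch):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// keccak256("Transfer(address,address,uint256)") is the ERC-20
	// Transfer topic0 shown in the removed example:
	// 0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef
	fmt.Println(crypto.Keccak256Hash([]byte("Transfer(address,address,uint256)")).Hex())
}
```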
@@ -1,59 +0,0 @@
[superNode]
chains = ["ethereum", "bitcoin"]
ipfsPath = "/root/.ipfs"

[superNode.ethereum.database]
name = "vulcanize_public"
hostname = "localhost"
port = 5432
user = "ec2-user"

[superNode.ethereum.sync]
on = true
wsPath = "ws://127.0.0.1:8546"
workers = 1

[superNode.ethereum.server]
on = true
ipcPath = "/root/.vulcanize/eth/vulcanize.ipc"
wsPath = "127.0.0.1:8080"
httpPath = "127.0.0.1:8081"

[superNode.ethereum.backFill]
on = true
httpPath = "http://127.0.0.1:8545"
frequency = 15
batchSize = 50

[superNode.bitcoin.database]
name = "vulcanize_public"
hostname = "localhost"
port = 5432
user = "ec2-user"

[superNode.bitcoin.sync]
on = true
wsPath = "127.0.0.1:8332"
workers = 1
pass = "password"
user = "username"

[superNode.bitcoin.server]
on = true
ipcPath = "/root/.vulcanize/btc/vulcanize.ipc"
wsPath = "127.0.0.1:8082"
httpPath = "127.0.0.1:8083"

[superNode.bitcoin.backFill]
on = true
httpPath = "127.0.0.1:8332"
frequency = 15
batchSize = 50
pass = "password"
user = "username"

[superNode.bitcoin.node]
nodeID = "ocd0"
clientName = "Omnicore"
genesisBlock = "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"
networkID = "0xD9B4BEF9"
41
environments/superNodeBTC.toml
Normal file
@@ -0,0 +1,41 @@
[database]
name = "vulcanize_public" # $DATABASE_NAME
hostname = "localhost" # $DATABASE_HOSTNAME
port = 5432 # $DATABASE_PORT
user = "vdbm" # $DATABASE_USER
password = "" # $DATABASE_PASSWORD

[ipfs]
path = "~/.ipfs" # $IPFS_PATH

[resync]
chain = "bitcoin" # $RESYNC_CHAIN
type = "full" # $RESYNC_TYPE
start = 0 # $RESYNC_START
stop = 0 # $RESYNC_STOP
batchSize = 1 # $RESYNC_BATCH_SIZE
batchNumber = 50 # $RESYNC_BATCH_NUMBER
clearOldCache = false # $RESYNC_CLEAR_OLD_CACHE

[superNode]
chain = "bitcoin" # $SUPERNODE_CHAIN
server = true # $SUPERNODE_SERVER
ipcPath = "~/.vulcanize/vulcanize.ipc" # $SUPERNODE_IPC_PATH
wsPath = "127.0.0.1:8082" # $SUPERNODE_WS_PATH
httpPath = "127.0.0.1:8083" # $SUPERNODE_HTTP_PATH
sync = true # $SUPERNODE_SYNC
workers = 1 # $SUPERNODE_WORKERS
backFill = true # $SUPERNODE_BACKFILL
frequency = 45 # $SUPERNODE_FREQUENCY
batchSize = 1 # $SUPERNODE_BATCH_SIZE
batchNumber = 50 # $SUPERNODE_BATCH_NUMBER

[bitcoin]
wsPath = "127.0.0.1:8332" # $BTC_WS_PATH
httpPath = "127.0.0.1:8332" # $BTC_HTTP_PATH
pass = "password" # $BTC_NODE_PASSWORD
user = "username" # $BTC_NODE_USER
nodeID = "ocd0" # $BTC_NODE_ID
clientName = "Omnicore" # $BTC_CLIENT_NAME
genesisBlock = "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f" # $BTC_GENESIS_BLOCK
networkID = "0xD9B4BEF9" # $BTC_NETWORK_ID
35
environments/superNodeETH.toml
Normal file
@@ -0,0 +1,35 @@
[database]
name = "vulcanize_public" # $DATABASE_NAME
hostname = "localhost" # $DATABASE_HOSTNAME
port = 5432 # $DATABASE_PORT
user = "vdbm" # $DATABASE_USER
password = "" # $DATABASE_PASSWORD

[ipfs]
path = "~/.ipfs" # $IPFS_PATH

[resync]
chain = "ethereum" # $RESYNC_CHAIN
type = "state" # $RESYNC_TYPE
start = 0 # $RESYNC_START
stop = 0 # $RESYNC_STOP
batchSize = 5 # $RESYNC_BATCH_SIZE
batchNumber = 50 # $RESYNC_BATCH_NUMBER
clearOldCache = true # $RESYNC_CLEAR_OLD_CACHE

[superNode]
chain = "ethereum" # $SUPERNODE_CHAIN
server = true # $SUPERNODE_SERVER
ipcPath = "~/.vulcanize/vulcanize.ipc" # $SUPERNODE_IPC_PATH
wsPath = "127.0.0.1:8081" # $SUPERNODE_WS_PATH
httpPath = "127.0.0.1:8082" # $SUPERNODE_HTTP_PATH
sync = true # $SUPERNODE_SYNC
workers = 1 # $SUPERNODE_WORKERS
backFill = true # $SUPERNODE_BACKFILL
frequency = 15 # $SUPERNODE_FREQUENCY
batchSize = 5 # $SUPERNODE_BATCH_SIZE
batchNumber = 50 # $SUPERNODE_BATCH_NUMBER

[ethereum]
wsPath = "127.0.0.1:8546" # $ETH_WS_PATH
httpPath = "127.0.0.1:8545" # $ETH_HTTP_PATH
@@ -1,6 +1,6 @@
 [superNode]
 [superNode.ethSubscription]
-historicalData = true
+historicalData = false
 historicalDataOnly = false
 startingBlock = 0
 endingBlock = 0
@@ -10,26 +10,18 @@
 uncles = false
 [superNode.ethSubscription.txFilter]
 off = false
-src = [
-    "0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe",
-]
-dst = [
-    "0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe",
-]
+src = []
+dst = []
 [superNode.ethSubscription.receiptFilter]
 off = false
 contracts = []
-topics = [
-    [
-        "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef",
-        "0x930a61a57a70a73c2a503615b87e2e54fe5b9cdeacda518270b852296ab1a377"
-    ]
-]
+topic0s = []
+topic1s = []
+topic2s = []
+topic3s = []
 [superNode.ethSubscription.stateFilter]
 off = false
-addresses = [
-    "0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe"
-]
+addresses = []
 intermediateNodes = false
 [superNode.ethSubscription.storageFilter]
 off = true
2
go.mod
@@ -97,4 +97,4 @@ replace github.com/ipfs/go-ipfs v0.4.22 => github.com/vulcanize/go-ipfs v0.4.22-
 
 replace github.com/ipfs/go-ipfs-config v0.0.3 => github.com/vulcanize/go-ipfs-config v0.0.8-alpha
 
-replace github.com/ethereum/go-ethereum v1.9.1 => github.com/vulcanize/go-ethereum v1.5.10-0.20200116224441-2a980ec3dcb8
+replace github.com/ethereum/go-ethereum v1.9.1 => github.com/vulcanize/go-ethereum v1.5.10-0.20200311182536-d07dc803d290
7
go.sum
@@ -37,6 +37,7 @@ github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1:
 github.com/aristanetworks/goarista v0.0.0-20190712234253-ed1100a1c015 h1:7ABPr1+uJdqESAdlVevnc/2FJGiC/K3uMg1JiELeF+0=
 github.com/aristanetworks/goarista v0.0.0-20190712234253-ed1100a1c015/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ=
 github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
 github.com/bren2010/proquint v0.0.0-20160323162903-38337c27106d h1:QgeLLoPD3kRVmeu/1al9iIpIANMi9O1zXFm8BnYGCJg=
@@ -105,7 +106,9 @@ github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUn
 github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
 github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/dlclark/regexp2 v1.2.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
 github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/dop251/goja v0.0.0-20200106141417-aaec0e7bde29/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA=
 github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
 github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
@@ -134,6 +137,7 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9
 github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
 github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
+github.com/go-sourcemap/sourcemap v2.1.2+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
 github.com/go-sql-driver/mysql v1.4.0 h1:7LxgVwFb2hIQtMm87NdgAVfXjnt4OePseqT1tKx+opk=
 github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
 github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
@@ -327,6 +331,7 @@ github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsj
 github.com/jbenet/goprocess v0.1.3 h1:YKyIEECS/XvcfHtBzxtjBBbWK+MbvA6dG8ASiqwvr10=
 github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4=
 github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
 github.com/jmoiron/sqlx v0.0.0-20190426154859-38398a30ed85 h1:+LZtdhpMITOXE+MztQPPcwUl+eqYjwlXXLHrd0yWlxw=
 github.com/jmoiron/sqlx v0.0.0-20190426154859-38398a30ed85/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
@@ -723,6 +728,8 @@ github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljT
 github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
 github.com/vulcanize/go-ethereum v1.5.10-0.20200116224441-2a980ec3dcb8 h1:BHt0OW0rTgndFjSju7brF3dPceXWQuEV0IdtY8BjjT8=
 github.com/vulcanize/go-ethereum v1.5.10-0.20200116224441-2a980ec3dcb8/go.mod h1:a9TqabFudpDu1nucId+k9S8R9whYaHnGBLKFouA5EAo=
+github.com/vulcanize/go-ethereum v1.5.10-0.20200311182536-d07dc803d290 h1:uMWt+x6JhVT7GyL983weZSxv1zDBxvGlI9HNkcTnUeg=
+github.com/vulcanize/go-ethereum v1.5.10-0.20200311182536-d07dc803d290/go.mod h1:7oC0Ni6dosMv5pxMigm6s0hN8g4haJMBnqmmo0D9YfQ=
 github.com/vulcanize/go-ipfs v0.4.22-alpha h1:W+6njT14KWllMhABRFtPndqHw8SHCt5SqD4YX528kxM=
 github.com/vulcanize/go-ipfs v0.4.22-alpha/go.mod h1:uaekWWeoaA0A9Dv1LObOKCSh9kIzTpZ5RbKW4g5CQHE=
 github.com/vulcanize/go-ipfs-config v0.0.8-alpha h1:peaFvbEcPShF6ymOd8flqKkFz4YfcrNr/UOO7FmbWoQ=
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU Affero General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.
 
-package getter_test
+package integration_test
 
 import (
 	"github.com/ethereum/go-ethereum/ethclient"
@@ -65,11 +65,11 @@ func (fetcher GethRPCStorageFetcher) FetchStorageDiffs(out chan<- utils.StorageD
 	accounts := utils.GetAccountsFromDiff(*stateDiff)
 	logrus.Trace(fmt.Sprintf("iterating through %d accounts on stateDiff for block %d", len(accounts), stateDiff.BlockNumber))
 	for _, account := range accounts {
-		logrus.Trace(fmt.Sprintf("iterating through %d Storage values on account with key %s", len(account.Storage), common.BytesToHash(account.Key).Hex()))
+		logrus.Trace(fmt.Sprintf("iterating through %d Storage values on account with key %s", len(account.Storage), common.BytesToHash(account.LeafKey).Hex()))
 		for _, storage := range account.Storage {
 			diff, formatErr := utils.FromGethStateDiff(account, stateDiff, storage)
 			if formatErr != nil {
-				logrus.Error("failed to format utils.StorageDiff from storage with key: ", common.BytesToHash(storage.Key), "from account with key: ", common.BytesToHash(account.Key))
+				logrus.Error("failed to format utils.StorageDiff from storage with key: ", common.BytesToHash(storage.LeafKey), "from account with key: ", common.BytesToHash(account.LeafKey))
 				errs <- formatErr
 				continue
 			}
@@ -127,11 +127,11 @@ func (bf *backFiller) backFillRange(blockHeights []uint64, diffChan chan utils.S
 		}
 		accounts := utils.GetAccountsFromDiff(*stateDiff)
 		for _, account := range accounts {
-			logrus.Trace(fmt.Sprintf("iterating through %d Storage values on account with key %s", len(account.Storage), common.BytesToHash(account.Key).Hex()))
+			logrus.Trace(fmt.Sprintf("iterating through %d Storage values on account with key %s", len(account.Storage), common.BytesToHash(account.LeafKey).Hex()))
 			for _, storage := range account.Storage {
 				diff, formatErr := utils.FromGethStateDiff(account, stateDiff, storage)
 				if formatErr != nil {
-					logrus.Error("failed to format utils.StorageDiff from storage with key: ", common.BytesToHash(storage.Key), "from account with key: ", common.BytesToHash(account.Key))
+					logrus.Error("failed to format utils.StorageDiff from storage with key: ", common.BytesToHash(storage.LeafKey), "from account with key: ", common.BytesToHash(account.LeafKey))
 					errChan <- formatErr
 					continue
 				}
|
@ -59,16 +59,16 @@ func FromParityCsvRow(csvRow []string) (StorageDiffInput, error) {
|
|||||||
|
|
||||||
func FromGethStateDiff(account statediff.AccountDiff, stateDiff *statediff.StateDiff, storage statediff.StorageDiff) (StorageDiffInput, error) {
|
func FromGethStateDiff(account statediff.AccountDiff, stateDiff *statediff.StateDiff, storage statediff.StorageDiff) (StorageDiffInput, error) {
|
||||||
var decodedValue []byte
|
var decodedValue []byte
|
||||||
err := rlp.DecodeBytes(storage.Value, &decodedValue)
|
err := rlp.DecodeBytes(storage.NodeValue, &decodedValue)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return StorageDiffInput{}, err
|
return StorageDiffInput{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return StorageDiffInput{
|
return StorageDiffInput{
|
||||||
HashedAddress: common.BytesToHash(account.Key),
|
HashedAddress: common.BytesToHash(account.LeafKey),
|
||||||
BlockHash: stateDiff.BlockHash,
|
BlockHash: stateDiff.BlockHash,
|
||||||
BlockHeight: int(stateDiff.BlockNumber.Int64()),
|
BlockHeight: int(stateDiff.BlockNumber.Int64()),
|
||||||
StorageKey: common.BytesToHash(storage.Key),
|
StorageKey: common.BytesToHash(storage.LeafKey),
|
||||||
StorageValue: common.BytesToHash(decodedValue),
|
StorageValue: common.BytesToHash(decodedValue),
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
@@ -67,7 +67,7 @@ var _ = Describe("Storage row parsing", func() {
 
 	Describe("FromGethStateDiff", func() {
 		var (
-			accountDiff = statediff.AccountDiff{Key: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0}}
+			accountDiff = statediff.AccountDiff{LeafKey: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0}}
 			stateDiff   = &statediff.StateDiff{
 				BlockNumber: big.NewInt(rand.Int63()),
 				BlockHash:   fakes.FakeHash,
@@ -80,19 +80,20 @@ var _ = Describe("Storage row parsing", func() {
 			Expect(encodeErr).NotTo(HaveOccurred())
 
 			storageDiff := statediff.StorageDiff{
-				Key:   []byte{0, 9, 8, 7, 6, 5, 4, 3, 2, 1},
-				Value: storageValueRlp,
+				LeafKey:   []byte{0, 9, 8, 7, 6, 5, 4, 3, 2, 1},
+				NodeValue: storageValueRlp,
+				NodeType:  statediff.Leaf,
 			}
 
 			result, err := utils.FromGethStateDiff(accountDiff, stateDiff, storageDiff)
 			Expect(err).NotTo(HaveOccurred())
 
-			expectedAddress := common.BytesToHash(accountDiff.Key)
+			expectedAddress := common.BytesToHash(accountDiff.LeafKey)
 			Expect(result.HashedAddress).To(Equal(expectedAddress))
 			Expect(result.BlockHash).To(Equal(fakes.FakeHash))
 			expectedBlockHeight := int(stateDiff.BlockNumber.Int64())
 			Expect(result.BlockHeight).To(Equal(expectedBlockHeight))
-			expectedStorageKey := common.BytesToHash(storageDiff.Key)
+			expectedStorageKey := common.BytesToHash(storageDiff.LeafKey)
 			Expect(result.StorageKey).To(Equal(expectedStorageKey))
 			expectedStorageValue := common.BytesToHash(storageValueBytes)
 			Expect(result.StorageValue).To(Equal(expectedStorageValue))
@@ -104,8 +105,9 @@ var _ = Describe("Storage row parsing", func() {
 			Expect(encodeErr).NotTo(HaveOccurred())
 
 			storageDiff := statediff.StorageDiff{
-				Key:   []byte{0, 9, 8, 7, 6, 5, 4, 3, 2, 1},
-				Value: storageValueRlp,
+				LeafKey:   []byte{0, 9, 8, 7, 6, 5, 4, 3, 2, 1},
+				NodeValue: storageValueRlp,
+				NodeType:  statediff.Leaf,
 			}
 
 			result, err := utils.FromGethStateDiff(accountDiff, stateDiff, storageDiff)
@@ -22,15 +22,9 @@ import (
 
 	"github.com/vulcanize/vulcanizedb/pkg/eth/core"
 	"github.com/vulcanize/vulcanizedb/pkg/super_node"
-	"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
 )
 
-// ISuperNodeStreamer is the interface for streaming SuperNodePayloads from a vulcanizeDB super node
-type ISuperNodeStreamer interface {
-	Stream(payloadChan chan super_node.SubscriptionPayload, params shared.SubscriptionSettings) (*rpc.ClientSubscription, error)
-}
-
-// SuperNodeStreamer is the underlying struct for the ISuperNodeStreamer interface
+// SuperNodeStreamer is the underlying struct for the shared.SuperNodeStreamer interface
 type SuperNodeStreamer struct {
 	Client core.RPCClient
 }
@@ -43,6 +37,6 @@ func NewSuperNodeStreamer(client core.RPCClient) *SuperNodeStreamer {
 }
 
 // Stream is the main loop for subscribing to data from a vulcanizedb super node
-func (sds *SuperNodeStreamer) Stream(payloadChan chan super_node.SubscriptionPayload, params shared.SubscriptionSettings) (*rpc.ClientSubscription, error) {
-	return sds.Client.Subscribe("vdb", payloadChan, "stream", params)
+func (sds *SuperNodeStreamer) Stream(payloadChan chan super_node.SubscriptionPayload, rlpParams []byte) (*rpc.ClientSubscription, error) {
+	return sds.Client.Subscribe("vdb", payloadChan, "stream", rlpParams)
 }
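With this change the streamer no longer depends on the shared settings type; callers RLP-encode their chain-specific parameters first. A minimal sketch of a call site, assuming an already constructed core.RPCClient; the streamer import path below is an assumption, while the NewSuperNodeStreamer and Stream signatures match the diff above:

```go
package main

import (
	"log"

	"github.com/ethereum/go-ethereum/rlp"

	"github.com/vulcanize/vulcanizedb/pkg/eth/core"
	"github.com/vulcanize/vulcanizedb/pkg/super_node"
	// assumed import path for the streamer package shown above
	"github.com/vulcanize/vulcanizedb/libraries/shared/streamer"
)

// subscribe RLP-encodes chain-specific subscription settings and
// opens a payload stream against a super node.
func subscribe(client core.RPCClient, params interface{}) {
	payloads := make(chan super_node.SubscriptionPayload, 8)
	rlpParams, err := rlp.EncodeToBytes(params)
	if err != nil {
		log.Fatal(err)
	}
	sub, err := streamer.NewSuperNodeStreamer(client).Stream(payloads, rlpParams)
	if err != nil {
		log.Fatal(err)
	}
	defer sub.Unsubscribe()
	for payload := range payloads {
		_ = payload // handle decoded payloads here
	}
}
```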
@@ -41,22 +41,24 @@ var (
 	SmallStorageValue       = common.Hex2Bytes("03")
 	SmallStorageValueRlp, _ = rlp.EncodeToBytes(SmallStorageValue)
 	storageWithSmallValue   = []statediff.StorageDiff{{
-		Key:   StorageKey,
-		Value: SmallStorageValueRlp,
+		LeafKey:   StorageKey,
+		NodeValue: SmallStorageValueRlp,
+		NodeType:  statediff.Leaf,
 		Path:  StoragePath,
-		Proof: [][]byte{},
 	}}
 	LargeStorageValue       = common.Hex2Bytes("00191b53778c567b14b50ba0000")
 	LargeStorageValueRlp, _ = rlp.EncodeToBytes(LargeStorageValue)
 	storageWithLargeValue   = []statediff.StorageDiff{{
-		Key:   StorageKey,
-		Value: LargeStorageValueRlp,
+		LeafKey:   StorageKey,
+		NodeValue: LargeStorageValueRlp,
 		Path:  StoragePath,
-		Proof: [][]byte{},
+		NodeType: statediff.Leaf,
 	}}
 	StorageWithBadValue = statediff.StorageDiff{
-		Key:   StorageKey,
-		Value: []byte{0, 1, 2},
+		LeafKey:   StorageKey,
+		NodeValue: []byte{0, 1, 2},
+		NodeType:  statediff.Leaf,
+		Path:      StoragePath,
 		// this storage value will fail to be decoded as an RLP with the following error message:
 		// "input contains more than one value"
 	}
@@ -74,26 +76,26 @@ var (
 	valueBytes, _       = rlp.EncodeToBytes(testAccount)
 	CreatedAccountDiffs = []statediff.AccountDiff{
 		{
-			Key:   ContractLeafKey.Bytes(),
-			Value: valueBytes,
+			LeafKey:   ContractLeafKey.Bytes(),
+			NodeValue: valueBytes,
 			Storage: storageWithSmallValue,
 		},
 	}
 
 	UpdatedAccountDiffs = []statediff.AccountDiff{{
-		Key:   AnotherContractLeafKey.Bytes(),
-		Value: valueBytes,
+		LeafKey:   AnotherContractLeafKey.Bytes(),
+		NodeValue: valueBytes,
 		Storage: storageWithLargeValue,
 	}}
 	UpdatedAccountDiffs2 = []statediff.AccountDiff{{
-		Key:   AnotherContractLeafKey.Bytes(),
-		Value: valueBytes,
+		LeafKey:   AnotherContractLeafKey.Bytes(),
+		NodeValue: valueBytes,
 		Storage: storageWithSmallValue,
 	}}
 
 	DeletedAccountDiffs = []statediff.AccountDiff{{
-		Key:   AnotherContractLeafKey.Bytes(),
-		Value: valueBytes,
+		LeafKey:   AnotherContractLeafKey.Bytes(),
+		NodeValue: valueBytes,
 		Storage: storageWithSmallValue,
 	}}
 
20
main.go
@@ -1,23 +1,31 @@
 package main
 
 import (
-	"github.com/vulcanize/vulcanizedb/cmd"
 	"os"
 
+	"github.com/spf13/viper"
+
+	"github.com/vulcanize/vulcanizedb/cmd"
+
 	"github.com/sirupsen/logrus"
 )
 
 func main() {
-	logrus.SetFormatter(&logrus.JSONFormatter{
-		PrettyPrint: true,
+	logrus.SetFormatter(&logrus.TextFormatter{
+		FullTimestamp: true,
 	})
-	file, err := os.OpenFile("vulcanizedb.log",
-		os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
-	if err == nil {
-		logrus.SetOutput(file)
-	} else {
-		logrus.Info("Failed to log to file, using default stderr")
+	logfile := viper.GetString("logfile")
+	if logfile != "" {
+		file, err := os.OpenFile(logfile,
+			os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
+		if err == nil {
+			logrus.SetOutput(file)
+		} else {
+			logrus.SetOutput(os.Stdout)
+			logrus.Info("Failed to log to file, using default stdout")
+		}
+	} else {
+		logrus.SetOutput(os.Stdout)
 	}
 
 	cmd.Execute()
 }
@@ -16,7 +16,20 @@
 
 package config
 
-import "fmt"
+import (
+	"fmt"
+
+	"github.com/spf13/viper"
+)
+
+// Env variables
+const (
+	DATABASE_NAME     = "DATABASE_NAME"
+	DATABASE_HOSTNAME = "DATABASE_HOSTNAME"
+	DATABASE_PORT     = "DATABASE_PORT"
+	DATABASE_USER     = "DATABASE_USER"
+	DATABASE_PASSWORD = "DATABASE_PASSWORD"
+)
 
 type Database struct {
 	Hostname string
@@ -37,3 +50,16 @@ func DbConnectionString(dbConfig Database) string {
 	}
 	return fmt.Sprintf("postgresql://%s:%d/%s?sslmode=disable", dbConfig.Hostname, dbConfig.Port, dbConfig.Name)
 }
+
+func (d *Database) Init() {
+	viper.BindEnv("database.name", DATABASE_NAME)
+	viper.BindEnv("database.hostname", DATABASE_HOSTNAME)
+	viper.BindEnv("database.port", DATABASE_PORT)
+	viper.BindEnv("database.user", DATABASE_USER)
+	viper.BindEnv("database.password", DATABASE_PASSWORD)
+	d.Name = viper.GetString("database.name")
+	d.Hostname = viper.GetString("database.hostname")
+	d.Port = viper.GetInt("database.port")
+	d.User = viper.GetString("database.user")
+	d.Password = viper.GetString("database.password")
+}
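Init lets the DATABASE_* environment variables override file-based settings via viper. A small usage sketch; the pkg/config import path is an assumption, while Database, Init, and DbConnectionString come from the diff above:

```go
package main

import (
	"fmt"

	"github.com/vulcanize/vulcanizedb/pkg/config"
)

func main() {
	// Reads DATABASE_NAME, DATABASE_HOSTNAME, DATABASE_PORT,
	// DATABASE_USER, and DATABASE_PASSWORD if they are exported.
	var db config.Database
	db.Init()
	fmt.Println(config.DbConnectionString(db))
}
```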
@@ -75,3 +75,43 @@ func staticRewardByBlockNumber(blockNumber int64) *big.Int {
 	}
 	return staticBlockReward
 }
+
+func CalcEthBlockReward(header *types.Header, uncles []*types.Header, txs types.Transactions, receipts types.Receipts) *big.Int {
+	staticBlockReward := staticRewardByBlockNumber(header.Number.Int64())
+	transactionFees := calcEthTransactionFees(txs, receipts)
+	uncleInclusionRewards := calcEthUncleInclusionRewards(header, uncles)
+	tmp := transactionFees.Add(transactionFees, uncleInclusionRewards)
+	return tmp.Add(tmp, staticBlockReward)
+}
+
+func CalcUncleMinerReward(blockNumber, uncleBlockNumber int64) *big.Int {
+	staticBlockReward := staticRewardByBlockNumber(blockNumber)
+	rewardDiv8 := staticBlockReward.Div(staticBlockReward, big.NewInt(8))
+	mainBlock := big.NewInt(blockNumber)
+	uncleBlock := big.NewInt(uncleBlockNumber)
+	uncleBlockPlus8 := uncleBlock.Add(uncleBlock, big.NewInt(8))
+	uncleBlockPlus8MinusMainBlock := uncleBlockPlus8.Sub(uncleBlockPlus8, mainBlock)
+	return rewardDiv8.Mul(rewardDiv8, uncleBlockPlus8MinusMainBlock)
+}
+
+func calcEthTransactionFees(txs types.Transactions, receipts types.Receipts) *big.Int {
+	transactionFees := new(big.Int)
+	for i, transaction := range txs {
+		receipt := receipts[i]
+		gasPrice := big.NewInt(transaction.GasPrice().Int64())
+		gasUsed := big.NewInt(int64(receipt.GasUsed))
+		transactionFee := gasPrice.Mul(gasPrice, gasUsed)
+		transactionFees = transactionFees.Add(transactionFees, transactionFee)
+	}
+	return transactionFees
+}
+
+func calcEthUncleInclusionRewards(header *types.Header, uncles []*types.Header) *big.Int {
+	uncleInclusionRewards := new(big.Int)
+	for range uncles {
+		staticBlockReward := staticRewardByBlockNumber(header.Number.Int64())
+		staticBlockReward.Div(staticBlockReward, big.NewInt(32))
+		uncleInclusionRewards.Add(uncleInclusionRewards, staticBlockReward)
+	}
+	return uncleInclusionRewards
+}
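To sanity-check the arithmetic above: CalcEthBlockReward adds the static reward, the summed gasPrice * gasUsed transaction fees, and 1/32 of the static reward per included uncle, while the uncle miner itself earns (uncleHeight + 8 - blockHeight)/8 of the static reward. Assuming staticRewardByBlockNumber follows the mainnet schedule (2 ETH after Constantinople), an uncle one block behind block 7,500,000 earns 7/8 of 2 ETH. A small in-package example (needs "fmt"; block numbers are illustrative):

```go
func ExampleCalcUncleMinerReward() {
	// static reward 2 ETH = 2e18 wei; distance factor = 7499999 + 8 - 7500000 = 7
	// uncle miner reward = 2e18 / 8 * 7 = 1.75e18 wei
	fmt.Println(CalcUncleMinerReward(7500000, 7499999))
	// Output: 1750000000000000000
}
```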
@@ -83,6 +83,10 @@ func makePropertiesReader(client core.RPCClient) IPropertiesReader {
 }
 
 func getNodeType(client core.RPCClient) core.NodeType {
+	// TODO: fix this
+	// These heuristics for figuring out the node type are not useful:
+	// for example we often port forward remote nodes to localhost,
+	// and geth does not have to expose the admin api...
 	if strings.Contains(client.IpcPath(), "infura") {
 		return core.INFURA
 	}
@@ -18,13 +18,18 @@ package dag_putters
 
 import (
 	"fmt"
+	"strings"
 
-	"github.com/btcsuite/btcd/wire"
+	node "github.com/ipfs/go-ipld-format"
 
 	"github.com/vulcanize/vulcanizedb/pkg/ipfs"
 	"github.com/vulcanize/vulcanizedb/pkg/ipfs/ipld"
 )
 
+var (
+	duplicateKeyErrorString = "pq: duplicate key value violates unique constraint"
+)
+
 type BtcHeaderDagPutter struct {
 	adder *ipfs.IPFS
 }
@@ -33,17 +38,13 @@ func NewBtcHeaderDagPutter(adder *ipfs.IPFS) *BtcHeaderDagPutter {
 	return &BtcHeaderDagPutter{adder: adder}
 }
 
-func (bhdp *BtcHeaderDagPutter) DagPut(raw interface{}) ([]string, error) {
-	header, ok := raw.(*wire.BlockHeader)
+func (bhdp *BtcHeaderDagPutter) DagPut(n node.Node) (string, error) {
+	header, ok := n.(*ipld.BtcHeader)
 	if !ok {
-		return nil, fmt.Errorf("BtcHeaderDagPutter expected input type %T got %T", &wire.BlockHeader{}, raw)
+		return "", fmt.Errorf("BtcHeaderDagPutter expected input type %T got %T", &ipld.BtcHeader{}, n)
 	}
-	node, err := ipld.NewBtcHeader(header)
-	if err != nil {
-		return nil, err
-	}
-	if err := bhdp.adder.Add(node); err != nil {
-		return nil, err
-	}
-	return []string{node.Cid().String()}, nil
+	if err := bhdp.adder.Add(header); err != nil && !strings.Contains(err.Error(), duplicateKeyErrorString) {
+		return "", err
+	}
+	return header.Cid().String(), nil
 }
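The putters now take pre-built IPLD nodes instead of raw chain types, and DagPut treats a Postgres duplicate-key error as success, so re-putting the same block during a resync is idempotent. A sketch of a call site in the same package; it assumes the btcsuite wire import that this diff removes from the file itself, and ipld.NewBtcHeader as shown in the removed code:

```go
// publishHeader builds the header IPLD node and puts it, returning its CID.
func publishHeader(putter *BtcHeaderDagPutter, raw *wire.BlockHeader) (string, error) {
	n, err := ipld.NewBtcHeader(raw)
	if err != nil {
		return "", err
	}
	// DagPut swallows "pq: duplicate key value violates unique constraint"
	return putter.DagPut(n)
}
```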
@@ -18,8 +18,9 @@ package dag_putters
 
 import (
 	"fmt"
+	"strings"
 
-	"github.com/btcsuite/btcutil"
+	node "github.com/ipfs/go-ipld-format"
 
 	"github.com/vulcanize/vulcanizedb/pkg/ipfs"
 	"github.com/vulcanize/vulcanizedb/pkg/ipfs/ipld"
@@ -33,21 +34,13 @@ func NewBtcTxDagPutter(adder *ipfs.IPFS) *BtcTxDagPutter {
 	return &BtcTxDagPutter{adder: adder}
 }
 
-func (etdp *BtcTxDagPutter) DagPut(raw interface{}) ([]string, error) {
-	transactions, ok := raw.([]*btcutil.Tx)
+func (etdp *BtcTxDagPutter) DagPut(n node.Node) (string, error) {
+	transaction, ok := n.(*ipld.BtcTx)
 	if !ok {
-		return nil, fmt.Errorf("BtcTxDagPutter expected input type %T got %T", []*btcutil.Tx{}, raw)
+		return "", fmt.Errorf("BtcTxDagPutter expected input type %T got %T", &ipld.BtcTx{}, n)
 	}
-	cids := make([]string, len(transactions))
-	for i, transaction := range transactions {
-		node, err := ipld.NewBtcTx(transaction.MsgTx())
-		if err != nil {
-			return nil, err
-		}
-		if err := etdp.adder.Add(node); err != nil {
-			return nil, err
-		}
-		cids[i] = node.Cid().String()
-	}
-	return cids, nil
+	if err := etdp.adder.Add(transaction); err != nil && !strings.Contains(err.Error(), duplicateKeyErrorString) {
+		return "", err
+	}
+	return transaction.Cid().String(), nil
 }
46 pkg/ipfs/dag_putters/btc_tx_trie.go Normal file
@ -0,0 +1,46 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package dag_putters

import (
	"fmt"
	"strings"

	node "github.com/ipfs/go-ipld-format"

	"github.com/vulcanize/vulcanizedb/pkg/ipfs"
	"github.com/vulcanize/vulcanizedb/pkg/ipfs/ipld"
)

type BtcTxTrieDagPutter struct {
	adder *ipfs.IPFS
}

func NewBtcTxTrieDagPutter(adder *ipfs.IPFS) *BtcTxTrieDagPutter {
	return &BtcTxTrieDagPutter{adder: adder}
}

func (etdp *BtcTxTrieDagPutter) DagPut(n node.Node) (string, error) {
	txTrieNode, ok := n.(*ipld.BtcTxTrie)
	if !ok {
		return "", fmt.Errorf("BtcTxTrieDagPutter expected input type %T got %T", &ipld.BtcTxTrie{}, n)
	}
	if err := etdp.adder.Add(txTrieNode); err != nil && !strings.Contains(err.Error(), duplicateKeyErrorString) {
		return "", err
	}
	return txTrieNode.Cid().String(), nil
}
@ -18,8 +18,9 @@ package dag_putters
 import (
 	"fmt"
+	"strings"
 
-	"github.com/ethereum/go-ethereum/core/types"
+	node "github.com/ipfs/go-ipld-format"
 
 	"github.com/vulcanize/vulcanizedb/pkg/ipfs"
 	"github.com/vulcanize/vulcanizedb/pkg/ipfs/ipld"
@ -33,17 +34,13 @@ func NewEthBlockHeaderDagPutter(adder *ipfs.IPFS) *EthHeaderDagPutter {
 	return &EthHeaderDagPutter{adder: adder}
 }
 
-func (bhdp *EthHeaderDagPutter) DagPut(raw interface{}) ([]string, error) {
-	header, ok := raw.(*types.Header)
+func (bhdp *EthHeaderDagPutter) DagPut(n node.Node) (string, error) {
+	header, ok := n.(*ipld.EthHeader)
 	if !ok {
-		return nil, fmt.Errorf("EthHeaderDagPutter expected input type %T got %T", &types.Header{}, raw)
+		return "", fmt.Errorf("EthHeaderDagPutter expected input type %T got %T", &ipld.EthHeader{}, n)
 	}
-	node, err := ipld.NewEthHeader(header)
-	if err != nil {
-		return nil, err
-	}
-	if err := bhdp.adder.Add(node); err != nil {
-		return nil, err
-	}
-	return []string{node.Cid().String()}, nil
+	if err := bhdp.adder.Add(header); err != nil && !strings.Contains(err.Error(), duplicateKeyErrorString) {
+		return "", err
+	}
+	return header.Cid().String(), nil
 }
@ -18,8 +18,9 @@ package dag_putters
 import (
 	"fmt"
+	"strings"
 
-	"github.com/ethereum/go-ethereum/core/types"
+	node "github.com/ipfs/go-ipld-format"
 
 	"github.com/vulcanize/vulcanizedb/pkg/ipfs"
 	"github.com/vulcanize/vulcanizedb/pkg/ipfs/ipld"
@ -33,21 +34,13 @@ func NewEthReceiptDagPutter(adder *ipfs.IPFS) *EthReceiptDagPutter {
 	return &EthReceiptDagPutter{adder: adder}
 }
 
-func (erdp *EthReceiptDagPutter) DagPut(raw interface{}) ([]string, error) {
-	receipts, ok := raw.(types.Receipts)
+func (erdp *EthReceiptDagPutter) DagPut(n node.Node) (string, error) {
+	receipt, ok := n.(*ipld.EthReceipt)
 	if !ok {
-		return nil, fmt.Errorf("EthReceiptDagPutter expected input type %T got type %T", types.Receipts{}, raw)
+		return "", fmt.Errorf("EthReceiptDagPutter expected input type %T got type %T", &ipld.EthReceipt{}, n)
 	}
-	cids := make([]string, len(receipts))
-	for i, receipt := range receipts {
-		node, err := ipld.NewReceipt((*types.ReceiptForStorage)(receipt))
-		if err != nil {
-			return nil, err
-		}
-		if err := erdp.adder.Add(node); err != nil {
-			return nil, err
-		}
-		cids[i] = node.Cid().String()
-	}
-	return cids, nil
+	if err := erdp.adder.Add(receipt); err != nil && !strings.Contains(err.Error(), duplicateKeyErrorString) {
+		return "", err
+	}
+	return receipt.Cid().String(), nil
 }
46 pkg/ipfs/dag_putters/eth_receipt_trie.go Normal file
@ -0,0 +1,46 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package dag_putters

import (
	"fmt"
	"strings"

	node "github.com/ipfs/go-ipld-format"

	"github.com/vulcanize/vulcanizedb/pkg/ipfs"
	"github.com/vulcanize/vulcanizedb/pkg/ipfs/ipld"
)

type EthRctTrieDagPutter struct {
	adder *ipfs.IPFS
}

func NewEthRctTrieDagPutter(adder *ipfs.IPFS) *EthRctTrieDagPutter {
	return &EthRctTrieDagPutter{adder: adder}
}

func (etdp *EthRctTrieDagPutter) DagPut(n node.Node) (string, error) {
	rctTrieNode, ok := n.(*ipld.EthRctTrie)
	if !ok {
		return "", fmt.Errorf("EthRctTrieDagPutter expected input type %T got %T", &ipld.EthRctTrie{}, n)
	}
	if err := etdp.adder.Add(rctTrieNode); err != nil && !strings.Contains(err.Error(), duplicateKeyErrorString) {
		return "", err
	}
	return rctTrieNode.Cid().String(), nil
}
@ -18,6 +18,9 @@ package dag_putters
 import (
 	"fmt"
+	"strings"
+
+	node "github.com/ipfs/go-ipld-format"
 
 	"github.com/vulcanize/vulcanizedb/pkg/ipfs"
 	"github.com/vulcanize/vulcanizedb/pkg/ipfs/ipld"
@ -31,17 +34,13 @@ func NewEthStateDagPutter(adder *ipfs.IPFS) *EthStateDagPutter {
 	return &EthStateDagPutter{adder: adder}
 }
 
-func (erdp *EthStateDagPutter) DagPut(raw interface{}) ([]string, error) {
-	stateNodeRLP, ok := raw.([]byte)
+func (erdp *EthStateDagPutter) DagPut(n node.Node) (string, error) {
+	stateNode, ok := n.(*ipld.EthStateTrie)
 	if !ok {
-		return nil, fmt.Errorf("EthStateDagPutter expected input type %T got %T", []byte{}, raw)
+		return "", fmt.Errorf("EthStateDagPutter expected input type %T got %T", &ipld.EthStateTrie{}, n)
 	}
-	node, err := ipld.FromStateTrieRLP(stateNodeRLP)
-	if err != nil {
-		return nil, err
-	}
-	if err := erdp.adder.Add(node); err != nil {
-		return nil, err
-	}
-	return []string{node.Cid().String()}, nil
+	if err := erdp.adder.Add(stateNode); err != nil && !strings.Contains(err.Error(), duplicateKeyErrorString) {
+		return "", err
+	}
+	return stateNode.Cid().String(), nil
 }
@ -18,6 +18,9 @@ package dag_putters
 import (
 	"fmt"
+	"strings"
+
+	node "github.com/ipfs/go-ipld-format"
 
 	"github.com/vulcanize/vulcanizedb/pkg/ipfs"
 	"github.com/vulcanize/vulcanizedb/pkg/ipfs/ipld"
@ -31,17 +34,13 @@ func NewEthStorageDagPutter(adder *ipfs.IPFS) *EthStorageDagPutter {
 	return &EthStorageDagPutter{adder: adder}
 }
 
-func (erdp *EthStorageDagPutter) DagPut(raw interface{}) ([]string, error) {
-	storageNodeRLP, ok := raw.([]byte)
+func (erdp *EthStorageDagPutter) DagPut(n node.Node) (string, error) {
+	storageNode, ok := n.(*ipld.EthStorageTrie)
 	if !ok {
-		return nil, fmt.Errorf("EthStorageDagPutter expected input type %T got %T", []byte{}, raw)
+		return "", fmt.Errorf("EthStorageDagPutter expected input type %T got %T", &ipld.EthStorageTrie{}, n)
 	}
-	node, err := ipld.FromStorageTrieRLP(storageNodeRLP)
-	if err != nil {
-		return nil, err
-	}
-	if err := erdp.adder.Add(node); err != nil {
-		return nil, err
-	}
-	return []string{node.Cid().String()}, nil
+	if err := erdp.adder.Add(storageNode); err != nil && !strings.Contains(err.Error(), duplicateKeyErrorString) {
+		return "", err
+	}
+	return storageNode.Cid().String(), nil
 }
@ -18,8 +18,9 @@ package dag_putters
 import (
 	"fmt"
+	"strings"
 
-	"github.com/ethereum/go-ethereum/core/types"
+	node "github.com/ipfs/go-ipld-format"
 
 	"github.com/vulcanize/vulcanizedb/pkg/ipfs"
 	"github.com/vulcanize/vulcanizedb/pkg/ipfs/ipld"
@ -33,21 +34,13 @@ func NewEthTxsDagPutter(adder *ipfs.IPFS) *EthTxsDagPutter {
 	return &EthTxsDagPutter{adder: adder}
 }
 
-func (etdp *EthTxsDagPutter) DagPut(raw interface{}) ([]string, error) {
-	transactions, ok := raw.(types.Transactions)
+func (etdp *EthTxsDagPutter) DagPut(n node.Node) (string, error) {
+	transaction, ok := n.(*ipld.EthTx)
 	if !ok {
-		return nil, fmt.Errorf("EthTxsDagPutter expected input type %T got %T", types.Transactions{}, raw)
+		return "", fmt.Errorf("EthTxsDagPutter expected input type %T got %T", &ipld.EthTx{}, n)
 	}
-	cids := make([]string, len(transactions))
-	for i, transaction := range transactions {
-		node, err := ipld.NewEthTx(transaction)
-		if err != nil {
-			return nil, err
-		}
-		if err := etdp.adder.Add(node); err != nil {
-			return nil, err
-		}
-		cids[i] = node.Cid().String()
-	}
-	return cids, nil
+	if err := etdp.adder.Add(transaction); err != nil && !strings.Contains(err.Error(), duplicateKeyErrorString) {
+		return "", err
+	}
+	return transaction.Cid().String(), nil
 }
46 pkg/ipfs/dag_putters/eth_tx_trie.go Normal file
@ -0,0 +1,46 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package dag_putters

import (
	"fmt"
	"strings"

	node "github.com/ipfs/go-ipld-format"

	"github.com/vulcanize/vulcanizedb/pkg/ipfs"
	"github.com/vulcanize/vulcanizedb/pkg/ipfs/ipld"
)

type EthTxTrieDagPutter struct {
	adder *ipfs.IPFS
}

func NewEthTxTrieDagPutter(adder *ipfs.IPFS) *EthTxTrieDagPutter {
	return &EthTxTrieDagPutter{adder: adder}
}

func (etdp *EthTxTrieDagPutter) DagPut(n node.Node) (string, error) {
	txTrieNode, ok := n.(*ipld.EthTxTrie)
	if !ok {
		return "", fmt.Errorf("EthTxTrieDagPutter expected input type %T got %T", &ipld.EthTxTrie{}, n)
	}
	if err := etdp.adder.Add(txTrieNode); err != nil && !strings.Contains(err.Error(), duplicateKeyErrorString) {
		return "", err
	}
	return txTrieNode.Cid().String(), nil
}
@ -48,7 +48,7 @@ func NewBtcHeader(header *wire.BlockHeader) (*BtcHeader, error) {
 		return nil, err
 	}
 	rawdata := w.Bytes()
-	c, err := rawdataToCid(MBitcoinHeader, rawdata, mh.DBL_SHA2_256)
+	c, err := RawdataToCid(MBitcoinHeader, rawdata, mh.DBL_SHA2_256)
 	if err != nil {
 		return nil, err
 	}
74 pkg/ipfs/ipld/btc_parser.go Normal file
@ -0,0 +1,74 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package ipld

import (
	"github.com/btcsuite/btcd/wire"
	"github.com/btcsuite/btcutil"
	node "github.com/ipfs/go-ipld-format"
)

// FromHeaderAndTxs takes a block header and txs and processes them
// to return a set of IPLD nodes for further processing.
func FromHeaderAndTxs(header *wire.BlockHeader, txs []*btcutil.Tx) (*BtcHeader, []*BtcTx, []*BtcTxTrie, error) {
	var txNodes []*BtcTx
	for _, tx := range txs {
		txNode, err := NewBtcTx(tx.MsgTx())
		if err != nil {
			return nil, nil, nil, err
		}
		txNodes = append(txNodes, txNode)
	}
	txTrie, err := mkMerkleTree(txNodes)
	if err != nil {
		return nil, nil, nil, err
	}
	headerNode, err := NewBtcHeader(header)
	return headerNode, txNodes, txTrie, err
}

func mkMerkleTree(txs []*BtcTx) ([]*BtcTxTrie, error) {
	layer := make([]node.Node, len(txs))
	for i, tx := range txs {
		layer[i] = tx
	}
	var out []*BtcTxTrie
	var next []node.Node
	for len(layer) > 1 {
		if len(layer)%2 != 0 {
			layer = append(layer, layer[len(layer)-1])
		}
		for i := 0; i < len(layer)/2; i++ {
			var left, right node.Node
			left = layer[i*2]
			right = layer[(i*2)+1]

			t := &BtcTxTrie{
				Left:  &node.Link{Cid: left.Cid()},
				Right: &node.Link{Cid: right.Cid()},
			}

			out = append(out, t)
			next = append(next, t)
		}

		layer = next
		next = nil
	}

	return out, nil
}
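mkMerkleTree pairs nodes level by level and duplicates the last node whenever a level has an odd count, matching Bitcoin's merkle rule. An illustrative walk-through, not part of the commit:

// With three transactions A, B, C the loop duplicates C to even the level:
//
//   level 0: A B C C          (C repeated because the count is odd)
//   level 1: T(AB) T(CC)      -> two BtcTxTrie nodes appended to out
//   level 2: T(T(AB) T(CC))   -> the root node, also appended to out
//
// so mkMerkleTree returns 3 trie nodes for 3 transactions, root last.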
@ -1,3 +1,19 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
 package ipld
 
 import (
@ -7,7 +23,7 @@ import (
 	"strconv"
 
 	"github.com/btcsuite/btcd/wire"
-	cid "github.com/ipfs/go-cid"
+	"github.com/ipfs/go-cid"
 	node "github.com/ipfs/go-ipld-format"
 	mh "github.com/multiformats/go-multihash"
 )
@ -33,7 +49,7 @@ func NewBtcTx(tx *wire.MsgTx) (*BtcTx, error) {
 		return nil, err
 	}
 	rawdata := w.Bytes()
-	c, err := rawdataToCid(MBitcoinTx, rawdata, mh.DBL_SHA2_256)
+	c, err := RawdataToCid(MBitcoinTx, rawdata, mh.DBL_SHA2_256)
 	if err != nil {
 		return nil, err
 	}
110 pkg/ipfs/ipld/btc_tx_trie.go Normal file
@ -0,0 +1,110 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package ipld

import (
	"fmt"

	"github.com/ipfs/go-cid"
	node "github.com/ipfs/go-ipld-format"
	mh "github.com/multiformats/go-multihash"
)

type BtcTxTrie struct {
	Left  *node.Link
	Right *node.Link
}

func (t *BtcTxTrie) BTCSha() []byte {
	return cidToHash(t.Cid())
}

func (t *BtcTxTrie) Cid() cid.Cid {
	h, _ := mh.Sum(t.RawData(), mh.DBL_SHA2_256, -1)
	return cid.NewCidV1(cid.BitcoinTx, h)
}

func (t *BtcTxTrie) Links() []*node.Link {
	return []*node.Link{t.Left, t.Right}
}

func (t *BtcTxTrie) RawData() []byte {
	out := make([]byte, 64)
	lbytes := cidToHash(t.Left.Cid)
	copy(out[:32], lbytes)

	rbytes := cidToHash(t.Right.Cid)
	copy(out[32:], rbytes)

	return out
}

func (t *BtcTxTrie) Loggable() map[string]interface{} {
	return map[string]interface{}{
		"type": "bitcoin_tx_tree",
	}
}

func (t *BtcTxTrie) Resolve(path []string) (interface{}, []string, error) {
	if len(path) == 0 {
		return nil, nil, fmt.Errorf("zero length path")
	}

	switch path[0] {
	case "0":
		return t.Left, path[1:], nil
	case "1":
		return t.Right, path[1:], nil
	default:
		return nil, nil, fmt.Errorf("no such link")
	}
}

func (t *BtcTxTrie) Copy() node.Node {
	nt := *t
	return &nt
}

func (t *BtcTxTrie) ResolveLink(path []string) (*node.Link, []string, error) {
	out, rest, err := t.Resolve(path)
	if err != nil {
		return nil, nil, err
	}

	lnk, ok := out.(*node.Link)
	if ok {
		return lnk, rest, nil
	}

	return nil, nil, fmt.Errorf("path did not lead to link")
}

func (t *BtcTxTrie) Size() (uint64, error) {
	return uint64(len(t.RawData())), nil
}

func (t *BtcTxTrie) Stat() (*node.NodeStat, error) {
	return &node.NodeStat{}, nil
}

func (t *BtcTxTrie) String() string {
	return fmt.Sprintf("[bitcoin transaction tree]")
}

func (t *BtcTxTrie) Tree(p string, depth int) []string {
	return []string{"0", "1"}
}
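A BtcTxTrie node exposes exactly two links, resolvable by the path segments "0" (left) and "1" (right). A hedged usage sketch, assuming txTrie is a *BtcTxTrie already in hand:

// Walk one step down the tree; "0" selects the left link.
lnk, rest, err := txTrie.ResolveLink([]string{"0", "1"})
if err != nil {
	// the path did not lead to a link
}
_ = lnk  // *node.Link carrying the left child's CID
_ = rest // remaining path: ["1"], to be resolved on the child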
@ -20,7 +20,6 @@ import (
 	"encoding/json"
 	"fmt"
 
-	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ipfs/go-cid"
@ -49,7 +48,7 @@ func NewEthHeader(header *types.Header) (*EthHeader, error) {
 	if err != nil {
 		return nil, err
 	}
-	c, err := rawdataToCid(MEthHeader, headerRLP, mh.KECCAK_256)
+	c, err := RawdataToCid(MEthHeader, headerRLP, mh.KECCAK_256)
 	if err != nil {
 		return nil, err
 	}
@ -60,6 +59,24 @@ func NewEthHeader(header *types.Header) (*EthHeader, error) {
 	}, nil
 }
 
+/*
+  OUTPUT
+*/
+
+// DecodeEthHeader takes a cid and its raw binary data
+// from IPFS and returns an EthHeader object for further processing.
+func DecodeEthHeader(c cid.Cid, b []byte) (*EthHeader, error) {
+	var h *types.Header
+	if err := rlp.DecodeBytes(b, h); err != nil {
+		return nil, err
+	}
+	return &EthHeader{
+		Header:  h,
+		cid:     c,
+		rawdata: b,
+	}, nil
+}
+
 /*
   Block INTERFACE
 */
@ -237,38 +254,3 @@ func (b *EthHeader) MarshalJSON() ([]byte, error) {
 	}
 	return json.Marshal(out)
 }
 
-// objJSONBlock defines the output of the JSON RPC API for either
-// "eth_BlockByHash" or "eth_BlockByHeader".
-type objJSONBlock struct {
-	Result objJSONBlockResult `json:"result"`
-}
-
-// objJSONBLockResult is the nested struct that takes
-// the contents of the JSON field "result".
-type objJSONBlockResult struct {
-	types.Header           // Use its fields and unmarshaler
-	*objJSONBlockResultExt // Add these fields to the parsing
-}
-
-// objJSONBLockResultExt facilitates the composition
-// of the field "result", adding to the
-// `types.Header` fields, both ommers (their hashes) and transactions.
-type objJSONBlockResultExt struct {
-	OmmerHashes  []common.Hash        `json:"uncles"`
-	Transactions []*types.Transaction `json:"transactions"`
-}
-
-// UnmarshalJSON overrides the function types.Header.UnmarshalJSON, allowing us
-// to parse the fields of Header, plus ommer hashes and transactions.
-// (yes, ommer hashes. You will need to "eth_getUncleCountByBlockHash" per each ommer)
-func (o *objJSONBlockResult) UnmarshalJSON(input []byte) error {
-	err := o.Header.UnmarshalJSON(input)
-	if err != nil {
-		return err
-	}
-
-	o.objJSONBlockResultExt = &objJSONBlockResultExt{}
-	err = json.Unmarshal(input, o.objJSONBlockResultExt)
-	return err
-}
97 pkg/ipfs/ipld/eth_parser.go Normal file
@ -0,0 +1,97 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package ipld

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
)

// FromBlockAndReceipts takes a block and its receipts and processes them
// to return a set of IPLD nodes for further processing.
func FromBlockAndReceipts(block *types.Block, receipts []*types.Receipt) (*EthHeader, []*EthHeader, []*EthTx, []*EthTxTrie, []*EthReceipt, []*EthRctTrie, error) {
	// Process the header
	headerNode, err := NewEthHeader(block.Header())
	if err != nil {
		return nil, nil, nil, nil, nil, nil, err
	}
	// Process the uncles
	uncleNodes := make([]*EthHeader, len(block.Uncles()))
	for i, uncle := range block.Uncles() {
		uncleNode, err := NewEthHeader(uncle)
		if err != nil {
			return nil, nil, nil, nil, nil, nil, err
		}
		uncleNodes[i] = uncleNode
	}
	// Process the txs
	ethTxNodes, ethTxTrieNodes, err := processTransactions(block.Transactions(),
		block.Header().TxHash[:])
	if err != nil {
		return nil, nil, nil, nil, nil, nil, err
	}
	// Process the receipts
	ethRctNodes, ethRctTrieNodes, err := processReceipts(receipts,
		block.Header().ReceiptHash[:])
	return headerNode, uncleNodes, ethTxNodes, ethTxTrieNodes, ethRctNodes, ethRctTrieNodes, err
}

// processTransactions will take the found transactions in a parsed block body
// to return IPLD node slices for eth-tx and eth-tx-trie
func processTransactions(txs []*types.Transaction, expectedTxRoot []byte) ([]*EthTx, []*EthTxTrie, error) {
	var ethTxNodes []*EthTx
	transactionTrie := newTxTrie()

	for idx, tx := range txs {
		ethTx, err := NewEthTx(tx)
		if err != nil {
			return nil, nil, err
		}
		ethTxNodes = append(ethTxNodes, ethTx)
		transactionTrie.add(idx, ethTx.RawData())
	}

	if !bytes.Equal(transactionTrie.rootHash(), expectedTxRoot) {
		return nil, nil, fmt.Errorf("wrong transaction hash computed")
	}

	return ethTxNodes, transactionTrie.getNodes(), nil
}

// processReceipts will take in receipts
// to return IPLD node slices for eth-rct and eth-rct-trie
func processReceipts(rcts []*types.Receipt, expectedRctRoot []byte) ([]*EthReceipt, []*EthRctTrie, error) {
	var ethRctNodes []*EthReceipt
	receiptTrie := newRctTrie()

	for idx, rct := range rcts {
		ethRct, err := NewReceipt(rct)
		if err != nil {
			return nil, nil, err
		}
		ethRctNodes = append(ethRctNodes, ethRct)
		receiptTrie.add(idx, ethRct.RawData())
	}

	if !bytes.Equal(receiptTrie.rootHash(), expectedRctRoot) {
		return nil, nil, fmt.Errorf("wrong receipt hash computed")
	}

	return ethRctNodes, receiptTrie.getNodes(), nil
}
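A hedged usage sketch of the new parser entry point, assuming block and receipts were fetched from a go-ethereum client; the computed trie roots are checked against the header, so a mismatch surfaces as an error here:

header, uncles, txs, txTries, rcts, rctTries, err := ipld.FromBlockAndReceipts(block, receipts)
if err != nil {
	// e.g. "wrong transaction hash computed" when the tx trie root
	// does not match block.Header().TxHash
}
_, _, _, _, _, _ = header, uncles, txs, txTries, rcts, rctTries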
@ -29,7 +29,7 @@ import (
 )
 
 type EthReceipt struct {
-	*types.ReceiptForStorage
+	*types.Receipt
 
 	rawdata []byte
 	cid     cid.Cid
@ -43,22 +43,40 @@ var _ node.Node = (*EthReceipt)(nil)
 */
 
 // NewReceipt converts a types.ReceiptForStorage to an EthReceipt IPLD node
-func NewReceipt(receipt *types.ReceiptForStorage) (*EthReceipt, error) {
+func NewReceipt(receipt *types.Receipt) (*EthReceipt, error) {
 	receiptRLP, err := rlp.EncodeToBytes(receipt)
 	if err != nil {
 		return nil, err
 	}
-	c, err := rawdataToCid(MEthTxReceipt, receiptRLP, mh.KECCAK_256)
+	c, err := RawdataToCid(MEthTxReceipt, receiptRLP, mh.KECCAK_256)
 	if err != nil {
 		return nil, err
 	}
 	return &EthReceipt{
-		ReceiptForStorage: receipt,
+		Receipt: receipt,
 		cid:     c,
 		rawdata: receiptRLP,
 	}, nil
 }
 
+/*
+  OUTPUT
+*/
+
+// DecodeEthReceipt takes a cid and its raw binary data
+// from IPFS and returns an EthReceipt object for further processing.
+func DecodeEthReceipt(c cid.Cid, b []byte) (*EthReceipt, error) {
+	var r *types.Receipt
+	if err := rlp.DecodeBytes(b, r); err != nil {
+		return nil, err
+	}
+	return &EthReceipt{
+		Receipt: r,
+		cid:     c,
+		rawdata: b,
+	}, nil
+}
+
 /*
   Block INTERFACE
 */
@ -158,7 +176,7 @@ func (r *EthReceipt) Stat() (*node.NodeStat, error) {
 // Size will go away. It is here to comply with the interface.
 func (r *EthReceipt) Size() (uint64, error) {
-	return strconv.ParseUint((*types.Receipt)(r.ReceiptForStorage).Size().String(), 10, 64)
+	return strconv.ParseUint(r.Receipt.Size().String(), 10, 64)
 }
 
 /*
152 pkg/ipfs/ipld/eth_receipt_trie.go Normal file
@ -0,0 +1,152 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package ipld

import (
	"fmt"

	"github.com/ipfs/go-cid"
	node "github.com/ipfs/go-ipld-format"
	"github.com/multiformats/go-multihash"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
)

// EthRctTrie (eth-rct-trie codec 0x94) represents
// a node from the receipt trie in ethereum.
type EthRctTrie struct {
	*TrieNode
}

// Static (compile time) check that EthRctTrie satisfies the node.Node interface.
var _ node.Node = (*EthRctTrie)(nil)

/*
  INPUT
*/

// To create a proper trie of the eth-rct-trie objects, it is required
// to input all receipts belonging to a forest in a single step.
// We are adding the receipts, and creating their trie, at
// block body parsing time.

/*
  OUTPUT
*/

// DecodeEthRctTrie returns an EthRctTrie object from its cid and rawdata.
func DecodeEthRctTrie(c cid.Cid, b []byte) (*EthRctTrie, error) {
	tn, err := decodeTrieNode(c, b, decodeEthRctTrieLeaf)
	if err != nil {
		return nil, err
	}
	return &EthRctTrie{TrieNode: tn}, nil
}

// decodeEthRctTrieLeaf parses an eth-rct-trie leaf
// from decoded RLP elements
func decodeEthRctTrieLeaf(i []interface{}) ([]interface{}, error) {
	var r types.Receipt
	err := rlp.DecodeBytes(i[1].([]byte), &r)
	if err != nil {
		return nil, err
	}
	c, err := RawdataToCid(MEthTxReceipt, i[1].([]byte), multihash.KECCAK_256)
	if err != nil {
		return nil, err
	}
	return []interface{}{
		i[0].([]byte),
		&EthReceipt{
			Receipt: &r,
			cid:     c,
			rawdata: i[1].([]byte),
		},
	}, nil
}

/*
  Block INTERFACE
*/

// RawData returns the binary of the RLP encode of the trie node.
func (t *EthRctTrie) RawData() []byte {
	return t.rawdata
}

// Cid returns the cid of the trie node.
func (t *EthRctTrie) Cid() cid.Cid {
	return t.cid
}

// String is a helper for output
func (t *EthRctTrie) String() string {
	return fmt.Sprintf("<EthereumRctTrie %s>", t.cid)
}

// Loggable returns in a map the type of IPLD Link.
func (t *EthRctTrie) Loggable() map[string]interface{} {
	return map[string]interface{}{
		"type": "eth-rct-trie",
	}
}

/*
  EthRctTrie functions
*/

// rctTrie wraps a localTrie for use on the receipt trie.
type rctTrie struct {
	*localTrie
}

// newRctTrie initializes and returns a rctTrie.
func newRctTrie() *rctTrie {
	return &rctTrie{
		localTrie: newLocalTrie(),
	}
}

// getNodes invokes the localTrie, which computes the root hash of the
// receipt trie and returns its database keys, to return a slice
// of EthRctTrie nodes.
func (rt *rctTrie) getNodes() []*EthRctTrie {
	keys := rt.getKeys()
	var out []*EthRctTrie
	it := rt.trie.NodeIterator([]byte{})
	for it.Next(true) {

	}
	for _, k := range keys {
		rawdata, err := rt.db.Get(k)
		if err != nil {
			panic(err)
		}
		c, err := RawdataToCid(MEthTxReceiptTrie, rawdata, multihash.KECCAK_256)
		if err != nil {
			return nil
		}
		tn := &TrieNode{
			cid:     c,
			rawdata: rawdata,
		}
		out = append(out, &EthRctTrie{TrieNode: tn})
	}

	return out
}
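A hedged sketch of the output side, assuming c and raw were retrieved from a blockstore lookup; DecodeEthRctTrie re-parses the node, and a leaf yields an embedded EthReceipt:

rctTrieNode, err := ipld.DecodeEthRctTrie(c, raw) // c, raw: assumed inputs
if err != nil {
	// raw was not a valid RLP trie node
}
fmt.Println(rctTrieNode.String()) // e.g. <EthereumRctTrie ba...>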
@ -21,14 +21,15 @@ import (
 	"github.com/ipfs/go-cid"
 	node "github.com/ipfs/go-ipld-format"
-	mh "github.com/multiformats/go-multihash"
+	"github.com/multiformats/go-multihash"
+
+	"github.com/ethereum/go-ethereum/rlp"
 )
 
 // EthStateTrie (eth-state-trie, codec 0x96), represents
 // a node from the state trie in ethereum.
 type EthStateTrie struct {
-	cid     cid.Cid
-	rawdata []byte
+	*TrieNode
 }
 
 // Static (compile time) check that EthStateTrie satisfies the node.Node interface.
@ -38,16 +39,51 @@ var _ node.Node = (*EthStateTrie)(nil)
 INPUT
 */
 
-// FromStateTrieRLP takes the RLP bytes of an ethereum
+// FromStateTrieRLP takes the RLP representation of an ethereum
 // state trie node to return it as an IPLD node for further processing.
-func FromStateTrieRLP(stateNodeRLP []byte) (*EthStateTrie, error) {
-	c, err := rawdataToCid(MEthStateTrie, stateNodeRLP, mh.KECCAK_256)
+func FromStateTrieRLP(raw []byte) (*EthStateTrie, error) {
+	c, err := RawdataToCid(MEthStateTrie, raw, multihash.KECCAK_256)
 	if err != nil {
 		return nil, err
 	}
-	return &EthStateTrie{
-		cid:     c,
-		rawdata: stateNodeRLP,
-	}, nil
-}
+	// Let's run the whole mile and process the nodeKind and
+	// its elements, in case somebody would need this function
+	// to parse an RLP element from the filesystem
+	return DecodeEthStateTrie(c, raw)
+}
+
+/*
+  OUTPUT
+*/
+
+// DecodeEthStateTrie returns an EthStateTrie object from its cid and rawdata.
+func DecodeEthStateTrie(c cid.Cid, b []byte) (*EthStateTrie, error) {
+	tn, err := decodeTrieNode(c, b, decodeEthStateTrieLeaf)
+	if err != nil {
+		return nil, err
+	}
+	return &EthStateTrie{TrieNode: tn}, nil
+}
+
+// decodeEthStateTrieLeaf parses an eth-state-trie leaf
+// from decoded RLP elements
+func decodeEthStateTrieLeaf(i []interface{}) ([]interface{}, error) {
+	var account EthAccount
+	err := rlp.DecodeBytes(i[1].([]byte), &account)
+	if err != nil {
+		return nil, err
+	}
+	c, err := RawdataToCid(MEthAccountSnapshot, i[1].([]byte), multihash.KECCAK_256)
+	if err != nil {
+		return nil, err
+	}
+	return []interface{}{
+		i[0].([]byte),
+		&EthAccountSnapshot{
+			EthAccount: &account,
+			cid:        c,
+			rawdata:    i[1].([]byte),
+		},
+	}, nil
+}
@ -70,35 +106,6 @@ func (st *EthStateTrie) String() string {
 	return fmt.Sprintf("<EthereumStateTrie %s>", st.cid)
 }
 
-// Copy will go away. It is here to comply with the Node interface.
-func (*EthStateTrie) Copy() node.Node {
-	panic("implement me")
-}
-
-func (*EthStateTrie) Links() []*node.Link {
-	panic("implement me")
-}
-
-func (*EthStateTrie) Resolve(path []string) (interface{}, []string, error) {
-	panic("implement me")
-}
-
-func (*EthStateTrie) ResolveLink(path []string) (*node.Link, []string, error) {
-	panic("implement me")
-}
-
-func (*EthStateTrie) Size() (uint64, error) {
-	panic("implement me")
-}
-
-func (*EthStateTrie) Stat() (*node.NodeStat, error) {
-	panic("implement me")
-}
-
-func (*EthStateTrie) Tree(path string, depth int) []string {
-	panic("implement me")
-}
-
 // Loggable returns in a map the type of IPLD Link.
 func (st *EthStateTrie) Loggable() map[string]interface{} {
 	return map[string]interface{}{
@ -21,14 +21,13 @@ import (
 	"github.com/ipfs/go-cid"
 	node "github.com/ipfs/go-ipld-format"
-	mh "github.com/multiformats/go-multihash"
+	"github.com/multiformats/go-multihash"
 )
 
 // EthStorageTrie (eth-storage-trie, codec 0x98), represents
 // a node from the storage trie in ethereum.
 type EthStorageTrie struct {
-	cid     cid.Cid
-	rawdata []byte
+	*TrieNode
 }
 
 // Static (compile time) check that EthStorageTrie satisfies the node.Node interface.
@ -38,16 +37,39 @@ var _ node.Node = (*EthStorageTrie)(nil)
 INPUT
 */
 
-// FromStorageTrieRLP takes the RLP bytes of an ethereum
+// FromStorageTrieRLP takes the RLP representation of an ethereum
 // storage trie node to return it as an IPLD node for further processing.
-func FromStorageTrieRLP(storageNodeRLP []byte) (*EthStorageTrie, error) {
-	c, err := rawdataToCid(MEthStorageTrie, storageNodeRLP, mh.KECCAK_256)
+func FromStorageTrieRLP(raw []byte) (*EthStorageTrie, error) {
+	c, err := RawdataToCid(MEthStorageTrie, raw, multihash.KECCAK_256)
 	if err != nil {
 		return nil, err
 	}
-	return &EthStorageTrie{
-		cid:     c,
-		rawdata: storageNodeRLP,
-	}, nil
+
+	// Let's run the whole mile and process the nodeKind and
+	// its elements, in case somebody would need this function
+	// to parse an RLP element from the filesystem
+	return DecodeEthStorageTrie(c, raw)
+}
+
+/*
+  OUTPUT
+*/
+
+// DecodeEthStorageTrie returns an EthStorageTrie object from its cid and rawdata.
+func DecodeEthStorageTrie(c cid.Cid, b []byte) (*EthStorageTrie, error) {
+	tn, err := decodeTrieNode(c, b, decodeEthStorageTrieLeaf)
+	if err != nil {
+		return nil, err
+	}
+	return &EthStorageTrie{TrieNode: tn}, nil
+}
+
+// decodeEthStorageTrieLeaf parses an eth-storage-trie leaf
+// from decoded RLP elements
+func decodeEthStorageTrieLeaf(i []interface{}) ([]interface{}, error) {
+	return []interface{}{
+		i[0].([]byte),
+		i[1].([]byte),
 	}, nil
 }
@ -70,35 +92,6 @@ func (st *EthStorageTrie) String() string {
 	return fmt.Sprintf("<EthereumStorageTrie %s>", st.cid)
 }
 
-// Copy will go away. It is here to comply with the Node interface.
-func (*EthStorageTrie) Copy() node.Node {
-	panic("implement me")
-}
-
-func (*EthStorageTrie) Links() []*node.Link {
-	panic("implement me")
-}
-
-func (*EthStorageTrie) Resolve(path []string) (interface{}, []string, error) {
-	panic("implement me")
-}
-
-func (*EthStorageTrie) ResolveLink(path []string) (*node.Link, []string, error) {
-	panic("implement me")
-}
-
-func (*EthStorageTrie) Size() (uint64, error) {
-	panic("implement me")
-}
-
-func (*EthStorageTrie) Stat() (*node.NodeStat, error) {
-	panic("implement me")
-}
-
-func (*EthStorageTrie) Tree(path string, depth int) []string {
-	panic("implement me")
-}
-
 // Loggable returns in a map the type of IPLD Link.
 func (st *EthStorageTrie) Loggable() map[string]interface{} {
 	return map[string]interface{}{
@ -50,7 +50,7 @@ func NewEthTx(tx *types.Transaction) (*EthTx, error) {
 	if err != nil {
 		return nil, err
 	}
-	c, err := rawdataToCid(MEthTx, txRLP, mh.KECCAK_256)
+	c, err := RawdataToCid(MEthTx, txRLP, mh.KECCAK_256)
 	if err != nil {
 		return nil, err
 	}
152 pkg/ipfs/ipld/eth_tx_trie.go Normal file
@ -0,0 +1,152 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package ipld

import (
	"fmt"

	"github.com/ipfs/go-cid"
	node "github.com/ipfs/go-ipld-format"
	"github.com/multiformats/go-multihash"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
)

// EthTxTrie (eth-tx-trie codec 0x92) represents
// a node from the transaction trie in ethereum.
type EthTxTrie struct {
	*TrieNode
}

// Static (compile time) check that EthTxTrie satisfies the node.Node interface.
var _ node.Node = (*EthTxTrie)(nil)

/*
  INPUT
*/

// To create a proper trie of the eth-tx-trie objects, it is required
// to input all transactions belonging to a forest in a single step.
// We are adding the transactions, and creating their trie, at
// block body parsing time.

/*
  OUTPUT
*/

// DecodeEthTxTrie returns an EthTxTrie object from its cid and rawdata.
func DecodeEthTxTrie(c cid.Cid, b []byte) (*EthTxTrie, error) {
	tn, err := decodeTrieNode(c, b, decodeEthTxTrieLeaf)
	if err != nil {
		return nil, err
	}
	return &EthTxTrie{TrieNode: tn}, nil
}

// decodeEthTxTrieLeaf parses an eth-tx-trie leaf
// from decoded RLP elements
func decodeEthTxTrieLeaf(i []interface{}) ([]interface{}, error) {
	var t types.Transaction
	err := rlp.DecodeBytes(i[1].([]byte), &t)
	if err != nil {
		return nil, err
	}
	c, err := RawdataToCid(MEthTx, i[1].([]byte), multihash.KECCAK_256)
	if err != nil {
		return nil, err
	}
	return []interface{}{
		i[0].([]byte),
		&EthTx{
			Transaction: &t,
			cid:         c,
			rawdata:     i[1].([]byte),
		},
	}, nil
}

/*
  Block INTERFACE
*/

// RawData returns the binary of the RLP encode of the trie node.
func (t *EthTxTrie) RawData() []byte {
	return t.rawdata
}

// Cid returns the cid of the trie node.
func (t *EthTxTrie) Cid() cid.Cid {
	return t.cid
}

// String is a helper for output
func (t *EthTxTrie) String() string {
	return fmt.Sprintf("<EthereumTxTrie %s>", t.cid)
}

// Loggable returns in a map the type of IPLD Link.
func (t *EthTxTrie) Loggable() map[string]interface{} {
	return map[string]interface{}{
		"type": "eth-tx-trie",
	}
}

/*
  EthTxTrie functions
*/

// txTrie wraps a localTrie for use on the transaction trie.
type txTrie struct {
	*localTrie
}

// newTxTrie initializes and returns a txTrie.
func newTxTrie() *txTrie {
	return &txTrie{
		localTrie: newLocalTrie(),
	}
}

// getNodes invokes the localTrie, which computes the root hash of the
// transaction trie and returns its database keys, to return a slice
// of EthTxTrie nodes.
func (tt *txTrie) getNodes() []*EthTxTrie {
	keys := tt.getKeys()
	var out []*EthTxTrie
	it := tt.trie.NodeIterator([]byte{})
	for it.Next(true) {

	}
	for _, k := range keys {
		rawdata, err := tt.db.Get(k)
		if err != nil {
			panic(err)
		}
		c, err := RawdataToCid(MEthTxTrie, rawdata, multihash.KECCAK_256)
		if err != nil {
			return nil
		}
		tn := &TrieNode{
			cid:     c,
			rawdata: rawdata,
		}
		out = append(out, &EthTxTrie{TrieNode: tn})
	}

	return out
}
@ -17,7 +17,13 @@
 package ipld
 
 import (
+	"bytes"
+
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/rawdb"
+	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/rlp"
+	"github.com/ethereum/go-ethereum/trie"
 	"github.com/ipfs/go-cid"
 	mh "github.com/multiformats/go-multihash"
 )
@ -40,9 +46,9 @@ const (
 	MBitcoinTx = 0xb1
 )
 
-// rawdataToCid takes the desired codec and a slice of bytes
+// RawdataToCid takes the desired codec and a slice of bytes
 // and returns the proper cid of the object.
-func rawdataToCid(codec uint64, rawdata []byte, multiHash uint64) (cid.Cid, error) {
+func RawdataToCid(codec uint64, rawdata []byte, multiHash uint64) (cid.Cid, error) {
 	c, err := cid.Prefix{
 		Codec:   codec,
 		Version: 1,
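Renaming rawdataToCid to RawdataToCid exports it, which is how the new trie builders in this commit derive CIDs for their nodes. A hedged sketch of a call, with txRLP standing in for any RLP-encoded payload:

c, err := ipld.RawdataToCid(ipld.MEthTx, txRLP, mh.KECCAK_256) // txRLP: assumed input
if err != nil {
	// the cid.Prefix could not be summed over the payload
}
fmt.Println(c.String())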
@@ -87,3 +93,59 @@ func sha256ToCid(codec uint64, h []byte) cid.Cid {
 
 	return cid.NewCidV1(codec, hash)
 }
+
+// getRLP encodes the given object to RLP returning its bytes.
+func getRLP(object interface{}) []byte {
+	buf := new(bytes.Buffer)
+	if err := rlp.Encode(buf, object); err != nil {
+		panic(err)
+	}
+
+	return buf.Bytes()
+}
+
+// localTrie wraps a go-ethereum trie and its underlying memory db.
+// It contributes to the creation of the trie node objects.
+type localTrie struct {
+	keys [][]byte
+	db   ethdb.Database
+	trie *trie.Trie
+}
+
+// newLocalTrie initializes and returns a localTrie object
+func newLocalTrie() *localTrie {
+	var err error
+	lt := &localTrie{}
+	lt.db = rawdb.NewMemoryDatabase()
+	lt.trie, err = trie.New(common.Hash{}, trie.NewDatabase(lt.db))
+	if err != nil {
+		panic(err)
+	}
+	return lt
+}
+
+// add receives the index of an object and its rawdata value
+// and includes it into the localTrie
+func (lt *localTrie) add(idx int, rawdata []byte) {
+	key, err := rlp.EncodeToBytes(uint(idx))
+	if err != nil {
+		panic(err)
+	}
+	lt.keys = append(lt.keys, key)
+	if err := lt.db.Put(key, rawdata); err != nil {
+		panic(err)
+	}
+	lt.trie.Update(key, rawdata)
+}
+
+// rootHash returns the computed trie root.
+// Useful for sanity checks on parsed data.
+func (lt *localTrie) rootHash() []byte {
+	return lt.trie.Hash().Bytes()
+}
+
+// getKeys returns the stored keys of the memory database
+// of the localTrie for further processing.
+func (lt *localTrie) getKeys() [][]byte {
+	return lt.keys
+}
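Taken together, these helpers give each trie builder the same recipe: RLP-encode each object, insert it into the in-memory trie keyed by its RLP-encoded index, then read the root back out. A minimal sketch of that flow (same package; buildTrieRoot is an illustrative name, not part of this commit):

func buildTrieRoot(objects []interface{}) []byte {
	lt := newLocalTrie()
	for i, obj := range objects {
		// getRLP panics on encoding failure, as defined above
		lt.add(i, getRLP(obj))
	}
	// the root can be checked against a header's transaction or receipt root
	return lt.rootHash()
}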
444
pkg/ipfs/ipld/trie_node.go
Normal file
@@ -0,0 +1,444 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package ipld

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ipfs/go-cid"
	node "github.com/ipfs/go-ipld-format"
)

// TrieNode is the general abstraction for
// ethereum IPLD trie nodes.
type TrieNode struct {
	// leaf, extension or branch
	nodeKind string

	// If leaf or extension: [0] is key, [1] is val.
	// If branch: [0] - [16] are children.
	elements []interface{}

	// IPLD block information
	cid     cid.Cid
	rawdata []byte
}

/*
  OUTPUT
*/

type trieNodeLeafDecoder func([]interface{}) ([]interface{}, error)

// decodeTrieNode returns a TrieNode object from an IPLD block's
// cid and rawdata.
func decodeTrieNode(c cid.Cid, b []byte,
	leafDecoder trieNodeLeafDecoder) (*TrieNode, error) {
	var (
		i, decoded, elements []interface{}
		nodeKind             string
		err                  error
	)

	if err = rlp.DecodeBytes(b, &i); err != nil {
		return nil, err
	}

	codec := c.Type()
	switch len(i) {
	case 2:
		nodeKind, decoded, err = decodeCompactKey(i)
		if err != nil {
			return nil, err
		}

		if nodeKind == "extension" {
			elements, err = parseTrieNodeExtension(decoded, codec)
		}
		if nodeKind == "leaf" {
			elements, err = leafDecoder(decoded)
		}
		if nodeKind != "extension" && nodeKind != "leaf" {
			return nil, fmt.Errorf("unexpected nodeKind returned from decoder")
		}
	case 17:
		nodeKind = "branch"
		elements, err = parseTrieNodeBranch(i, codec)
		if err != nil {
			return nil, err
		}
	default:
		return nil, fmt.Errorf("unknown trie node type")
	}

	return &TrieNode{
		nodeKind: nodeKind,
		elements: elements,
		rawdata:  b,
		cid:      c,
	}, nil
}

// decodeCompactKey takes a compact key, and returns its nodeKind and value.
func decodeCompactKey(i []interface{}) (string, []interface{}, error) {
	first := i[0].([]byte)
	last := i[1].([]byte)

	switch first[0] / 16 {
	case '\x00':
		return "extension", []interface{}{
			nibbleToByte(first)[2:],
			last,
		}, nil
	case '\x01':
		return "extension", []interface{}{
			nibbleToByte(first)[1:],
			last,
		}, nil
	case '\x02':
		return "leaf", []interface{}{
			nibbleToByte(first)[2:],
			last,
		}, nil
	case '\x03':
		return "leaf", []interface{}{
			nibbleToByte(first)[1:],
			last,
		}, nil
	default:
		return "", nil, fmt.Errorf("unknown hex prefix")
	}
}

// parseTrieNodeExtension helper improves readability
func parseTrieNodeExtension(i []interface{}, codec uint64) ([]interface{}, error) {
	return []interface{}{
		i[0].([]byte),
		keccak256ToCid(codec, i[1].([]byte)),
	}, nil
}

// parseTrieNodeBranch helper improves readability
func parseTrieNodeBranch(i []interface{}, codec uint64) ([]interface{}, error) {
	var out []interface{}

	for _, vi := range i {
		v, ok := vi.([]byte)
		// Sometimes this throws "panic: interface conversion: interface {} is []interface {}, not []uint8"
		// Figure out why, and if it is okay to continue
		if !ok {
			continue
		}

		switch len(v) {
		case 0:
			out = append(out, nil)
		case 32:
			out = append(out, keccak256ToCid(codec, v))
		default:
			return nil, fmt.Errorf("unrecognized object: %v", v)
		}
	}

	return out, nil
}

/*
  Node INTERFACE
*/

// Resolve resolves a path through this node, stopping at any link boundary
// and returning the object found as well as the remaining path to traverse
func (t *TrieNode) Resolve(p []string) (interface{}, []string, error) {
	switch t.nodeKind {
	case "extension":
		return t.resolveTrieNodeExtension(p)
	case "leaf":
		return t.resolveTrieNodeLeaf(p)
	case "branch":
		return t.resolveTrieNodeBranch(p)
	default:
		return nil, nil, fmt.Errorf("nodeKind case not implemented")
	}
}

// Tree lists all paths within the object under 'path', and up to the given depth.
// To list the entire object (similar to `find .`) pass "" and -1
func (t *TrieNode) Tree(p string, depth int) []string {
	if p != "" || depth == 0 {
		return nil
	}

	var out []string

	switch t.nodeKind {
	case "extension":
		var val string
		for _, e := range t.elements[0].([]byte) {
			val += fmt.Sprintf("%x", e)
		}
		return []string{val}
	case "branch":
		for i, elem := range t.elements {
			if _, ok := elem.(*cid.Cid); ok {
				out = append(out, fmt.Sprintf("%x", i))
			}
		}
		return out

	default:
		return nil
	}
}

// ResolveLink is a helper function that calls resolve and asserts the
// output is a link
func (t *TrieNode) ResolveLink(p []string) (*node.Link, []string, error) {
	obj, rest, err := t.Resolve(p)
	if err != nil {
		return nil, nil, err
	}

	lnk, ok := obj.(*node.Link)
	if !ok {
		return nil, nil, fmt.Errorf("was not a link")
	}

	return lnk, rest, nil
}

// Copy will go away. It is here to comply with the interface.
func (t *TrieNode) Copy() node.Node {
	panic("dont use this yet")
}

// Links is a helper function that returns all links within this object
func (t *TrieNode) Links() []*node.Link {
	var out []*node.Link

	for _, i := range t.elements {
		c, ok := i.(cid.Cid)
		if ok {
			out = append(out, &node.Link{Cid: c})
		}
	}

	return out
}

// Stat will go away. It is here to comply with the interface.
func (t *TrieNode) Stat() (*node.NodeStat, error) {
	return &node.NodeStat{}, nil
}

// Size will go away. It is here to comply with the interface.
func (t *TrieNode) Size() (uint64, error) {
	return 0, nil
}

/*
  TrieNode functions
*/

// MarshalJSON processes the transaction trie into readable JSON format.
func (t *TrieNode) MarshalJSON() ([]byte, error) {
	var out map[string]interface{}

	switch t.nodeKind {
	case "extension":
		fallthrough
	case "leaf":
		var hexPrefix string
		for _, e := range t.elements[0].([]byte) {
			hexPrefix += fmt.Sprintf("%x", e)
		}

		// if we got a byte we need to do this casting otherwise
		// it will be marshaled to a base64 encoded value
		if _, ok := t.elements[1].([]byte); ok {
			var hexVal string
			for _, e := range t.elements[1].([]byte) {
				hexVal += fmt.Sprintf("%x", e)
			}

			t.elements[1] = hexVal
		}

		out = map[string]interface{}{
			"type":    t.nodeKind,
			hexPrefix: t.elements[1],
		}

	case "branch":
		out = map[string]interface{}{
			"type": "branch",
			"0":    t.elements[0],
			"1":    t.elements[1],
			"2":    t.elements[2],
			"3":    t.elements[3],
			"4":    t.elements[4],
			"5":    t.elements[5],
			"6":    t.elements[6],
			"7":    t.elements[7],
			"8":    t.elements[8],
			"9":    t.elements[9],
			"a":    t.elements[10],
			"b":    t.elements[11],
			"c":    t.elements[12],
			"d":    t.elements[13],
			"e":    t.elements[14],
			"f":    t.elements[15],
		}
	default:
		return nil, fmt.Errorf("nodeKind %s not supported", t.nodeKind)
	}

	return json.Marshal(out)
}

// nibbleToByte expands the nibbles of a byte slice into their own bytes.
func nibbleToByte(k []byte) []byte {
	var out []byte

	for _, b := range k {
		out = append(out, b/16)
		out = append(out, b%16)
	}

	return out
}

// Resolve reading conveniences
func (t *TrieNode) resolveTrieNodeExtension(p []string) (interface{}, []string, error) {
	nibbles := t.elements[0].([]byte)
	idx, rest := shiftFromPath(p, len(nibbles))
	if len(idx) < len(nibbles) {
		return nil, nil, fmt.Errorf("not enough nibbles to traverse this extension")
	}

	for _, i := range idx {
		if getHexIndex(string(i)) == -1 {
			return nil, nil, fmt.Errorf("invalid path element")
		}
	}

	for i, n := range nibbles {
		if string(idx[i]) != fmt.Sprintf("%x", n) {
			return nil, nil, fmt.Errorf("no such link in this extension")
		}
	}

	return &node.Link{Cid: t.elements[1].(cid.Cid)}, rest, nil
}

func (t *TrieNode) resolveTrieNodeLeaf(p []string) (interface{}, []string, error) {
	nibbles := t.elements[0].([]byte)

	if len(nibbles) != 0 {
		idx, rest := shiftFromPath(p, len(nibbles))
		if len(idx) < len(nibbles) {
			return nil, nil, fmt.Errorf("not enough nibbles to traverse this leaf")
		}

		for _, i := range idx {
			if getHexIndex(string(i)) == -1 {
				return nil, nil, fmt.Errorf("invalid path element")
			}
		}

		for i, n := range nibbles {
			if string(idx[i]) != fmt.Sprintf("%x", n) {
				return nil, nil, fmt.Errorf("no such link in this extension")
			}
		}

		p = rest
	}

	link, ok := t.elements[1].(node.Node)
	if !ok {
		return nil, nil, fmt.Errorf("leaf children is not an IPLD node")
	}

	return link.Resolve(p)
}

func (t *TrieNode) resolveTrieNodeBranch(p []string) (interface{}, []string, error) {
	idx, rest := shiftFromPath(p, 1)
	hidx := getHexIndex(idx)
	if hidx == -1 {
		return nil, nil, fmt.Errorf("incorrect path")
	}

	child := t.elements[hidx]
	if child != nil {
		return &node.Link{Cid: child.(cid.Cid)}, rest, nil
	}
	return nil, nil, fmt.Errorf("no such link in this branch")
}

// shiftFromPath extracts from a given path (as a slice of strings)
// the given number of elements as a single string, returning whatever
// it has not taken.
//
// Examples:
// ["0", "a", "something"] and 1 -> "0" and ["a", "something"]
// ["ab", "c", "d", "1"] and 2 -> "ab" and ["c", "d", "1"]
// ["abc", "d", "1"] and 2 -> "ab" and ["c", "d", "1"]
func shiftFromPath(p []string, i int) (string, []string) {
	var (
		out  string
		rest []string
	)

	for _, pe := range p {
		re := ""
		for _, c := range pe {
			if len(out) < i {
				out += string(c)
			} else {
				re += string(c)
			}
		}

		if len(out) == i && re != "" {
			rest = append(rest, re)
		}
	}

	return out, rest
}

// getHexIndex returns to you the integer 0 - 15 equivalent to your
// string character if applicable, or -1 otherwise.
func getHexIndex(s string) int {
	if len(s) != 1 {
		return -1
	}

	c := byte(s[0])
	switch {
	case '0' <= c && c <= '9':
		return int(c - '0')
	case 'a' <= c && c <= 'f':
		return int(c - 'a' + 10)
	}

	return -1
}
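For orientation, Resolve consumes one hex nibble of the path per branch level, while extensions and leaves require the path to spell out their stored nibbles before traversal can continue through the next IPLD link. A minimal sketch of a single resolution step (same package; walkOneStep and its output are illustrative only):

func walkOneStep(tn *TrieNode, path []string) {
	obj, rest, err := tn.Resolve(path)
	if err != nil {
		fmt.Println("no object under this path:", err)
		return
	}
	if lnk, ok := obj.(*node.Link); ok {
		// the linked block must be fetched and decoded before resolving `rest`
		fmt.Printf("follow %s, remaining path %v\n", lnk.Cid, rest)
	}
}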
@@ -19,39 +19,35 @@ package mocks
 import (
 	"errors"
 
+	node "github.com/ipfs/go-ipld-format"
+
 	"github.com/ethereum/go-ethereum/common"
 )
 
 // DagPutter is a mock for testing the ipfs publisher
 type DagPutter struct {
-	CIDsToReturn []string
-	PassedRaw    interface{}
+	PassedNode  node.Node
 	ErrToReturn error
 }
 
 // DagPut returns the pre-loaded CIDs or error
-func (dp *DagPutter) DagPut(raw interface{}) ([]string, error) {
-	dp.PassedRaw = raw
-	return dp.CIDsToReturn, dp.ErrToReturn
+func (dp *DagPutter) DagPut(n node.Node) (string, error) {
+	dp.PassedNode = n
+	return n.Cid().String(), dp.ErrToReturn
 }
 
 // MappedDagPutter is a mock for testing the ipfs publisher
 type MappedDagPutter struct {
-	CIDsToReturn map[common.Hash][]string
-	PassedRaw    interface{}
+	CIDsToReturn map[common.Hash]string
+	PassedNode   node.Node
 	ErrToReturn  error
 }
 
 // DagPut returns the pre-loaded CIDs or error
-func (mdp *MappedDagPutter) DagPut(raw interface{}) ([]string, error) {
-	mdp.PassedRaw = raw
+func (mdp *MappedDagPutter) DagPut(n node.Node) (string, error) {
 	if mdp.CIDsToReturn == nil {
-		return nil, errors.New("mapped dag putter needs to be initialized with a map of cids to return")
+		return "", errors.New("mapped dag putter needs to be initialized with a map of cids to return")
 	}
-	by, ok := raw.([]byte)
-	if !ok {
-		return nil, errors.New("mapped dag putters can only dag put []byte values")
-	}
-	hash := common.BytesToHash(by)
+	hash := common.BytesToHash(n.RawData())
 	return mdp.CIDsToReturn[hash], nil
 }
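Because DagPut now looks the preset CID up by common.BytesToHash(n.RawData()), test fixtures have to be keyed by the hash of the node's raw bytes. A minimal sketch (test-side; exampleMappedDagPut and "mockCID" are illustrative, not part of this commit):

func exampleMappedDagPut(n node.Node) (string, error) {
	mdp := &MappedDagPutter{
		// key the expected CID by the hash of the node's raw data,
		// mirroring the lookup DagPut performs above
		CIDsToReturn: map[common.Hash]string{
			common.BytesToHash(n.RawData()): "mockCID",
		},
	}
	return mdp.DagPut(n)
}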
@@ -19,12 +19,15 @@ package super_node
 import (
 	"context"
 
-	"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
+	"github.com/ethereum/go-ethereum/p2p"
+	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/rpc"
 	log "github.com/sirupsen/logrus"
 
 	"github.com/vulcanize/vulcanizedb/pkg/eth/core"
+	"github.com/vulcanize/vulcanizedb/pkg/super_node/btc"
+	"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
+	"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
 )
 
 // APIName is the namespace used for the state diffing service API
@@ -46,7 +49,24 @@ func NewPublicSuperNodeAPI(superNodeInterface SuperNode) *PublicSuperNodeAPI {
 }
 
 // Stream is the public method to setup a subscription that fires off super node payloads as they are processed
-func (api *PublicSuperNodeAPI) Stream(ctx context.Context, params shared.SubscriptionSettings) (*rpc.Subscription, error) {
+func (api *PublicSuperNodeAPI) Stream(ctx context.Context, rlpParams []byte) (*rpc.Subscription, error) {
+	var params shared.SubscriptionSettings
+	switch api.sn.Chain() {
+	case shared.Ethereum:
+		var ethParams eth.SubscriptionSettings
+		if err := rlp.DecodeBytes(rlpParams, &ethParams); err != nil {
+			return nil, err
+		}
+		params = &ethParams
+	case shared.Bitcoin:
+		var btcParams btc.SubscriptionSettings
+		if err := rlp.DecodeBytes(rlpParams, &btcParams); err != nil {
+			return nil, err
+		}
+		params = &btcParams
+	default:
+		panic("SuperNode is not configured for a specific chain type")
+	}
 	// ensure that the RPC connection supports subscriptions
 	notifier, supported := rpc.NotifierFromContext(ctx)
 	if !supported {
@@ -85,6 +105,41 @@ func (api *PublicSuperNodeAPI) Stream(ctx context.Context, params shared.SubscriptionSettings) (*rpc.Subscription, error) {
 }
 
 // Node is a public rpc method to allow transformers to fetch the node info for the super node
-func (api *PublicSuperNodeAPI) Node() core.Node {
+// NOTE: this is the node info for the node that the super node is syncing from, not the node info for the super node itself
+func (api *PublicSuperNodeAPI) Node() *core.Node {
 	return api.sn.Node()
 }
+
+// Chain returns the chain type that this super node instance supports
+func (api *PublicSuperNodeAPI) Chain() shared.ChainType {
+	return api.sn.Chain()
+}
+
+// InfoAPI is a struct for holding super node meta data
+type InfoAPI struct{}
+
+// NewInfoAPI returns a new InfoAPI
+func NewInfoAPI() *InfoAPI {
+	return &InfoAPI{}
+}
+
+// Modules returns modules supported by this api
+func (iapi *InfoAPI) Modules() map[string]string {
+	return map[string]string{
+		"vdb": "Stream",
+	}
+}
+
+// NodeInfo gathers and returns a collection of metadata for the super node
+func (iapi *InfoAPI) NodeInfo() *p2p.NodeInfo {
+	return &p2p.NodeInfo{
+		// TODO: formalize this
+		ID:   "vulcanizeDB",
+		Name: "superNode",
+	}
+}
+
+// Version returns the version of the super node
+func (iapi *InfoAPI) Version() string {
+	return VersionWithMeta
+}
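With the new signature, subscribers encode their chain-specific settings to RLP before calling Stream through the "vdb" namespace advertised by Modules. A minimal client-side sketch using go-ethereum's rpc package (the endpoint URL and the assumption that the method registers as "stream" are illustrative, not confirmed by this commit):

func subscribeExample(ctx context.Context, params eth.SubscriptionSettings) error {
	client, err := rpc.Dial("ws://localhost:8080") // assumed endpoint
	if err != nil {
		return err
	}
	// params must be RLP-encoded client-side to match Stream's []byte argument
	encoded, err := rlp.EncodeToBytes(params)
	if err != nil {
		return err
	}
	payloadChan := make(chan interface{}, 20)
	_, err = client.Subscribe(ctx, "vdb", payloadChan, "stream", encoded)
	return err
}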
@@ -17,7 +17,7 @@
 package super_node
 
 import (
-	"errors"
+	"fmt"
 	"sync"
 	"sync/atomic"
 	"time"
@@ -30,13 +30,13 @@ import (
 
 const (
 	DefaultMaxBatchSize   uint64 = 100
-	defaultMaxBatchNumber int64  = 10
+	DefaultMaxBatchNumber int64  = 50
 )
 
 // BackFillInterface for filling in gaps in the super node
 type BackFillInterface interface {
 	// Method for the super node to periodically check for and fill in gaps in its data using an archival node
-	FillGaps(wg *sync.WaitGroup)
+	FillGapsInSuperNode(wg *sync.WaitGroup)
 }
 
 // BackFillService for filling in gaps in the super node
@@ -52,17 +52,21 @@ type BackFillService struct {
 	// Interface for fetching payloads over at historical blocks; over http
 	Fetcher shared.PayloadFetcher
 	// Channel for forwarding backfill payloads to the ScreenAndServe process
-	ScreenAndServeChan chan shared.StreamedIPLDs
+	ScreenAndServeChan chan shared.ConvertedData
 	// Check frequency
 	GapCheckFrequency time.Duration
 	// Size of batch fetches
 	BatchSize uint64
+	// Number of goroutines
+	BatchNumber int64
 	// Channel for receiving quit signal
 	QuitChan chan bool
+	// Chain type
+	chain shared.ChainType
 }
 
 // NewBackFillService returns a new BackFillInterface
-func NewBackFillService(settings *shared.SuperNodeConfig, screenAndServeChan chan shared.StreamedIPLDs) (BackFillInterface, error) {
+func NewBackFillService(settings *Config, screenAndServeChan chan shared.ConvertedData) (BackFillInterface, error) {
 	publisher, err := NewIPLDPublisher(settings.Chain, settings.IPFSPath)
 	if err != nil {
 		return nil, err
@@ -87,6 +91,10 @@ func NewBackFillService(settings *shared.SuperNodeConfig, screenAndServeChan chan shared.StreamedIPLDs) (BackFillInterface, error) {
 	if batchSize == 0 {
 		batchSize = DefaultMaxBatchSize
 	}
+	batchNumber := int64(settings.BatchNumber)
+	if batchNumber == 0 {
+		batchNumber = DefaultMaxBatchNumber
+	}
 	return &BackFillService{
 		Indexer:   indexer,
 		Converter: converter,
@@ -95,14 +103,15 @@ func NewBackFillService(settings *shared.SuperNodeConfig, screenAndServeChan chan shared.StreamedIPLDs) (BackFillInterface, error) {
 		Fetcher:            fetcher,
 		GapCheckFrequency:  settings.Frequency,
 		BatchSize:          batchSize,
+		BatchNumber:        int64(batchNumber),
 		ScreenAndServeChan: screenAndServeChan,
 		QuitChan:           settings.Quit,
+		chain:              settings.Chain,
 	}, nil
 }
 
-// FillGaps periodically checks for and fills in gaps in the super node db
-// this requires a core.RpcClient that is pointed at an archival node with the StateDiffAt method exposed
-func (bfs *BackFillService) FillGaps(wg *sync.WaitGroup) {
+// FillGapsInSuperNode periodically checks for and fills in gaps in the super node db
+func (bfs *BackFillService) FillGapsInSuperNode(wg *sync.WaitGroup) {
 	ticker := time.NewTicker(bfs.GapCheckFrequency)
 	wg.Add(1)
 
@@ -110,60 +119,44 @@ func (bfs *BackFillService) FillGaps(wg *sync.WaitGroup) {
 		for {
 			select {
 			case <-bfs.QuitChan:
-				log.Info("quitting FillGaps process")
+				log.Infof("quitting %s FillGapsInSuperNode process", bfs.chain.String())
 				wg.Done()
 				return
 			case <-ticker.C:
-				log.Info("searching for gaps in the super node database")
+				log.Infof("searching for gaps in the %s super node database", bfs.chain.String())
 				startingBlock, err := bfs.Retriever.RetrieveFirstBlockNumber()
 				if err != nil {
-					log.Error(err)
+					log.Errorf("super node db backfill RetrieveFirstBlockNumber error for chain %s: %v", bfs.chain.String(), err)
 					continue
 				}
 				if startingBlock != 0 {
-					log.Info("found gap at the beginning of the sync")
-					bfs.fillGaps(0, uint64(startingBlock-1))
+					log.Infof("found gap at the beginning of the %s sync", bfs.chain.String())
+					if err := bfs.backFill(0, uint64(startingBlock-1)); err != nil {
+						log.Error(err)
+					}
 				}
 				gaps, err := bfs.Retriever.RetrieveGapsInData()
 				if err != nil {
-					log.Error(err)
+					log.Errorf("super node db backfill RetrieveGapsInData error for chain %s: %v", bfs.chain.String(), err)
 					continue
 				}
 				for _, gap := range gaps {
-					if err := bfs.fillGaps(gap.Start, gap.Stop); err != nil {
+					if err := bfs.backFill(gap.Start, gap.Stop); err != nil {
 						log.Error(err)
 					}
 				}
 			}
 		}
 	}()
-	log.Info("fillGaps goroutine successfully spun up")
-}
-
-func (bfs *BackFillService) fillGaps(startingBlock, endingBlock uint64) error {
-	log.Infof("going to fill in gap from %d to %d", startingBlock, endingBlock)
-	errChan := make(chan error)
-	done := make(chan bool)
-	err := bfs.backFill(startingBlock, endingBlock, errChan, done)
-	if err != nil {
-		return err
-	}
-	for {
-		select {
-		case err := <-errChan:
-			log.Error(err)
-		case <-done:
-			log.Infof("finished filling in gap from %d to %d", startingBlock, endingBlock)
-			return nil
-		}
-	}
-}
+	log.Infof("%s fillGaps goroutine successfully spun up", bfs.chain.String())
+}
 
 // backFill fetches, processes, and returns utils.StorageDiffs over a range of blocks
 // It splits a large range up into smaller chunks, batch fetching and processing those chunks concurrently
-func (bfs *BackFillService) backFill(startingBlock, endingBlock uint64, errChan chan error, done chan bool) error {
+func (bfs *BackFillService) backFill(startingBlock, endingBlock uint64) error {
+	log.Infof("filling in %s gap from %d to %d", bfs.chain.String(), startingBlock, endingBlock)
 	if endingBlock < startingBlock {
-		return errors.New("backfill: ending block number needs to be greater than starting block number")
+		return fmt.Errorf("super node %s db backfill: ending block number needs to be greater than starting block number", bfs.chain.String())
 	}
 	//
 	// break the range up into bins of smaller ranges
@@ -174,28 +167,27 @@ func (bfs *BackFillService) backFill(startingBlock, endingBlock uint64, errChan chan error, done chan bool) error {
 	// int64 for atomic incrementing and decrementing to track the number of active processing goroutines we have
 	var activeCount int64
 	// channel for processing goroutines to signal when they are done
-	processingDone := make(chan [2]uint64)
+	processingDone := make(chan bool)
 	forwardDone := make(chan bool)
 
-	// for each block range bin spin up a goroutine to batch fetch and process state diffs for that range
+	// for each block range bin spin up a goroutine to batch fetch and process data for that range
 	go func() {
 		for _, blockHeights := range blockRangeBins {
 			// if we have reached our limit of active goroutines
 			// wait for one to finish before starting the next
-			if atomic.AddInt64(&activeCount, 1) > defaultMaxBatchNumber {
+			if atomic.AddInt64(&activeCount, 1) > bfs.BatchNumber {
 				// this blocks until a process signals it has finished
 				<-forwardDone
 			}
 			go func(blockHeights []uint64) {
 				payloads, err := bfs.Fetcher.FetchAt(blockHeights)
 				if err != nil {
-					errChan <- err
+					log.Errorf("%s super node historical data fetcher error: %s", bfs.chain.String(), err.Error())
 				}
 				for _, payload := range payloads {
 					ipldPayload, err := bfs.Converter.Convert(payload)
 					if err != nil {
-						errChan <- err
-						continue
+						log.Errorf("%s super node historical data converter error: %s", bfs.chain.String(), err.Error())
 					}
 					// If there is a ScreenAndServe process listening, forward payload to it
 					select {
@@ -204,42 +196,36 @@ func (bfs *BackFillService) backFill(startingBlock, endingBlock uint64, errChan chan error, done chan bool) error {
 				}
 				cidPayload, err := bfs.Publisher.Publish(ipldPayload)
 				if err != nil {
-					errChan <- err
-					continue
+					log.Errorf("%s super node historical data publisher error: %s", bfs.chain.String(), err.Error())
 				}
 				if err := bfs.Indexer.Index(cidPayload); err != nil {
-					errChan <- err
+					log.Errorf("%s super node historical data indexer error: %s", bfs.chain.String(), err.Error())
 				}
 			}
 			// when this goroutine is done, send out a signal
-			processingDone <- [2]uint64{blockHeights[0], blockHeights[len(blockHeights)-1]}
+			log.Infof("finished filling in %s gap from %d to %d", bfs.chain.String(), blockHeights[0], blockHeights[len(blockHeights)-1])
+			processingDone <- true
 		}(blockHeights)
 	}
 }()
 
-	// goroutine that listens on the processingDone chan
+	// listen on the processingDone chan
 	// keeps track of the number of processing goroutines that have finished
-	// when they have all finished, sends the final signal out
-	go func() {
+	// when they have all finished, return
 	goroutinesFinished := 0
 	for {
 		select {
-		case doneWithHeights := <-processingDone:
+		case <-processingDone:
 			atomic.AddInt64(&activeCount, -1)
 			select {
 			// if we are waiting for a process to finish, signal that one has
 			case forwardDone <- true:
 			default:
 			}
-			log.Infof("finished filling in gap sub-bin from %d to %d", doneWithHeights[0], doneWithHeights[1])
 			goroutinesFinished++
 			if goroutinesFinished >= len(blockRangeBins) {
-				done <- true
-				return
+				return nil
 			}
 		}
 	}
-	}()
-
-	return nil
 }
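The rewritten backFill caps concurrency without a semaphore type: an atomic counter tracks in-flight workers and a hand-off channel blocks the spawner until some worker signals completion. The pattern in isolation, as a standalone sketch (boundedFanOut and its parameters are illustrative, not part of this commit):

package backfill

import "sync/atomic"

// boundedFanOut spawns one worker per bin but blocks once `limit`
// workers are in flight, resuming as completions are signalled.
func boundedFanOut(bins [][]uint64, limit int64, process func([]uint64)) {
	var active int64
	done := make(chan bool)
	forward := make(chan bool)
	go func() {
		for _, bin := range bins {
			if atomic.AddInt64(&active, 1) > limit {
				<-forward // wait for a worker to finish
			}
			go func(b []uint64) {
				process(b)
				done <- true
			}(bin)
		}
	}()
	for finished := 0; finished < len(bins); finished++ {
		<-done
		atomic.AddInt64(&active, -1)
		select {
		case forward <- true: // wake the spawner if it is waiting
		default:
		}
	}
}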
@ -41,10 +41,10 @@ var _ = Describe("BackFiller", func() {
|
|||||||
ReturnErr: nil,
|
ReturnErr: nil,
|
||||||
}
|
}
|
||||||
mockConverter := &mocks.IterativePayloadConverter{
|
mockConverter := &mocks.IterativePayloadConverter{
|
||||||
ReturnIPLDPayload: []eth.IPLDPayload{mocks.MockIPLDPayload, mocks.MockIPLDPayload},
|
ReturnIPLDPayload: []eth.ConvertedPayload{mocks.MockConvertedPayload, mocks.MockConvertedPayload},
|
||||||
ReturnErr: nil,
|
ReturnErr: nil,
|
||||||
}
|
}
|
||||||
mockRetriever := &mocks2.MockCIDRetriever{
|
mockRetriever := &mocks2.CIDRetriever{
|
||||||
FirstBlockNumberToReturn: 0,
|
FirstBlockNumberToReturn: 0,
|
||||||
GapsToRetrieve: []shared.Gap{
|
GapsToRetrieve: []shared.Gap{
|
||||||
{
|
{
|
||||||
@ -52,7 +52,7 @@ var _ = Describe("BackFiller", func() {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
mockFetcher := &mocks2.IPLDFetcher{
|
mockFetcher := &mocks2.PayloadFetcher{
|
||||||
PayloadsToReturn: map[uint64]shared.RawChainData{
|
PayloadsToReturn: map[uint64]shared.RawChainData{
|
||||||
100: mocks.MockStateDiffPayload,
|
100: mocks.MockStateDiffPayload,
|
||||||
101: mocks.MockStateDiffPayload,
|
101: mocks.MockStateDiffPayload,
|
||||||
@ -67,18 +67,19 @@ var _ = Describe("BackFiller", func() {
|
|||||||
Retriever: mockRetriever,
|
Retriever: mockRetriever,
|
||||||
GapCheckFrequency: time.Second * 2,
|
GapCheckFrequency: time.Second * 2,
|
||||||
BatchSize: super_node.DefaultMaxBatchSize,
|
BatchSize: super_node.DefaultMaxBatchSize,
|
||||||
|
BatchNumber: super_node.DefaultMaxBatchNumber,
|
||||||
QuitChan: quitChan,
|
QuitChan: quitChan,
|
||||||
}
|
}
|
||||||
wg := &sync.WaitGroup{}
|
wg := &sync.WaitGroup{}
|
||||||
backfiller.FillGaps(wg)
|
backfiller.FillGapsInSuperNode(wg)
|
||||||
time.Sleep(time.Second * 3)
|
time.Sleep(time.Second * 3)
|
||||||
quitChan <- true
|
quitChan <- true
|
||||||
Expect(len(mockCidRepo.PassedCIDPayload)).To(Equal(2))
|
Expect(len(mockCidRepo.PassedCIDPayload)).To(Equal(2))
|
||||||
Expect(mockCidRepo.PassedCIDPayload[0]).To(Equal(mocks.MockCIDPayload))
|
Expect(mockCidRepo.PassedCIDPayload[0]).To(Equal(mocks.MockCIDPayload))
|
||||||
Expect(mockCidRepo.PassedCIDPayload[1]).To(Equal(mocks.MockCIDPayload))
|
Expect(mockCidRepo.PassedCIDPayload[1]).To(Equal(mocks.MockCIDPayload))
|
||||||
Expect(len(mockPublisher.PassedIPLDPayload)).To(Equal(2))
|
Expect(len(mockPublisher.PassedIPLDPayload)).To(Equal(2))
|
||||||
Expect(mockPublisher.PassedIPLDPayload[0]).To(Equal(mocks.MockIPLDPayload))
|
Expect(mockPublisher.PassedIPLDPayload[0]).To(Equal(mocks.MockConvertedPayload))
|
||||||
Expect(mockPublisher.PassedIPLDPayload[1]).To(Equal(mocks.MockIPLDPayload))
|
Expect(mockPublisher.PassedIPLDPayload[1]).To(Equal(mocks.MockConvertedPayload))
|
||||||
Expect(len(mockConverter.PassedStatediffPayload)).To(Equal(2))
|
Expect(len(mockConverter.PassedStatediffPayload)).To(Equal(2))
|
||||||
Expect(mockConverter.PassedStatediffPayload[0]).To(Equal(mocks.MockStateDiffPayload))
|
Expect(mockConverter.PassedStatediffPayload[0]).To(Equal(mocks.MockStateDiffPayload))
|
||||||
Expect(mockConverter.PassedStatediffPayload[1]).To(Equal(mocks.MockStateDiffPayload))
|
Expect(mockConverter.PassedStatediffPayload[1]).To(Equal(mocks.MockStateDiffPayload))
|
||||||
@ -96,10 +97,10 @@ var _ = Describe("BackFiller", func() {
|
|||||||
ReturnErr: nil,
|
ReturnErr: nil,
|
||||||
}
|
}
|
||||||
mockConverter := &mocks.IterativePayloadConverter{
|
mockConverter := &mocks.IterativePayloadConverter{
|
||||||
ReturnIPLDPayload: []eth.IPLDPayload{mocks.MockIPLDPayload},
|
ReturnIPLDPayload: []eth.ConvertedPayload{mocks.MockConvertedPayload},
|
||||||
ReturnErr: nil,
|
ReturnErr: nil,
|
||||||
}
|
}
|
||||||
mockRetriever := &mocks2.MockCIDRetriever{
|
mockRetriever := &mocks2.CIDRetriever{
|
||||||
FirstBlockNumberToReturn: 0,
|
FirstBlockNumberToReturn: 0,
|
||||||
GapsToRetrieve: []shared.Gap{
|
GapsToRetrieve: []shared.Gap{
|
||||||
{
|
{
|
||||||
@ -107,7 +108,7 @@ var _ = Describe("BackFiller", func() {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
mockFetcher := &mocks2.IPLDFetcher{
|
mockFetcher := &mocks2.PayloadFetcher{
|
||||||
PayloadsToReturn: map[uint64]shared.RawChainData{
|
PayloadsToReturn: map[uint64]shared.RawChainData{
|
||||||
100: mocks.MockStateDiffPayload,
|
100: mocks.MockStateDiffPayload,
|
||||||
},
|
},
|
||||||
@ -121,16 +122,17 @@ var _ = Describe("BackFiller", func() {
|
|||||||
Retriever: mockRetriever,
|
Retriever: mockRetriever,
|
||||||
GapCheckFrequency: time.Second * 2,
|
GapCheckFrequency: time.Second * 2,
|
||||||
BatchSize: super_node.DefaultMaxBatchSize,
|
BatchSize: super_node.DefaultMaxBatchSize,
|
||||||
|
BatchNumber: super_node.DefaultMaxBatchNumber,
|
||||||
QuitChan: quitChan,
|
QuitChan: quitChan,
|
||||||
}
|
}
|
||||||
wg := &sync.WaitGroup{}
|
wg := &sync.WaitGroup{}
|
||||||
backfiller.FillGaps(wg)
|
backfiller.FillGapsInSuperNode(wg)
|
||||||
time.Sleep(time.Second * 3)
|
time.Sleep(time.Second * 3)
|
||||||
quitChan <- true
|
quitChan <- true
|
||||||
Expect(len(mockCidRepo.PassedCIDPayload)).To(Equal(1))
|
Expect(len(mockCidRepo.PassedCIDPayload)).To(Equal(1))
|
||||||
Expect(mockCidRepo.PassedCIDPayload[0]).To(Equal(mocks.MockCIDPayload))
|
Expect(mockCidRepo.PassedCIDPayload[0]).To(Equal(mocks.MockCIDPayload))
|
||||||
Expect(len(mockPublisher.PassedIPLDPayload)).To(Equal(1))
|
Expect(len(mockPublisher.PassedIPLDPayload)).To(Equal(1))
|
||||||
Expect(mockPublisher.PassedIPLDPayload[0]).To(Equal(mocks.MockIPLDPayload))
|
Expect(mockPublisher.PassedIPLDPayload[0]).To(Equal(mocks.MockConvertedPayload))
|
||||||
Expect(len(mockConverter.PassedStatediffPayload)).To(Equal(1))
|
Expect(len(mockConverter.PassedStatediffPayload)).To(Equal(1))
|
||||||
Expect(mockConverter.PassedStatediffPayload[0]).To(Equal(mocks.MockStateDiffPayload))
|
Expect(mockConverter.PassedStatediffPayload[0]).To(Equal(mocks.MockStateDiffPayload))
|
||||||
Expect(mockRetriever.CalledTimes).To(Equal(1))
|
Expect(mockRetriever.CalledTimes).To(Equal(1))
|
||||||
@ -147,14 +149,14 @@ var _ = Describe("BackFiller", func() {
|
|||||||
ReturnErr: nil,
|
ReturnErr: nil,
|
||||||
}
|
}
|
||||||
mockConverter := &mocks.IterativePayloadConverter{
|
mockConverter := &mocks.IterativePayloadConverter{
|
||||||
ReturnIPLDPayload: []eth.IPLDPayload{mocks.MockIPLDPayload, mocks.MockIPLDPayload},
|
ReturnIPLDPayload: []eth.ConvertedPayload{mocks.MockConvertedPayload, mocks.MockConvertedPayload},
|
||||||
ReturnErr: nil,
|
ReturnErr: nil,
|
||||||
}
|
}
|
||||||
mockRetriever := &mocks2.MockCIDRetriever{
|
mockRetriever := &mocks2.CIDRetriever{
|
||||||
FirstBlockNumberToReturn: 3,
|
FirstBlockNumberToReturn: 3,
|
||||||
GapsToRetrieve: []shared.Gap{},
|
GapsToRetrieve: []shared.Gap{},
|
||||||
}
|
}
|
||||||
mockFetcher := &mocks2.IPLDFetcher{
|
mockFetcher := &mocks2.PayloadFetcher{
|
||||||
PayloadsToReturn: map[uint64]shared.RawChainData{
|
PayloadsToReturn: map[uint64]shared.RawChainData{
|
||||||
1: mocks.MockStateDiffPayload,
|
1: mocks.MockStateDiffPayload,
|
||||||
2: mocks.MockStateDiffPayload,
|
2: mocks.MockStateDiffPayload,
|
||||||
@ -169,18 +171,19 @@ var _ = Describe("BackFiller", func() {
|
|||||||
Retriever: mockRetriever,
|
Retriever: mockRetriever,
|
||||||
GapCheckFrequency: time.Second * 2,
|
GapCheckFrequency: time.Second * 2,
|
||||||
BatchSize: super_node.DefaultMaxBatchSize,
|
BatchSize: super_node.DefaultMaxBatchSize,
|
||||||
|
BatchNumber: super_node.DefaultMaxBatchNumber,
|
||||||
QuitChan: quitChan,
|
QuitChan: quitChan,
|
||||||
}
|
}
|
||||||
wg := &sync.WaitGroup{}
|
wg := &sync.WaitGroup{}
|
||||||
backfiller.FillGaps(wg)
|
backfiller.FillGapsInSuperNode(wg)
|
||||||
time.Sleep(time.Second * 3)
|
time.Sleep(time.Second * 3)
|
||||||
quitChan <- true
|
quitChan <- true
|
||||||
Expect(len(mockCidRepo.PassedCIDPayload)).To(Equal(2))
|
Expect(len(mockCidRepo.PassedCIDPayload)).To(Equal(2))
|
||||||
Expect(mockCidRepo.PassedCIDPayload[0]).To(Equal(mocks.MockCIDPayload))
|
Expect(mockCidRepo.PassedCIDPayload[0]).To(Equal(mocks.MockCIDPayload))
|
||||||
Expect(mockCidRepo.PassedCIDPayload[1]).To(Equal(mocks.MockCIDPayload))
|
Expect(mockCidRepo.PassedCIDPayload[1]).To(Equal(mocks.MockCIDPayload))
|
||||||
Expect(len(mockPublisher.PassedIPLDPayload)).To(Equal(2))
|
Expect(len(mockPublisher.PassedIPLDPayload)).To(Equal(2))
|
||||||
Expect(mockPublisher.PassedIPLDPayload[0]).To(Equal(mocks.MockIPLDPayload))
|
Expect(mockPublisher.PassedIPLDPayload[0]).To(Equal(mocks.MockConvertedPayload))
|
||||||
Expect(mockPublisher.PassedIPLDPayload[1]).To(Equal(mocks.MockIPLDPayload))
|
Expect(mockPublisher.PassedIPLDPayload[1]).To(Equal(mocks.MockConvertedPayload))
|
||||||
Expect(len(mockConverter.PassedStatediffPayload)).To(Equal(2))
|
Expect(len(mockConverter.PassedStatediffPayload)).To(Equal(2))
|
||||||
Expect(mockConverter.PassedStatediffPayload[0]).To(Equal(mocks.MockStateDiffPayload))
|
Expect(mockConverter.PassedStatediffPayload[0]).To(Equal(mocks.MockStateDiffPayload))
|
||||||
Expect(mockConverter.PassedStatediffPayload[1]).To(Equal(mocks.MockStateDiffPayload))
|
Expect(mockConverter.PassedStatediffPayload[1]).To(Equal(mocks.MockStateDiffPayload))
|
||||||
|
176
pkg/super_node/btc/cleaner.go
Normal file
@@ -0,0 +1,176 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package btc

import (
	"fmt"

	"github.com/jmoiron/sqlx"
	"github.com/sirupsen/logrus"

	"github.com/vulcanize/vulcanizedb/pkg/postgres"
	"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)

// Cleaner satisfies the shared.Cleaner interface for bitcoin
type Cleaner struct {
	db *postgres.DB
}

// NewCleaner returns a new Cleaner struct that satisfies the shared.Cleaner interface
func NewCleaner(db *postgres.DB) *Cleaner {
	return &Cleaner{
		db: db,
	}
}

// Clean removes the specified data from the db within the provided block range
func (c *Cleaner) Clean(rngs [][2]uint64, t shared.DataType) error {
	tx, err := c.db.Beginx()
	if err != nil {
		return err
	}
	for _, rng := range rngs {
		logrus.Infof("btc db cleaner cleaning up block range %d to %d", rng[0], rng[1])
		if err := c.clean(tx, rng, t); err != nil {
			if err := tx.Rollback(); err != nil {
				logrus.Error(err)
			}
			return err
		}
	}
	if err := tx.Commit(); err != nil {
		return err
	}
	logrus.Infof("btc db cleaner vacuum analyzing cleaned tables to free up space from deleted rows")
	return c.vacuumAnalyze(t)
}

func (c *Cleaner) clean(tx *sqlx.Tx, rng [2]uint64, t shared.DataType) error {
	switch t {
	case shared.Full, shared.Headers:
		return c.cleanFull(tx, rng)
	case shared.Transactions:
		if err := c.cleanTransactionIPLDs(tx, rng); err != nil {
			return err
		}
		return c.cleanTransactionMetaData(tx, rng)
	default:
		return fmt.Errorf("btc cleaner unrecognized type: %s", t.String())
	}
}

func (c *Cleaner) vacuumAnalyze(t shared.DataType) error {
	switch t {
	case shared.Full, shared.Headers:
		if err := c.vacuumHeaders(); err != nil {
			return err
		}
		if err := c.vacuumTxs(); err != nil {
			return err
		}
		if err := c.vacuumTxInputs(); err != nil {
			return err
		}
		if err := c.vacuumTxOutputs(); err != nil {
			return err
		}
	case shared.Transactions:
		if err := c.vacuumTxs(); err != nil {
			return err
		}
		if err := c.vacuumTxInputs(); err != nil {
			return err
		}
		if err := c.vacuumTxOutputs(); err != nil {
			return err
		}
	default:
		return fmt.Errorf("btc cleaner unrecognized type: %s", t.String())
	}
	return c.vacuumIPLDs()
}

func (c *Cleaner) vacuumHeaders() error {
	_, err := c.db.Exec(`VACUUM ANALYZE btc.header_cids`)
	return err
}

func (c *Cleaner) vacuumTxs() error {
	_, err := c.db.Exec(`VACUUM ANALYZE btc.transaction_cids`)
	return err
}

func (c *Cleaner) vacuumTxInputs() error {
	_, err := c.db.Exec(`VACUUM ANALYZE btc.tx_inputs`)
	return err
}

func (c *Cleaner) vacuumTxOutputs() error {
	_, err := c.db.Exec(`VACUUM ANALYZE btc.tx_outputs`)
	return err
}

func (c *Cleaner) vacuumIPLDs() error {
	_, err := c.db.Exec(`VACUUM ANALYZE public.blocks`)
	return err
}

func (c *Cleaner) cleanFull(tx *sqlx.Tx, rng [2]uint64) error {
	if err := c.cleanTransactionIPLDs(tx, rng); err != nil {
		return err
	}
	if err := c.cleanHeaderIPLDs(tx, rng); err != nil {
		return err
	}
	return c.cleanHeaderMetaData(tx, rng)
}

func (c *Cleaner) cleanTransactionIPLDs(tx *sqlx.Tx, rng [2]uint64) error {
	pgStr := `DELETE FROM public.blocks A
			USING btc.transaction_cids B, btc.header_cids C
			WHERE A.key = B.cid
			AND B.header_id = C.id
			AND C.block_number BETWEEN $1 AND $2`
	_, err := tx.Exec(pgStr, rng[0], rng[1])
	return err
}

func (c *Cleaner) cleanTransactionMetaData(tx *sqlx.Tx, rng [2]uint64) error {
	pgStr := `DELETE FROM btc.transaction_cids A
			USING btc.header_cids B
			WHERE A.header_id = B.id
			AND B.block_number BETWEEN $1 AND $2`
	_, err := tx.Exec(pgStr, rng[0], rng[1])
	return err
}

func (c *Cleaner) cleanHeaderIPLDs(tx *sqlx.Tx, rng [2]uint64) error {
	pgStr := `DELETE FROM public.blocks A
			USING btc.header_cids B
			WHERE A.key = B.cid
			AND B.block_number BETWEEN $1 AND $2`
	_, err := tx.Exec(pgStr, rng[0], rng[1])
	return err
}

func (c *Cleaner) cleanHeaderMetaData(tx *sqlx.Tx, rng [2]uint64) error {
	pgStr := `DELETE FROM btc.header_cids
			WHERE block_number BETWEEN $1 AND $2`
	_, err := tx.Exec(pgStr, rng[0], rng[1])
	return err
}
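In the resync flow this cleaner presumably runs before refetching, dropping the old rows for the configured ranges and then vacuum analyzing the touched tables. A minimal usage sketch (same package; cleanRange is an illustrative wrapper, not part of this commit):

func cleanRange(db *postgres.DB, start, stop uint64) error {
	cleaner := NewCleaner(db)
	// remove transaction IPLDs and index rows for the range,
	// then VACUUM ANALYZE the affected tables
	return cleaner.Clean([][2]uint64{{start, stop}}, shared.Transactions)
}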
288
pkg/super_node/btc/cleaner_test.go
Normal file
@@ -0,0 +1,288 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package btc_test

import (
	"math/big"

	"github.com/ethereum/go-ethereum/crypto"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"github.com/vulcanize/vulcanizedb/pkg/postgres"
	"github.com/vulcanize/vulcanizedb/pkg/super_node/btc"
	"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)

var (
	// Block 0
	// header variables
	blockHash1      = crypto.Keccak256Hash([]byte{00, 02})
	blocKNumber1    = big.NewInt(0)
	headerCid1      = "mockHeader1CID"
	parentHash      = crypto.Keccak256Hash([]byte{00, 01})
	totalDifficulty = "50000000000000000000"
	reward          = "5000000000000000000"
	headerModel1    = btc.HeaderModel{
		BlockHash:   blockHash1.String(),
		BlockNumber: blocKNumber1.String(),
		ParentHash:  parentHash.String(),
		CID:         headerCid1,
	}

	// tx variables
	tx1CID    = "mockTx1CID"
	tx2CID    = "mockTx2CID"
	tx1Hash   = crypto.Keccak256Hash([]byte{01, 01})
	tx2Hash   = crypto.Keccak256Hash([]byte{01, 02})
	opHash    = crypto.Keccak256Hash([]byte{02, 01})
	txModels1 = []btc.TxModelWithInsAndOuts{
		{
			Index:  0,
			CID:    tx1CID,
			TxHash: tx1Hash.String(),
			SegWit: true,
			TxInputs: []btc.TxInput{
				{
					Index:                 0,
					TxWitness:             []string{"mockWitness"},
					SignatureScript:       []byte{01},
					PreviousOutPointIndex: 0,
					PreviousOutPointHash:  opHash.String(),
				},
			},
			TxOutputs: []btc.TxOutput{
				{
					Index:        0,
					Value:        50000000,
					PkScript:     []byte{02},
					ScriptClass:  0,
					RequiredSigs: 1,
				},
			},
		},
		{
			Index:  1,
			CID:    tx2CID,
			TxHash: tx2Hash.String(),
			SegWit: true,
		},
	}
	mockCIDPayload1 = &btc.CIDPayload{
		HeaderCID:       headerModel1,
		TransactionCIDs: txModels1,
	}

	// Block 1
	// header variables
	blockHash2   = crypto.Keccak256Hash([]byte{00, 03})
	blocKNumber2 = big.NewInt(1)
	headerCid2   = "mockHeaderCID2"
	headerModel2 = btc.HeaderModel{
		BlockNumber: blocKNumber2.String(),
		BlockHash:   blockHash2.String(),
		ParentHash:  blockHash1.String(),
		CID:         headerCid2,
	}

	// tx variables
	tx3CID    = "mockTx3CID"
	tx3Hash   = crypto.Keccak256Hash([]byte{01, 03})
	txModels2 = []btc.TxModelWithInsAndOuts{
		{
			Index:  0,
			CID:    tx3CID,
			TxHash: tx3Hash.String(),
			SegWit: true,
		},
	}
	mockCIDPayload2 = &btc.CIDPayload{
		HeaderCID:       headerModel2,
		TransactionCIDs: txModels2,
	}
	rngs = [][2]uint64{{0, 1}}
	cids = []string{
		headerCid1,
		headerCid2,
		tx1CID,
		tx2CID,
		tx3CID,
	}
	mockData = []byte{'\x01'}
)

var _ = Describe("Cleaner", func() {
	var (
		db      *postgres.DB
		repo    *btc.CIDIndexer
		cleaner *btc.Cleaner
	)
	BeforeEach(func() {
		var err error
		db, err = shared.SetupDB()
		Expect(err).ToNot(HaveOccurred())
		repo = btc.NewCIDIndexer(db)
		cleaner = btc.NewCleaner(db)
	})

	Describe("Clean", func() {
		BeforeEach(func() {
			err := repo.Index(mockCIDPayload1)
			Expect(err).ToNot(HaveOccurred())
			err = repo.Index(mockCIDPayload2)
			Expect(err).ToNot(HaveOccurred())

			for _, cid := range cids {
				_, err = db.Exec(`INSERT INTO public.blocks (key, data) VALUES ($1, $2)`, cid, mockData)
				Expect(err).ToNot(HaveOccurred())
			}

			tx, err := db.Beginx()
			Expect(err).ToNot(HaveOccurred())
			var startingIPFSBlocksCount int
			pgStr := `SELECT COUNT(*) FROM public.blocks`
			err = tx.Get(&startingIPFSBlocksCount, pgStr)
			Expect(err).ToNot(HaveOccurred())
			var startingTxCount int
			pgStr = `SELECT COUNT(*) FROM btc.transaction_cids`
			err = tx.Get(&startingTxCount, pgStr)
			Expect(err).ToNot(HaveOccurred())
			var startingHeaderCount int
			pgStr = `SELECT COUNT(*) FROM btc.header_cids`
			err = tx.Get(&startingHeaderCount, pgStr)
			Expect(err).ToNot(HaveOccurred())

			err = tx.Commit()
			Expect(err).ToNot(HaveOccurred())

			Expect(startingIPFSBlocksCount).To(Equal(5))
			Expect(startingTxCount).To(Equal(3))
			Expect(startingHeaderCount).To(Equal(2))
		})
		AfterEach(func() {
			btc.TearDownDB(db)
		})
		It("Cleans everything", func() {
			err := cleaner.Clean(rngs, shared.Full)
			Expect(err).ToNot(HaveOccurred())

			tx, err := db.Beginx()
			Expect(err).ToNot(HaveOccurred())
			var txCount int
			pgStr := `SELECT COUNT(*) FROM btc.transaction_cids`
			err = tx.Get(&txCount, pgStr)
			Expect(err).ToNot(HaveOccurred())
			var txInCount int
			pgStr = `SELECT COUNT(*) FROM btc.tx_inputs`
			err = tx.Get(&txInCount, pgStr)
			Expect(err).ToNot(HaveOccurred())
			var txOutCount int
			pgStr = `SELECT COUNT(*) FROM btc.tx_outputs`
			err = tx.Get(&txOutCount, pgStr)
			Expect(err).ToNot(HaveOccurred())
			var headerCount int
			pgStr = `SELECT COUNT(*) FROM btc.header_cids`
			err = tx.Get(&headerCount, pgStr)
			Expect(err).ToNot(HaveOccurred())
			var blocksCount int
			pgStr = `SELECT COUNT(*) FROM public.blocks`
			err = tx.Get(&blocksCount, pgStr)
			Expect(err).ToNot(HaveOccurred())

			err = tx.Commit()
			Expect(err).ToNot(HaveOccurred())

			Expect(blocksCount).To(Equal(0))
			Expect(txCount).To(Equal(0))
			Expect(txInCount).To(Equal(0))
			Expect(txOutCount).To(Equal(0))
			Expect(headerCount).To(Equal(0))
		})
		It("Cleans headers and all linked data", func() {
			err := cleaner.Clean(rngs, shared.Headers)
			Expect(err).ToNot(HaveOccurred())

			tx, err := db.Beginx()
			Expect(err).ToNot(HaveOccurred())
			var txCount int
			pgStr := `SELECT COUNT(*) FROM btc.transaction_cids`
			err = tx.Get(&txCount, pgStr)
			Expect(err).ToNot(HaveOccurred())
			var txInCount int
			pgStr = `SELECT COUNT(*) FROM btc.tx_inputs`
			err = tx.Get(&txInCount, pgStr)
			Expect(err).ToNot(HaveOccurred())
			var txOutCount int
			pgStr = `SELECT COUNT(*) FROM btc.tx_outputs`
			err = tx.Get(&txOutCount, pgStr)
			Expect(err).ToNot(HaveOccurred())
			var headerCount int
			pgStr = `SELECT COUNT(*) FROM btc.header_cids`
			err = tx.Get(&headerCount, pgStr)
			Expect(err).ToNot(HaveOccurred())
			var blocksCount int
			pgStr = `SELECT COUNT(*) FROM public.blocks`
			err = tx.Get(&blocksCount, pgStr)
			Expect(err).ToNot(HaveOccurred())

			err = tx.Commit()
			Expect(err).ToNot(HaveOccurred())

			Expect(blocksCount).To(Equal(0))
			Expect(txCount).To(Equal(0))
			Expect(txInCount).To(Equal(0))
			Expect(txOutCount).To(Equal(0))
			Expect(headerCount).To(Equal(0))
		})
		It("Cleans transactions", func() {
			err := cleaner.Clean(rngs, shared.Transactions)
			Expect(err).ToNot(HaveOccurred())

			tx, err := db.Beginx()
			Expect(err).ToNot(HaveOccurred())
			var txCount int
			pgStr := `SELECT COUNT(*) FROM btc.transaction_cids`
			err = tx.Get(&txCount, pgStr)
			Expect(err).ToNot(HaveOccurred())
			var txInCount int
			pgStr = `SELECT COUNT(*) FROM btc.tx_inputs`
|
||||||
|
err = tx.Get(&txInCount, pgStr)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
var txOutCount int
|
||||||
|
pgStr = `SELECT COUNT(*) FROM btc.tx_outputs`
|
||||||
|
err = tx.Get(&txOutCount, pgStr)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
var headerCount int
|
||||||
|
pgStr = `SELECT COUNT(*) FROM btc.header_cids`
|
||||||
|
err = tx.Get(&headerCount, pgStr)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
var blocksCount int
|
||||||
|
pgStr = `SELECT COUNT(*) FROM public.blocks`
|
||||||
|
err = tx.Get(&blocksCount, pgStr)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
|
||||||
|
err = tx.Commit()
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
|
||||||
|
Expect(blocksCount).To(Equal(2))
|
||||||
|
Expect(txCount).To(Equal(0))
|
||||||
|
Expect(txInCount).To(Equal(0))
|
||||||
|
Expect(txOutCount).To(Equal(0))
|
||||||
|
Expect(headerCount).To(Equal(2))
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
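For context, a minimal sketch of driving the Cleaner outside the spec above. The btc package import path is assumed to mirror the shared package's path shown elsewhere in this diff, and shared.SetupDB is the same test helper the spec uses:

package main

import (
	"log"

	"github.com/vulcanize/vulcanizedb/pkg/super_node/btc"
	"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)

func main() {
	// Open the test database the specs use; a real caller would construct
	// its own *postgres.DB connection instead.
	db, err := shared.SetupDB()
	if err != nil {
		log.Fatal(err)
	}
	cleaner := btc.NewCleaner(db)
	// Clear headers, transactions, inputs/outputs, and the backing IPFS
	// blocks for heights 0 through 1, mirroring the "Cleans everything" spec.
	if err := cleaner.Clean([][2]uint64{{0, 1}}, shared.Full); err != nil {
		log.Fatal(err)
	}
}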
@@ -40,7 +40,7 @@ func NewPayloadConverter(chainConfig *chaincfg.Params) *PayloadConverter {
 
 // Convert method is used to convert a bitcoin BlockPayload to an IPLDPayload
 // Satisfies the shared.PayloadConverter interface
-func (pc *PayloadConverter) Convert(payload shared.RawChainData) (shared.StreamedIPLDs, error) {
+func (pc *PayloadConverter) Convert(payload shared.RawChainData) (shared.ConvertedData, error) {
 	btcBlockPayload, ok := payload.(BlockPayload)
 	if !ok {
 		return nil, fmt.Errorf("btc converter: expected payload type %T got %T", BlockPayload{}, payload)
@@ -87,7 +87,7 @@ func (pc *PayloadConverter) Convert(payload shared.RawChainData) (shared.Streame
 		}
 		txMeta[i] = txModel
 	}
-	return IPLDPayload{
+	return ConvertedPayload{
 		BlockPayload: btcBlockPayload,
 		TxMetaData:   txMeta,
 	}, nil
@@ -31,10 +31,10 @@ var _ = Describe("Converter", func() {
 		converter := btc.NewPayloadConverter(&chaincfg.MainNetParams)
 		payload, err := converter.Convert(mocks.MockBlockPayload)
 		Expect(err).ToNot(HaveOccurred())
-		convertedPayload, ok := payload.(btc.IPLDPayload)
+		convertedPayload, ok := payload.(btc.ConvertedPayload)
 		Expect(ok).To(BeTrue())
-		Expect(convertedPayload).To(Equal(mocks.MockIPLDPayload))
-		Expect(convertedPayload.Height).To(Equal(mocks.MockBlockHeight))
+		Expect(convertedPayload).To(Equal(mocks.MockConvertedPayload))
+		Expect(convertedPayload.BlockHeight).To(Equal(mocks.MockBlockHeight))
 		Expect(convertedPayload.Header).To(Equal(&mocks.MockBlock.Header))
 		Expect(convertedPayload.Txs).To(Equal(mocks.MockTransactions))
 		Expect(convertedPayload.TxMetaData).To(Equal(mocks.MockTxsMetaData))
@@ -21,6 +21,10 @@ import (
 	"fmt"
 	"math/big"
 
+	"github.com/multiformats/go-multihash"
+
+	"github.com/vulcanize/vulcanizedb/pkg/ipfs"
+	"github.com/vulcanize/vulcanizedb/pkg/ipfs/ipld"
 	"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
 )
 
@@ -33,37 +37,45 @@ func NewResponseFilterer() *ResponseFilterer {
 }
 
 // Filter is used to filter through btc data to extract and package requested data into a Payload
-func (s *ResponseFilterer) Filter(filter shared.SubscriptionSettings, payload shared.StreamedIPLDs) (shared.ServerResponse, error) {
+func (s *ResponseFilterer) Filter(filter shared.SubscriptionSettings, payload shared.ConvertedData) (shared.IPLDs, error) {
 	btcFilters, ok := filter.(*SubscriptionSettings)
 	if !ok {
-		return StreamResponse{}, fmt.Errorf("btc filterer expected filter type %T got %T", &SubscriptionSettings{}, filter)
+		return IPLDs{}, fmt.Errorf("btc filterer expected filter type %T got %T", &SubscriptionSettings{}, filter)
 	}
-	btcPayload, ok := payload.(IPLDPayload)
+	btcPayload, ok := payload.(ConvertedPayload)
 	if !ok {
-		return StreamResponse{}, fmt.Errorf("btc filterer expected payload type %T got %T", IPLDPayload{}, payload)
+		return IPLDs{}, fmt.Errorf("btc filterer expected payload type %T got %T", ConvertedPayload{}, payload)
 	}
-	height := int64(btcPayload.Height)
+	height := int64(btcPayload.BlockPayload.BlockHeight)
 	if checkRange(btcFilters.Start.Int64(), btcFilters.End.Int64(), height) {
-		response := new(StreamResponse)
+		response := new(IPLDs)
 		if err := s.filterHeaders(btcFilters.HeaderFilter, response, btcPayload); err != nil {
-			return StreamResponse{}, err
+			return IPLDs{}, err
 		}
 		if err := s.filterTransactions(btcFilters.TxFilter, response, btcPayload); err != nil {
-			return StreamResponse{}, err
+			return IPLDs{}, err
 		}
 		response.BlockNumber = big.NewInt(height)
 		return *response, nil
 	}
-	return StreamResponse{}, nil
+	return IPLDs{}, nil
 }
 
-func (s *ResponseFilterer) filterHeaders(headerFilter HeaderFilter, response *StreamResponse, payload IPLDPayload) error {
+func (s *ResponseFilterer) filterHeaders(headerFilter HeaderFilter, response *IPLDs, payload ConvertedPayload) error {
 	if !headerFilter.Off {
 		headerBuffer := new(bytes.Buffer)
 		if err := payload.Header.Serialize(headerBuffer); err != nil {
 			return err
 		}
-		response.SerializedHeaders = append(response.SerializedHeaders, headerBuffer.Bytes())
+		data := headerBuffer.Bytes()
+		cid, err := ipld.RawdataToCid(ipld.MBitcoinHeader, data, multihash.DBL_SHA2_256)
+		if err != nil {
+			return err
+		}
+		response.Header = ipfs.BlockModel{
+			Data: data,
+			CID:  cid.String(),
+		}
 	}
 	return nil
 }
@@ -75,15 +87,24 @@ func checkRange(start, end, actual int64) bool {
 	return false
 }
 
-func (s *ResponseFilterer) filterTransactions(trxFilter TxFilter, response *StreamResponse, payload IPLDPayload) error {
+func (s *ResponseFilterer) filterTransactions(trxFilter TxFilter, response *IPLDs, payload ConvertedPayload) error {
 	if !trxFilter.Off {
+		response.Transactions = make([]ipfs.BlockModel, 0, len(payload.TxMetaData))
 		for i, txMeta := range payload.TxMetaData {
 			if checkTransaction(txMeta, trxFilter) {
 				trxBuffer := new(bytes.Buffer)
 				if err := payload.Txs[i].MsgTx().Serialize(trxBuffer); err != nil {
 					return err
 				}
-				response.SerializedTxs = append(response.SerializedTxs, trxBuffer.Bytes())
+				data := trxBuffer.Bytes()
+				cid, err := ipld.RawdataToCid(ipld.MBitcoinTx, data, multihash.DBL_SHA2_256)
+				if err != nil {
+					return err
+				}
+				response.Transactions = append(response.Transactions, ipfs.BlockModel{
+					Data: data,
+					CID:  cid.String(),
+				})
 			}
 		}
 	}
 }
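The key change in both filter methods above is that serialized bytes are now paired with a locally computed CID instead of being returned raw. A minimal sketch of that derivation using the same calls the new code makes (ipld.RawdataToCid with the double-SHA2-256 multihash); the sample bytes are a stand-in, not real header data:

package main

import (
	"fmt"

	"github.com/multiformats/go-multihash"

	"github.com/vulcanize/vulcanizedb/pkg/ipfs"
	"github.com/vulcanize/vulcanizedb/pkg/ipfs/ipld"
)

func main() {
	// Stand-in for a wire-serialized bitcoin header.
	data := []byte{0x01, 0x02, 0x03}
	// Hash the raw bytes the same way the DAG putters do, so the CID
	// matches what the publisher stored, without an IPFS round trip.
	c, err := ipld.RawdataToCid(ipld.MBitcoinHeader, data, multihash.DBL_SHA2_256)
	if err != nil {
		panic(err)
	}
	block := ipfs.BlockModel{Data: data, CID: c.String()}
	fmt.Println(block.CID)
}

Computing the CID on the filterer side means streaming subscribers receive the same (CID, data) pairs whether the response is served from a live payload or backfilled from IPFS.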
@@ -68,15 +68,15 @@ func (ps *HTTPPayloadStreamer) Stream(payloadChan chan shared.RawChainData) (sha
 			if bytes.Equal(blockHashBytes, ps.lastHash) {
 				continue
 			}
-			ps.lastHash = blockHashBytes
 			block, err := client.GetBlock(blockHash)
 			if err != nil {
 				errChan <- err
 				continue
 			}
+			ps.lastHash = blockHashBytes
 			payloadChan <- BlockPayload{
 				Header: &block.Header,
-				Height: height,
+				BlockHeight: height,
 				Txs: msgTxsToUtilTxs(block.Transactions),
 			}
 		default:
@@ -21,14 +21,13 @@ import (
 	"errors"
 	"fmt"
 
-	"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
-
 	"github.com/ipfs/go-block-format"
 	"github.com/ipfs/go-blockservice"
 	"github.com/ipfs/go-cid"
 	log "github.com/sirupsen/logrus"
 
 	"github.com/vulcanize/vulcanizedb/pkg/ipfs"
+	"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
 )
 
 var (
@@ -52,16 +51,16 @@ func NewIPLDFetcher(ipfsPath string) (*IPLDFetcher, error) {
 }
 
 // Fetch is the exported method for fetching and returning all the IPLDS specified in the CIDWrapper
-func (f *IPLDFetcher) Fetch(cids shared.CIDsForFetching) (shared.FetchedIPLDs, error) {
+func (f *IPLDFetcher) Fetch(cids shared.CIDsForFetching) (shared.IPLDs, error) {
 	cidWrapper, ok := cids.(*CIDWrapper)
 	if !ok {
 		return nil, fmt.Errorf("btc fetcher: expected cids type %T got %T", &CIDWrapper{}, cids)
 	}
 	log.Debug("fetching iplds")
-	iplds := new(IPLDWrapper)
+	iplds := IPLDs{}
 	iplds.BlockNumber = cidWrapper.BlockNumber
 	var err error
-	iplds.Headers, err = f.FetchHeaders(cidWrapper.Headers)
+	iplds.Header, err = f.FetchHeader(cidWrapper.Header)
 	if err != nil {
 		return nil, err
 	}
@@ -73,43 +72,48 @@ func (f *IPLDFetcher) Fetch(cids shared.CIDsForFetching) (shared.FetchedIPLDs, e
 }
 
 // FetchHeaders fetches headers
-// It uses the f.fetchBatch method
-func (f *IPLDFetcher) FetchHeaders(cids []HeaderModel) ([]blocks.Block, error) {
-	log.Debug("fetching header iplds")
-	headerCids := make([]cid.Cid, 0, len(cids))
-	for _, c := range cids {
-		dc, err := cid.Decode(c.CID)
-		if err != nil {
-			return nil, err
-		}
-		headerCids = append(headerCids, dc)
-	}
-	headers := f.fetchBatch(headerCids)
-	if len(headers) != len(headerCids) {
-		log.Errorf("ipfs fetcher: number of header blocks returned (%d) does not match number expected (%d)", len(headers), len(headerCids))
-		return headers, errUnexpectedNumberOfIPLDs
-	}
-	return headers, nil
+// It uses the f.fetch method
+func (f *IPLDFetcher) FetchHeader(c HeaderModel) (ipfs.BlockModel, error) {
+	log.Debug("fetching header ipld")
+	dc, err := cid.Decode(c.CID)
+	if err != nil {
+		return ipfs.BlockModel{}, err
+	}
+	header, err := f.fetch(dc)
+	if err != nil {
+		return ipfs.BlockModel{}, err
+	}
+	return ipfs.BlockModel{
+		Data: header.RawData(),
+		CID:  header.Cid().String(),
+	}, nil
 }
 
 // FetchTrxs fetches transactions
 // It uses the f.fetchBatch method
-func (f *IPLDFetcher) FetchTrxs(cids []TxModel) ([]blocks.Block, error) {
+func (f *IPLDFetcher) FetchTrxs(cids []TxModel) ([]ipfs.BlockModel, error) {
 	log.Debug("fetching transaction iplds")
-	trxCids := make([]cid.Cid, 0, len(cids))
-	for _, c := range cids {
+	trxCids := make([]cid.Cid, len(cids))
+	for i, c := range cids {
 		dc, err := cid.Decode(c.CID)
 		if err != nil {
 			return nil, err
 		}
-		trxCids = append(trxCids, dc)
+		trxCids[i] = dc
 	}
 	trxs := f.fetchBatch(trxCids)
-	if len(trxs) != len(trxCids) {
-		log.Errorf("ipfs fetcher: number of transaction blocks returned (%d) does not match number expected (%d)", len(trxs), len(trxCids))
-		return trxs, errUnexpectedNumberOfIPLDs
-	}
-	return trxs, nil
+	trxIPLDs := make([]ipfs.BlockModel, len(trxs))
+	for i, trx := range trxs {
+		trxIPLDs[i] = ipfs.BlockModel{
+			Data: trx.RawData(),
+			CID:  trx.Cid().String(),
+		}
+	}
+	if len(trxIPLDs) != len(trxCids) {
+		log.Errorf("ipfs fetcher: number of transaction blocks returned (%d) does not match number expected (%d)", len(trxs), len(trxCids))
+		return trxIPLDs, errUnexpectedNumberOfIPLDs
+	}
+	return trxIPLDs, nil
 }
 
 // fetch is used to fetch a single cid
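A rough usage sketch of the reworked fetcher; the CIDWrapper field names come from this diff, while the IPFS repo path and CID value are placeholders:

package main

import (
	"fmt"
	"math/big"

	"github.com/vulcanize/vulcanizedb/pkg/super_node/btc"
)

func main() {
	fetcher, err := btc.NewIPLDFetcher("/tmp/ipfs") // placeholder repo path
	if err != nil {
		panic(err)
	}
	cids := &btc.CIDWrapper{
		BlockNumber: big.NewInt(0),
		// A single header per wrapper now, not a slice of headers.
		Header: btc.HeaderModel{CID: "mockHeaderCID"},
	}
	// Fetch returns shared.IPLDs: one ipfs.BlockModel for the header plus
	// a []ipfs.BlockModel for the transactions.
	iplds, err := fetcher.Fetch(cids)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", iplds)
}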
@@ -1,17 +0,0 @@
-// VulcanizeDB
-// Copyright © 2019 Vulcanize
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-package btc
@@ -26,12 +26,12 @@ import (
 // PayloadConverter is the underlying struct for the Converter interface
 type PayloadConverter struct {
 	PassedStatediffPayload btc.BlockPayload
-	ReturnIPLDPayload      btc.IPLDPayload
+	ReturnIPLDPayload      btc.ConvertedPayload
 	ReturnErr              error
 }
 
 // Convert method is used to convert a geth statediff.Payload to a IPLDPayload
-func (pc *PayloadConverter) Convert(payload shared.RawChainData) (shared.StreamedIPLDs, error) {
+func (pc *PayloadConverter) Convert(payload shared.RawChainData) (shared.ConvertedData, error) {
 	stateDiffPayload, ok := payload.(btc.BlockPayload)
 	if !ok {
 		return nil, fmt.Errorf("convert expected payload type %T got %T", btc.BlockPayload{}, payload)
@@ -43,13 +43,13 @@ func (pc *PayloadConverter) Convert(payload shared.RawChainData) (shared.Streame
 // IterativePayloadConverter is the underlying struct for the Converter interface
 type IterativePayloadConverter struct {
 	PassedStatediffPayload []btc.BlockPayload
-	ReturnIPLDPayload      []btc.IPLDPayload
+	ReturnIPLDPayload      []btc.ConvertedPayload
 	ReturnErr              error
 	iteration              int
 }
 
 // Convert method is used to convert a geth statediff.Payload to a IPLDPayload
-func (pc *IterativePayloadConverter) Convert(payload shared.RawChainData) (shared.StreamedIPLDs, error) {
+func (pc *IterativePayloadConverter) Convert(payload shared.RawChainData) (shared.ConvertedData, error) {
 	stateDiffPayload, ok := payload.(btc.BlockPayload)
 	if !ok {
 		return nil, fmt.Errorf("convert expected payload type %T got %T", btc.BlockPayload{}, payload)
@@ -26,16 +26,16 @@ import (
 
 // IPLDPublisher is the underlying struct for the Publisher interface
 type IPLDPublisher struct {
-	PassedIPLDPayload btc.IPLDPayload
+	PassedIPLDPayload btc.ConvertedPayload
 	ReturnCIDPayload  *btc.CIDPayload
 	ReturnErr         error
 }
 
 // Publish publishes an IPLDPayload to IPFS and returns the corresponding CIDPayload
-func (pub *IPLDPublisher) Publish(payload shared.StreamedIPLDs) (shared.CIDsForIndexing, error) {
-	ipldPayload, ok := payload.(btc.IPLDPayload)
+func (pub *IPLDPublisher) Publish(payload shared.ConvertedData) (shared.CIDsForIndexing, error) {
+	ipldPayload, ok := payload.(btc.ConvertedPayload)
 	if !ok {
-		return nil, fmt.Errorf("publish expected payload type %T got %T", &btc.IPLDPayload{}, payload)
+		return nil, fmt.Errorf("publish expected payload type %T got %T", &btc.ConvertedPayload{}, payload)
 	}
 	pub.PassedIPLDPayload = ipldPayload
 	return pub.ReturnCIDPayload, pub.ReturnErr
@@ -43,17 +43,17 @@ func (pub *IPLDPublisher) Publish(payload shared.StreamedIPLDs) (shared.CIDsForI
 
 // IterativeIPLDPublisher is the underlying struct for the Publisher interface; used in testing
 type IterativeIPLDPublisher struct {
-	PassedIPLDPayload []btc.IPLDPayload
+	PassedIPLDPayload []btc.ConvertedPayload
 	ReturnCIDPayload  []*btc.CIDPayload
 	ReturnErr         error
 	iteration         int
 }
 
 // Publish publishes an IPLDPayload to IPFS and returns the corresponding CIDPayload
-func (pub *IterativeIPLDPublisher) Publish(payload shared.StreamedIPLDs) (shared.CIDsForIndexing, error) {
-	ipldPayload, ok := payload.(btc.IPLDPayload)
+func (pub *IterativeIPLDPublisher) Publish(payload shared.ConvertedData) (shared.CIDsForIndexing, error) {
+	ipldPayload, ok := payload.(btc.ConvertedPayload)
 	if !ok {
-		return nil, fmt.Errorf("publish expected payload type %T got %T", &btc.IPLDPayload{}, payload)
+		return nil, fmt.Errorf("publish expected payload type %T got %T", &btc.ConvertedPayload{}, payload)
 	}
 	pub.PassedIPLDPayload = append(pub.PassedIPLDPayload, ipldPayload)
 	if len(pub.ReturnCIDPayload) < pub.iteration+1 {
@@ -231,7 +231,7 @@ var (
 	MockBlockPayload = btc.BlockPayload{
 		Header: &MockBlock.Header,
 		Txs:    MockTransactions,
-		Height: MockBlockHeight,
+		BlockHeight: MockBlockHeight,
 	}
 	sClass1, addresses1, numOfSigs1, _ = txscript.ExtractPkScriptAddrs([]byte{
 		0x41, // OP_DATA_65
@@ -677,7 +677,7 @@ var (
 		Timestamp: MockBlock.Header.Timestamp.UnixNano(),
 		Bits:      MockBlock.Header.Bits,
 	}
-	MockIPLDPayload = btc.IPLDPayload{
+	MockConvertedPayload = btc.ConvertedPayload{
 		BlockPayload: MockBlockPayload,
 		TxMetaData:   MockTxsMetaData,
 	}
@@ -55,7 +55,7 @@ func (fetcher *PayloadFetcher) FetchAt(blockHeights []uint64) ([]shared.RawChain
 			return nil, err
 		}
 		blockPayloads[i] = BlockPayload{
-			Height: int64(height),
+			BlockHeight: int64(height),
 			Header: &block.Header,
 			Txs:    msgTxsToUtilTxs(block.Transactions),
 		}
@@ -1,17 +0,0 @@
-// VulcanizeDB
-// Copyright © 2019 Vulcanize
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-package btc
@@ -17,15 +17,12 @@
 package btc
 
 import (
-	"errors"
 	"fmt"
 	"strconv"
 
-	"github.com/btcsuite/btcd/wire"
-	"github.com/btcsuite/btcutil"
-
 	"github.com/vulcanize/vulcanizedb/pkg/ipfs"
 	"github.com/vulcanize/vulcanizedb/pkg/ipfs/dag_putters"
+	"github.com/vulcanize/vulcanizedb/pkg/ipfs/ipld"
 	"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
 )
 
@@ -33,6 +30,7 @@ import (
 type IPLDPublisher struct {
 	HeaderPutter      shared.DagPutter
 	TransactionPutter shared.DagPutter
+	TransactionTriePutter shared.DagPutter
 }
 
 // NewIPLDPublisher creates a pointer to a new Publisher which satisfies the IPLDPublisher interface
@@ -44,30 +42,36 @@ func NewIPLDPublisher(ipfsPath string) (*IPLDPublisher, error) {
 	return &IPLDPublisher{
 		HeaderPutter:      dag_putters.NewBtcHeaderDagPutter(node),
 		TransactionPutter: dag_putters.NewBtcTxDagPutter(node),
+		TransactionTriePutter: dag_putters.NewBtcTxTrieDagPutter(node),
 	}, nil
 }
 
 // Publish publishes an IPLDPayload to IPFS and returns the corresponding CIDPayload
-func (pub *IPLDPublisher) Publish(payload shared.StreamedIPLDs) (shared.CIDsForIndexing, error) {
-	ipldPayload, ok := payload.(IPLDPayload)
+func (pub *IPLDPublisher) Publish(payload shared.ConvertedData) (shared.CIDsForIndexing, error) {
+	ipldPayload, ok := payload.(ConvertedPayload)
 	if !ok {
-		return nil, fmt.Errorf("eth publisher expected payload type %T got %T", &IPLDPayload{}, payload)
+		return nil, fmt.Errorf("eth publisher expected payload type %T got %T", &ConvertedPayload{}, payload)
+	}
+	// Generate nodes
+	headerNode, txNodes, txTrieNodes, err := ipld.FromHeaderAndTxs(ipldPayload.Header, ipldPayload.Txs)
+	if err != nil {
+		return nil, err
 	}
 	// Process and publish headers
-	headerCid, err := pub.publishHeader(ipldPayload.Header)
+	headerCid, err := pub.publishHeader(headerNode)
 	if err != nil {
 		return nil, err
 	}
 	header := HeaderModel{
 		CID:        headerCid,
 		ParentHash: ipldPayload.Header.PrevBlock.String(),
-		BlockNumber: strconv.Itoa(int(ipldPayload.Height)),
+		BlockNumber: strconv.Itoa(int(ipldPayload.BlockPayload.BlockHeight)),
 		BlockHash:  ipldPayload.Header.BlockHash().String(),
 		Timestamp:  ipldPayload.Header.Timestamp.UnixNano(),
 		Bits:       ipldPayload.Header.Bits,
 	}
 	// Process and publish transactions
-	transactionCids, err := pub.publishTransactions(ipldPayload.Txs, ipldPayload.TxMetaData)
+	transactionCids, err := pub.publishTransactions(txNodes, txTrieNodes, ipldPayload.TxMetaData)
 	if err != nil {
 		return nil, err
 	}
@@ -78,25 +82,22 @@ func (pub *IPLDPublisher) Publish(payload shared.StreamedIPLDs) (shared.CIDsForI
 	}, nil
 }
 
-func (pub *IPLDPublisher) publishHeader(header *wire.BlockHeader) (string, error) {
-	cids, err := pub.HeaderPutter.DagPut(header)
+func (pub *IPLDPublisher) publishHeader(header *ipld.BtcHeader) (string, error) {
+	cid, err := pub.HeaderPutter.DagPut(header)
 	if err != nil {
 		return "", err
 	}
-	return cids[0], nil
+	return cid, nil
 }
 
-func (pub *IPLDPublisher) publishTransactions(transactions []*btcutil.Tx, trxMeta []TxModelWithInsAndOuts) ([]TxModelWithInsAndOuts, error) {
-	transactionCids, err := pub.TransactionPutter.DagPut(transactions)
-	if err != nil {
-		return nil, err
-	}
-	if len(transactionCids) != len(trxMeta) {
-		return nil, errors.New("expected one CID for each transaction")
-	}
-	mappedTrxCids := make([]TxModelWithInsAndOuts, len(transactionCids))
-	for i, cid := range transactionCids {
-		mappedTrxCids[i] = TxModelWithInsAndOuts{
+func (pub *IPLDPublisher) publishTransactions(transactions []*ipld.BtcTx, txTrie []*ipld.BtcTxTrie, trxMeta []TxModelWithInsAndOuts) ([]TxModelWithInsAndOuts, error) {
+	txCids := make([]TxModelWithInsAndOuts, len(transactions))
+	for i, tx := range transactions {
+		cid, err := pub.TransactionPutter.DagPut(tx)
+		if err != nil {
+			return nil, err
+		}
+		txCids[i] = TxModelWithInsAndOuts{
 			CID:    cid,
 			Index:  trxMeta[i].Index,
 			TxHash: trxMeta[i].TxHash,
@@ -106,5 +107,11 @@ func (pub *IPLDPublisher) publishTransactions(transactions []*btcutil.Tx, trxMet
 			TxOutputs: trxMeta[i].TxOutputs,
 		}
 	}
-	return mappedTrxCids, nil
+	for _, txNode := range txTrie {
+		// We don't do anything with the tx trie cids atm
+		if _, err := pub.TransactionTriePutter.DagPut(txNode); err != nil {
+			return nil, err
+		}
+	}
+	return txCids, nil
 }
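Taken together, the publisher hunks change the flow from "serialize and batch-DagPut" to "build typed IPLD nodes first, then put them one at a time". A condensed sketch of the new call sequence; the package paths are assumed from the imports elsewhere in this diff, and mocks.MockConvertedPayload is the fixture defined above:

package main

import (
	"log"

	"github.com/vulcanize/vulcanizedb/pkg/super_node/btc"
	"github.com/vulcanize/vulcanizedb/pkg/super_node/btc/mocks"
)

func main() {
	publisher, err := btc.NewIPLDPublisher("/tmp/ipfs") // placeholder repo path
	if err != nil {
		log.Fatal(err)
	}
	// Publish builds header, tx, and tx-trie nodes via ipld.FromHeaderAndTxs,
	// puts each through its own DagPutter, and returns a *btc.CIDPayload
	// ready for the CIDIndexer; tx trie CIDs are published but not indexed yet.
	cids, err := publisher.Publish(mocks.MockConvertedPayload)
	if err != nil {
		log.Fatal(err)
	}
	_ = cids.(*btc.CIDPayload)
}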
@@ -17,6 +17,9 @@
 package btc_test
 
 import (
+	"bytes"
+
+	"github.com/ethereum/go-ethereum/common"
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
 
@@ -26,25 +29,47 @@ import (
 )
 
 var (
-	mockHeaderDagPutter *mocks2.DagPutter
-	mockTrxDagPutter    *mocks2.DagPutter
+	mockHeaderDagPutter  *mocks2.MappedDagPutter
+	mockTrxDagPutter     *mocks2.MappedDagPutter
+	mockTrxTrieDagPutter *mocks2.DagPutter
 )
 
 var _ = Describe("Publisher", func() {
 	BeforeEach(func() {
-		mockHeaderDagPutter = new(mocks2.DagPutter)
-		mockTrxDagPutter = new(mocks2.DagPutter)
+		mockHeaderDagPutter = new(mocks2.MappedDagPutter)
+		mockTrxDagPutter = new(mocks2.MappedDagPutter)
+		mockTrxTrieDagPutter = new(mocks2.DagPutter)
 	})
 
 	Describe("Publish", func() {
 		It("Publishes the passed IPLDPayload objects to IPFS and returns a CIDPayload for indexing", func() {
-			mockHeaderDagPutter.CIDsToReturn = []string{"mockHeaderCID"}
-			mockTrxDagPutter.CIDsToReturn = []string{"mockTrxCID1", "mockTrxCID2", "mockTrxCID3"}
+			by := new(bytes.Buffer)
+			err := mocks.MockConvertedPayload.BlockPayload.Header.Serialize(by)
+			Expect(err).ToNot(HaveOccurred())
+			headerBytes := by.Bytes()
+			err = mocks.MockTransactions[0].MsgTx().Serialize(by)
+			Expect(err).ToNot(HaveOccurred())
+			tx1Bytes := by.Bytes()
+			err = mocks.MockTransactions[1].MsgTx().Serialize(by)
+			Expect(err).ToNot(HaveOccurred())
+			tx2Bytes := by.Bytes()
+			err = mocks.MockTransactions[2].MsgTx().Serialize(by)
+			Expect(err).ToNot(HaveOccurred())
+			tx3Bytes := by.Bytes()
+			mockHeaderDagPutter.CIDsToReturn = map[common.Hash]string{
+				common.BytesToHash(headerBytes): "mockHeaderCID",
+			}
+			mockTrxDagPutter.CIDsToReturn = map[common.Hash]string{
+				common.BytesToHash(tx1Bytes): "mockTrxCID1",
+				common.BytesToHash(tx2Bytes): "mockTrxCID2",
+				common.BytesToHash(tx3Bytes): "mockTrxCID3",
+			}
 			publisher := btc.IPLDPublisher{
 				HeaderPutter:      mockHeaderDagPutter,
 				TransactionPutter: mockTrxDagPutter,
+				TransactionTriePutter: mockTrxTrieDagPutter,
 			}
-			payload, err := publisher.Publish(mocks.MockIPLDPayload)
+			payload, err := publisher.Publish(mocks.MockConvertedPayload)
 			Expect(err).ToNot(HaveOccurred())
 			cidPayload, ok := payload.(*btc.CIDPayload)
 			Expect(ok).To(BeTrue())
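The spec above works because the MappedDagPutter mock keys its canned CIDs by a common.Hash built from the bytes it is handed; note that common.BytesToHash pads or truncates to 32 bytes rather than hashing. A standalone illustration of that lookup pattern, with a hypothetical helper type standing in for the library's mock:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// mappedPutter is a hypothetical stand-in for the lookup the mock performs:
// serialized bytes -> common.Hash (pad/truncate to 32 bytes) -> canned CID.
type mappedPutter struct {
	CIDsToReturn map[common.Hash]string
}

func (m *mappedPutter) put(raw []byte) (string, error) {
	cid, ok := m.CIDsToReturn[common.BytesToHash(raw)]
	if !ok {
		return "", fmt.Errorf("no CID mapped for %x", raw)
	}
	return cid, nil
}

func main() {
	p := &mappedPutter{CIDsToReturn: map[common.Hash]string{
		common.BytesToHash([]byte("header-bytes")): "mockHeaderCID",
	}}
	cid, err := p.put([]byte("header-bytes"))
	if err != nil {
		panic(err)
	}
	fmt.Println(cid) // mockHeaderCID
}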
@@ -1,54 +0,0 @@
-// VulcanizeDB
-// Copyright © 2019 Vulcanize
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-package btc
-
-import (
-	"fmt"
-
-	"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
-
-	"github.com/ipfs/go-block-format"
-)
-
-// IPLDResolver satisfies the IPLDResolver interface for bitcoin
-type IPLDResolver struct{}
-
-// NewIPLDResolver returns a pointer to an IPLDResolver which satisfies the IPLDResolver interface
-func NewIPLDResolver() *IPLDResolver {
-	return &IPLDResolver{}
-}
-
-// Resolve is the exported method for resolving all of the BTC IPLDs packaged in an IpfsBlockWrapper
-func (eir *IPLDResolver) Resolve(iplds shared.FetchedIPLDs) (shared.ServerResponse, error) {
-	ipfsBlocks, ok := iplds.(*IPLDWrapper)
-	if !ok {
-		return StreamResponse{}, fmt.Errorf("eth resolver expected iplds type %T got %T", &IPLDWrapper{}, iplds)
-	}
-	return StreamResponse{
-		BlockNumber:       ipfsBlocks.BlockNumber,
-		SerializedHeaders: eir.resolve(ipfsBlocks.Headers),
-		SerializedTxs:     eir.resolve(ipfsBlocks.Transactions),
-	}, nil
-}
-
-func (eir *IPLDResolver) resolve(iplds []blocks.Block) [][]byte {
-	rlps := make([][]byte, 0, len(iplds))
-	for _, ipld := range iplds {
-		rlps = append(rlps, ipld.RawData())
-	}
-	return rlps
-}
@@ -57,7 +57,7 @@ func (ecr *CIDRetriever) RetrieveLastBlockNumber() (int64, error) {
 }
 
 // Retrieve is used to retrieve all of the CIDs which conform to the passed StreamFilters
-func (ecr *CIDRetriever) Retrieve(filter shared.SubscriptionSettings, blockNumber int64) (shared.CIDsForFetching, bool, error) {
+func (ecr *CIDRetriever) Retrieve(filter shared.SubscriptionSettings, blockNumber int64) ([]shared.CIDsForFetching, bool, error) {
 	streamFilter, ok := filter.(*SubscriptionSettings)
 	if !ok {
 		return nil, true, fmt.Errorf("btc retriever expected filter type %T got %T", &SubscriptionSettings{}, filter)
@@ -68,11 +68,8 @@ func (ecr *CIDRetriever) Retrieve(filter shared.SubscriptionSettings, blockNumbe
 		return nil, true, err
 	}
 
-	cw := new(CIDWrapper)
-	cw.BlockNumber = big.NewInt(blockNumber)
 	// Retrieve cached header CIDs
-	if !streamFilter.HeaderFilter.Off {
-		cw.Headers, err = ecr.RetrieveHeaderCIDs(tx, blockNumber)
+	headers, err := ecr.RetrieveHeaderCIDs(tx, blockNumber)
 	if err != nil {
 		if err := tx.Rollback(); err != nil {
 			log.Error(err)
@@ -80,10 +77,18 @@ func (ecr *CIDRetriever) Retrieve(filter shared.SubscriptionSettings, blockNumbe
 		log.Error("header cid retrieval error")
 		return nil, true, err
 	}
+	cws := make([]shared.CIDsForFetching, len(headers))
+	empty := true
+	for i, header := range headers {
+		cw := new(CIDWrapper)
+		cw.BlockNumber = big.NewInt(blockNumber)
+		if !streamFilter.HeaderFilter.Off {
+			cw.Header = header
+			empty = false
+		}
 		// Retrieve cached trx CIDs
 		if !streamFilter.TxFilter.Off {
-			cw.Transactions, err = ecr.RetrieveTxCIDs(tx, streamFilter.TxFilter, blockNumber)
+			cw.Transactions, err = ecr.RetrieveTxCIDs(tx, streamFilter.TxFilter, header.ID)
 			if err != nil {
 				if err := tx.Rollback(); err != nil {
 					log.Error(err)
@@ -91,15 +96,14 @@ func (ecr *CIDRetriever) Retrieve(filter shared.SubscriptionSettings, blockNumbe
 				log.Error("transaction cid retrieval error")
 				return nil, true, err
 			}
+			if len(cw.Transactions) > 0 {
+				empty = false
+			}
 		}
-	return cw, empty(cw), tx.Commit()
+		cws[i] = cw
 	}
 
-func empty(cidWrapper *CIDWrapper) bool {
-	if len(cidWrapper.Transactions) > 0 || len(cidWrapper.Headers) > 0 {
-		return false
-	}
-	return true
-}
+	return cws, empty, tx.Commit()
 }
@@ -111,32 +115,10 @@ func (ecr *CIDRetriever) RetrieveHeaderCIDs(tx *sqlx.Tx, blockNumber int64) ([]H
 	return headers, tx.Select(&headers, pgStr, blockNumber)
 }
 
-/*
-type TxModel struct {
-	ID          int64  `db:"id"`
-	HeaderID    int64  `db:"header_id"`
-	Index       int64  `db:"index"`
-	TxHash      string `db:"tx_hash"`
-	CID         string `db:"cid"`
-	SegWit      bool   `db:"segwit"`
-	WitnessHash string `db:"witness_hash"`
-}
-// TxFilter contains filter settings for txs
-type TxFilter struct {
-	Off           bool
-	Index         int64    // allow filtering by index so that we can filter for only coinbase transactions (index 0) if we want to
-	Segwit        bool     // allow filtering for segwit trxs
-	WitnessHashes []string // allow filtering for specific witness hashes
-	PkScriptClass uint8    // allow filtering for txs that have at least one tx output with the specified pkscript class
-	MultiSig      bool     // allow filtering for txs that have at least one tx output that requires more than one signature
-	Addresses     []string // allow filtering for txs that have at least one tx output with at least one of the provided addresses
-}
-*/
-
 // RetrieveTxCIDs retrieves and returns all of the trx cids at the provided blockheight that conform to the provided filter parameters
 // also returns the ids for the returned transaction cids
-func (ecr *CIDRetriever) RetrieveTxCIDs(tx *sqlx.Tx, txFilter TxFilter, blockNumber int64) ([]TxModel, error) {
-	log.Debug("retrieving transaction cids for block ", blockNumber)
+func (ecr *CIDRetriever) RetrieveTxCIDs(tx *sqlx.Tx, txFilter TxFilter, headerID int64) ([]TxModel, error) {
+	log.Debug("retrieving transaction cids for header id ", headerID)
 	args := make([]interface{}, 0, 3)
 	results := make([]TxModel, 0)
 	id := 1
@@ -147,8 +129,8 @@ func (ecr *CIDRetriever) RetrieveTxCIDs(tx *sqlx.Tx, txFilter TxFilter, blockNum
 			WHERE transaction_cids.header_id = header_cids.id
 			AND tx_inputs.tx_id = transaction_cids.id
 			AND tx_outputs.tx_id = transaction_cids.id
-			AND header_cids.block_number = $%d`, id)
-	args = append(args, blockNumber)
+			AND header_cids.id = $%d`, id)
+	args = append(args, headerID)
 	id++
 	if txFilter.Segwit {
 		pgStr += ` AND transaction_cids.segwit = true`
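The net effect of the retriever hunks: Retrieve now returns a slice of CIDsForFetching, one CIDWrapper per header at the requested height, with transactions scoped by header_cids.id instead of block number. A sketch of consuming that contract (package path and settings construction assumed; types as shown above):

package main

import (
	"fmt"

	"github.com/vulcanize/vulcanizedb/pkg/super_node/btc"
	"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)

// printCIDs shows the new shape: one wrapper per competing header at a height.
func printCIDs(retriever *btc.CIDRetriever, settings shared.SubscriptionSettings, height int64) error {
	cidWrappers, empty, err := retriever.Retrieve(settings, height)
	if err != nil {
		return err
	}
	if empty {
		return nil
	}
	for _, cids := range cidWrappers {
		cw := cids.(*btc.CIDWrapper)
		fmt.Printf("header %s with %d txs\n", cw.Header.CID, len(cw.Transactions))
	}
	return nil
}

func main() {}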
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user