update to use ReceiptForStorage; expose rpc server over ws

Ian Norden 2019-06-07 11:01:29 -05:00
parent 723c7c6244
commit e7cdd6247e
7 changed files with 130 additions and 46 deletions

View File

@@ -95,10 +95,10 @@ func streamSubscribe() {
                 continue
             }
             fmt.Printf("Transaction with hash %s\n", trx.Hash().Hex())
-            fmt.Printf("trx: %v", trx)
+            fmt.Printf("trx: %v\n", trx)
         }
         for _, rctRlp := range payload.ReceiptsRlp {
-            var rct types.Receipt
+            var rct types.ReceiptForStorage
             buff := bytes.NewBuffer(rctRlp)
             stream := rlp.NewStream(buff, 0)
             err = rct.DecodeRLP(stream)
@@ -107,7 +107,7 @@ func streamSubscribe() {
                 continue
             }
             fmt.Printf("Receipt with block hash %s, trx hash %s\n", rct.BlockHash.Hex(), rct.TxHash.Hex())
-            fmt.Printf("rct: %v", rct)
+            fmt.Printf("rct: %v\n", rct)
             for _, l := range rct.Logs {
                 if len(l.Topics) < 1 {
                     log.Error(fmt.Sprintf("log only has %d topics", len(l.Topics)))
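
The subscriber loop above now decodes each payload entry as types.ReceiptForStorage, matching the storage encoding the seed node publishes. A minimal, self-contained round-trip sketch of that encoding, using only the go-ethereum types and rlp packages this file already imports (the receipt values below are placeholders):

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/rlp"
)

func main() {
    // Build a throwaway receipt and view it through its storage representation.
    receipt := types.NewReceipt(nil, false, 21000)
    storageReceipt := (*types.ReceiptForStorage)(receipt)

    // Encode with the ReceiptForStorage RLP rules (what the seed node sends)...
    raw, err := rlp.EncodeToBytes(storageReceipt)
    if err != nil {
        panic(err)
    }

    // ...and decode the bytes back, as the subscriber loop above does.
    var decoded types.ReceiptForStorage
    if err := rlp.DecodeBytes(raw, &decoded); err != nil {
        panic(err)
    }
    fmt.Printf("decoded storage receipt: %+v\n", decoded)
}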

View File

@@ -16,11 +16,14 @@
 package cmd

 import (
+    "os"
+    "path/filepath"
     syn "sync"

     "github.com/ethereum/go-ethereum/rpc"
     log "github.com/sirupsen/logrus"
     "github.com/spf13/cobra"
+    "github.com/spf13/viper"

     "github.com/vulcanize/vulcanizedb/pkg/ipfs"
     "github.com/vulcanize/vulcanizedb/utils"
@@ -65,9 +68,29 @@ func syncPublishScreenAndServe() {
         log.Fatal(err)
     }
     processor.ScreenAndServe(wg, forwardPayloadChan, forwardQuitChan)
+
+    var ipcPath string
+    ipcPath = viper.GetString("server.ipcPath")
+    if ipcPath == "" {
+        home, err := os.UserHomeDir()
+        if err != nil {
+            log.Fatal(err)
+        }
+        ipcPath = filepath.Join(home, ".vulcanize/vulcanize.ipc")
+    }
     _, _, err = rpc.StartIPCEndpoint(vulcPath, processor.APIs())
     if err != nil {
         log.Fatal(err)
     }
+
+    var wsEndpoint string
+    wsEndpoint = viper.GetString("server.wsEndpoint")
+    if wsEndpoint == "" {
+        wsEndpoint = "127.0.0.1:2019"
+    }
+    _, _, err = rpc.StartWSEndpoint(wsEndpoint, processor.APIs(), []string{"vulcanizedb"}, nil, true)
+    if err != nil {
+        log.Fatal(err)
+    }
     wg.Wait()
 }
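
With this change the watcher serves its RPC APIs over both the existing IPC socket and a new WebSocket endpoint (default 127.0.0.1:2019), restricted to the "vulcanizedb" namespace. A hedged sketch of a client dialing that endpoint with go-ethereum's rpc package; the subscription method name "stream" is a placeholder, not something this diff confirms:

package main

import (
    "context"
    "log"

    "github.com/ethereum/go-ethereum/rpc"
)

func main() {
    // Dial the WS endpoint exposed by syncPublishScreenAndServe.
    client, err := rpc.Dial("ws://127.0.0.1:2019")
    if err != nil {
        log.Fatal(err)
    }
    defer client.Close()

    payloadChan := make(chan interface{})
    // Subscribe within the "vulcanizedb" namespace; the method name below is illustrative only.
    sub, err := client.Subscribe(context.Background(), "vulcanizedb", payloadChan, "stream")
    if err != nil {
        log.Fatal(err)
    }
    defer sub.Unsubscribe()

    for payload := range payloadChan {
        log.Printf("received payload: %v", payload)
    }
}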

View File

@@ -0,0 +1,52 @@
[database]
name = "vulcanize_demo"
hostname = "localhost"
port = 5432
[client]
ipcPath = "ws://127.0.0.1:8546"
[server]
ipcPath = "/Users/iannorden/.vulcanize/vulcanize.ipc"
wsEndpoint = "127.0.0.1:2019"
[subscription]
path = "ws://127.0.0.1:2019"
backfill = true
backfillOnly = false
startingBlock = 0
endingBlock = 0
[subscription.headerFilter]
off = false
finalOnly = true
[subscription.trxFilter]
off = false
src = [
"0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe",
]
dst = [
"0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe",
]
[subscription.receiptFilter]
off = false
topic0s = [
"0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef",
"0x930a61a57a70a73c2a503615b87e2e54fe5b9cdeacda518270b852296ab1a377"
]
[subscription.stateFilter]
off = false
addresses = [
"0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe"
]
intermediateNodes = false
[subscription.storageFilter]
off = true
addresses = [
"",
""
]
storageKeys = [
"",
""
]
intermediateNodes = false
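
The [subscription] settings above feed the config.Subscription value that the retriever and screener consume elsewhere in this commit. A rough sketch of the struct shape those keys imply; only the fields visible in this diff are certain, and the nesting and naming details beyond them are assumptions:

package config

// Subscription collects the filters a seed-node subscriber sends when it
// registers; field names not referenced in the diff are guesses.
type Subscription struct {
    BackFill      bool
    BackFillOnly  bool
    StartingBlock int64
    EndingBlock   int64

    HeaderFilter struct {
        Off       bool
        FinalOnly bool
    }
    TrxFilter struct {
        Off bool
        Src []string
        Dst []string
    }
    ReceiptFilter struct {
        Off     bool
        Topic0s []string
    }
    StateFilter struct {
        Off               bool
        Addresses         []string
        IntermediateNodes bool
    }
    StorageFilter struct {
        Off               bool
        Addresses         []string
        StorageKeys       []string
        IntermediateNodes bool
    }
}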

View File

@@ -65,6 +65,8 @@ func (ecr *EthCIDRetriever) RetrieveCIDs(streamFilters config.Subscription) ([]C
     if err != nil {
         return nil, err
     }
+    log.Debug("backfill starting block:", streamFilters.StartingBlock)
+    log.Debug("backfill ending block:", endingBlock)
     for i := streamFilters.StartingBlock; i <= endingBlock; i++ {
         cw := CidWrapper{}
         if !streamFilters.HeaderFilter.Off {
@@ -115,7 +117,7 @@ func (ecr *EthCIDRetriever) RetrieveCIDs(streamFilters config.Subscription) ([]C
 }

 func (ecr *EthCIDRetriever) retrieveHeaderCIDs(tx *sqlx.Tx, streamFilters config.Subscription, blockNumber int64) ([]string, error) {
-    log.Debug("retrieving header cids")
+    log.Debug("retrieving header cids for block ", blockNumber)
     headers := make([]string, 0)
     pgStr := `SELECT cid FROM header_cids
               WHERE block_number = $1`
@@ -127,7 +129,7 @@ func (ecr *EthCIDRetriever) retrieveHeaderCIDs(tx *sqlx.Tx, streamFilters config
 }

 func (ecr *EthCIDRetriever) retrieveTrxCIDs(tx *sqlx.Tx, streamFilters config.Subscription, blockNumber int64) ([]string, []int64, error) {
-    log.Debug("retrieving transaction cids")
+    log.Debug("retrieving transaction cids for block ", blockNumber)
     args := make([]interface{}, 0, 3)
     type result struct {
         ID int64 `db:"id"`
@@ -159,7 +161,7 @@ func (ecr *EthCIDRetriever) retrieveTrxCIDs(tx *sqlx.Tx, streamFilters config.Su
 }

 func (ecr *EthCIDRetriever) retrieveRctCIDs(tx *sqlx.Tx, streamFilters config.Subscription, blockNumber int64, trxIds []int64) ([]string, error) {
-    log.Debug("retrieving receipt cids")
+    log.Debug("retrieving receipt cids for block ", blockNumber)
     args := make([]interface{}, 0, 2)
     pgStr := `SELECT receipt_cids.cid FROM receipt_cids, transaction_cids, header_cids
               WHERE receipt_cids.tx_id = transaction_cids.id
@@ -182,7 +184,7 @@ func (ecr *EthCIDRetriever) retrieveRctCIDs(tx *sqlx.Tx, streamFilters config.Su
 }

 func (ecr *EthCIDRetriever) retrieveStateCIDs(tx *sqlx.Tx, streamFilters config.Subscription, blockNumber int64) ([]StateNodeCID, error) {
-    log.Debug("retrieving state cids")
+    log.Debug("retrieving state cids for block ", blockNumber)
     args := make([]interface{}, 0, 2)
     pgStr := `SELECT state_cids.cid, state_cids.state_key FROM state_cids INNER JOIN header_cids ON (state_cids.header_id = header_cids.id)
               WHERE header_cids.block_number = $1`
@@ -205,7 +207,7 @@ func (ecr *EthCIDRetriever) retrieveStateCIDs(tx *sqlx.Tx, streamFilters config.
 }

 func (ecr *EthCIDRetriever) retrieveStorageCIDs(tx *sqlx.Tx, streamFilters config.Subscription, blockNumber int64) ([]StorageNodeCID, error) {
-    log.Debug("retrieving storage cids")
+    log.Debug("retrieving storage cids for block ", blockNumber)
     args := make([]interface{}, 0, 3)
     pgStr := `SELECT storage_cids.cid, state_cids.state_key, storage_cids.storage_key FROM storage_cids, state_cids, header_cids
               WHERE storage_cids.state_id = state_cids.id
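
Each retrieve*CIDs helper above now logs the block it is working on and builds a parameterized Postgres query. The execution of those queries falls outside the hunks, so the following is only a plausible sketch of how the header lookup might be run with sqlx; the helper name is hypothetical:

package ipfs

import "github.com/jmoiron/sqlx"

// headerCIDsForBlock is a hypothetical stand-in for the elided body of retrieveHeaderCIDs.
func headerCIDsForBlock(tx *sqlx.Tx, blockNumber int64) ([]string, error) {
    headers := make([]string, 0)
    pgStr := `SELECT cid FROM header_cids
              WHERE block_number = $1`
    // Select runs the query and scans every returned cid into the slice.
    err := tx.Select(&headers, pgStr, blockNumber)
    return headers, err
}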

View File

@@ -128,8 +128,9 @@ func (s *Screener) filerReceipts(streamFilters *config.Subscription, response *R
     if !streamFilters.ReceiptFilter.Off && checkRange(streamFilters.StartingBlock, streamFilters.EndingBlock, payload.BlockNumber.Int64()) {
         for i, receipt := range payload.Receipts {
             if checkReceipts(receipt, streamFilters.ReceiptFilter.Topic0s, payload.ReceiptMetaData[i].Topic0s, trxHashes) {
+                receiptForStorage := (*types.ReceiptForStorage)(receipt)
                 receiptBuffer := new(bytes.Buffer)
-                err := receipt.EncodeRLP(receiptBuffer)
+                err := receiptForStorage.EncodeRLP(receiptBuffer)
                 if err != nil {
                     return err
                 }
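
checkReceipts keeps a receipt only if it matches the subscription's topic0 filter (or the transaction filter, via trxHashes). Its implementation is not part of this diff; the sketch below is just an illustration of the kind of topic0 screening involved, with a hypothetical helper name:

package ipfs

// matchesTopic0 passes everything when no filter is set, otherwise requires at
// least one of the receipt's log topic0s to match a wanted topic0.
func matchesTopic0(wantedTopic0s, logTopic0s []string) bool {
    if len(wantedTopic0s) == 0 {
        return true
    }
    for _, want := range wantedTopic0s {
        for _, have := range logTopic0s {
            if want == have {
                return true
            }
        }
    }
    return false
}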

View File

@@ -209,8 +209,7 @@ func (sap *Service) processResponse(payload IPLDPayload) error {
 // Subscribe is used by the API to subscribe to the service loop
 func (sap *Service) Subscribe(id rpc.ID, sub chan<- ResponsePayload, quitChan chan<- bool, streamFilters *config.Subscription) {
     log.Info("Subscribing to the seed node service")
-    sap.Lock()
-    sap.Subscriptions[id] = Subscription{
+    subscription := Subscription{
         PayloadChan:   sub,
         QuitChan:      quitChan,
         StreamFilters: streamFilters,
@@ -218,40 +217,46 @@ func (sap *Service) Subscribe(id rpc.ID, sub chan<- ResponsePayload, quitChan ch
     // If the subscription requests a backfill, use the Postgres index to lookup and retrieve historical data
     // Otherwise we only filter new data as it is streamed in from the state diffing geth node
     if streamFilters.BackFill || streamFilters.BackFillOnly {
-        log.Debug("back-filling data for id", id)
-        // Retrieve cached CIDs relevant to this subscriber
-        cidWrappers, err := sap.Retriever.RetrieveCIDs(*streamFilters)
-        if err != nil {
-            log.Error(err)
-            sap.serve(id, ResponsePayload{
-                ErrMsg: "CID retrieval error: " + err.Error(),
-            })
-            return
-        }
-        for _, cidWrapper := range cidWrappers {
-            blocksWrapper, err := sap.Fetcher.FetchCIDs(cidWrapper)
-            if err != nil {
-                log.Error(err)
-                sap.serve(id, ResponsePayload{
-                    ErrMsg: "IPLD fetching error: " + err.Error(),
-                })
-                return
-            }
-            backFillIplds, err := sap.Resolver.ResolveIPLDs(*blocksWrapper)
-            if err != nil {
-                log.Error(err)
-                sap.serve(id, ResponsePayload{
-                    ErrMsg: "IPLD resolving error: " + err.Error(),
-                })
-                return
-            }
-            sap.serve(id, *backFillIplds)
-        }
-        if streamFilters.BackFillOnly {
-            delete(sap.Subscriptions, id)
-        }
-    }
-    sap.Unlock()
+        sap.backFill(subscription, id)
+    }
+    if !streamFilters.BackFillOnly {
+        sap.Lock()
+        sap.Subscriptions[id] = subscription
+        sap.Unlock()
+    }
+}
+
+func (sap *Service) backFill(sub Subscription, id rpc.ID) {
+    log.Debug("back-filling data for id", id)
+    // Retrieve cached CIDs relevant to this subscriber
+    cidWrappers, err := sap.Retriever.RetrieveCIDs(*sub.StreamFilters)
+    if err != nil {
+        sub.PayloadChan <- ResponsePayload{
+            ErrMsg: "CID retrieval error: " + err.Error(),
+        }
+    }
+    for _, cidWrapper := range cidWrappers {
+        blocksWrapper, err := sap.Fetcher.FetchCIDs(cidWrapper)
+        if err != nil {
+            log.Error(err)
+            sub.PayloadChan <- ResponsePayload{
+                ErrMsg: "IPLD fetching error: " + err.Error(),
+            }
+        }
+        backFillIplds, err := sap.Resolver.ResolveIPLDs(*blocksWrapper)
+        if err != nil {
+            log.Error(err)
+            sub.PayloadChan <- ResponsePayload{
+                ErrMsg: "IPLD resolving error: " + err.Error(),
+            }
+        }
+        select {
+        case sub.PayloadChan <- *backFillIplds:
+            log.Infof("sending seed node back-fill payload to subscription %s", id)
+        default:
+            log.Infof("unable to send back-fill payload to subscription %s; channel has no receiver", id)
+        }
+    }
 }

 // Unsubscribe is used to unsubscribe to the StateDiffingService loop
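
The extracted backFill method pushes results straight onto the subscriber's channel and wraps the final send in a select with a default case, so a subscriber that is not ready to receive cannot stall the back-fill loop. A standalone sketch of that non-blocking send pattern; the type and names here are stand-ins, not the service's actual definitions:

package ipfs

import log "github.com/sirupsen/logrus"

// payload is a stand-in for the service's ResponsePayload.
type payload struct {
    ErrMsg string
}

// trySend delivers p if the subscriber is ready and otherwise drops it,
// mirroring the select/default used in backFill above.
func trySend(payloadChan chan<- payload, p payload, id string) {
    select {
    case payloadChan <- p:
        log.Infof("sent back-fill payload to subscription %s", id)
    default:
        log.Infof("no receiver on subscription %s; dropping back-fill payload", id)
    }
}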

View File

@@ -22,7 +22,8 @@ func (dagPutter *EthBlockReceiptDagPutter) DagPut(raw interface{}) ([]string, er
     input := raw.(types.Receipts)
     var output []string
     for _, r := range input {
-        node, err := getReceiptNode(r)
+        receiptForStorage := (*types.ReceiptForStorage)(r)
+        node, err := getReceiptNode(receiptForStorage)
         if err != nil {
             return nil, err
         }
@@ -35,7 +36,7 @@ func (dagPutter *EthBlockReceiptDagPutter) DagPut(raw interface{}) ([]string, er
     return output, nil
 }

-func getReceiptNode(receipt *types.Receipt) (*EthReceiptNode, error) {
+func getReceiptNode(receipt *types.ReceiptForStorage) (*EthReceiptNode, error) {
     buffer := new(bytes.Buffer)
     err := receipt.EncodeRLP(buffer)
     if err != nil {
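
getReceiptNode now takes the storage-encoded receipt, RLP-encodes it, and wraps the bytes in an EthReceiptNode for the dag put. How the node's CID is derived is outside this hunk; the sketch below shows one plausible derivation with go-cid, where the eth-tx-receipt codec and keccak-256 hash are assumptions rather than facts from this diff:

package ipfs

import (
    cid "github.com/ipfs/go-cid"
    mh "github.com/multiformats/go-multihash"
)

// receiptCid derives a CIDv1 for storage-encoded receipt RLP; the codec and
// hash function are assumed, not taken from the commit.
func receiptCid(storageReceiptRlp []byte) (cid.Cid, error) {
    prefix := cid.Prefix{
        Version:  1,
        Codec:    cid.EthTxReceipt,
        MhType:   mh.KECCAK_256,
        MhLength: -1, // use the hash function's default length
    }
    return prefix.Sum(storageReceiptRlp)
}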