2019-05-17 06:27:02 +00:00
|
|
|
// VulcanizeDB
|
|
|
|
// Copyright © 2019 Vulcanize
|
|
|
|
|
|
|
|
// This program is free software: you can redistribute it and/or modify
|
|
|
|
// it under the terms of the GNU Affero General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
|
|
|
|
// This program is distributed in the hope that it will be useful,
|
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
// GNU Affero General Public License for more details.
|
|
|
|
|
|
|
|
// You should have received a copy of the GNU Affero General Public License
|
|
|
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
|
2019-10-02 14:10:37 +00:00
|
|
|
package super_node
|
2019-05-17 06:27:02 +00:00
|
|
|
|
|
|
|
import (
|
|
|
|
"sync"
|
|
|
|
|
2019-06-18 17:28:57 +00:00
|
|
|
"github.com/ethereum/go-ethereum/common"
|
|
|
|
"github.com/ethereum/go-ethereum/crypto"
|
2019-05-17 06:27:02 +00:00
|
|
|
"github.com/ethereum/go-ethereum/node"
|
|
|
|
"github.com/ethereum/go-ethereum/p2p"
|
2019-08-26 02:13:40 +00:00
|
|
|
"github.com/ethereum/go-ethereum/params"
|
2019-06-18 17:28:57 +00:00
|
|
|
"github.com/ethereum/go-ethereum/rlp"
|
2019-05-17 06:27:02 +00:00
|
|
|
"github.com/ethereum/go-ethereum/rpc"
|
|
|
|
"github.com/ethereum/go-ethereum/statediff"
|
|
|
|
log "github.com/sirupsen/logrus"
|
|
|
|
|
2019-08-14 18:49:30 +00:00
|
|
|
"github.com/vulcanize/vulcanizedb/libraries/shared/streamer"
|
2019-06-18 17:28:57 +00:00
|
|
|
"github.com/vulcanize/vulcanizedb/pkg/config"
|
2019-05-17 06:27:02 +00:00
|
|
|
"github.com/vulcanize/vulcanizedb/pkg/core"
|
|
|
|
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
|
2019-08-26 02:13:40 +00:00
|
|
|
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
|
2019-05-17 06:27:02 +00:00
|
|
|
)
|
|
|
|
|
2019-10-02 14:10:37 +00:00
|
|
|
const (
	// payloadChanBufferSize bounds the statediff payload channel;
	// set to the max eth subscription buffer size.
	payloadChanBufferSize = 20000
)
|
2019-05-17 06:27:02 +00:00
|
|
|
|
2019-08-26 02:13:40 +00:00
|
|
|
// NodeInterface is the top level interface for streaming, converting to IPLDs, publishing,
|
2019-06-18 17:28:57 +00:00
|
|
|
// and indexing all Ethereum data; screening this data; and serving it up to subscribed clients
|
2019-06-07 02:09:27 +00:00
|
|
|
// This service is compatible with the Ethereum service interface (node.Service)
|
2019-08-26 02:13:40 +00:00
|
|
|
type NodeInterface interface {
|
2019-05-17 06:27:02 +00:00
|
|
|
// APIs(), Protocols(), Start() and Stop()
|
|
|
|
node.Service
|
|
|
|
// Main event loop for syncAndPublish processes
|
2019-08-05 17:56:15 +00:00
|
|
|
SyncAndPublish(wg *sync.WaitGroup, forwardPayloadChan chan<- ipfs.IPLDPayload, forwardQuitchan chan<- bool) error
|
2019-05-17 06:27:02 +00:00
|
|
|
// Main event loop for handling client pub-sub
|
2019-10-08 19:51:38 +00:00
|
|
|
ScreenAndServe(wg *sync.WaitGroup, screenAndServePayload <-chan ipfs.IPLDPayload, screenAndServeQuit <-chan bool)
|
2019-05-17 06:27:02 +00:00
|
|
|
// Method to subscribe to receive state diff processing output
|
2019-10-02 14:10:37 +00:00
|
|
|
Subscribe(id rpc.ID, sub chan<- streamer.SuperNodePayload, quitChan chan<- bool, streamFilters config.Subscription)
|
2019-05-17 06:27:02 +00:00
|
|
|
// Method to unsubscribe from state diff processing
|
2019-06-18 17:28:57 +00:00
|
|
|
Unsubscribe(id rpc.ID)
|
2019-09-13 19:41:50 +00:00
|
|
|
// Method to access the Geth node info for this service
|
|
|
|
Node() core.Node
|
2019-05-17 06:27:02 +00:00
|
|
|
}
|
|
|
|
|
2019-10-02 14:10:37 +00:00
|
|
|
// Service is the underlying struct for the super node
|
2019-05-17 06:27:02 +00:00
|
|
|
type Service struct {
|
|
|
|
// Used to sync access to the Subscriptions
|
|
|
|
sync.Mutex
|
|
|
|
// Interface for streaming statediff payloads over a geth rpc subscription
|
2019-11-04 19:14:05 +00:00
|
|
|
Streamer streamer.Streamer
|
2019-05-17 06:27:02 +00:00
|
|
|
// Interface for converting statediff payloads into ETH-IPLD object payloads
|
2019-08-05 17:56:15 +00:00
|
|
|
Converter ipfs.PayloadConverter
|
2019-05-17 06:27:02 +00:00
|
|
|
// Interface for publishing the ETH-IPLD payloads to IPFS
|
2019-08-05 17:56:15 +00:00
|
|
|
Publisher ipfs.IPLDPublisher
|
2019-05-17 06:27:02 +00:00
|
|
|
// Interface for indexing the CIDs of the published ETH-IPLDs in Postgres
|
|
|
|
Repository CIDRepository
|
2019-05-21 19:27:24 +00:00
|
|
|
// Interface for filtering and serving data according to subscribed clients according to their specification
|
2019-08-05 17:56:15 +00:00
|
|
|
Filterer ResponseFilterer
|
2019-05-21 19:27:24 +00:00
|
|
|
// Interface for fetching ETH-IPLD objects from IPFS
|
2019-10-02 14:10:37 +00:00
|
|
|
IPLDFetcher ipfs.IPLDFetcher
|
2019-05-21 19:27:24 +00:00
|
|
|
// Interface for searching and retrieving CIDs from Postgres index
|
|
|
|
Retriever CIDRetriever
|
|
|
|
// Interface for resolving ipfs blocks to their data types
|
2019-08-05 17:56:15 +00:00
|
|
|
Resolver ipfs.IPLDResolver
|
2019-05-17 06:27:02 +00:00
|
|
|
// Chan the processor uses to subscribe to state diff payloads from the Streamer
|
|
|
|
PayloadChan chan statediff.Payload
|
|
|
|
// Used to signal shutdown of the service
|
|
|
|
QuitChan chan bool
|
2019-06-18 17:28:57 +00:00
|
|
|
// A mapping of rpc.IDs to their subscription channels, mapped to their subscription type (hash of the StreamFilters)
|
|
|
|
Subscriptions map[common.Hash]map[rpc.ID]Subscription
|
|
|
|
// A mapping of subscription hash type to the corresponding StreamFilters
|
|
|
|
SubscriptionTypes map[common.Hash]config.Subscription
|
2019-08-26 02:13:40 +00:00
|
|
|
// Number of workers
|
|
|
|
WorkerPoolSize int
|
2019-10-02 14:10:37 +00:00
|
|
|
// Info for the Geth node that this super node is working with
|
2019-10-08 19:51:38 +00:00
|
|
|
GethNode core.Node
|
2019-05-17 06:27:02 +00:00
|
|
|
}
|
|
|
|
|
2019-10-02 14:10:37 +00:00
|
|
|
// NewSuperNode creates a new super_node.Interface using an underlying super_node.Service struct
|
2019-11-04 19:14:05 +00:00
|
|
|
func NewSuperNode(ipfsPath string, db *postgres.DB, rpcClient core.RPCClient, qc chan bool, workers int, node core.Node) (NodeInterface, error) {
|
2019-10-08 19:51:38 +00:00
|
|
|
ipfsInitErr := ipfs.InitIPFSPlugins()
|
|
|
|
if ipfsInitErr != nil {
|
|
|
|
return nil, ipfsInitErr
|
2019-05-17 06:27:02 +00:00
|
|
|
}
|
2019-10-08 19:51:38 +00:00
|
|
|
publisher, newPublisherErr := ipfs.NewIPLDPublisher(ipfsPath)
|
|
|
|
if newPublisherErr != nil {
|
|
|
|
return nil, newPublisherErr
|
|
|
|
}
|
|
|
|
ipldFetcher, newFetcherErr := ipfs.NewIPLDFetcher(ipfsPath)
|
|
|
|
if newFetcherErr != nil {
|
|
|
|
return nil, newFetcherErr
|
2019-05-21 19:27:24 +00:00
|
|
|
}
|
2019-05-17 06:27:02 +00:00
|
|
|
return &Service{
|
2019-08-05 17:56:15 +00:00
|
|
|
Streamer: streamer.NewStateDiffStreamer(rpcClient),
|
2019-06-18 17:28:57 +00:00
|
|
|
Repository: NewCIDRepository(db),
|
2019-08-09 16:22:29 +00:00
|
|
|
Converter: ipfs.NewPayloadConverter(params.MainnetChainConfig),
|
2019-06-18 17:28:57 +00:00
|
|
|
Publisher: publisher,
|
2019-08-05 17:56:15 +00:00
|
|
|
Filterer: NewResponseFilterer(),
|
2019-10-02 14:10:37 +00:00
|
|
|
IPLDFetcher: ipldFetcher,
|
2019-06-18 17:28:57 +00:00
|
|
|
Retriever: NewCIDRetriever(db),
|
2019-08-05 17:56:15 +00:00
|
|
|
Resolver: ipfs.NewIPLDResolver(),
|
2019-06-18 17:28:57 +00:00
|
|
|
PayloadChan: make(chan statediff.Payload, payloadChanBufferSize),
|
|
|
|
QuitChan: qc,
|
|
|
|
Subscriptions: make(map[common.Hash]map[rpc.ID]Subscription),
|
|
|
|
SubscriptionTypes: make(map[common.Hash]config.Subscription),
|
2019-08-26 02:13:40 +00:00
|
|
|
WorkerPoolSize: workers,
|
2019-10-08 19:51:38 +00:00
|
|
|
GethNode: node,
|
2019-05-17 06:27:02 +00:00
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Protocols exports the services p2p protocols, this service has none
|
|
|
|
func (sap *Service) Protocols() []p2p.Protocol {
|
|
|
|
return []p2p.Protocol{}
|
|
|
|
}
|
|
|
|
|
2019-10-02 14:10:37 +00:00
|
|
|
// APIs returns the RPC descriptors the super node service offers
|
2019-05-17 06:27:02 +00:00
|
|
|
func (sap *Service) APIs() []rpc.API {
|
|
|
|
return []rpc.API{
|
|
|
|
{
|
|
|
|
Namespace: APIName,
|
|
|
|
Version: APIVersion,
|
2019-10-02 14:10:37 +00:00
|
|
|
Service: NewPublicSuperNodeAPI(sap),
|
2019-05-17 06:27:02 +00:00
|
|
|
Public: true,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// SyncAndPublish is the backend processing loop which streams data from geth, converts it to iplds, publishes them to ipfs, and indexes their cids
|
2019-06-07 13:42:10 +00:00
|
|
|
// This continues on no matter if or how many subscribers there are, it then forwards the data to the ScreenAndServe() loop
|
|
|
|
// which filters and sends relevant data to client subscriptions, if there are any
|
2019-08-05 17:56:15 +00:00
|
|
|
func (sap *Service) SyncAndPublish(wg *sync.WaitGroup, screenAndServePayload chan<- ipfs.IPLDPayload, screenAndServeQuit chan<- bool) error {
|
2019-10-08 19:51:38 +00:00
|
|
|
sub, streamErr := sap.Streamer.Stream(sap.PayloadChan)
|
|
|
|
if streamErr != nil {
|
|
|
|
return streamErr
|
2019-05-17 06:27:02 +00:00
|
|
|
}
|
|
|
|
wg.Add(1)
|
2019-06-25 20:31:14 +00:00
|
|
|
|
|
|
|
// Channels for forwarding data to the publishAndIndex workers
|
2019-08-05 17:56:15 +00:00
|
|
|
publishAndIndexPayload := make(chan ipfs.IPLDPayload, payloadChanBufferSize)
|
2019-08-26 02:13:40 +00:00
|
|
|
publishAndIndexQuit := make(chan bool, sap.WorkerPoolSize)
|
2019-06-25 20:31:14 +00:00
|
|
|
// publishAndIndex worker pool to handle publishing and indexing concurrently, while
|
|
|
|
// limiting the number of Postgres connections we can possibly open so as to prevent error
|
2019-08-26 02:13:40 +00:00
|
|
|
for i := 0; i < sap.WorkerPoolSize; i++ {
|
2019-06-25 20:31:14 +00:00
|
|
|
sap.publishAndIndex(i, publishAndIndexPayload, publishAndIndexQuit)
|
|
|
|
}
|
2019-05-17 06:27:02 +00:00
|
|
|
go func() {
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case payload := <-sap.PayloadChan:
|
2019-10-08 19:51:38 +00:00
|
|
|
ipldPayload, convertErr := sap.Converter.Convert(payload)
|
|
|
|
if convertErr != nil {
|
|
|
|
log.Error(convertErr)
|
2019-05-17 06:27:02 +00:00
|
|
|
continue
|
|
|
|
}
|
2019-05-21 19:27:24 +00:00
|
|
|
// If we have a ScreenAndServe process running, forward the payload to it
|
2019-05-17 06:27:02 +00:00
|
|
|
select {
|
2019-06-25 20:31:14 +00:00
|
|
|
case screenAndServePayload <- *ipldPayload:
|
2019-05-17 06:27:02 +00:00
|
|
|
default:
|
|
|
|
}
|
2019-06-25 20:31:14 +00:00
|
|
|
// Forward the payload to the publishAndIndex workers
|
|
|
|
select {
|
|
|
|
case publishAndIndexPayload <- *ipldPayload:
|
|
|
|
default:
|
2019-05-17 06:27:02 +00:00
|
|
|
}
|
2019-10-08 19:51:38 +00:00
|
|
|
case subErr := <-sub.Err():
|
|
|
|
log.Error(subErr)
|
2019-05-17 06:27:02 +00:00
|
|
|
case <-sap.QuitChan:
|
2019-05-21 19:27:24 +00:00
|
|
|
// If we have a ScreenAndServe process running, forward the quit signal to it
|
2019-05-17 06:27:02 +00:00
|
|
|
select {
|
2019-06-25 20:31:14 +00:00
|
|
|
case screenAndServeQuit <- true:
|
2019-05-17 06:27:02 +00:00
|
|
|
default:
|
|
|
|
}
|
2019-06-25 20:31:14 +00:00
|
|
|
// Also forward a quit signal for each of the workers
|
2019-08-26 02:13:40 +00:00
|
|
|
for i := 0; i < sap.WorkerPoolSize; i++ {
|
2019-06-25 20:31:14 +00:00
|
|
|
select {
|
|
|
|
case publishAndIndexQuit <- true:
|
|
|
|
default:
|
|
|
|
}
|
|
|
|
}
|
2019-05-17 06:27:02 +00:00
|
|
|
log.Info("quiting SyncAndPublish process")
|
|
|
|
wg.Done()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
2019-10-08 19:51:38 +00:00
|
|
|
log.Info("syncAndPublish goroutine successfully spun up")
|
2019-05-17 06:27:02 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-08-05 17:56:15 +00:00
|
|
|
func (sap *Service) publishAndIndex(id int, publishAndIndexPayload <-chan ipfs.IPLDPayload, publishAndIndexQuit <-chan bool) {
|
2019-06-25 20:31:14 +00:00
|
|
|
go func() {
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case payload := <-publishAndIndexPayload:
|
2019-10-08 19:51:38 +00:00
|
|
|
cidPayload, publishErr := sap.Publisher.Publish(&payload)
|
|
|
|
if publishErr != nil {
|
|
|
|
log.Errorf("worker %d error: %v", id, publishErr)
|
2019-06-25 20:31:14 +00:00
|
|
|
continue
|
|
|
|
}
|
2019-10-08 19:51:38 +00:00
|
|
|
indexErr := sap.Repository.Index(cidPayload)
|
|
|
|
if indexErr != nil {
|
|
|
|
log.Errorf("worker %d error: %v", id, indexErr)
|
2019-06-25 20:31:14 +00:00
|
|
|
}
|
|
|
|
case <-publishAndIndexQuit:
|
2019-08-28 18:41:49 +00:00
|
|
|
log.Infof("quiting publishAndIndex worker %d", id)
|
2019-06-25 20:31:14 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
2019-10-08 19:51:38 +00:00
|
|
|
log.Info("publishAndIndex goroutine successfully spun up")
|
2019-06-25 20:31:14 +00:00
|
|
|
}
|
|
|
|
|
2019-06-07 13:42:10 +00:00
|
|
|
// ScreenAndServe is the loop used to screen data streamed from the state diffing eth node
|
|
|
|
// and send the appropriate portions of it to a requesting client subscription, according to their subscription configuration
|
2019-10-08 19:51:38 +00:00
|
|
|
func (sap *Service) ScreenAndServe(wg *sync.WaitGroup, screenAndServePayload <-chan ipfs.IPLDPayload, screenAndServeQuit <-chan bool) {
|
|
|
|
wg.Add(1)
|
2019-05-17 06:27:02 +00:00
|
|
|
go func() {
|
|
|
|
for {
|
|
|
|
select {
|
2019-06-25 20:31:14 +00:00
|
|
|
case payload := <-screenAndServePayload:
|
2019-10-08 19:51:38 +00:00
|
|
|
sendErr := sap.sendResponse(payload)
|
|
|
|
if sendErr != nil {
|
|
|
|
log.Error(sendErr)
|
2019-05-21 19:27:24 +00:00
|
|
|
}
|
2019-06-25 20:31:14 +00:00
|
|
|
case <-screenAndServeQuit:
|
2019-05-21 19:27:24 +00:00
|
|
|
log.Info("quiting ScreenAndServe process")
|
2019-10-08 19:51:38 +00:00
|
|
|
wg.Done()
|
2019-05-17 06:27:02 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
2019-10-08 19:51:38 +00:00
|
|
|
log.Info("screenAndServe goroutine successfully spun up")
|
2019-05-17 06:27:02 +00:00
|
|
|
}
|
|
|
|
|
2019-08-05 17:56:15 +00:00
|
|
|
func (sap *Service) sendResponse(payload ipfs.IPLDPayload) error {
|
2019-06-25 20:31:14 +00:00
|
|
|
sap.Lock()
|
2019-06-18 17:28:57 +00:00
|
|
|
for ty, subs := range sap.Subscriptions {
|
2019-06-25 20:31:14 +00:00
|
|
|
// Retrieve the subscription parameters for this subscription type
|
2019-06-18 17:28:57 +00:00
|
|
|
subConfig, ok := sap.SubscriptionTypes[ty]
|
|
|
|
if !ok {
|
2019-06-25 20:31:14 +00:00
|
|
|
log.Errorf("subscription configuration for subscription type %s not available", ty.Hex())
|
|
|
|
continue
|
2019-06-18 17:28:57 +00:00
|
|
|
}
|
2019-10-08 19:51:38 +00:00
|
|
|
response, filterErr := sap.Filterer.FilterResponse(subConfig, payload)
|
|
|
|
if filterErr != nil {
|
|
|
|
log.Error(filterErr)
|
2019-06-25 20:31:14 +00:00
|
|
|
continue
|
2019-05-21 19:27:24 +00:00
|
|
|
}
|
2019-06-25 20:31:14 +00:00
|
|
|
for id, sub := range subs {
|
|
|
|
select {
|
2019-08-28 19:43:27 +00:00
|
|
|
case sub.PayloadChan <- response:
|
2019-10-02 14:10:37 +00:00
|
|
|
log.Infof("sending super node payload to subscription %s", id)
|
2019-06-25 20:31:14 +00:00
|
|
|
default:
|
|
|
|
log.Infof("unable to send payload to subscription %s; channel has no receiver", id)
|
|
|
|
}
|
2019-06-18 17:28:57 +00:00
|
|
|
}
|
2019-05-21 19:27:24 +00:00
|
|
|
}
|
2019-06-25 20:31:14 +00:00
|
|
|
sap.Unlock()
|
2019-05-21 19:27:24 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Subscribe is used by the API to subscribe to the service loop
|
2019-10-02 14:10:37 +00:00
|
|
|
func (sap *Service) Subscribe(id rpc.ID, sub chan<- streamer.SuperNodePayload, quitChan chan<- bool, streamFilters config.Subscription) {
|
|
|
|
log.Info("Subscribing to the super node service")
|
2019-06-18 17:28:57 +00:00
|
|
|
// Subscription type is defined as the hash of its content
|
|
|
|
// Group subscriptions by type and screen payloads once for subs of the same type
|
2019-10-08 19:51:38 +00:00
|
|
|
by, encodeErr := rlp.EncodeToBytes(streamFilters)
|
|
|
|
if encodeErr != nil {
|
|
|
|
log.Error(encodeErr)
|
2019-06-18 17:28:57 +00:00
|
|
|
}
|
|
|
|
subscriptionHash := crypto.Keccak256(by)
|
|
|
|
subscriptionType := common.BytesToHash(subscriptionHash)
|
2019-06-07 16:01:29 +00:00
|
|
|
subscription := Subscription{
|
2019-06-18 17:28:57 +00:00
|
|
|
PayloadChan: sub,
|
|
|
|
QuitChan: quitChan,
|
2019-05-17 06:27:02 +00:00
|
|
|
}
|
2019-05-21 19:27:24 +00:00
|
|
|
// If the subscription requests a backfill, use the Postgres index to lookup and retrieve historical data
|
|
|
|
// Otherwise we only filter new data as it is streamed in from the state diffing geth node
|
2019-06-07 13:42:10 +00:00
|
|
|
if streamFilters.BackFill || streamFilters.BackFillOnly {
|
2019-06-18 17:28:57 +00:00
|
|
|
sap.backFill(subscription, id, streamFilters)
|
2019-06-07 16:01:29 +00:00
|
|
|
}
|
|
|
|
if !streamFilters.BackFillOnly {
|
|
|
|
sap.Lock()
|
2019-06-18 17:28:57 +00:00
|
|
|
if sap.Subscriptions[subscriptionType] == nil {
|
|
|
|
sap.Subscriptions[subscriptionType] = make(map[rpc.ID]Subscription)
|
|
|
|
}
|
|
|
|
sap.Subscriptions[subscriptionType][id] = subscription
|
|
|
|
sap.SubscriptionTypes[subscriptionType] = streamFilters
|
2019-06-07 16:01:29 +00:00
|
|
|
sap.Unlock()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-06-18 17:28:57 +00:00
|
|
|
func (sap *Service) backFill(sub Subscription, id rpc.ID, con config.Subscription) {
|
2019-06-07 16:01:29 +00:00
|
|
|
log.Debug("back-filling data for id", id)
|
|
|
|
// Retrieve cached CIDs relevant to this subscriber
|
2019-07-02 17:38:12 +00:00
|
|
|
var endingBlock int64
|
|
|
|
var startingBlock int64
|
2019-10-08 19:51:38 +00:00
|
|
|
var retrieveFirstBlockErr error
|
|
|
|
var retrieveLastBlockErr error
|
|
|
|
startingBlock, retrieveFirstBlockErr = sap.Retriever.RetrieveFirstBlockNumber()
|
|
|
|
if retrieveFirstBlockErr != nil {
|
2019-10-02 14:10:37 +00:00
|
|
|
sub.PayloadChan <- streamer.SuperNodePayload{
|
2019-10-08 19:51:38 +00:00
|
|
|
ErrMsg: "unable to set block range start; error: " + retrieveFirstBlockErr.Error(),
|
2019-06-07 16:01:29 +00:00
|
|
|
}
|
|
|
|
}
|
2019-07-02 17:38:12 +00:00
|
|
|
if startingBlock < con.StartingBlock.Int64() {
|
|
|
|
startingBlock = con.StartingBlock.Int64()
|
|
|
|
}
|
2019-10-08 19:51:38 +00:00
|
|
|
endingBlock, retrieveLastBlockErr = sap.Retriever.RetrieveLastBlockNumber()
|
|
|
|
if retrieveLastBlockErr != nil {
|
2019-10-02 14:10:37 +00:00
|
|
|
sub.PayloadChan <- streamer.SuperNodePayload{
|
2019-10-08 19:51:38 +00:00
|
|
|
ErrMsg: "unable to set block range end; error: " + retrieveLastBlockErr.Error(),
|
2019-06-07 16:01:29 +00:00
|
|
|
}
|
2019-07-02 17:38:12 +00:00
|
|
|
}
|
2019-08-28 22:07:36 +00:00
|
|
|
if endingBlock > con.EndingBlock.Int64() && con.EndingBlock.Int64() > 0 && con.EndingBlock.Int64() > startingBlock {
|
|
|
|
endingBlock = con.EndingBlock.Int64()
|
|
|
|
}
|
2019-07-02 17:38:12 +00:00
|
|
|
log.Debug("backfill starting block:", con.StartingBlock)
|
|
|
|
log.Debug("backfill ending block:", endingBlock)
|
|
|
|
// Backfilled payloads are sent concurrently to the streamed payloads, so the receiver needs to pay attention to
|
|
|
|
// the blocknumbers in the payloads they receive to keep things in order
|
|
|
|
// TODO: separate backfill into a different rpc subscription method altogether?
|
|
|
|
go func() {
|
2019-10-02 14:10:37 +00:00
|
|
|
for i := startingBlock; i <= endingBlock; i++ {
|
2019-10-08 19:51:38 +00:00
|
|
|
cidWrapper, retrieveCIDsErr := sap.Retriever.RetrieveCIDs(con, i)
|
|
|
|
if retrieveCIDsErr != nil {
|
2019-10-02 14:10:37 +00:00
|
|
|
sub.PayloadChan <- streamer.SuperNodePayload{
|
2019-10-08 19:51:38 +00:00
|
|
|
ErrMsg: "CID retrieval error: " + retrieveCIDsErr.Error(),
|
2019-07-02 17:38:12 +00:00
|
|
|
}
|
|
|
|
continue
|
|
|
|
}
|
2019-08-05 17:56:15 +00:00
|
|
|
if ipfs.EmptyCIDWrapper(*cidWrapper) {
|
2019-07-02 17:38:12 +00:00
|
|
|
continue
|
|
|
|
}
|
2019-10-08 19:51:38 +00:00
|
|
|
blocksWrapper, fetchIPLDsErr := sap.IPLDFetcher.FetchIPLDs(*cidWrapper)
|
|
|
|
if fetchIPLDsErr != nil {
|
|
|
|
log.Error(fetchIPLDsErr)
|
2019-10-02 14:10:37 +00:00
|
|
|
sub.PayloadChan <- streamer.SuperNodePayload{
|
2019-10-08 19:51:38 +00:00
|
|
|
ErrMsg: "IPLD fetching error: " + fetchIPLDsErr.Error(),
|
2019-07-02 17:38:12 +00:00
|
|
|
}
|
|
|
|
continue
|
|
|
|
}
|
2019-10-08 19:51:38 +00:00
|
|
|
backFillIplds := sap.Resolver.ResolveIPLDs(*blocksWrapper)
|
2019-07-02 17:38:12 +00:00
|
|
|
select {
|
2019-08-28 19:43:27 +00:00
|
|
|
case sub.PayloadChan <- backFillIplds:
|
2019-10-02 14:10:37 +00:00
|
|
|
log.Infof("sending super node back-fill payload to subscription %s", id)
|
2019-07-02 17:38:12 +00:00
|
|
|
default:
|
|
|
|
log.Infof("unable to send back-fill payload to subscription %s; channel has no receiver", id)
|
2019-05-21 19:27:24 +00:00
|
|
|
}
|
|
|
|
}
|
2019-07-02 17:38:12 +00:00
|
|
|
}()
|
2019-05-17 06:27:02 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Unsubscribe is used to unsubscribe to the StateDiffingService loop
|
2019-06-18 17:28:57 +00:00
|
|
|
func (sap *Service) Unsubscribe(id rpc.ID) {
|
2019-10-02 14:10:37 +00:00
|
|
|
log.Info("Unsubscribing from the super node service")
|
2019-05-17 06:27:02 +00:00
|
|
|
sap.Lock()
|
2019-06-18 17:28:57 +00:00
|
|
|
for ty := range sap.Subscriptions {
|
|
|
|
delete(sap.Subscriptions[ty], id)
|
|
|
|
if len(sap.Subscriptions[ty]) == 0 {
|
|
|
|
// If we removed the last subscription of this type, remove the subscription type outright
|
|
|
|
delete(sap.Subscriptions, ty)
|
|
|
|
delete(sap.SubscriptionTypes, ty)
|
|
|
|
}
|
2019-05-17 06:27:02 +00:00
|
|
|
}
|
|
|
|
sap.Unlock()
|
|
|
|
}
|
|
|
|
|
2019-05-21 19:27:24 +00:00
|
|
|
// Start is used to begin the service
|
2019-05-17 06:27:02 +00:00
|
|
|
func (sap *Service) Start(*p2p.Server) error {
|
2019-10-02 14:10:37 +00:00
|
|
|
log.Info("Starting super node service")
|
2019-05-17 06:27:02 +00:00
|
|
|
wg := new(sync.WaitGroup)
|
2019-08-05 17:56:15 +00:00
|
|
|
payloadChan := make(chan ipfs.IPLDPayload, payloadChanBufferSize)
|
2019-06-07 02:09:27 +00:00
|
|
|
quitChan := make(chan bool, 1)
|
|
|
|
if err := sap.SyncAndPublish(wg, payloadChan, quitChan); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2019-10-08 19:51:38 +00:00
|
|
|
sap.ScreenAndServe(wg, payloadChan, quitChan)
|
2019-05-17 06:27:02 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-05-21 19:27:24 +00:00
|
|
|
// Stop is used to close down the service
|
2019-05-17 06:27:02 +00:00
|
|
|
func (sap *Service) Stop() error {
|
2019-10-02 14:10:37 +00:00
|
|
|
log.Info("Stopping super node service")
|
2019-05-17 06:27:02 +00:00
|
|
|
close(sap.QuitChan)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-09-13 19:41:50 +00:00
|
|
|
// Node returns the Geth node info for this service
|
|
|
|
func (sap *Service) Node() core.Node {
|
2019-10-08 19:51:38 +00:00
|
|
|
return sap.GethNode
|
2019-09-13 19:41:50 +00:00
|
|
|
}
|
|
|
|
|
2019-05-17 06:27:02 +00:00
|
|
|
// close is used to close all listening subscriptions
|
|
|
|
func (sap *Service) close() {
|
|
|
|
sap.Lock()
|
2019-06-18 17:28:57 +00:00
|
|
|
for ty, subs := range sap.Subscriptions {
|
|
|
|
for id, sub := range subs {
|
|
|
|
select {
|
|
|
|
case sub.QuitChan <- true:
|
|
|
|
log.Infof("closing subscription %s", id)
|
|
|
|
default:
|
|
|
|
log.Infof("unable to close subscription %s; channel has no receiver", id)
|
|
|
|
}
|
2019-05-17 06:27:02 +00:00
|
|
|
}
|
2019-06-18 17:28:57 +00:00
|
|
|
delete(sap.Subscriptions, ty)
|
|
|
|
delete(sap.SubscriptionTypes, ty)
|
2019-05-17 06:27:02 +00:00
|
|
|
}
|
|
|
|
sap.Unlock()
|
|
|
|
}
|