// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package super_node

import (
    "fmt"
    "sync"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/crypto"
    "github.com/ethereum/go-ethereum/node"
    "github.com/ethereum/go-ethereum/p2p"
    "github.com/ethereum/go-ethereum/rlp"
    "github.com/ethereum/go-ethereum/rpc"
    log "github.com/sirupsen/logrus"

    "github.com/vulcanize/vulcanizedb/pkg/eth/core"
    "github.com/vulcanize/vulcanizedb/pkg/postgres"
    "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)

const (
    // PayloadChanBufferSize is the buffer size for the channels that carry chain data payloads through the super node
    PayloadChanBufferSize = 2000
)

// SuperNode is the top level interface for streaming, converting to IPLDs, publishing,
// and indexing all chain data; screening this data; and serving it up to subscribed clients
// This service is compatible with the Ethereum service interface (node.Service)
type SuperNode interface {
    // APIs(), Protocols(), Start() and Stop()
    node.Service
    // Data processing event loop
    Sync(wg *sync.WaitGroup, forwardPayloadChan chan<- shared.ConvertedData) error
    // Pub-Sub handling event loop
    Serve(wg *sync.WaitGroup, screenAndServePayload <-chan shared.ConvertedData)
    // Method to subscribe to the service
    Subscribe(id rpc.ID, sub chan<- SubscriptionPayload, quitChan chan<- bool, params shared.SubscriptionSettings)
    // Method to unsubscribe from the service
    Unsubscribe(id rpc.ID)
    // Method to access the node info for the service
    Node() *core.Node
    // Method to access chain type
    Chain() shared.ChainType
}
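
// A minimal usage sketch, assuming cfg is a populated *Config (the variable name is illustrative);
// it wires the service up the same way Start does below: spin up Sync, then hand the same
// converted-data channel to Serve:
//
//    sn, err := NewSuperNode(cfg)
//    if err != nil {
//        log.Fatal(err)
//    }
//    wg := new(sync.WaitGroup)
//    payloadChan := make(chan shared.ConvertedData, PayloadChanBufferSize)
//    if err := sn.Sync(wg, payloadChan); err != nil {
//        log.Fatal(err)
//    }
//    sn.Serve(wg, payloadChan)
//    wg.Wait()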

// Service is the underlying struct for the super node
type Service struct {
    // Used to sync access to the Subscriptions
    sync.Mutex
    // Interface for streaming payloads over an rpc subscription
    Streamer shared.PayloadStreamer
    // Interface for converting raw payloads into IPLD object payloads
    Converter shared.PayloadConverter
    // Interface for publishing the IPLD payloads to IPFS
    Publisher shared.IPLDPublisher
    // Interface for indexing the CIDs of the published IPLDs in Postgres
    Indexer shared.CIDIndexer
    // Interface for filtering and serving data to subscribed clients according to their specification
    Filterer shared.ResponseFilterer
    // Interface for fetching IPLD objects from IPFS
    IPLDFetcher shared.IPLDFetcher
    // Interface for searching and retrieving CIDs from the Postgres index
    Retriever shared.CIDRetriever
    // Chan the processor uses to subscribe to payloads from the Streamer
    PayloadChan chan shared.RawChainData
    // Used to signal shutdown of the service
    QuitChan chan bool
    // A mapping of rpc.IDs to their subscription channels, keyed by subscription type (hash of the subscription settings)
    Subscriptions map[common.Hash]map[rpc.ID]Subscription
    // A mapping of subscription params hash to the corresponding subscription params
    SubscriptionTypes map[common.Hash]shared.SubscriptionSettings
    // Info for the Geth node that this super node is working with
    NodeInfo *core.Node
    // Number of publishAndIndex workers
    WorkerPoolSize int
    // Chain type for this service
    chain shared.ChainType
    // Path to ipfs data dir
    ipfsPath string
    // Underlying db
    db *postgres.DB
    // wg for syncing serve processes
    serveWg *sync.WaitGroup
}

// NewSuperNode creates a new super_node.SuperNode using an underlying super_node.Service struct
func NewSuperNode(settings *Config) (SuperNode, error) {
    sn := new(Service)
    var err error
    // If we are syncing, initialize the needed interfaces
    if settings.Sync {
        sn.Streamer, sn.PayloadChan, err = NewPayloadStreamer(settings.Chain, settings.WSClient)
        if err != nil {
            return nil, err
        }
        sn.Converter, err = NewPayloadConverter(settings.Chain)
        if err != nil {
            return nil, err
        }
        sn.Publisher, err = NewIPLDPublisher(settings.Chain, settings.IPFSPath, settings.SyncDBConn, settings.IPFSMode)
        if err != nil {
            return nil, err
        }
        sn.Indexer, err = NewCIDIndexer(settings.Chain, settings.SyncDBConn, settings.IPFSMode)
        if err != nil {
            return nil, err
        }
        sn.Filterer, err = NewResponseFilterer(settings.Chain)
        if err != nil {
            return nil, err
        }
    }
    // If we are serving, initialize the needed interfaces
    if settings.Serve {
        sn.Retriever, err = NewCIDRetriever(settings.Chain, settings.ServeDBConn)
        if err != nil {
            return nil, err
        }
        sn.IPLDFetcher, err = NewIPLDFetcher(settings.Chain, settings.IPFSPath, settings.ServeDBConn, settings.IPFSMode)
        if err != nil {
            return nil, err
        }
        sn.db = settings.ServeDBConn
    }
    sn.QuitChan = make(chan bool)
    sn.Subscriptions = make(map[common.Hash]map[rpc.ID]Subscription)
    sn.SubscriptionTypes = make(map[common.Hash]shared.SubscriptionSettings)
    sn.WorkerPoolSize = settings.Workers
    sn.NodeInfo = &settings.NodeInfo
    sn.ipfsPath = settings.IPFSPath
    sn.chain = settings.Chain
    return sn, nil
}
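
// Note that the sync-side components (Streamer, Converter, Publisher, Indexer, Filterer) are only
// constructed when settings.Sync is true, and the serve-side components (Retriever, IPLDFetcher,
// and the db handle) only when settings.Serve is true; the remaining fields (Workers, NodeInfo,
// IPFSPath, Chain) are read unconditionally.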

// Protocols exports the service's p2p protocols; this service has none
func (sap *Service) Protocols() []p2p.Protocol {
    return []p2p.Protocol{}
}

// APIs returns the RPC descriptors the super node service offers
func (sap *Service) APIs() []rpc.API {
    infoAPI := NewInfoAPI()
    apis := []rpc.API{
        {
            Namespace: APIName,
            Version:   APIVersion,
            Service:   NewPublicSuperNodeAPI(sap),
            Public:    true,
        },
        {
            Namespace: "rpc",
            Version:   APIVersion,
            Service:   infoAPI,
            Public:    true,
        },
        {
            Namespace: "net",
            Version:   APIVersion,
            Service:   infoAPI,
            Public:    true,
        },
        {
            Namespace: "admin",
            Version:   APIVersion,
            Service:   infoAPI,
            Public:    true,
        },
    }
    chainAPI, err := NewPublicAPI(sap.chain, sap.db, sap.ipfsPath)
    if err != nil {
        log.Error(err)
        return apis
    }
    return append(apis, chainAPI)
}

// Sync streams incoming raw chain data and converts it for further processing
// It forwards the converted data to the publishAndIndex process(es) it spins up
// It also forwards the converted data to the Serve process if there is one listening on the passed screenAndServePayload channel
// This continues regardless of whether or how many subscribers there are
func (sap *Service) Sync(wg *sync.WaitGroup, screenAndServePayload chan<- shared.ConvertedData) error {
    sub, err := sap.Streamer.Stream(sap.PayloadChan)
    if err != nil {
        return err
    }
    // spin up publishAndIndex worker goroutines
    publishAndIndexPayload := make(chan shared.ConvertedData, PayloadChanBufferSize)
    for i := 1; i <= sap.WorkerPoolSize; i++ {
        go sap.publishAndIndex(wg, i, publishAndIndexPayload)
        log.Debugf("%s publishAndIndex worker %d successfully spun up", sap.chain.String(), i)
    }
    go func() {
        wg.Add(1)
        defer wg.Done()
        for {
            select {
            case payload := <-sap.PayloadChan:
                ipldPayload, err := sap.Converter.Convert(payload)
                if err != nil {
                    log.Errorf("super node conversion error for chain %s: %v", sap.chain.String(), err)
                    continue
                }
                log.Infof("%s data streamed at head height %d", sap.chain.String(), ipldPayload.Height())
                // If we have a Serve process running, forward the converted payload to it
                select {
                case screenAndServePayload <- ipldPayload:
                default:
                }
                // Forward the payload to the publishAndIndex workers
                // this channel acts as a ring buffer: if it is full, the oldest payload is dropped to make room for the newest
                select {
                case publishAndIndexPayload <- ipldPayload:
                default:
                    <-publishAndIndexPayload
                    publishAndIndexPayload <- ipldPayload
                }
            case err := <-sub.Err():
                log.Errorf("super node subscription error for chain %s: %v", sap.chain.String(), err)
            case <-sap.QuitChan:
                log.Infof("quitting %s Sync process", sap.chain.String())
                return
            }
        }
    }()
    log.Infof("%s Sync goroutine successfully spun up", sap.chain.String())
    return nil
}

// publishAndIndex is spun up by Sync and receives converted chain data from that process
// it publishes this data to IPFS and indexes the resulting CIDs with useful metadata in Postgres
func (sap *Service) publishAndIndex(wg *sync.WaitGroup, id int, publishAndIndexPayload <-chan shared.ConvertedData) {
    wg.Add(1)
    defer wg.Done()
    for {
        select {
        case payload := <-publishAndIndexPayload:
            log.Debugf("%s super node publishAndIndex worker %d publishing data streamed at head height %d", sap.chain.String(), id, payload.Height())
            cidPayload, err := sap.Publisher.Publish(payload)
            if err != nil {
                log.Errorf("%s super node publishAndIndex worker %d publishing error: %v", sap.chain.String(), id, err)
                continue
            }
            log.Debugf("%s super node publishAndIndex worker %d indexing data streamed at head height %d", sap.chain.String(), id, payload.Height())
            if err := sap.Indexer.Index(cidPayload); err != nil {
                log.Errorf("%s super node publishAndIndex worker %d indexing error: %v", sap.chain.String(), id, err)
            }
        case <-sap.QuitChan:
            log.Infof("%s super node publishAndIndex worker %d shutting down", sap.chain.String(), id)
            return
        }
    }
}

// Serve listens for incoming converted data on the screenAndServePayload channel fed by the Sync process
// It filters and sends this data to any subscribers to the service
// This process can also be stood up alone, without a screenAndServePayload channel attached to a Sync process,
// and it will hang on the WaitGroup indefinitely, allowing the Service to serve historical data requests only
func (sap *Service) Serve(wg *sync.WaitGroup, screenAndServePayload <-chan shared.ConvertedData) {
    sap.serveWg = wg
    go func() {
        wg.Add(1)
        defer wg.Done()
        for {
            select {
            case payload := <-screenAndServePayload:
                sap.filterAndServe(payload)
            case <-sap.QuitChan:
                log.Infof("quitting %s Serve process", sap.chain.String())
                return
            }
        }
    }()
    log.Infof("%s Serve goroutine successfully spun up", sap.chain.String())
}

// filterAndServe filters the payload according to each subscription type and sends it to the matching subscriptions
func (sap *Service) filterAndServe(payload shared.ConvertedData) {
    log.Debugf("sending %s payload to subscriptions", sap.chain.String())
    sap.Lock()
    sap.serveWg.Add(1)
    defer sap.Unlock()
    defer sap.serveWg.Done()
    for ty, subs := range sap.Subscriptions {
        // Retrieve the subscription parameters for this subscription type
        subConfig, ok := sap.SubscriptionTypes[ty]
        if !ok {
            log.Errorf("super node %s subscription configuration for subscription type %s not available", sap.chain.String(), ty.Hex())
            sap.closeType(ty)
            continue
        }
        if subConfig.EndingBlock().Int64() > 0 && subConfig.EndingBlock().Int64() < payload.Height() {
            // We are out of range for this subscription type;
            // close it, and continue to the next
            sap.closeType(ty)
            continue
        }
        response, err := sap.Filterer.Filter(subConfig, payload)
        if err != nil {
            log.Errorf("super node filtering error for chain %s: %v", sap.chain.String(), err)
            sap.closeType(ty)
            continue
        }
        responseRLP, err := rlp.EncodeToBytes(response)
        if err != nil {
            log.Errorf("super node rlp encoding error for chain %s: %v", sap.chain.String(), err)
            continue
        }
        for id, sub := range subs {
            select {
            case sub.PayloadChan <- SubscriptionPayload{Data: responseRLP, Err: "", Flag: EmptyFlag, Height: response.Height()}:
                log.Debugf("sending super node %s payload to subscription %s", sap.chain.String(), id)
            default:
                log.Infof("unable to send %s payload to subscription %s; channel has no receiver", sap.chain.String(), id)
            }
        }
    }
}
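
// Note: both filterAndServe and sendHistoricalData RLP-encode the chain-specific filter response
// before sending it, so subscribers must RLP-decode SubscriptionPayload.Data on their side.
// A minimal client-side sketch, assuming a hypothetical chain-specific response type StreamResponse:
//
//    var res StreamResponse
//    if err := rlp.DecodeBytes(subPayload.Data, &res); err != nil {
//        // handle the decode error
//    }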

// Subscribe is used by the API to remotely subscribe to the service loop
// The params must be rlp serializable and satisfy the SubscriptionSettings interface
func (sap *Service) Subscribe(id rpc.ID, sub chan<- SubscriptionPayload, quitChan chan<- bool, params shared.SubscriptionSettings) {
    sap.serveWg.Add(1)
    defer sap.serveWg.Done()
    log.Infof("New %s subscription %s", sap.chain.String(), id)
    subscription := Subscription{
        ID:          id,
        PayloadChan: sub,
        QuitChan:    quitChan,
    }
    if params.ChainType() != sap.chain {
        sendNonBlockingErr(subscription, fmt.Errorf("subscription %s is for chain %s, service supports chain %s", id, params.ChainType().String(), sap.chain.String()))
        sendNonBlockingQuit(subscription)
        return
    }
    // Subscription type is defined as the hash of the rlp-serialized subscription settings
    by, err := rlp.EncodeToBytes(params)
    if err != nil {
        sendNonBlockingErr(subscription, err)
        sendNonBlockingQuit(subscription)
        return
    }
    subscriptionType := crypto.Keccak256Hash(by)
    if !params.HistoricalDataOnly() {
        // Add subscriber
        sap.Lock()
        if sap.Subscriptions[subscriptionType] == nil {
            sap.Subscriptions[subscriptionType] = make(map[rpc.ID]Subscription)
        }
        sap.Subscriptions[subscriptionType][id] = subscription
        sap.SubscriptionTypes[subscriptionType] = params
        sap.Unlock()
    }
    // If the subscription requests a backfill, use the Postgres index to lookup and retrieve historical data
    // Otherwise we only filter new data as it is streamed in from the state diffing geth node
    if params.HistoricalData() || params.HistoricalDataOnly() {
        if err := sap.sendHistoricalData(subscription, id, params); err != nil {
            sendNonBlockingErr(subscription, fmt.Errorf("%s super node subscriber backfill error: %v", sap.chain.String(), err))
            sendNonBlockingQuit(subscription)
            return
        }
    }
}

// sendHistoricalData sends historical data to the requesting subscription
func (sap *Service) sendHistoricalData(sub Subscription, id rpc.ID, params shared.SubscriptionSettings) error {
    log.Infof("Sending %s historical data to subscription %s", sap.chain.String(), id)
    // Retrieve cached CIDs relevant to this subscriber
    var endingBlock int64
    var startingBlock int64
    var err error
    startingBlock, err = sap.Retriever.RetrieveFirstBlockNumber()
    if err != nil {
        return err
    }
    if startingBlock < params.StartingBlock().Int64() {
        startingBlock = params.StartingBlock().Int64()
    }
    endingBlock, err = sap.Retriever.RetrieveLastBlockNumber()
    if err != nil {
        return err
    }
    if endingBlock > params.EndingBlock().Int64() && params.EndingBlock().Int64() > 0 && params.EndingBlock().Int64() > startingBlock {
        endingBlock = params.EndingBlock().Int64()
    }
    log.Debugf("%s historical data starting block: %d", sap.chain.String(), params.StartingBlock().Int64())
    log.Debugf("%s historical data ending block: %d", sap.chain.String(), endingBlock)
    go func() {
        sap.serveWg.Add(1)
        defer sap.serveWg.Done()
        for i := startingBlock; i <= endingBlock; i++ {
            select {
            case <-sap.QuitChan:
                log.Infof("%s super node historical data feed to subscription %s closed", sap.chain.String(), id)
                return
            default:
            }
            cidWrappers, empty, err := sap.Retriever.Retrieve(params, i)
            if err != nil {
                sendNonBlockingErr(sub, fmt.Errorf("%s super node CID Retrieval error at block %d\r%s", sap.chain.String(), i, err.Error()))
                continue
            }
            if empty {
                continue
            }
            for _, cids := range cidWrappers {
                response, err := sap.IPLDFetcher.Fetch(cids)
                if err != nil {
                    sendNonBlockingErr(sub, fmt.Errorf("%s super node IPLD Fetching error at block %d\r%s", sap.chain.String(), i, err.Error()))
                    continue
                }
                responseRLP, err := rlp.EncodeToBytes(response)
                if err != nil {
                    log.Error(err)
                    continue
                }
                select {
                case sub.PayloadChan <- SubscriptionPayload{Data: responseRLP, Err: "", Flag: EmptyFlag, Height: response.Height()}:
                    log.Debugf("sending super node historical data payload to %s subscription %s", sap.chain.String(), id)
                default:
                    log.Infof("unable to send backFill payload to %s subscription %s; channel has no receiver", sap.chain.String(), id)
                }
            }
        }
        // when we are done backfilling send an empty payload signifying so in the msg
        select {
        case sub.PayloadChan <- SubscriptionPayload{Data: nil, Err: "", Flag: BackFillCompleteFlag}:
            log.Debugf("sending backFill completion notice to %s subscription %s", sap.chain.String(), id)
        default:
            log.Infof("unable to send backFill completion notice to %s subscription %s", sap.chain.String(), id)
        }
    }()
    return nil
}

// Unsubscribe is used by the API to remotely unsubscribe from the service loop
func (sap *Service) Unsubscribe(id rpc.ID) {
    log.Infof("Unsubscribing %s from the %s super node service", id, sap.chain.String())
    sap.Lock()
    for ty := range sap.Subscriptions {
        delete(sap.Subscriptions[ty], id)
        if len(sap.Subscriptions[ty]) == 0 {
            // If we removed the last subscription of this type, remove the subscription type outright
            delete(sap.Subscriptions, ty)
            delete(sap.SubscriptionTypes, ty)
        }
    }
    sap.Unlock()
}

// Start is used to begin the service
// This is mostly just to satisfy the node.Service interface
func (sap *Service) Start(*p2p.Server) error {
    log.Infof("Starting %s super node service", sap.chain.String())
    wg := new(sync.WaitGroup)
    payloadChan := make(chan shared.ConvertedData, PayloadChanBufferSize)
    if err := sap.Sync(wg, payloadChan); err != nil {
        return err
    }
    sap.Serve(wg, payloadChan)
    return nil
}

// Stop is used to close down the service
// This is mostly just to satisfy the node.Service interface
func (sap *Service) Stop() error {
    log.Infof("Stopping %s super node service", sap.chain.String())
    sap.Lock()
    close(sap.QuitChan)
    sap.close()
    sap.Unlock()
    return nil
}

// Node returns the node info for this service
func (sap *Service) Node() *core.Node {
    return sap.NodeInfo
}

// Chain returns the chain type for this service
func (sap *Service) Chain() shared.ChainType {
    return sap.chain
}

// close is used to close all listening subscriptions
// close needs to be called with subscription access locked
func (sap *Service) close() {
    log.Infof("Closing all %s subscriptions", sap.chain.String())
    for subType, subs := range sap.Subscriptions {
        for _, sub := range subs {
            sendNonBlockingQuit(sub)
        }
        delete(sap.Subscriptions, subType)
        delete(sap.SubscriptionTypes, subType)
    }
}

// closeType is used to close all subscriptions of a given type
// closeType needs to be called with subscription access locked
func (sap *Service) closeType(subType common.Hash) {
    log.Infof("Closing all %s subscriptions of type %s", sap.chain.String(), subType.String())
    subs := sap.Subscriptions[subType]
    for _, sub := range subs {
        sendNonBlockingQuit(sub)
    }
    delete(sap.Subscriptions, subType)
    delete(sap.SubscriptionTypes, subType)
}