// plugeth-statediff/service.go
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package statediff

import (
	"bytes"
	"errors"
	"fmt"
	"math/big"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/rlp"
	plugeth "github.com/openrelayxyz/plugeth-utils/core"
	"github.com/thoas/go-funk"

	"github.com/cerc-io/plugeth-statediff/indexer/database/metrics"
	"github.com/cerc-io/plugeth-statediff/indexer/interfaces"
	types2 "github.com/cerc-io/plugeth-statediff/types"
	"github.com/cerc-io/plugeth-statediff/utils/log"
)

const (
	chainEventChanSize = 20000
	genesisBlockNumber = 0
	defaultRetryLimit  = 3                   // default retry limit once a deadlock is detected
	pgDeadlockDetected = "deadlock detected" // 40P01 https://www.postgresql.org/docs/current/errcodes-appendix.html
)

var (
	errTypeAssertionFailed = errors.New("type assertion failed")
	errUnexpectedOperation = errors.New("unexpected operation")
)

var defaultWriteLoopParams = Params{
	IncludeBlock:    true,
	IncludeReceipts: true,
	IncludeTD:       true,
	IncludeCode:     true,
}

// Service is the underlying struct for the state diffing service
type Service struct {
	// Used to build the state diff objects
	Builder Builder
	// Used to subscribe to chain events (blocks)
	BlockChain BlockChain
	// Cache the last block so that we can avoid having to look up the next block's parent
	BlockCache BlockCache
	// The public backend API, which provides useful information about the current state
	BackendAPI plugeth.Backend
	// Used to signal shutdown of the service
	QuitChan chan bool
	// Interface for publishing statediffs as PG-IPLD objects
	indexer interfaces.StateDiffIndexer
	// Should the statediff service wait for geth to sync to head?
	ShouldWaitForSync bool
	// Whether to enable writing state diffs directly to track blockchain head.
	enableWriteLoop bool
	// Parameters to use in the service write loop, if enabled
	writeLoopParams ParamsWithMutex
	// Size of the worker pool
	numWorkers uint
	// Number of retries for transactions aborted due to deadlock
	maxRetry uint
	// Sequential ID for RPC subscriptions
	lastSubID uint64
	// A mapping of subscription IDs to their subscription channels, keyed by subscription type
	// (hash of the RLP-encoded Params)
	Subscriptions map[common.Hash]map[SubID]Subscription
	// A mapping of subscription params RLP hash to the corresponding subscription params
	SubscriptionTypes map[common.Hash]Params
	// Number of current subscribers
	subscribers int32
	// Used to sync access to the Subscriptions
	subscriptionsMutex sync.Mutex
	// Write job status subscriptions
	jobStatusSubs      map[SubID]jobStatusSubscription
	jobStatusSubsMutex sync.RWMutex
	// Sequential ID for write jobs
	lastJobID uint64
	// Map of block number to in-flight jobs (for WriteStateDiffAt)
	currentJobs      map[uint64]JobID
	currentJobsMutex sync.Mutex
	// All in-progress statediff jobs
	currentBlocks      map[string]bool
	currentBlocksMutex sync.Mutex
}

// SubID is an ID for identifying client subscriptions
type SubID uint64

// JobID is an ID used for tracking in-progress jobs (0 for invalid)
type JobID uint64

// JobStatus represents the status of a completed job
type JobStatus struct {
	ID  JobID
	Err error
}

type jobStatusSubscription struct {
	statusChan chan<- JobStatus
	quitChan   chan<- bool
}

// BlockCache caches the last block for safe access from different service loops
type BlockCache struct {
	sync.Mutex
	blocks  map[common.Hash]*types.Block
	maxSize uint
}

type workerParams struct {
	chainEventCh <-chan core.ChainEvent
	wg           *sync.WaitGroup
	id           uint
}

func NewBlockCache(max uint) BlockCache {
	return BlockCache{
		blocks:  make(map[common.Hash]*types.Block),
		maxSize: max,
	}
}

// NewService creates a new state diffing service with the given config and backend
func NewService(cfg Config, blockChain BlockChain, backend plugeth.Backend, indexer interfaces.StateDiffIndexer) (*Service, error) {
	workers := cfg.NumWorkers
	if workers == 0 {
		workers = 1
	}
	quitCh := make(chan bool)
	sds := &Service{
		BlockChain:        blockChain,
		Builder:           NewBuilder(blockChain.StateCache()),
		QuitChan:          quitCh,
		Subscriptions:     make(map[common.Hash]map[SubID]Subscription),
		SubscriptionTypes: make(map[common.Hash]Params),
		BlockCache:        NewBlockCache(workers),
		BackendAPI:        backend,
		ShouldWaitForSync: cfg.WaitForSync,
		indexer:           indexer,
		enableWriteLoop:   cfg.EnableWriteLoop,
		numWorkers:        workers,
		maxRetry:          defaultRetryLimit,
		jobStatusSubs:     map[SubID]jobStatusSubscription{},
		currentJobs:       map[uint64]JobID{},
		currentBlocks:     map[string]bool{},
		writeLoopParams:   ParamsWithMutex{Params: defaultWriteLoopParams},
	}
	if indexer != nil {
		err := loadWatchedAddresses(indexer, &sds.writeLoopParams)
		if err != nil {
			return nil, err
		}
		indexer.ReportDBMetrics(10*time.Second, quitCh)
	}
	return sds, nil
}
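
// A minimal construction-and-start sketch (illustrative only; assumes a
// populated Config and concrete BlockChain, plugeth.Backend, and
// StateDiffIndexer instances are already in hand):
//
//	sds, err := NewService(cfg, chain, backend, indexer)
//	if err != nil {
//		return err // e.g. failure loading watched addresses from the indexer
//	}
//	if err := sds.Start(); err != nil {
//		return err
//	}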

// getParentBlock returns the parent block of currentBlock, using the cached block if available,
// and caches the passed block
func (lbc *BlockCache) getParentBlock(currentBlock *types.Block, bc BlockChain) *types.Block {
	lbc.Lock()
	parentHash := currentBlock.ParentHash()
	var parentBlock *types.Block
	if block, ok := lbc.blocks[parentHash]; ok {
		parentBlock = block
		if len(lbc.blocks) > int(lbc.maxSize) {
			// the parent has now been consumed; evict it to bound the cache size
			delete(lbc.blocks, parentHash)
		}
	} else {
		parentBlock = bc.GetBlockByHash(parentHash)
	}
	lbc.blocks[currentBlock.Hash()] = currentBlock
	lbc.Unlock()
	return parentBlock
}

// WriteLoop is the event loop for progressively processing and writing diffs directly to the DB
func (sds *Service) WriteLoop(chainEventCh chan core.ChainEvent) {
	log.Info("Starting statediff write loop")
	log := log.New("context", "statediff writing")
	sub := sds.BlockChain.SubscribeChainEvent(chainEventCh)
	defer sub.Unsubscribe()

	var wg sync.WaitGroup
	chainEventFwd := make(chan core.ChainEvent, chainEventChanSize)
	defer func() {
		log.Info("Quitting")
		close(chainEventFwd)
	}()
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			select {
			case event := <-chainEventCh:
				// First process metrics for chain events, then forward to workers
				lastHeight := defaultStatediffMetrics.lastEventHeight.Value()
				block := event.Block
				log.Debug("Chain event received", "number", block.Number(), "hash", event.Hash)
				nextHeight := int64(block.Number().Uint64())
				if nextHeight-lastHeight != 1 {
					log.Warn("Received block out-of-order", "next", nextHeight, "last", lastHeight)
				}
				defaultStatediffMetrics.lastEventHeight.Update(nextHeight)
				defaultStatediffMetrics.writeLoopChannelLen.Update(int64(len(chainEventCh)))
				chainEventFwd <- event
			case err := <-sub.Err():
				if err != nil {
					log.Error("Error from subscription", "error", err)
				}
				close(sds.QuitChan)
				return
			case <-sds.QuitChan:
				return
			}
		}
	}()
	wg.Add(int(sds.numWorkers))
	for worker := uint(0); worker < sds.numWorkers; worker++ {
		params := workerParams{chainEventCh: chainEventFwd, wg: &wg, id: worker}
		go sds.writeLoopWorker(params)
	}
	wg.Wait()
}
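
// Note on the write loop design: a single intake goroutine records metrics and
// forwards chain events to the buffered chainEventFwd channel, while numWorkers
// writeLoopWorker goroutines consume from it. A slow DB write therefore stalls
// only one worker, not event intake, at the cost of non-monotonic height
// reporting (see the FIXME in writeLoopWorker).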

func (sds *Service) writeGenesisStateDiff(currBlock *types.Block, logger log.Logger) {
	// For the genesis block we need to output the entire state trie, so we diff it against an empty trie.
	logger.Info("Writing genesis state diff", "number", genesisBlockNumber)
	sds.writeLoopParams.RLock()
	defer sds.writeLoopParams.RUnlock()
	err := sds.writeStateDiffWithRetry(currBlock, common.Hash{}, sds.writeLoopParams.Params)
	if err != nil {
		logger.Error("failed to write state diff", "number",
			genesisBlockNumber, "error", err)
		return
	}
	defaultStatediffMetrics.lastStatediffHeight.Update(genesisBlockNumber)
}

func (sds *Service) writeLoopWorker(params workerParams) {
	log := log.New("context", "statediff writing", "worker", params.id)
	defer params.wg.Done()
	for {
		select {
		case event := <-params.chainEventCh:
			block := event.Block
			parent := sds.BlockCache.getParentBlock(block, sds.BlockChain)
			if parent == nil {
				log.Error("Parent block is nil, skipping this block", "number", block.Number())
				continue
			}
			// chainEvent streams blocks starting from block 1, but we also need to include data from the genesis block.
			if parent.Number().Uint64() == genesisBlockNumber {
				sds.writeGenesisStateDiff(parent, log)
			}
			log.Info("Writing state diff", "number", block.Number())
			sds.writeLoopParams.RLock()
			err := sds.writeStateDiffWithRetry(block, parent.Root(), sds.writeLoopParams.Params)
			sds.writeLoopParams.RUnlock()
			if err != nil {
				log.Error("failed to write state diff",
					"number", block.Number(),
					"hash", block.Hash(),
					"error", err)
				continue
			}
			// FIXME: reported height will be non-monotonic with concurrent workers
			defaultStatediffMetrics.lastStatediffHeight.Update(int64(block.Number().Uint64()))
		case <-sds.QuitChan:
			log.Info("Quitting")
			return
		}
	}
}

// PublishLoop processes and publishes statediff payloads to subscribed clients
func (sds *Service) PublishLoop(chainEventCh chan core.ChainEvent) {
	log.Info("Starting statediff publish loop")
	log := log.New("context", "statediff publishing")
	sub := sds.BlockChain.SubscribeChainEvent(chainEventCh)
	defer func() {
		log.Info("Quitting")
		sds.close()
		sub.Unsubscribe()
	}()
	for {
		select {
		// process and publish chain events as they arrive
		case event := <-chainEventCh:
			defaultStatediffMetrics.serviceLoopChannelLen.Update(int64(len(chainEventCh)))
			block := event.Block
			log.Debug("Chain event received", "number", block.Number(), "hash", event.Hash)
			// if we don't have any subscribers, do not process a statediff
			if atomic.LoadInt32(&sds.subscribers) == 0 {
				log.Debug("Currently no subscribers, skipping block")
				continue
			}
			parent := sds.BlockCache.getParentBlock(block, sds.BlockChain)
			if parent == nil {
				log.Error("Parent block is nil, skipping block", "number", block.Number())
				continue
			}
			// chainEvent streams blocks starting from block 1, but we also need to include data from the genesis block.
			if parent.Number().Uint64() == genesisBlockNumber {
				// For the genesis block we need to output the entire state trie, so we diff it against an empty trie.
				sds.streamStateDiff(parent, common.Hash{})
			}
			sds.streamStateDiff(block, parent.Root())
		case err := <-sub.Err():
			if err != nil {
				log.Error("error from subscription", "error", err)
			}
			close(sds.QuitChan)
			return
		case <-sds.QuitChan:
			return
		}
	}
}

// streamStateDiff builds and delivers diff payloads for each subscription according to its
// subscription type
func (sds *Service) streamStateDiff(currentBlock *types.Block, parentRoot common.Hash) {
	sds.subscriptionsMutex.Lock()
	for ty, subs := range sds.Subscriptions {
		params, ok := sds.SubscriptionTypes[ty]
		if !ok {
			log.Error("no parameter set associated with this subscription", "sub.type", ty.String())
			sds.closeType(ty)
			continue
		}
		// create payload for this subscription type
		payload, err := sds.processStateDiff(currentBlock, parentRoot, params)
		if err != nil {
			log.Error("statediff processing error",
				"number", currentBlock.Number(), "parameters", params, "error", err)
			continue
		}
		for id, sub := range subs {
			select {
			case sub.PayloadChan <- *payload:
				log.Debug("sending statediff payload at head", "number", currentBlock.Number(), "sub.id", id)
			default:
				log.Info("unable to send statediff payload; channel has no receiver", "sub.id", id)
			}
		}
	}
	sds.subscriptionsMutex.Unlock()
}

// StateDiffAt returns a state diff object payload at the specific block height.
// This operation cannot be performed back past the point of db pruning; it requires an archival
// node for historical data
func (sds *Service) StateDiffAt(blockNumber uint64, params Params) (*Payload, error) {
	log.Info("Sending state diff", "number", blockNumber)
	currentBlock := sds.BlockChain.GetBlockByNumber(blockNumber)
	parentRoot := common.Hash{}
	if blockNumber != 0 {
		parentRoot = sds.BlockChain.GetBlockByHash(currentBlock.ParentHash()).Root()
	}
	return sds.processStateDiff(currentBlock, parentRoot, sds.maybeReplaceWatchedAddresses(params))
}
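
// A sketch of consuming the returned payload (illustrative only; assumes the
// state object decodes into the types package's StateObject type):
//
//	payload, err := sds.StateDiffAt(height, params)
//	if err != nil {
//		return err
//	}
//	var obj types2.StateObject
//	if err := rlp.DecodeBytes(payload.StateObjectRlp, &obj); err != nil {
//		return err
//	}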

// StateDiffFor returns a state diff object payload for the specific block hash.
// This operation cannot be performed back past the point of db pruning; it requires an archival
// node for historical data
func (sds *Service) StateDiffFor(blockHash common.Hash, params Params) (*Payload, error) {
	log.Info("Sending state diff", "hash", blockHash)
	currentBlock := sds.BlockChain.GetBlockByHash(blockHash)
	parentRoot := common.Hash{}
	if currentBlock.NumberU64() != 0 {
		parentRoot = sds.BlockChain.GetBlockByHash(currentBlock.ParentHash()).Root()
	}
	return sds.processStateDiff(currentBlock, parentRoot, sds.maybeReplaceWatchedAddresses(params))
}

// maybeReplaceWatchedAddresses uses the watched addresses from the statediffing write loop if
// none are provided, then computes the leaf paths of the watched addresses in the params
func (sds *Service) maybeReplaceWatchedAddresses(params Params) Params {
	// lock before reading the shared write loop params to avoid a data race
	sds.writeLoopParams.RLock()
	if params.WatchedAddresses == nil && sds.writeLoopParams.WatchedAddresses != nil {
		params.WatchedAddresses = make([]common.Address, len(sds.writeLoopParams.WatchedAddresses))
		copy(params.WatchedAddresses, sds.writeLoopParams.WatchedAddresses)
	}
	sds.writeLoopParams.RUnlock()
	params.ComputeWatchedAddressesLeafPaths()
	return params
}

// processStateDiff builds the state diff payload from the current block, parent state root, and provided params
func (sds *Service) processStateDiff(currentBlock *types.Block, parentRoot common.Hash, params Params) (*Payload, error) {
	stateDiff, err := sds.Builder.BuildStateDiffObject(Args{
		NewStateRoot: currentBlock.Root(),
		OldStateRoot: parentRoot,
		BlockHash:    currentBlock.Hash(),
		BlockNumber:  currentBlock.Number(),
	}, params)
	if err != nil {
		return nil, err
	}
	// allow dereferencing of parent, keep current locked as it should be the next parent
	// sds.BlockChain.UnlockTrie(parentRoot)
	stateDiffRlp, err := rlp.EncodeToBytes(&stateDiff)
	if err != nil {
		return nil, err
	}
	log.Debug("statediff RLP payload for block",
		"number", currentBlock.Number(), "byte size", len(stateDiffRlp))
	return sds.newPayload(stateDiffRlp, currentBlock, params)
}

func (sds *Service) newPayload(stateObject []byte, block *types.Block, params Params) (*Payload, error) {
	payload := &Payload{
		StateObjectRlp: stateObject,
	}
	if params.IncludeBlock {
		blockBuff := new(bytes.Buffer)
		if err := block.EncodeRLP(blockBuff); err != nil {
			return nil, err
		}
		payload.BlockRlp = blockBuff.Bytes()
	}
	if params.IncludeTD {
		payload.TotalDifficulty = sds.BlockChain.GetTd(block.Hash(), block.NumberU64())
	}
	if params.IncludeReceipts {
		receiptBuff := new(bytes.Buffer)
		receipts := sds.BlockChain.GetReceiptsByHash(block.Hash())
		if err := rlp.Encode(receiptBuff, receipts); err != nil {
			return nil, err
		}
		payload.ReceiptsRlp = receiptBuff.Bytes()
	}
	return payload, nil
}

// Subscribe is used by the API to subscribe to the service loop
func (sds *Service) Subscribe(sub chan<- Payload, quitChan chan<- bool, params Params) SubID {
	log.Info("Subscribing to the statediff service")
	if atomic.CompareAndSwapInt32(&sds.subscribers, 0, 1) {
		log.Info("State diffing subscription received; beginning statediff processing")
	}
	// compute leaf paths of watched addresses in the params
	params.ComputeWatchedAddressesLeafPaths()
	// Subscription type is defined as the hash of the RLP-serialized subscription params
	by, err := rlp.EncodeToBytes(&params)
	if err != nil {
		log.Error("State diffing params must be RLP-serializable", "error", err)
		return 0
	}
	subscriptionType := crypto.Keccak256Hash(by)
	id := SubID(atomic.AddUint64(&sds.lastSubID, 1))
	// Add subscriber
	sds.subscriptionsMutex.Lock()
	if sds.Subscriptions[subscriptionType] == nil {
		sds.Subscriptions[subscriptionType] = make(map[SubID]Subscription)
	}
	sds.Subscriptions[subscriptionType][id] = Subscription{
		PayloadChan: sub,
		QuitChan:    quitChan,
	}
	sds.SubscriptionTypes[subscriptionType] = params
	sds.subscriptionsMutex.Unlock()
	return id
}
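
// A minimal subscriber sketch (illustrative only; channel capacities are
// arbitrary):
//
//	payloadChan := make(chan Payload, 128)
//	quitChan := make(chan bool)
//	id := sds.Subscribe(payloadChan, quitChan, params)
//	defer sds.Unsubscribe(id)
//	for {
//		select {
//		case payload := <-payloadChan:
//			// process payload; note that sends in streamStateDiff are
//			// non-blocking, so a slow receiver will drop payloads
//		case <-quitChan:
//			return
//		}
//	}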

// Unsubscribe is used to unsubscribe from the service loop
func (sds *Service) Unsubscribe(id SubID) error {
	log.Info("Unsubscribing from the statediff service", "sub.id", id)
	sds.subscriptionsMutex.Lock()
	for ty := range sds.Subscriptions {
		delete(sds.Subscriptions[ty], id)
		if len(sds.Subscriptions[ty]) == 0 {
			// If we removed the last subscription of this type, remove the subscription type outright
			delete(sds.Subscriptions, ty)
			delete(sds.SubscriptionTypes, ty)
		}
	}
	if len(sds.Subscriptions) == 0 {
		if atomic.CompareAndSwapInt32(&sds.subscribers, 1, 0) {
			log.Info("No more subscriptions; halting statediff processing")
		}
	}
	sds.subscriptionsMutex.Unlock()
	return nil
}

// IsSyncing returns true if geth is still syncing, and false if it has caught up to head.
func (sds *Service) IsSyncing() bool {
	progress := sds.BackendAPI.Downloader().Progress()
	return progress.CurrentBlock() < progress.HighestBlock()
}

// WaitForSync continuously checks the status of geth syncing, only returning once it has caught
// up to head.
func (sds *Service) WaitForSync() {
	synced := false
	for !synced {
		if !sds.IsSyncing() {
			log.Debug("Geth has completed syncing")
			synced = true
		} else {
			time.Sleep(1 * time.Second)
		}
	}
}

// Start is used to begin the service
func (sds *Service) Start() error {
	log.Info("Starting statediff service")
	if sds.ShouldWaitForSync {
		log.Info("Statediff service waiting until geth has caught up to the head of the chain")
		sds.WaitForSync()
	}
	chainEventCh := make(chan core.ChainEvent, chainEventChanSize)
	go sds.PublishLoop(chainEventCh)
	if sds.enableWriteLoop {
		log.Debug("Starting statediff DB write loop", "params", sds.writeLoopParams.Params)
		chainEventCh := make(chan core.ChainEvent, chainEventChanSize)
		go sds.WriteLoop(chainEventCh)
	}
	return nil
}

// Stop is used to close down the service
func (sds *Service) Stop() error {
	log.Info("Stopping statediff service")
	close(sds.QuitChan)
	var err error
	if sds.indexer != nil {
		if err = sds.indexer.Close(); err != nil {
			log.Error("Error closing indexer", "error", err)
		}
	}
	return err
}

// close is used to close all listening subscriptions
func (sds *Service) close() {
	sds.subscriptionsMutex.Lock()
	for ty, subs := range sds.Subscriptions {
		for id, sub := range subs {
			select {
			case sub.QuitChan <- true:
				log.Info("closing subscription", "sub.id", id)
			default:
				log.Info("unable to close subscription; channel has no receiver", "sub.id", id)
			}
			delete(sds.Subscriptions[ty], id)
		}
		delete(sds.Subscriptions, ty)
		delete(sds.SubscriptionTypes, ty)
	}
	sds.subscriptionsMutex.Unlock()
}

// closeType is used to close all subscriptions of a given type
// NOTE: this needs to be called with subscription access locked
func (sds *Service) closeType(subType common.Hash) {
	subs := sds.Subscriptions[subType]
	for id, sub := range subs {
		sendNonBlockingQuit(id, sub)
	}
	delete(sds.Subscriptions, subType)
	delete(sds.SubscriptionTypes, subType)
}

func sendNonBlockingQuit(id SubID, sub Subscription) {
	select {
	case sub.QuitChan <- true:
		log.Info("closing subscription", "sub.id", id)
	default:
		log.Info("unable to close subscription; channel has no receiver", "sub.id", id)
	}
}

// WriteStateDiffAt writes a state diff at the specific block height directly to the database.
// This operation cannot be performed back past the point of db pruning; it requires an archival node
// for historical data
func (sds *Service) WriteStateDiffAt(blockNumber uint64, params Params) JobID {
	sds.currentJobsMutex.Lock()
	defer sds.currentJobsMutex.Unlock()
	if id, has := sds.currentJobs[blockNumber]; has {
		return id
	}
	sds.lastJobID++
	id := JobID(sds.lastJobID)
	sds.currentJobs[blockNumber] = id
	go func() {
		err := sds.writeStateDiffAt(blockNumber, params)
		if err != nil {
			log.Error("failed to write state diff", "error", err)
		}
		sds.currentJobsMutex.Lock()
		delete(sds.currentJobs, blockNumber)
		sds.currentJobsMutex.Unlock()
		sds.jobStatusSubsMutex.RLock()
		defer sds.jobStatusSubsMutex.RUnlock()
		// note: these sends block until received, so status subscribers are
		// expected to drain their channels promptly
		for _, sub := range sds.jobStatusSubs {
			sub.statusChan <- JobStatus{id, err}
		}
	}()
	return id
}
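
// A sketch of tracking an on-demand write job to completion (illustrative
// only):
//
//	statusChan := make(chan JobStatus, 1)
//	subID := sds.SubscribeWriteStatus(statusChan)
//	defer sds.UnsubscribeWriteStatus(subID)
//	jobID := sds.WriteStateDiffAt(height, params)
//	for status := range statusChan {
//		if status.ID == jobID {
//			return status.Err // nil on success
//		}
//	}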

func (sds *Service) writeStateDiffAt(blockNumber uint64, params Params) error {
	log.Info("Writing state diff at", "number", blockNumber)
	currentBlock := sds.BlockChain.GetBlockByNumber(blockNumber)
	parentRoot := common.Hash{}
	if blockNumber != 0 {
		parentBlock := sds.BlockChain.GetBlockByHash(currentBlock.ParentHash())
		parentRoot = parentBlock.Root()
	}
	return sds.writeStateDiffWithRetry(currentBlock, parentRoot, sds.maybeReplaceWatchedAddresses(params))
}

// WriteStateDiffFor writes a state diff for the specific block hash directly to the database.
// This operation cannot be performed back past the point of db pruning; it requires an archival node
// for historical data
func (sds *Service) WriteStateDiffFor(blockHash common.Hash, params Params) error {
	log.Info("Writing state diff for", "hash", blockHash)
	currentBlock := sds.BlockChain.GetBlockByHash(blockHash)
	parentRoot := common.Hash{}
	if currentBlock.NumberU64() != 0 {
		parentBlock := sds.BlockChain.GetBlockByHash(currentBlock.ParentHash())
		parentRoot = parentBlock.Root()
	}
	return sds.writeStateDiffWithRetry(currentBlock, parentRoot, sds.maybeReplaceWatchedAddresses(params))
}

// claimExclusiveAccess claims exclusive access for state diffing the specified block.
// Returns true and a function to release access if successful, else false, nil.
func (sds *Service) claimExclusiveAccess(block *types.Block) (bool, func()) {
	sds.currentBlocksMutex.Lock()
	defer sds.currentBlocksMutex.Unlock()
	key := fmt.Sprintf("%s,%d", block.Hash().Hex(), block.NumberU64())
	if sds.currentBlocks[key] {
		return false, nil
	}
	sds.currentBlocks[key] = true
	return true, func() {
		sds.currentBlocksMutex.Lock()
		defer sds.currentBlocksMutex.Unlock()
		delete(sds.currentBlocks, key)
	}
}

// writeStateDiff writes a state diff built from the current block, parent state root, and provided params
func (sds *Service) writeStateDiff(block *types.Block, parentRoot common.Hash, params Params) error {
	log := log.New("hash", block.Hash(), "number", block.Number())
	// check for the indexer before dereferencing it below
	if sds.indexer == nil {
		return fmt.Errorf("indexer is not set; cannot write indexed diffs")
	}
	if granted, relinquish := sds.claimExclusiveAccess(block); granted {
		defer relinquish()
	} else {
		log.Info("Not writing, statediff in progress.")
		return nil
	}
	if done, _ := sds.indexer.HasBlock(block.Hash(), block.NumberU64()); done {
		log.Info("Not writing, statediff already done.")
		return nil
	}

	var totalDifficulty = big.NewInt(0)
	var receipts types.Receipts
	var err error
	var tx interfaces.Batch
	start := countStateDiffBegin(block, log)
	defer countStateDiffEnd(start, log, &err)
	if params.IncludeTD {
		totalDifficulty = sds.BlockChain.GetTd(block.Hash(), block.NumberU64())
	}
	if params.IncludeReceipts {
		receipts = sds.BlockChain.GetReceiptsByHash(block.Hash())
	}
	tx, err = sds.indexer.PushBlock(block, receipts, totalDifficulty)
	if err != nil {
		return err
	}
	output := func(node types2.StateLeafNode) error {
		defer metrics.ReportAndUpdateDuration("statediff output", time.Now(), log,
			metrics.IndexerMetrics.OutputTimer)
		return sds.indexer.PushStateNode(tx, node, block.Hash().String())
	}
	ipldOutput := func(c types2.IPLD) error {
		defer metrics.ReportAndUpdateDuration("statediff ipldOutput", time.Now(), log,
			metrics.IndexerMetrics.IPLDOutputTimer)
		return sds.indexer.PushIPLD(tx, c)
	}
	err = sds.Builder.WriteStateDiffObject(Args{
		NewStateRoot: block.Root(),
		OldStateRoot: parentRoot,
		BlockHash:    block.Hash(),
		BlockNumber:  block.Number(),
	}, params, output, ipldOutput)
	// TODO this anti-pattern needs to be sorted out eventually
	if err = tx.Submit(err); err != nil {
		return fmt.Errorf("batch transaction submission failed: %w", err)
	}
	// allow dereferencing of parent, keep current locked as it should be the next parent
	// TODO never locked
	// sds.BlockChain.UnlockTrie(parentRoot)
	return nil
}

// writeStateDiffWithRetry wraps writeStateDiff, retrying up to maxRetry times when a
// Postgres deadlock is detected
func (sds *Service) writeStateDiffWithRetry(block *types.Block, parentRoot common.Hash, params Params) error {
	var err error
	for i := uint(0); i < sds.maxRetry; i++ {
		err = sds.writeStateDiff(block, parentRoot, params)
		if err != nil && strings.Contains(err.Error(), pgDeadlockDetected) {
			// Retry only when a deadlock is detected.
			if i+1 < sds.maxRetry {
				log.Warn("deadlock detected while writing statediff", "error", err, "retry number", i)
			}
			continue
		}
		break
	}
	return err
}

// SubscribeWriteStatus is used by the API to subscribe to the job status updates
func (sds *Service) SubscribeWriteStatus(sub chan<- JobStatus) SubID {
	id := SubID(atomic.AddUint64(&sds.lastSubID, 1))
	log.Info("Subscribing to job status updates", "sub.id", id)
	sds.jobStatusSubsMutex.Lock()
	sds.jobStatusSubs[id] = jobStatusSubscription{
		statusChan: sub,
	}
	sds.jobStatusSubsMutex.Unlock()
	return id
}

// UnsubscribeWriteStatus is used to unsubscribe from job status updates
func (sds *Service) UnsubscribeWriteStatus(id SubID) {
	log.Info("Unsubscribing from job status updates", "sub.id", id)
	sds.jobStatusSubsMutex.Lock()
	delete(sds.jobStatusSubs, id)
	sds.jobStatusSubsMutex.Unlock()
}

// WatchAddress performs one of the following operations on the watched addresses in
// sds.writeLoopParams and the db: add | remove | set | clear
func (sds *Service) WatchAddress(operation types2.OperationType, args []types2.WatchAddressArg) error {
	sds.writeLoopParams.Lock()
	log.Debug("WatchAddress: locked sds.writeLoopParams")
	defer sds.writeLoopParams.Unlock()
	// get the current block number
	currentBlockNumber := sds.BlockChain.CurrentBlock().Number
	switch operation {
	case types2.Add:
		// filter out args having an already watched address with a warning
		filteredArgs, ok := funk.Filter(args, func(arg types2.WatchAddressArg) bool {
			// compare as common.Address to match the element type of WatchedAddresses
			if funk.Contains(sds.writeLoopParams.WatchedAddresses, common.HexToAddress(arg.Address)) {
				log.Warn("Address already being watched", "address", arg.Address)
				return false
			}
			return true
		}).([]types2.WatchAddressArg)
		if !ok {
			return fmt.Errorf("add: filtered args %w", errTypeAssertionFailed)
		}
		// get addresses from the filtered args
		filteredAddresses, err := MapWatchAddressArgsToAddresses(filteredArgs)
		if err != nil {
			return fmt.Errorf("add: filtered addresses %w", err)
		}
		// update the db
		if sds.indexer != nil {
			err = sds.indexer.InsertWatchedAddresses(filteredArgs, currentBlockNumber)
			if err != nil {
				return err
			}
		}
		// update in-memory params
		sds.writeLoopParams.WatchedAddresses = append(sds.writeLoopParams.WatchedAddresses, filteredAddresses...)
		sds.writeLoopParams.ComputeWatchedAddressesLeafPaths()
	case types2.Remove:
		// get addresses from args
		argAddresses, err := MapWatchAddressArgsToAddresses(args)
		if err != nil {
			return fmt.Errorf("remove: mapped addresses %w", err)
		}
		// remove the provided addresses from currently watched addresses
		addresses, ok := funk.Subtract(sds.writeLoopParams.WatchedAddresses, argAddresses).([]common.Address)
		if !ok {
			return fmt.Errorf("remove: filtered addresses %w", errTypeAssertionFailed)
		}
		// update the db
		if sds.indexer != nil {
			err = sds.indexer.RemoveWatchedAddresses(args)
			if err != nil {
				return err
			}
		}
		// update in-memory params
		sds.writeLoopParams.WatchedAddresses = addresses
		sds.writeLoopParams.ComputeWatchedAddressesLeafPaths()
	case types2.Set:
		// get addresses from args
		argAddresses, err := MapWatchAddressArgsToAddresses(args)
		if err != nil {
			return fmt.Errorf("set: mapped addresses %w", err)
		}
		// update the db
		if sds.indexer != nil {
			err = sds.indexer.SetWatchedAddresses(args, currentBlockNumber)
			if err != nil {
				return err
			}
		}
		// update in-memory params
		sds.writeLoopParams.WatchedAddresses = argAddresses
		sds.writeLoopParams.ComputeWatchedAddressesLeafPaths()
	case types2.Clear:
		// update the db
		if sds.indexer != nil {
			err := sds.indexer.ClearWatchedAddresses()
			if err != nil {
				return err
			}
		}
		// update in-memory params
		sds.writeLoopParams.WatchedAddresses = []common.Address{}
		sds.writeLoopParams.ComputeWatchedAddressesLeafPaths()
	default:
		return fmt.Errorf("%w: %v", errUnexpectedOperation, operation)
	}
	return nil
}
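
// A sketch of adding an address to the watch list via this API (illustrative
// only; only the Address field of WatchAddressArg, which is used above, is
// shown):
//
//	err := sds.WatchAddress(types2.Add, []types2.WatchAddressArg{
//		{Address: "0xabc..."},
//	})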

// loadWatchedAddresses loads watched addresses from an indexer to params
func loadWatchedAddresses(indexer interfaces.StateDiffIndexer, params *ParamsWithMutex) error {
	watchedAddresses, err := indexer.LoadWatchedAddresses()
	if err != nil {
		return err
	}
	params.Lock()
	defer params.Unlock()
	params.WatchedAddresses = watchedAddresses
	params.ComputeWatchedAddressesLeafPaths()
	return nil
}

// MapWatchAddressArgsToAddresses maps a []WatchAddressArg to the corresponding []common.Address
func MapWatchAddressArgsToAddresses(args []types2.WatchAddressArg) ([]common.Address, error) {
	addresses, ok := funk.Map(args, func(arg types2.WatchAddressArg) common.Address {
		return common.HexToAddress(arg.Address)
	}).([]common.Address)
	if !ok {
		return nil, errTypeAssertionFailed
	}
	return addresses, nil
}