package processor

import (
	"context"
	"database/sql"
	"encoding/json"
	"math"
	"sync"
	"time"

	"golang.org/x/sync/errgroup"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
	"github.com/ipfs/go-cid"
	logging "github.com/ipfs/go-log/v2"

	"github.com/filecoin-project/specs-actors/actors/abi"
	"github.com/filecoin-project/specs-actors/actors/builtin"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
	cw_util "github.com/filecoin-project/lotus/cmd/lotus-chainwatch/util"
	"github.com/filecoin-project/lotus/lib/parmap"
)

var log = logging.Logger("processor")

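// Processor consumes blocks that the chainwatch syncer has recorded but not
// yet processed, extracts the actor state changes they introduce, and hands
// those changes to the market, miner, reward, message, and common-actor
// handlers.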
type Processor struct {
	db *sql.DB

	node     api.FullNode
	ctxStore *cw_util.APIIpldStore

	genesisTs *types.TipSet

	// number of blocks processed at a time
	batch int

	// process communication channels
	sectorDealEvents chan *SectorDealEvent
}

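// ActorTips maps a tipset key to the set of actors that changed at that tipset.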
type ActorTips map[types.TipSetKey][]actorInfo

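// actorInfo captures a single changed actor together with the chain context of
// the change: the state root and height it was observed at, the tipset and
// parent tipset keys, the actor's address, and its JSON-serialized state.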
type actorInfo struct {
	act types.Actor

	stateroot cid.Cid
	height    abi.ChainEpoch // so that we can walk the actor changes in chronological order.

	tsKey       types.TipSetKey
	parentTsKey types.TipSetKey

	addr  address.Address
	state string
}

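// NewProcessor returns a Processor that stores chainwatch state in db, queries
// chain data from node, and processes at most batch blocks per iteration.
//
// Illustrative wiring only (the chainwatch command does this in its own main
// package; the driver name, connection string, and batch size below are
// placeholders):
//
//	db, err := sql.Open("postgres", "postgres://user:pass@localhost/chainwatch")
//	if err != nil {
//		log.Fatal(err)
//	}
//	p := NewProcessor(ctx, db, node, 50) // node is an api.FullNode client
//	p.Start(ctx)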
func NewProcessor(ctx context.Context, db *sql.DB, node api.FullNode, batch int) *Processor {
	ctxStore := cw_util.NewAPIIpldStore(ctx, node)

	return &Processor{
		db:       db,
		ctxStore: ctxStore,
		node:     node,
		batch:    batch,
	}
}

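// setupSchemas creates the tables used by each sub-processor. The order of the
// calls matters: later calls create tables with foreign keys that reference
// tables created by earlier calls.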
func (p *Processor) setupSchemas() error {
	// maintain order, subsequent calls create tables with foreign keys.
	if err := p.setupMiners(); err != nil {
		return err
	}

	if err := p.setupMarket(); err != nil {
		return err
	}

	if err := p.setupRewards(); err != nil {
		return err
	}

	if err := p.setupMessages(); err != nil {
		return err
	}

	if err := p.setupCommonActors(); err != nil {
		return err
	}

	return nil
}

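// Start sets up the database schema, loads the genesis tipset, subscribes to
// the message pool, and then runs the main processing loop in a goroutine:
// fetch a batch of unprocessed blocks, collect the actor changes they
// introduce, hand those changes to the market, miner, reward, message, and
// common-actor handlers in parallel, then mark the blocks as processed and
// refresh the materialized views. Start returns immediately; the loop exits
// when ctx is cancelled.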
func (p *Processor) Start(ctx context.Context) {
	log.Debug("Starting Processor")

	if err := p.setupSchemas(); err != nil {
		log.Fatalw("Failed to setup processor", "error", err)
	}

	var err error
	p.genesisTs, err = p.node.ChainGetGenesis(ctx)
	if err != nil {
		log.Fatalw("Failed to get genesis state from lotus", "error", err.Error())
	}

	go p.subMpool(ctx)

	// main processor loop
	go func() {
		for {
			select {
			case <-ctx.Done():
				log.Info("Stopping Processor...")
				return
			default:
				loopStart := time.Now()
				toProcess, err := p.unprocessedBlocks(ctx, p.batch)
				if err != nil {
					log.Fatalw("Failed to get unprocessed blocks", "error", err)
				}

				if len(toProcess) == 0 {
					log.Info("No unprocessed blocks. Wait then try again...")
					time.Sleep(time.Second * 30)
					continue
				}

				// TODO special case genesis state handling here to avoid all the special cases that will be needed for it elsewhere
				// before doing "normal" processing.

				actorChanges, nullRounds, err := p.collectActorChanges(ctx, toProcess)
				if err != nil {
					log.Fatalw("Failed to collect actor changes", "error", err)
				}
				log.Infow("Collected Actor Changes",
					"MarketChanges", len(actorChanges[builtin.StorageMarketActorCodeID]),
					"MinerChanges", len(actorChanges[builtin.StorageMinerActorCodeID]),
					"RewardChanges", len(actorChanges[builtin.RewardActorCodeID]),
					"AccountChanges", len(actorChanges[builtin.AccountActorCodeID]),
					"nullRounds", len(nullRounds))

				grp, ctx := errgroup.WithContext(ctx)

				grp.Go(func() error {
					if err := p.HandleMarketChanges(ctx, actorChanges[builtin.StorageMarketActorCodeID]); err != nil {
						return xerrors.Errorf("Failed to handle market changes: %w", err)
					}
					log.Info("Processed Market Changes")
					return nil
				})

				grp.Go(func() error {
					if err := p.HandleMinerChanges(ctx, actorChanges[builtin.StorageMinerActorCodeID]); err != nil {
						return xerrors.Errorf("Failed to handle miner changes: %w", err)
					}
					log.Info("Processed Miner Changes")
					return nil
				})

				grp.Go(func() error {
					if err := p.HandleRewardChanges(ctx, actorChanges[builtin.RewardActorCodeID], nullRounds); err != nil {
						return xerrors.Errorf("Failed to handle reward changes: %w", err)
					}
					log.Info("Processed Reward Changes")
					return nil
				})

				grp.Go(func() error {
					if err := p.HandleMessageChanges(ctx, toProcess); err != nil {
						return xerrors.Errorf("Failed to handle message changes: %w", err)
					}
					log.Info("Processed Message Changes")
					return nil
				})

				grp.Go(func() error {
					if err := p.HandleCommonActorsChanges(ctx, actorChanges); err != nil {
						return xerrors.Errorf("Failed to handle common actor changes: %w", err)
					}
					log.Info("Processed CommonActor Changes")
					return nil
				})

				if err := grp.Wait(); err != nil {
					log.Errorw("Failed to handle actor changes...retrying", "error", err)
					continue
				}

				if err := p.markBlocksProcessed(ctx, toProcess); err != nil {
					log.Fatalw("Failed to mark blocks as processed", "error", err)
				}

				if err := p.refreshViews(); err != nil {
					log.Errorw("Failed to refresh views", "error", err)
				}

				log.Infow("Processed Batch", "duration", time.Since(loopStart).String())
			}
		}
	}()
}

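// refreshViews refreshes the materialized views derived from the base tables,
// currently just state_heights.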
func (p *Processor) refreshViews() error {
	if _, err := p.db.Exec(`refresh materialized view state_heights`); err != nil {
		return err
	}

	return nil
}

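// collectActorChanges walks the given block headers in parallel and, for each
// block, diffs its parent state root against its grandparent state root to
// find the actors that changed when the parent tipset was executed. Changed
// actors are returned grouped by actor code and parent tipset key; tipsets
// whose state root did not change are returned separately as null rounds.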
func (p *Processor) collectActorChanges(ctx context.Context, toProcess map[cid.Cid]*types.BlockHeader) (map[cid.Cid]ActorTips, []types.TipSetKey, error) {
	start := time.Now()
	defer func() {
		log.Debugw("Collected Actor Changes", "duration", time.Since(start).String())
	}()
	// ActorCode -> tipset -> []actorInfo
	out := map[cid.Cid]ActorTips{}
	var outMu sync.Mutex

	// map of addresses to changed actors
	var changes map[string]types.Actor
	actorsSeen := map[cid.Cid]struct{}{}

	var nullRounds []types.TipSetKey
	var nullBlkMu sync.Mutex

	// collect all actor state that has changes between block headers
	paDone := 0
	parmap.Par(50, parmap.MapArr(toProcess), func(bh *types.BlockHeader) {
		paDone++
		if paDone%100 == 0 {
			log.Debugw("Collecting actor changes", "done", paDone, "percent", (paDone*100)/len(toProcess))
		}

		pts, err := p.node.ChainGetTipSet(ctx, types.NewTipSetKey(bh.Parents...))
		if err != nil {
			panic(err)
		}

		if pts.ParentState().Equals(bh.ParentStateRoot) {
			nullBlkMu.Lock()
			nullRounds = append(nullRounds, pts.Key())
			nullBlkMu.Unlock()
		}

		// collect all actors that had state changes between the blockheader parent-state and its grandparent-state.
		// TODO: changes will contain deleted actors, this causes needless processing further down the pipeline, consider
		// a separate strategy for deleted actors
		changes, err = p.node.StateChangedActors(ctx, pts.ParentState(), bh.ParentStateRoot)
		if err != nil {
			panic(err)
		}

		// record the state of all actors that have changed
		for a, act := range changes {
			act := act
			a := a

			// ignore actors that were deleted.
			has, err := p.node.ChainHasObj(ctx, act.Head)
			if err != nil {
				log.Fatal(err)
			}
			if !has {
				continue
			}

			addr, err := address.NewFromString(a)
			if err != nil {
				log.Fatal(err.Error())
			}

			ast, err := p.node.StateReadState(ctx, addr, pts.Key())
			if err != nil {
				log.Fatal(err.Error())
			}

			// TODO look here for an empty state, maybe that's a sign the actor was deleted?

			state, err := json.Marshal(ast.State)
			if err != nil {
				panic(err)
			}

			outMu.Lock()
			if _, ok := actorsSeen[act.Head]; !ok {
				_, ok := out[act.Code]
				if !ok {
					out[act.Code] = map[types.TipSetKey][]actorInfo{}
				}
				out[act.Code][pts.Key()] = append(out[act.Code][pts.Key()], actorInfo{
					act:         act,
					stateroot:   bh.ParentStateRoot,
					height:      bh.Height,
					tsKey:       pts.Key(),
					parentTsKey: pts.Parents(),
					addr:        addr,
					state:       string(state),
				})
			}
			actorsSeen[act.Head] = struct{}{}
			outMu.Unlock()
		}
	})
	return out, nullRounds, nil
}

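// unprocessedBlocks returns up to batch block headers, preferring the lowest
// heights, that are recorded in the blocks table but not yet marked with a
// processed_at timestamp in blocks_synced.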
func (p *Processor) unprocessedBlocks(ctx context.Context, batch int) (map[cid.Cid]*types.BlockHeader, error) {
	start := time.Now()
	defer func() {
		log.Debugw("Gathered Blocks to process", "duration", time.Since(start).String())
	}()
	rows, err := p.db.Query(`
with toProcess as (
    select blocks.cid, blocks.height, rank() over (order by height) as rnk
    from blocks
        left join blocks_synced bs on blocks.cid = bs.cid
    where bs.processed_at is null and blocks.height > 0
)
select cid
from toProcess
where rnk <= $1
`, batch)
	if err != nil {
		return nil, xerrors.Errorf("Failed to query for unprocessed blocks: %w", err)
	}
	out := map[cid.Cid]*types.BlockHeader{}

	minBlock := abi.ChainEpoch(math.MaxInt64)
	maxBlock := abi.ChainEpoch(0)
	// TODO consider parallel execution here for getting the blocks from the api as is done in fetchMessages()
	for rows.Next() {
		if rows.Err() != nil {
			return nil, rows.Err()
		}
		var c string
		if err := rows.Scan(&c); err != nil {
			return nil, xerrors.Errorf("Failed to scan unprocessed blocks: %w", err)
		}
		ci, err := cid.Parse(c)
		if err != nil {
			return nil, xerrors.Errorf("Failed to parse unprocessed blocks: %w", err)
		}
		bh, err := p.node.ChainGetBlock(ctx, ci)
		if err != nil {
			// this is a pretty serious issue.
			return nil, xerrors.Errorf("Failed to get block header %s: %w", ci.String(), err)
		}
		out[ci] = bh
		if bh.Height < minBlock {
			minBlock = bh.Height
		}
		if bh.Height > maxBlock {
			maxBlock = bh.Height
		}
	}
	log.Infow("Gathered Blocks to Process", "start", minBlock, "end", maxBlock)
	return out, rows.Close()
}

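// markBlocksProcessed stamps every block in processed with the current time as
// its processed_at value in blocks_synced, all within a single transaction.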
func (p *Processor) markBlocksProcessed(ctx context.Context, processed map[cid.Cid]*types.BlockHeader) error {
	start := time.Now()
	processedHeight := abi.ChainEpoch(0)
	defer func() {
		log.Debugw("Marked blocks as Processed", "duration", time.Since(start).String())
		log.Infow("Processed Blocks", "height", processedHeight)
	}()
	tx, err := p.db.Begin()
	if err != nil {
		return err
	}

	processedAt := time.Now().Unix()
	stmt, err := tx.Prepare(`update blocks_synced set processed_at=$1 where cid=$2`)
	if err != nil {
		return err
	}

	for c, bh := range processed {
		if bh.Height > processedHeight {
			processedHeight = bh.Height
		}
		if _, err := stmt.Exec(processedAt, c.String()); err != nil {
			return err
		}
	}

	if err := stmt.Close(); err != nil {
		return err
	}

	return tx.Commit()
}