package chain

import (
	"context"
	"os"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/libp2p/go-libp2p/core/peer"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/types"
)

var (
	// BootstrapPeerThreshold is the number of peers whose heads we must be tracking
	// before the initial sync is allowed to start.
	BootstrapPeerThreshold = build.BootstrapPeerThreshold

	// RecentSyncBufferSize is the number of recently synced tipsets kept for deduplication.
	RecentSyncBufferSize = 10
	// MaxSyncWorkers is the maximum number of concurrently running sync workers.
	MaxSyncWorkers = 5
	// SyncWorkerHistory is the number of finished workers retained for state reporting.
	SyncWorkerHistory = 3

	// InitialSyncTimeThreshold is the sync duration below which a completed sync
	// marks the end of the initial sync.
	InitialSyncTimeThreshold = 15 * time.Minute

	coalesceTipsets = false
)

func init() {
	coalesceTipsets = os.Getenv("LOTUS_SYNC_FORMTS_PEND") == "yes"

	if bootstrapPeerThreshold := os.Getenv("LOTUS_SYNC_BOOTSTRAP_PEERS"); bootstrapPeerThreshold != "" {
		threshold, err := strconv.Atoi(bootstrapPeerThreshold)
		if err != nil {
			log.Errorf("failed to parse 'LOTUS_SYNC_BOOTSTRAP_PEERS' env var: %s", err)
		} else {
			BootstrapPeerThreshold = threshold
		}
	}
}
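
// The knobs above can be tuned via the environment at process start; a
// hypothetical invocation (the exact node start command is not shown here)
// might look like:
//
//	LOTUS_SYNC_FORMTS_PEND=yes LOTUS_SYNC_BOOTSTRAP_PEERS=4 <node start command>
//
// A threshold value that fails to parse is logged and the default above is kept.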

// SyncFunc is the function the sync manager invokes to actually sync to a target tipset.
type SyncFunc func(context.Context, *types.TipSet) error

// SyncManager manages the chain synchronization process, both at bootstrap time
// and during ongoing operation.
//
// It receives candidate chain heads in the form of tipsets from peers,
// and schedules them onto sync workers, deduplicating processing for
// already-active syncs.
type SyncManager interface {
	// Start starts the SyncManager.
	Start()

	// Stop stops the SyncManager.
	Stop()

	// SetPeerHead informs the SyncManager that the supplied peer reported the
	// supplied tipset.
	SetPeerHead(ctx context.Context, p peer.ID, ts *types.TipSet)

	// State retrieves the state of the sync workers.
	State() []SyncerStateSnapshot
}

type syncManager struct {
	ctx    context.Context
	cancel func()

	workq   chan peerHead     // incoming peer heads from SetPeerHead
	statusq chan workerStatus // completion reports from sync workers

	nextWorker uint64
	pend       syncBucketSet             // pending syncs that extend active syncs
	deferred   syncBucketSet             // syncs deferred until a worker frees up or the initial sync finishes
	heads      map[peer.ID]*types.TipSet // peer heads tracked until the initial sync starts
	recent     *syncBuffer               // recently synced tipsets, used for deduplication

	initialSyncDone bool

	mx    sync.Mutex
	state map[uint64]*workerState

	history  []*workerState
	historyI int

	doSync func(context.Context, *types.TipSet) error
}

var _ SyncManager = (*syncManager)(nil)

type peerHead struct {
	// Note: this doesn't _necessarily_ mean that p's head is ts, just that ts is a tipset that p sent to us
	p  peer.ID
	ts *types.TipSet
}

type workerState struct {
	id uint64
	ts *types.TipSet
	ss *SyncerState
	dt time.Duration
}

type workerStatus struct {
	id  uint64
	err error
}

// sync manager interface
func NewSyncManager(sync SyncFunc) SyncManager {
	ctx, cancel := context.WithCancel(context.Background())
	return &syncManager{
		ctx:    ctx,
		cancel: cancel,

		workq:   make(chan peerHead),
		statusq: make(chan workerStatus),

		heads:   make(map[peer.ID]*types.TipSet),
		state:   make(map[uint64]*workerState),
		recent:  newSyncBuffer(RecentSyncBufferSize),
		history: make([]*workerState, SyncWorkerHistory),

		doSync: sync,
	}
}
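
// A minimal usage sketch (the sync callback, context, peer ID and tipset below
// are hypothetical placeholders, not values defined in this package):
//
//	mgr := NewSyncManager(func(ctx context.Context, ts *types.TipSet) error {
//		// perform the actual sync towards ts here
//		return nil
//	})
//	mgr.Start()
//	defer mgr.Stop()
//	mgr.SetPeerHead(ctx, somePeer, theirHead) // called whenever a peer reports a head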

func (sm *syncManager) Start() {
	go sm.scheduler()
}

func (sm *syncManager) Stop() {
	select {
	case <-sm.ctx.Done():
	default:
		sm.cancel()
	}
}

func (sm *syncManager) SetPeerHead(ctx context.Context, p peer.ID, ts *types.TipSet) {
	select {
	case sm.workq <- peerHead{p: p, ts: ts}:
	case <-sm.ctx.Done():
	case <-ctx.Done():
	}
}

func (sm *syncManager) State() []SyncerStateSnapshot {
	sm.mx.Lock()
	workerStates := make([]*workerState, 0, len(sm.state)+len(sm.history))
	for _, ws := range sm.state {
		workerStates = append(workerStates, ws)
	}
	for _, ws := range sm.history {
		if ws != nil {
			workerStates = append(workerStates, ws)
		}
	}
	sm.mx.Unlock()

	sort.Slice(workerStates, func(i, j int) bool {
		return workerStates[i].id < workerStates[j].id
	})

	result := make([]SyncerStateSnapshot, 0, len(workerStates))
	for _, ws := range workerStates {
		result = append(result, ws.ss.Snapshot())
	}

	return result
}

// sync manager internals
func (sm *syncManager) scheduler() {
	// the ticker is only used to periodically check whether the initial sync has
	// completed, so that deferred sync targets can be scheduled
	ticker := time.NewTicker(time.Minute)
	tickerC := ticker.C
	for {
		select {
		case head := <-sm.workq:
			sm.handlePeerHead(head)
		case status := <-sm.statusq:
			sm.handleWorkerStatus(status)
		case <-tickerC:
			if sm.initialSyncDone {
				ticker.Stop()
				tickerC = nil
				sm.handleInitialSyncDone()
			}
		case <-sm.ctx.Done():
			return
		}
	}
}

func (sm *syncManager) handlePeerHead(head peerHead) {
	log.Debugf("new peer head: %s %s", head.p, head.ts)

	// have we started syncing yet?
	if sm.nextWorker == 0 {
		// track the peer head until we start syncing
		sm.heads[head.p] = head.ts

		// not yet; do we have enough peers?
		if len(sm.heads) < BootstrapPeerThreshold {
			log.Debugw("not tracking enough peers to start sync worker", "have", len(sm.heads), "need", BootstrapPeerThreshold)
			// not enough peers; track it and wait
			return
		}

		// we are ready to start syncing; select the sync target and spawn a worker
		target, err := sm.selectInitialSyncTarget()
		if err != nil {
			log.Errorf("failed to select initial sync target: %s", err)
			return
		}

		log.Infof("selected initial sync target: %s", target)
		sm.spawnWorker(target)
		return
	}

	// we have started syncing, add peer head to the queue if applicable and maybe spawn a worker
	// if there is work to do (possibly in a fork)
	target, work, err := sm.addSyncTarget(head.ts)
	if err != nil {
		log.Warnf("failed to add sync target: %s", err)
		return
	}

	if work {
		log.Infof("selected sync target: %s", target)
		sm.spawnWorker(target)
	}
}

func (sm *syncManager) handleWorkerStatus(status workerStatus) {
	log.Debugf("worker %d done; status error: %s", status.id, status.err)

	sm.mx.Lock()
	ws := sm.state[status.id]
	delete(sm.state, status.id)

	// we track the last few workers for debug purposes
	sm.history[sm.historyI] = ws
	sm.historyI++
	sm.historyI %= len(sm.history)
	sm.mx.Unlock()

	if status.err != nil {
		// we failed to sync this target -- log it and try to work on an extended chain
		// if there is nothing related to be worked on, we stop working on this chain.
		log.Errorf("error during sync in %s: %s", ws.ts, status.err)
	} else {
		// add to the recently synced buffer
		sm.recent.Push(ws.ts)
		// if we are still in initial sync and this was fast enough, mark the end of the initial sync
		if !sm.initialSyncDone && ws.dt < InitialSyncTimeThreshold {
			sm.initialSyncDone = true
		}
	}

	// we are done with this target, select the next sync target and spawn a worker if there is work
	// to do, because of an extension of this chain.
	target, work, err := sm.selectSyncTarget(ws.ts)
	if err != nil {
		log.Warnf("failed to select sync target: %s", err)
		return
	}

	if work {
		log.Infof("selected sync target: %s", target)
		sm.spawnWorker(target)
	}
}

func (sm *syncManager) handleInitialSyncDone() {
	// we have just finished the initial sync; spawn additional workers for deferred syncs
	// as needed (up to MaxSyncWorkers) to ramp up chain sync
	for len(sm.state) < MaxSyncWorkers {
		target, work, err := sm.selectDeferredSyncTarget()
		if err != nil {
			log.Errorf("error selecting deferred sync target: %s", err)
			return
		}

		if !work {
			return
		}

		log.Infof("selected deferred sync target: %s", target)
		sm.spawnWorker(target)
	}
}

func (sm *syncManager) spawnWorker(target *types.TipSet) {
	id := sm.nextWorker
	sm.nextWorker++
	ws := &workerState{
		id: id,
		ts: target,
		ss: new(SyncerState),
	}
	ws.ss.data.WorkerID = id

	sm.mx.Lock()
	sm.state[id] = ws
	sm.mx.Unlock()

	go sm.worker(ws)
}

func (sm *syncManager) worker(ws *workerState) {
	log.Infof("worker %d syncing in %s", ws.id, ws.ts)

	start := build.Clock.Now()

	ctx := context.WithValue(sm.ctx, syncStateKey{}, ws.ss)
	err := sm.doSync(ctx, ws.ts)

	ws.dt = build.Clock.Since(start)
	log.Infof("worker %d done; took %s", ws.id, ws.dt)
	select {
	case sm.statusq <- workerStatus{id: ws.id, err: err}:
	case <-sm.ctx.Done():
	}
}

// selects the initial sync target by examining known peer heads; only called once for the initial
// sync.
func (sm *syncManager) selectInitialSyncTarget() (*types.TipSet, error) {
	var buckets syncBucketSet

	var peerHeads []*types.TipSet
	for _, ts := range sm.heads {
		peerHeads = append(peerHeads, ts)
	}
	// clear the map, we don't use it any longer
	sm.heads = nil

	sort.Slice(peerHeads, func(i, j int) bool {
		return peerHeads[i].Height() < peerHeads[j].Height()
	})

	for _, ts := range peerHeads {
		buckets.Insert(ts)
	}

	if len(buckets.buckets) > 1 {
		log.Warn("caution, multiple distinct chains seen during head selections")
		// TODO: we *could* refuse to sync here without user intervention.
		// For now, just select the best cluster
	}

	return buckets.Heaviest(), nil
}

// adds a tipset to the potential sync targets; returns true if there is a tipset to work on.
// this could be either a restart (e.g. because there is no currently scheduled sync work, or a
// worker failed) or a potential fork.
func (sm *syncManager) addSyncTarget(ts *types.TipSet) (*types.TipSet, bool, error) {
	// Note: we don't need the state lock here to access the active worker states, as the only
	// competing threads that may access it do so through State() which is read only.

	// if we have recently synced this or any heavier tipset we just ignore it; this can happen
	// with an empty worker set after we just finished syncing to a target
	if sm.recent.Synced(ts) {
		return nil, false, nil
	}

	// if the worker set is empty, we have finished syncing and were waiting for the next tipset;
	// in this case, we just return the tipset as work to be done
	if len(sm.state) == 0 {
		return ts, true, nil
	}

	// check if it is related to any active sync; if so insert into the pending sync queue
	for _, ws := range sm.state {
		if ts.Equals(ws.ts) {
			// ignore it, we are already syncing it
			return nil, false, nil
		}

		if ts.Parents() == ws.ts.Key() {
			// schedule for syncing next; it's an extension of an active sync
			sm.pend.Insert(ts)
			return nil, false, nil
		}
	}

	// check to see if it is related to any pending sync; if so insert it into the pending sync queue
	if sm.pend.RelatedToAny(ts) {
		sm.pend.Insert(ts)
		return nil, false, nil
	}

	// it's not related to any active or pending sync; this could be a fork in which case we
	// start a new worker to sync it, if it is *heavier* than any active or pending set;
	// if it is not, we ignore it.
	for _, ws := range sm.state {
		if isHeavier(ws.ts, ts) {
			return nil, false, nil
		}
	}

	pendHeaviest := sm.pend.Heaviest()
	if pendHeaviest != nil && isHeavier(pendHeaviest, ts) {
		return nil, false, nil
	}

	// if we have not finished the initial sync or have too many workers, add it to the deferred queue;
	// it will be processed once a worker is freed from syncing a chain (or the initial sync finishes)
	if !sm.initialSyncDone || len(sm.state) >= MaxSyncWorkers {
		log.Debugf("deferring sync on %s", ts)
		sm.deferred.Insert(ts)
		return nil, false, nil
	}

	// start a new worker; it seems heavy enough and unrelated to active or pending syncs
	return ts, true, nil
}

// selects the next sync target after a worker sync has finished; returns true and a target
// TipSet if this chain should continue to sync because there is a heavier related tipset.
func (sm *syncManager) selectSyncTarget(done *types.TipSet) (*types.TipSet, bool, error) {
	// we pop the related bucket and if there is any related tipset, we work on the heaviest one next
	// if we are not already working on a heavier tipset
	related := sm.pend.PopRelated(done)
	if related == nil {
		return sm.selectDeferredSyncTarget()
	}

	heaviest := related.heaviestTipSet()
	if isHeavier(done, heaviest) {
		return sm.selectDeferredSyncTarget()
	}

	for _, ws := range sm.state {
		if isHeavier(ws.ts, heaviest) {
			return sm.selectDeferredSyncTarget()
		}
	}

	if sm.recent.Synced(heaviest) {
		return sm.selectDeferredSyncTarget()
	}

	return heaviest, true, nil
}

// selects a deferred sync target if there is any; these are sync targets that were not related to
// active syncs and were deferred because there were too many workers running
func (sm *syncManager) selectDeferredSyncTarget() (*types.TipSet, bool, error) {
deferredLoop:
	for !sm.deferred.Empty() {
		bucket := sm.deferred.Pop()
		heaviest := bucket.heaviestTipSet()

		if sm.recent.Synced(heaviest) {
			// we have synced it or something heavier recently, skip it
			continue deferredLoop
		}

		if sm.pend.RelatedToAny(heaviest) {
			// this has converged to a pending sync, insert it to the pending queue
			sm.pend.Insert(heaviest)
			continue deferredLoop
		}

		for _, ws := range sm.state {
			if ws.ts.Equals(heaviest) || isHeavier(ws.ts, heaviest) {
				// we have converged and are already syncing it or we are syncing on something heavier
				// ignore it and pop the next deferred bucket
				continue deferredLoop
			}

			if heaviest.Parents() == ws.ts.Key() {
				// we have converged and we are syncing its parent; insert it to the pending queue
				sm.pend.Insert(heaviest)
				continue deferredLoop
			}

			// it's not related to any active or pending sync and this worker is free, so sync it!
			return heaviest, true, nil
		}
	}

	return nil, false, nil
}

func isHeavier(a, b *types.TipSet) bool {
	return a.ParentWeight().GreaterThan(b.ParentWeight())
}

// sync buffer -- this is a circular buffer of recently synced tipsets
type syncBuffer struct {
	buf  []*types.TipSet
	next int
}

func newSyncBuffer(size int) *syncBuffer {
	return &syncBuffer{buf: make([]*types.TipSet, size)}
}

func (sb *syncBuffer) Push(ts *types.TipSet) {
	sb.buf[sb.next] = ts
	sb.next++
	sb.next %= len(sb.buf)
}

func (sb *syncBuffer) Synced(ts *types.TipSet) bool {
	for _, rts := range sb.buf {
		if rts != nil && (rts.Equals(ts) || isHeavier(rts, ts)) {
			return true
		}
	}

	return false
}

// sync buckets and related utilities

// syncBucketSet groups candidate tipsets into buckets; tipsets land in the same bucket
// when they belong to the same chain (equal, or related as parent/child).
type syncBucketSet struct {
	buckets []*syncTargetBucket
}

type syncTargetBucket struct {
	tips []*types.TipSet
}

func newSyncTargetBucket(tipsets ...*types.TipSet) *syncTargetBucket {
	var stb syncTargetBucket
	for _, ts := range tipsets {
		stb.add(ts)
	}
	return &stb
}

func (sbs *syncBucketSet) String() string {
	var bStrings []string
	for _, b := range sbs.buckets {
		var tsStrings []string
		for _, t := range b.tips {
			tsStrings = append(tsStrings, t.String())
		}
		bStrings = append(bStrings, "["+strings.Join(tsStrings, ",")+"]")
	}

	return "{" + strings.Join(bStrings, ";") + "}"
}

func (sbs *syncBucketSet) RelatedToAny(ts *types.TipSet) bool {
	for _, b := range sbs.buckets {
		if b.sameChainAs(ts) {
			return true
		}
	}
	return false
}

func (sbs *syncBucketSet) Insert(ts *types.TipSet) {
	for _, b := range sbs.buckets {
		if b.sameChainAs(ts) {
			b.add(ts)
			return
		}
	}
	sbs.buckets = append(sbs.buckets, newSyncTargetBucket(ts))
}

// Pop removes and returns the bucket containing the heaviest tipset.
func (sbs *syncBucketSet) Pop() *syncTargetBucket {
	var bestBuck *syncTargetBucket
	var bestTs *types.TipSet
	for _, b := range sbs.buckets {
		hts := b.heaviestTipSet()
		if bestBuck == nil || bestTs.ParentWeight().LessThan(hts.ParentWeight()) {
			bestBuck = b
			bestTs = hts
		}
	}

	sbs.removeBucket(bestBuck)

	return bestBuck
}

func (sbs *syncBucketSet) removeBucket(toremove *syncTargetBucket) {
	nbuckets := make([]*syncTargetBucket, 0, len(sbs.buckets)-1)
	for _, b := range sbs.buckets {
		if b != toremove {
			nbuckets = append(nbuckets, b)
		}
	}
	sbs.buckets = nbuckets
}

// PopRelated removes all buckets on the same chain as ts and returns their tipsets
// merged into a single bucket (nil if there is none).
func (sbs *syncBucketSet) PopRelated(ts *types.TipSet) *syncTargetBucket {
	var bOut *syncTargetBucket
	for _, b := range sbs.buckets {
		if b.sameChainAs(ts) {
			sbs.removeBucket(b)
			if bOut == nil {
				bOut = &syncTargetBucket{}
			}
			bOut.tips = append(bOut.tips, b.tips...)
		}
	}
	return bOut
}

func (sbs *syncBucketSet) Heaviest() *types.TipSet {
	// TODO: should also consider factoring in number of peers represented by each bucket here
	var bestTs *types.TipSet
	for _, b := range sbs.buckets {
		bhts := b.heaviestTipSet()
		if bestTs == nil || bhts.ParentWeight().GreaterThan(bestTs.ParentWeight()) {
			bestTs = bhts
		}
	}
	return bestTs
}

func (sbs *syncBucketSet) Empty() bool {
	return len(sbs.buckets) == 0
}

func (stb *syncTargetBucket) sameChainAs(ts *types.TipSet) bool {
	for _, t := range stb.tips {
		if ts.Equals(t) {
			return true
		}
		if ts.Key() == t.Parents() {
			return true
		}
		if ts.Parents() == t.Key() {
			return true
		}
	}
	return false
}

func (stb *syncTargetBucket) add(ts *types.TipSet) {
	for i, t := range stb.tips {
		if t.Equals(ts) {
			return
		}
		if coalesceTipsets && t.Height() == ts.Height() &&
			types.CidArrsEqual(t.Blocks()[0].Parents, ts.Blocks()[0].Parents) {
			// coalescing is enabled and both tipsets share height and parents:
			// merge them, keeping at most one block per miner
			miners := make(map[address.Address]struct{})
			newTs := []*types.BlockHeader{}
			for _, b := range t.Blocks() {
				_, have := miners[b.Miner]
				if !have {
					newTs = append(newTs, b)
					miners[b.Miner] = struct{}{}
				}
			}
			for _, b := range ts.Blocks() {
				_, have := miners[b.Miner]
				if !have {
					newTs = append(newTs, b)
					miners[b.Miner] = struct{}{}
				}
			}

			ts2, err := types.NewTipSet(newTs)
			if err != nil {
				log.Warnf("error while trying to recombine a tipset in a bucket: %+v", err)
				continue
			}
			stb.tips[i] = ts2
			return
		}
	}

	stb.tips = append(stb.tips, ts)
}

func (stb *syncTargetBucket) heaviestTipSet() *types.TipSet {
	if stb == nil {
		return nil
	}

	var best *types.TipSet
	for _, ts := range stb.tips {
		if best == nil || ts.ParentWeight().GreaterThan(best.ParentWeight()) {
			best = ts
		}
	}
	return best
}