package chain

import (
	"context"
	"sort"
	"sync"

	"github.com/filecoin-project/lotus/chain/types"

	peer "github.com/libp2p/go-libp2p-peer"
)

// BootstrapPeerThreshold is the number of peer heads we want to have seen
// before attempting bootstrap sync.
const BootstrapPeerThreshold = 2

// SyncFunc is the callback a SyncManager invokes to actually sync the chain
// to a given tipset.
type SyncFunc func(context.Context, *types.TipSet) error

// SyncManager schedules chain sync work: it tracks peer heads, buckets
// incoming tipsets by chain, and hands sync targets to a pool of workers.
type SyncManager struct {
	lk        sync.Mutex
	peerHeads map[peer.ID]*types.TipSet

	bootstrapped bool

	bspThresh int

	// incomingTipSets receives new heads to consider for syncing
	incomingTipSets chan *types.TipSet
	// syncTargets hands chosen targets to the sync workers
	syncTargets chan *types.TipSet
	// syncResults carries worker outcomes back to the scheduler
	syncResults chan *syncResult

	syncStates []*SyncerState

	doSync func(context.Context, *types.TipSet) error

	stop chan struct{}

	// Sync Scheduler fields
	activeSyncs    map[types.TipSetKey]*types.TipSet
	syncQueue      syncBucketSet
	activeSyncTips syncBucketSet
	nextSyncTarget *syncTargetBucket
	// workerChan aliases syncTargets while a next target is pending, and is
	// nil otherwise, which disables the corresponding select case in the
	// scheduler
	workerChan chan *types.TipSet
}

type syncResult struct {
	ts      *types.TipSet
	success bool
}

const syncWorkerCount = 3

func NewSyncManager(sync SyncFunc) *SyncManager {
	return &SyncManager{
		bspThresh:       1,
		peerHeads:       make(map[peer.ID]*types.TipSet),
		syncTargets:     make(chan *types.TipSet),
		syncResults:     make(chan *syncResult),
		syncStates:      make([]*SyncerState, syncWorkerCount),
		incomingTipSets: make(chan *types.TipSet),
		activeSyncs:     make(map[types.TipSetKey]*types.TipSet),
		doSync:          sync,
		stop:            make(chan struct{}),
	}
}

func (sm *SyncManager) Start() {
	go sm.syncScheduler()
	for i := 0; i < syncWorkerCount; i++ {
		go sm.syncWorker(i)
	}
}

func (sm *SyncManager) Stop() {
	close(sm.stop)
}
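
// A minimal usage sketch, assuming a hypothetical `syncer` value whose Sync
// method matches SyncFunc (the names here are illustrative, not part of this
// package):
//
//	sm := NewSyncManager(func(ctx context.Context, ts *types.TipSet) error {
//		return syncer.Sync(ctx, ts)
//	})
//	sm.Start()
//	defer sm.Stop()
//	// feed peer heads as they arrive:
//	sm.SetPeerHead(ctx, pid, theirHead)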

func (sm *SyncManager) SetPeerHead(ctx context.Context, p peer.ID, ts *types.TipSet) {
	log.Info("set peer head!", ts.Height(), ts.Cids())
	sm.lk.Lock()
	defer sm.lk.Unlock()
	sm.peerHeads[p] = ts

	if !sm.bootstrapped {
		spc := sm.syncedPeerCount()
		if spc >= sm.bspThresh {
			// It's go time!
			target, err := sm.selectSyncTarget()
			if err != nil {
				log.Error("failed to select sync target: ", err)
				return
			}

			sm.incomingTipSets <- target
			// TODO: is this the right place to say we're bootstrapped? probably want to wait until the sync finishes
			sm.bootstrapped = true
		}
		log.Infof("sync bootstrap has %d peers", spc)
		return
	}

	sm.incomingTipSets <- ts
}

// syncBucketSet groups tipsets into buckets, one per distinct chain, so that
// competing heads can be compared and synced independently.
type syncBucketSet struct {
	buckets []*syncTargetBucket
}

func (sbs *syncBucketSet) Insert(ts *types.TipSet) {
	for _, b := range sbs.buckets {
		if b.sameChainAs(ts) {
			b.add(ts)
			return
		}
	}
	sbs.buckets = append(sbs.buckets, &syncTargetBucket{
		tips:  []*types.TipSet{ts},
		count: 1,
	})
}
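
// For illustration, a hypothetical sketch (a, b, c are not defined in this
// package): if b is a child tipset of a, and c is on an unrelated fork, then
//
//	var sbs syncBucketSet
//	sbs.Insert(a) // new bucket {a}
//	sbs.Insert(b) // b.Parents() == a.Cids(), so the bucket becomes {a, b}
//	sbs.Insert(c) // unrelated chain: a second bucket {c}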

// Pop removes and returns the bucket containing the heaviest tipset, or nil
// if the set is empty.
func (sbs *syncBucketSet) Pop() *syncTargetBucket {
	var bestBuck *syncTargetBucket
	var bestTs *types.TipSet
	for _, b := range sbs.buckets {
		hts := b.heaviestTipSet()
		if bestBuck == nil || bestTs.ParentWeight().LessThan(hts.ParentWeight()) {
			bestBuck = b
			bestTs = hts
		}
	}

	sbs.removeBucket(bestBuck)

	return bestBuck
}

func (sbs *syncBucketSet) removeBucket(toremove *syncTargetBucket) {
	if toremove == nil {
		// guard against Pop on an empty set, where len(sbs.buckets)-1
		// below would be a negative capacity
		return
	}
	nbuckets := make([]*syncTargetBucket, 0, len(sbs.buckets)-1)
	for _, b := range sbs.buckets {
		if b != toremove {
			nbuckets = append(nbuckets, b)
		}
	}
	sbs.buckets = nbuckets
}

// PopRelated removes and returns the bucket on the same chain as ts, if any.
func (sbs *syncBucketSet) PopRelated(ts *types.TipSet) *syncTargetBucket {
	for _, b := range sbs.buckets {
		if b.sameChainAs(ts) {
			sbs.removeBucket(b)
			return b
		}
	}
	return nil
}

func (sbs *syncBucketSet) Heaviest() *types.TipSet {
	// TODO: should also consider factoring in number of peers represented by each bucket here
	var bestTs *types.TipSet
	for _, b := range sbs.buckets {
		bhts := b.heaviestTipSet()
		if bestTs == nil || bhts.ParentWeight().GreaterThan(bestTs.ParentWeight()) {
			bestTs = bhts
		}
	}
	return bestTs
}

// syncTargetBucket holds tipsets that all belong to the same chain. count
// tracks how many times tipsets have been added, roughly a proxy for how
// many peers are on this chain.
type syncTargetBucket struct {
	tips  []*types.TipSet
	count int
}

func newSyncTargetBucket(tipsets ...*types.TipSet) *syncTargetBucket {
	var stb syncTargetBucket
	for _, ts := range tipsets {
		stb.add(ts)
	}
	return &stb
}

// sameChainAs reports whether ts belongs to this bucket's chain: it is equal
// to, the direct parent of, or a direct child of one of the bucket's tipsets.
func (stb *syncTargetBucket) sameChainAs(ts *types.TipSet) bool {
	for _, t := range stb.tips {
		if ts.Equals(t) {
			return true
		}
		if types.CidArrsEqual(ts.Cids(), t.Parents()) {
			return true
		}
		if types.CidArrsEqual(ts.Parents(), t.Cids()) {
			return true
		}
	}
	return false
}

func (stb *syncTargetBucket) add(ts *types.TipSet) {
	stb.count++

	for _, t := range stb.tips {
		if t.Equals(ts) {
			return
		}
	}

	stb.tips = append(stb.tips, ts)
}

func (stb *syncTargetBucket) heaviestTipSet() *types.TipSet {
	if stb == nil {
		return nil
	}

	var best *types.TipSet
	for _, ts := range stb.tips {
		if best == nil || ts.ParentWeight().GreaterThan(best.ParentWeight()) {
			best = ts
		}
	}
	return best
}

func (sm *SyncManager) selectSyncTarget() (*types.TipSet, error) {
	var buckets syncBucketSet

	var peerHeads []*types.TipSet
	for _, ts := range sm.peerHeads {
		peerHeads = append(peerHeads, ts)
	}
	sort.Slice(peerHeads, func(i, j int) bool {
		return peerHeads[i].Height() < peerHeads[j].Height()
	})

	for _, ts := range peerHeads {
		buckets.Insert(ts)
	}

	if len(buckets.buckets) > 1 {
		log.Warning("caution, multiple distinct chains seen during head selection")
		// TODO: we *could* refuse to sync here without user intervention.
		// For now, just select the best cluster
	}

	return buckets.Heaviest(), nil
}
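
// For example, if peers report heads on two competing forks, the heads are
// bucketed into two chains and the bucket whose heaviest tipset has the
// greatest parent weight is selected, regardless of which fork has more raw
// height.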

func (sm *SyncManager) syncScheduler() {
	for {
		select {
		case ts, ok := <-sm.incomingTipSets:
			if !ok {
				log.Info("shutting down sync scheduler")
				return
			}

			sm.scheduleIncoming(ts)
		case res := <-sm.syncResults:
			sm.scheduleProcessResult(res)
		case sm.workerChan <- sm.nextSyncTarget.heaviestTipSet():
			// workerChan is nil whenever nextSyncTarget is nil, which
			// disables this case; heaviestTipSet is nil-receiver safe, so
			// evaluating it here is fine either way
			sm.scheduleWorkSent()
		case <-sm.stop:
			log.Info("sync scheduler shutting down")
			return
		}
	}
}
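
// The workerChan case above relies on a standard Go idiom: a send on a nil
// channel blocks forever, so a select case sending on a nil channel can
// never be chosen. A minimal standalone sketch of the pattern (hypothetical,
// not part of this package):
//
//	var out chan int // nil: the send case below is disabled
//	select {
//	case out <- 1: // never chosen while out == nil
//	default:
//	}
//	out = make(chan int, 1) // assigning a real channel re-enables the case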

func (sm *SyncManager) scheduleIncoming(ts *types.TipSet) {
	var relatedToActiveSync bool
	for _, acts := range sm.activeSyncs {
		if ts.Equals(acts) {
			// this tipset is already being synced, nothing to do
			return
		}

		if types.CidArrsEqual(ts.Parents(), acts.Cids()) {
			// sync this next, after that sync process finishes
			relatedToActiveSync = true
		}
	}

	// if this is related to an active sync process, immediately bucket it
	// we don't want to start a parallel sync process that duplicates work
	if relatedToActiveSync {
		sm.activeSyncTips.Insert(ts)
		return
	}

	if sm.nextSyncTarget != nil && sm.nextSyncTarget.sameChainAs(ts) {
		sm.nextSyncTarget.add(ts)
	} else {
		sm.syncQueue.Insert(ts)

		if sm.nextSyncTarget == nil {
			sm.nextSyncTarget = sm.syncQueue.Pop()
			sm.workerChan = sm.syncTargets
		}
	}
}

func (sm *SyncManager) scheduleProcessResult(res *syncResult) {
	delete(sm.activeSyncs, res.ts.Key())
	relbucket := sm.activeSyncTips.PopRelated(res.ts)
	if relbucket != nil {
		if res.success {
			if sm.nextSyncTarget == nil {
				sm.nextSyncTarget = relbucket
				sm.workerChan = sm.syncTargets
			} else {
				sm.syncQueue.buckets = append(sm.syncQueue.buckets, relbucket)
			}
		} else {
			// TODO: this is the case where we try to sync a chain, and
			// fail, and we have more blocks on top of that chain that
			// have come in since. The question is, should we try to
			// sync these? or just drop them?
		}
	}
}

func (sm *SyncManager) scheduleWorkSent() {
	hts := sm.nextSyncTarget.heaviestTipSet()
	sm.activeSyncs[hts.Key()] = hts

	if len(sm.syncQueue.buckets) > 0 {
		sm.nextSyncTarget = sm.syncQueue.Pop()
	} else {
		sm.nextSyncTarget = nil
		sm.workerChan = nil
	}
}
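
// Scheduler flow, in brief: an incoming tipset either joins the bucket of a
// related active sync, extends the pending next target, or is queued; once a
// worker takes the next target, the heaviest queued bucket (if any) is
// promoted, otherwise workerChan is set to nil to pause handing out work.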

func (sm *SyncManager) syncWorker(id int) {
	ss := &SyncerState{}
	sm.syncStates[id] = ss
	for {
		select {
		case ts, ok := <-sm.syncTargets:
			if !ok {
				log.Info("sync manager worker shutting down")
				return
			}
			log.Info("sync worker go time!", ts.Height(), ts.Cids())

			ctx := context.WithValue(context.TODO(), syncStateKey{}, ss)
			err := sm.doSync(ctx, ts)
			if err != nil {
				log.Errorf("sync error: %+v", err)
			}

			sm.syncResults <- &syncResult{
				ts:      ts,
				success: err == nil,
			}
		case <-sm.stop:
			// nothing here closes syncTargets, so also exit on the stop
			// signal to avoid leaking workers after Stop is called
			log.Info("sync manager worker shutting down")
			return
		}
	}
}

func (sm *SyncManager) syncedPeerCount() int {
	var count int
	for _, ts := range sm.peerHeads {
		if ts.Height() > 0 {
			count++
		}
	}
	return count
}

func (sm *SyncManager) IsBootstrapped() bool {
	sm.lk.Lock()
	defer sm.lk.Unlock()
	return sm.bootstrapped
}