eth/fetcher: build longest chain until proven otherwise

commit 37c5ff392f
parent 2a7411bc96
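The hunks below touch the eth/fetcher package (the Fetcher implementation and its tests) plus the import list of one more file. The central rule of the change is that queued blocks are imported strictly height by height: a popped block is pushed back if it sits more than one block above the current head, and dropped if it is at or below the head or already known. A minimal sketch of that gating rule follows; the gate helper and its names are illustrative only, not code from this commit.

package main

import "fmt"

// importAction mirrors the three outcomes the reworked fetcher loop applies to a
// queued block: requeue it for later, skip it, or import it now.
type importAction int

const (
	requeueLater importAction = iota // number > head+1: too far ahead, try again later
	skipKnown                        // number <= head (or hash already known): stale, drop it
	importNow                        // number == head+1 and unknown: extends the chain
)

func (a importAction) String() string {
	return [...]string{"requeue for later", "skip as stale/known", "import now"}[a]
}

// gate is a hypothetical helper condensing the checks from the loop() hunk below.
func gate(number, head uint64, known bool) importAction {
	switch {
	case number > head+1:
		return requeueLater
	case number <= head || known:
		return skipKnown
	default:
		return importNow
	}
}

func main() {
	head := uint64(10)
	for _, n := range []uint64{9, 10, 11, 12} {
		fmt.Printf("block #%d vs head #%d -> %v\n", n, head, gate(n, head, false))
	}
}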
@@ -3,7 +3,6 @@ package fetcher
 
 import (
 	"errors"
-	"math"
 	"math/rand"
 	"time"
 
@@ -57,8 +56,9 @@ type inject struct {
 type Fetcher struct {
 	// Various event channels
 	notify chan *announce
-	insert chan *inject
+	inject chan *inject
 	filter chan chan []*types.Block
+	done   chan common.Hash
 	quit   chan struct{}
 
 	// Announce states
@@ -79,8 +79,9 @@ type Fetcher struct {
 func New(hasBlock hashCheckFn, importBlock blockImporterFn, chainHeight chainHeightFn) *Fetcher {
 	return &Fetcher{
 		notify:    make(chan *announce),
-		insert:    make(chan *inject),
+		inject:    make(chan *inject),
 		filter:    make(chan chan []*types.Block),
+		done:      make(chan common.Hash),
 		quit:      make(chan struct{}),
 		announced: make(map[common.Hash][]*announce),
 		fetching:  make(map[common.Hash]*announce),
@@ -128,7 +129,7 @@ func (f *Fetcher) Enqueue(peer string, block *types.Block) error {
 		block:  block,
 	}
 	select {
-	case f.insert <- op:
+	case f.inject <- op:
 		return nil
 	case <-f.quit:
 		return errTerminated
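Enqueue keeps its public signature; only the internal channel it feeds is renamed from insert to inject. A small, self-contained sketch of the same hand-off pattern, using stand-in types rather than the real Fetcher:

package main

import (
	"errors"
	"fmt"
)

var errTerminated = errors.New("terminated")

// op stands in for the fetcher's *inject operation (origin peer plus block).
type op struct {
	origin string
	number uint64
}

// queue mirrors the Enqueue pattern in the hunk above: hand the operation to the
// event loop via the inject channel, unless the fetcher is shutting down.
type queue struct {
	inject chan *op
	quit   chan struct{}
}

func (q *queue) Enqueue(origin string, number uint64) error {
	select {
	case q.inject <- &op{origin: origin, number: number}:
		return nil
	case <-q.quit:
		return errTerminated
	}
}

func main() {
	q := &queue{inject: make(chan *op), quit: make(chan struct{})}

	go func() {
		if err := q.Enqueue("peer-1", 42); err != nil {
			fmt.Println("enqueue failed:", err)
		}
	}()
	o := <-q.inject // the event-loop side picks the operation up
	fmt.Printf("received block #%d from %s\n", o.number, o.origin)
}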
@@ -166,8 +167,6 @@ func (f *Fetcher) Filter(blocks types.Blocks) types.Blocks {
 func (f *Fetcher) loop() {
 	// Iterate the block fetching until a quit is requested
 	fetch := time.NewTimer(0)
-	done := make(chan common.Hash)
-
 	for {
 		// Clean up any expired block fetches
 		for hash, announce := range f.fetching {
@@ -179,27 +178,19 @@ func (f *Fetcher) loop() {
 		// Import any queued blocks that could potentially fit
 		height := f.chainHeight()
 		for !f.queue.Empty() {
-			// If too high up the chain, continue later
 			op := f.queue.PopItem().(*inject)
-			if number := op.block.NumberU64(); number > height+1 {
+			number := op.block.NumberU64()
+
+			// If too high up the chain or phase, continue later
+			if number > height+1 {
 				f.queue.Push(op, -float32(op.block.NumberU64()))
 				break
 			}
-			// Otherwise if not known yet, try and import
-			hash := op.block.Hash()
-			if f.hasBlock(hash) {
+			// Otherwise if fresh and still unknown, try and import
+			if number <= height || f.hasBlock(op.block.Hash()) {
 				continue
 			}
-			// Block may just fit, try to import it
-			glog.V(logger.Debug).Infof("Peer %s: importing block #%d [%x]", op.origin, op.block.NumberU64(), hash.Bytes()[:4])
-			go func() {
-				defer func() { done <- hash }()
-
-				if err := f.importBlock(op.origin, op.block); err != nil {
-					glog.V(logger.Detail).Infof("Peer %s: block #%d [%x] import failed: %v", op.origin, op.block.NumberU64(), hash.Bytes()[:4], err)
-					return
-				}
-			}()
+			f.insert(op.origin, op.block)
 		}
 		// Wait for an outside event to occur
 		select {
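The rewritten drain loop above pops blocks from a priority queue ordered by block number, requeues anything more than one block above the head, skips anything stale or already known, and hands the rest to an asynchronous import. Below is a self-contained sketch of that drain pattern using container/heap instead of the priority-queue package the real code relies on; helper names are illustrative, and head is advanced inline here, whereas the real loop leaves it unchanged until the next pass because imports run asynchronously (see the insert helper added further down).

package main

import (
	"container/heap"
	"fmt"
)

// item stands in for the fetcher's *inject operation.
type item struct {
	origin string
	number uint64
}

// byNumber is a min-heap on block number, playing the role of the fetcher's
// priority queue (which pushes entries with -number as the priority).
type byNumber []*item

func (q byNumber) Len() int            { return len(q) }
func (q byNumber) Less(i, j int) bool  { return q[i].number < q[j].number }
func (q byNumber) Swap(i, j int)       { q[i], q[j] = q[j], q[i] }
func (q *byNumber) Push(x interface{}) { *q = append(*q, x.(*item)) }
func (q *byNumber) Pop() interface{} {
	old := *q
	n := len(old)
	it := old[n-1]
	*q = old[:n-1]
	return it
}

// drain mirrors the loop() hunk: import anything at head+1, requeue anything
// further ahead for a later pass, and drop anything stale or already known.
func drain(q *byNumber, head uint64, known map[uint64]bool) {
	for q.Len() > 0 {
		op := heap.Pop(q).(*item)
		number := op.number

		// If too high up the chain, push it back and stop for now
		if number > head+1 {
			heap.Push(q, op)
			break
		}
		// Otherwise if fresh and still unknown, "import" it
		if number <= head || known[number] {
			continue
		}
		fmt.Printf("importing block #%d from %s\n", number, op.origin)
		known[number] = true
		head = number // simplification: the real import is asynchronous
	}
}

func main() {
	q := &byNumber{}
	heap.Init(q)
	for _, n := range []uint64{13, 11, 12, 11} { // includes a duplicate height from a second peer
		heap.Push(q, &item{origin: "peer", number: n})
	}
	drain(q, 10, map[uint64]bool{})
}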
@@ -209,7 +200,6 @@ func (f *Fetcher) loop() {
 
 		case notification := <-f.notify:
 			// A block was announced, schedule if it's not yet downloading
-			glog.V(logger.Debug).Infof("Peer %s: scheduling %x", notification.origin, notification.hash[:4])
 			if _, ok := f.fetching[notification.hash]; ok {
 				break
 			}
@@ -218,11 +208,11 @@ func (f *Fetcher) loop() {
 				f.reschedule(fetch)
 			}
 
-		case op := <-f.insert:
+		case op := <-f.inject:
 			// A direct block insertion was requested, try and fill any pending gaps
 			f.enqueue(op.origin, op.block)
 
-		case hash := <-done:
+		case hash := <-f.done:
 			// A pending import finished, remove all traces of the notification
 			delete(f.announced, hash)
 			delete(f.fetching, hash)
@@ -243,8 +233,7 @@ func (f *Fetcher) loop() {
 				}
 			}
 			// Send out all block requests
-			for peer, hashes := range request {
-				glog.V(logger.Debug).Infof("Peer %s: explicitly fetching %d blocks", peer, len(hashes))
+			for _, hashes := range request {
 				go f.fetching[hashes[0]].fetch(hashes)
 			}
 			// Schedule the next fetch if blocks are still pending
@@ -304,7 +293,6 @@ func (f *Fetcher) reschedule(fetch *time.Timer) {
 			earliest = announces[0].time
 		}
 	}
-	glog.V(logger.Detail).Infof("Scheduling next fetch in %v", arriveTimeout-time.Since(earliest))
 	fetch.Reset(arriveTimeout - time.Since(earliest))
 }
 
@@ -313,9 +301,9 @@ func (f *Fetcher) reschedule(fetch *time.Timer) {
 func (f *Fetcher) enqueue(peer string, block *types.Block) {
 	hash := block.Hash()
 
-	// Make sure the block isn't in some weird place
-	if math.Abs(float64(f.chainHeight())-float64(block.NumberU64())) > maxQueueDist {
-		glog.Infof("Peer %s: discarded block #%d [%x] too far from head", peer, block.NumberU64(), hash.Bytes()[:4])
+	// Discard any past or too distant blocks
+	if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist <= 0 || dist > maxQueueDist {
+		glog.Infof("Peer %s: discarded block #%d [%x], distance %d", peer, block.NumberU64(), hash.Bytes()[:4], dist)
 		return
 	}
 	// Schedule the block for future importing
@@ -328,3 +316,22 @@ func (f *Fetcher) enqueue(peer string, block *types.Block) {
 		}
 	}
 }
+
+// insert spawns a new goroutine to run a block insertion into the chain. If the
+// block's number is at the same height as the current import phase, if updates
+// the phase states accordingly.
+func (f *Fetcher) insert(peer string, block *types.Block) {
+	hash := block.Hash()
+
+	// Run the import on a new thread
+	glog.V(logger.Debug).Infof("Peer %s: importing block #%d [%x]", peer, block.NumberU64(), hash[:4])
+	go func() {
+		defer func() { f.done <- hash }()
+
+		// Run the actual import and log any issues
+		if err := f.importBlock(peer, block); err != nil {
+			glog.V(logger.Detail).Infof("Peer %s: block #%d [%x] import failed: %v", peer, block.NumberU64(), hash[:4], err)
+			return
+		}
+	}()
+}
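The new insert helper runs each import on its own goroutine and always reports the block hash on the done channel, which the loop's `case hash := <-f.done` branch uses to clear the announced and fetching state. A minimal, self-contained sketch of that hand-off; the importer type and its names are stand-ins, not the commit's code:

package main

import (
	"fmt"
	"time"
)

// importer mimics the fetcher's split between a synchronous event loop and
// asynchronous block imports that signal completion on a done channel.
type importer struct {
	done chan string
}

// insert spawns the import on a new goroutine and always reports the hash back,
// whether the import succeeded or failed, so the loop can clean up its state.
func (im *importer) insert(peer, hash string, importBlock func(string, string) error) {
	fmt.Printf("peer %s: importing block %s\n", peer, hash)
	go func() {
		defer func() { im.done <- hash }()

		if err := importBlock(peer, hash); err != nil {
			fmt.Printf("peer %s: block %s import failed: %v\n", peer, hash, err)
			return
		}
	}()
}

func main() {
	im := &importer{done: make(chan string)}

	slowImport := func(peer, hash string) error {
		time.Sleep(10 * time.Millisecond) // pretend to verify and write the block
		return nil
	}
	im.insert("peer-1", "0xabcd", slowImport)

	// The event-loop side: wait for the completion signal and forget the block.
	hash := <-im.done
	fmt.Printf("import of %s finished, clearing announce/fetch state\n", hash)
}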
@@ -91,9 +91,15 @@ func (f *fetcherTester) hasBlock(hash common.Hash) bool {
 
 // importBlock injects a new blocks into the simulated chain.
 func (f *fetcherTester) importBlock(peer string, block *types.Block) error {
+	// Make sure the parent in known
 	if _, ok := f.ownBlocks[block.ParentHash()]; !ok {
 		return errors.New("unknown parent")
 	}
+	// Discard any new blocks if the same height already exists
+	if block.NumberU64() <= f.ownBlocks[f.ownHashes[len(f.ownHashes)-1]].NumberU64() {
+		return nil
+	}
+	// Otherwise build our current chain
 	f.ownHashes = append(f.ownHashes, block.Hash())
 	f.ownBlocks[block.Hash()] = block
 	return nil
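With this change the simulated chain in the tests keeps only its longest branch: a block is appended only when its number is strictly above the current head, so a competing block at an already existing height is accepted as a no-op. A tiny sketch of that acceptance rule, keyed by block number instead of hash for brevity (the chain type is a stand-in for fetcherTester):

package main

import (
	"errors"
	"fmt"
)

// chain is a stand-in for the fetcherTester's ownHashes/ownBlocks bookkeeping,
// tracking blocks by height only.
type chain struct {
	known map[uint64]bool // every block ever imported ("ownBlocks")
	head  uint64          // number of the last block appended ("ownHashes" tail)
}

// importBlock mirrors the updated tester: reject unknown parents, ignore blocks
// that do not exceed the current head, otherwise extend the chain.
func (c *chain) importBlock(parentNumber, number uint64) error {
	if !c.known[parentNumber] {
		return errors.New("unknown parent")
	}
	if number <= c.head {
		return nil // a block at this height already exists; keep the current branch
	}
	c.known[number] = true
	c.head = number
	return nil
}

func main() {
	c := &chain{known: map[uint64]bool{0: true}, head: 0}
	fmt.Println(c.importBlock(0, 1)) // <nil>: extends the chain to height 1
	fmt.Println(c.importBlock(0, 1)) // <nil>: competing block at height 1 is silently dropped
	fmt.Println(c.importBlock(5, 6)) // unknown parent
	fmt.Println("head:", c.head)
}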
@@ -363,3 +369,54 @@ func TestDistantDiscarding(t *testing.T) {
 		t.Fatalf("fetcher queued future block")
 	}
 }
+
+// Tests that if multiple uncles (i.e. blocks at the same height) are queued for
+// importing, then they will get inserted in phases, previous heights needing to
+// complete before the next numbered blocks can begin.
+func TestCompetingImports(t *testing.T) {
+	// Generate a few soft-forks for concurrent imports
+	hashesA := createHashes(16, knownHash)
+	hashesB := createHashes(16, knownHash)
+	hashesC := createHashes(16, knownHash)
+
+	blocksA := createBlocksFromHashes(hashesA)
+	blocksB := createBlocksFromHashes(hashesB)
+	blocksC := createBlocksFromHashes(hashesC)
+
+	// Create a tester, and override the import to check number reversals
+	tester := newTester()
+
+	first := int32(1)
+	height := uint64(1)
+	tester.fetcher.importBlock = func(peer string, block *types.Block) error {
+		// Check for any phase reordering
+		if prev := atomic.LoadUint64(&height); block.NumberU64() < prev {
+			t.Errorf("phase reversal: have %v, want %v", block.NumberU64(), prev)
+		}
+		atomic.StoreUint64(&height, block.NumberU64())
+
+		// Sleep a bit on the first import not to race with the enqueues
+		if atomic.CompareAndSwapInt32(&first, 1, 0) {
+			time.Sleep(50 * time.Millisecond)
+		}
+		return tester.importBlock(peer, block)
+	}
+	// Queue up everything but with a missing link
+	for i := 0; i < len(hashesA)-2; i++ {
+		tester.fetcher.Enqueue("chain A", blocksA[hashesA[i]])
+		tester.fetcher.Enqueue("chain B", blocksB[hashesB[i]])
+		tester.fetcher.Enqueue("chain C", blocksC[hashesC[i]])
+	}
+	// Add the three missing links, and wait for a full import
+	tester.fetcher.Enqueue("chain A", blocksA[hashesA[len(hashesA)-2]])
+	tester.fetcher.Enqueue("chain B", blocksB[hashesB[len(hashesB)-2]])
+	tester.fetcher.Enqueue("chain C", blocksC[hashesC[len(hashesC)-2]])
+
+	start := time.Now()
+	for len(tester.ownHashes) != len(hashesA) && time.Since(start) < time.Second {
+		time.Sleep(50 * time.Millisecond)
+	}
+	if len(tester.ownHashes) != len(hashesA) {
+		t.Fatalf("chain length mismatch: have %v, want %v", len(tester.ownHashes), len(hashesA))
+	}
+}
@@ -8,11 +8,11 @@ import (
 	"sync"
 	"time"
 
-	"github.com/ethereum/go-ethereum/eth/fetcher"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/eth/downloader"
+	"github.com/ethereum/go-ethereum/eth/fetcher"
 	"github.com/ethereum/go-ethereum/event"
 	"github.com/ethereum/go-ethereum/logger"
 	"github.com/ethereum/go-ethereum/logger/glog"