forked from cerc-io/plugeth
eth: request id dispatcher and direct req/reply APIs (#23576)
* eth: request ID based message dispatcher
* eth: fix dispatcher cancellation, rework fetchers idleness tracker
* eth/downloader: drop peers who refuse to serve advertised chains
parent 3038e480f5
commit c10a0a62c3
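For orientation before the diff: the sketch below illustrates the general request-ID dispatch pattern the commit message describes, a per-peer table that pairs each outgoing request with a unique ID and routes the matching reply back to the waiting caller. It is a minimal, hypothetical illustration only; the type and method names (dispatcher, send, dispatch) are assumptions and do not mirror the actual eth package API introduced by this commit.

// Hypothetical sketch of request-ID based dispatch (not the geth implementation).
package main

import (
	"errors"
	"fmt"
	"sync"
)

// request is one in-flight request waiting for a reply carrying the same ID.
type request struct {
	id   uint64
	resp chan interface{}
}

// dispatcher hands out request IDs and matches incoming replies to waiters.
type dispatcher struct {
	mu      sync.Mutex
	nextID  uint64
	pending map[uint64]*request
}

func newDispatcher() *dispatcher {
	return &dispatcher{pending: make(map[uint64]*request)}
}

// send registers a pending request; the caller would embed req.id in the
// outgoing protocol message and then block on req.resp.
func (d *dispatcher) send() *request {
	d.mu.Lock()
	defer d.mu.Unlock()
	d.nextID++
	req := &request{id: d.nextID, resp: make(chan interface{}, 1)}
	d.pending[req.id] = req
	return req
}

// dispatch routes a reply to the waiter registered under its request ID and
// drops it from the pending set; unknown IDs are reported as protocol errors.
func (d *dispatcher) dispatch(id uint64, msg interface{}) error {
	d.mu.Lock()
	req, ok := d.pending[id]
	delete(d.pending, id)
	d.mu.Unlock()
	if !ok {
		return errors.New("reply with unknown request id")
	}
	req.resp <- msg
	return nil
}

func main() {
	d := newDispatcher()
	req := d.send()
	go d.dispatch(req.id, "block headers") // simulate a reply arriving from a peer
	fmt.Println(<-req.resp)                // prints: block headers
}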
@@ -215,7 +215,7 @@ var (
 defaultSyncMode = ethconfig.Defaults.SyncMode
 SyncModeFlag = TextMarshalerFlag{
 Name: "syncmode",
-Usage: `Blockchain sync mode ("fast", "full", "snap" or "light")`,
+Usage: `Blockchain sync mode ("snap", "full" or "light")`,
 Value: &defaultSyncMode,
 }
 GCModeFlag = cli.StringFlag{
@@ -629,9 +629,9 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
 return rootNumber, bc.loadLastState()
 }

-// FastSyncCommitHead sets the current head block to the one defined by the hash
+// SnapSyncCommitHead sets the current head block to the one defined by the hash
 // irrelevant what the chain contents were prior.
-func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
+func (bc *BlockChain) SnapSyncCommitHead(hash common.Hash) error {
 // Make sure that both the block as well at its state trie exists
 block := bc.GetBlockByHash(hash)
 if block == nil {
@@ -736,30 +736,24 @@ func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
 //
 // Note, this function assumes that the `mu` mutex is held!
 func (bc *BlockChain) writeHeadBlock(block *types.Block) {
-// If the block is on a side chain or an unknown one, force other heads onto it too
-updateHeads := rawdb.ReadCanonicalHash(bc.db, block.NumberU64()) != block.Hash()

 // Add the block to the canonical chain number scheme and mark as the head
 batch := bc.db.NewBatch()
+rawdb.WriteHeadHeaderHash(batch, block.Hash())
+rawdb.WriteHeadFastBlockHash(batch, block.Hash())
 rawdb.WriteCanonicalHash(batch, block.Hash(), block.NumberU64())
 rawdb.WriteTxLookupEntriesByBlock(batch, block)
 rawdb.WriteHeadBlockHash(batch, block.Hash())

-// If the block is better than our head or is on a different chain, force update heads
-if updateHeads {
-rawdb.WriteHeadHeaderHash(batch, block.Hash())
-rawdb.WriteHeadFastBlockHash(batch, block.Hash())
-}
 // Flush the whole batch into the disk, exit the node if failed
 if err := batch.Write(); err != nil {
 log.Crit("Failed to update chain indexes and markers", "err", err)
 }
 // Update all in-memory chain markers in the last step
-if updateHeads {
 bc.hc.SetCurrentHeader(block.Header())

 bc.currentFastBlock.Store(block)
 headFastBlockGauge.Update(int64(block.NumberU64()))
-}
 bc.currentBlock.Store(block)
 headBlockGauge.Update(int64(block.NumberU64()))
 }
@@ -79,10 +79,10 @@ func testShortRepair(t *testing.T, snapshots bool) {
 // already committed, after which the process crashed. In this case we expect the full
 // chain to be rolled back to the committed block, but the chain data itself left in
 // the database for replaying.
-func TestShortFastSyncedRepair(t *testing.T) { testShortFastSyncedRepair(t, false) }
-func TestShortFastSyncedRepairWithSnapshots(t *testing.T) { testShortFastSyncedRepair(t, true) }
+func TestShortSnapSyncedRepair(t *testing.T) { testShortSnapSyncedRepair(t, false) }
+func TestShortSnapSyncedRepairWithSnapshots(t *testing.T) { testShortSnapSyncedRepair(t, true) }

-func testShortFastSyncedRepair(t *testing.T, snapshots bool) {
+func testShortSnapSyncedRepair(t *testing.T, snapshots bool) {
 // Chain:
 // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 //
@@ -119,10 +119,10 @@ func testShortFastSyncedRepair(t *testing.T, snapshots bool) {
 // not yet committed, but the process crashed. In this case we expect the chain to
 // detect that it was fast syncing and not delete anything, since we can just pick
 // up directly where we left off.
-func TestShortFastSyncingRepair(t *testing.T) { testShortFastSyncingRepair(t, false) }
-func TestShortFastSyncingRepairWithSnapshots(t *testing.T) { testShortFastSyncingRepair(t, true) }
+func TestShortSnapSyncingRepair(t *testing.T) { testShortSnapSyncingRepair(t, false) }
+func TestShortSnapSyncingRepairWithSnapshots(t *testing.T) { testShortSnapSyncingRepair(t, true) }

-func testShortFastSyncingRepair(t *testing.T, snapshots bool) {
+func testShortSnapSyncingRepair(t *testing.T, snapshots bool) {
 // Chain:
 // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 //
@@ -203,14 +203,14 @@ func testShortOldForkedRepair(t *testing.T, snapshots bool) {
 // crashed. In this test scenario the side chain is below the committed block. In
 // this case we expect the canonical chain to be rolled back to the committed block,
 // but the chain data itself left in the database for replaying.
-func TestShortOldForkedFastSyncedRepair(t *testing.T) {
-testShortOldForkedFastSyncedRepair(t, false)
+func TestShortOldForkedSnapSyncedRepair(t *testing.T) {
+testShortOldForkedSnapSyncedRepair(t, false)
 }
-func TestShortOldForkedFastSyncedRepairWithSnapshots(t *testing.T) {
-testShortOldForkedFastSyncedRepair(t, true)
+func TestShortOldForkedSnapSyncedRepairWithSnapshots(t *testing.T) {
+testShortOldForkedSnapSyncedRepair(t, true)
 }

-func testShortOldForkedFastSyncedRepair(t *testing.T, snapshots bool) {
+func testShortOldForkedSnapSyncedRepair(t *testing.T, snapshots bool) {
 // Chain:
 // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 // └->S1->S2->S3
@@ -250,14 +250,14 @@ func testShortOldForkedFastSyncedRepair(t *testing.T, snapshots bool) {
 // test scenario the side chain is below the committed block. In this case we expect
 // the chain to detect that it was fast syncing and not delete anything, since we
 // can just pick up directly where we left off.
-func TestShortOldForkedFastSyncingRepair(t *testing.T) {
-testShortOldForkedFastSyncingRepair(t, false)
+func TestShortOldForkedSnapSyncingRepair(t *testing.T) {
+testShortOldForkedSnapSyncingRepair(t, false)
 }
-func TestShortOldForkedFastSyncingRepairWithSnapshots(t *testing.T) {
-testShortOldForkedFastSyncingRepair(t, true)
+func TestShortOldForkedSnapSyncingRepairWithSnapshots(t *testing.T) {
+testShortOldForkedSnapSyncingRepair(t, true)
 }

-func testShortOldForkedFastSyncingRepair(t *testing.T, snapshots bool) {
+func testShortOldForkedSnapSyncingRepair(t *testing.T, snapshots bool) {
 // Chain:
 // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 // └->S1->S2->S3
@@ -340,14 +340,14 @@ func testShortNewlyForkedRepair(t *testing.T, snapshots bool) {
 // crashed. In this test scenario the side chain reaches above the committed block.
 // In this case we expect the canonical chain to be rolled back to the committed
 // block, but the chain data itself left in the database for replaying.
-func TestShortNewlyForkedFastSyncedRepair(t *testing.T) {
-testShortNewlyForkedFastSyncedRepair(t, false)
+func TestShortNewlyForkedSnapSyncedRepair(t *testing.T) {
+testShortNewlyForkedSnapSyncedRepair(t, false)
 }
-func TestShortNewlyForkedFastSyncedRepairWithSnapshots(t *testing.T) {
-testShortNewlyForkedFastSyncedRepair(t, true)
+func TestShortNewlyForkedSnapSyncedRepairWithSnapshots(t *testing.T) {
+testShortNewlyForkedSnapSyncedRepair(t, true)
 }

-func testShortNewlyForkedFastSyncedRepair(t *testing.T, snapshots bool) {
+func testShortNewlyForkedSnapSyncedRepair(t *testing.T, snapshots bool) {
 // Chain:
 // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 // └->S1->S2->S3->S4->S5->S6
@@ -387,14 +387,14 @@ func testShortNewlyForkedFastSyncedRepair(t *testing.T, snapshots bool) {
 // this test scenario the side chain reaches above the committed block. In this
 // case we expect the chain to detect that it was fast syncing and not delete
 // anything, since we can just pick up directly where we left off.
-func TestShortNewlyForkedFastSyncingRepair(t *testing.T) {
-testShortNewlyForkedFastSyncingRepair(t, false)
+func TestShortNewlyForkedSnapSyncingRepair(t *testing.T) {
+testShortNewlyForkedSnapSyncingRepair(t, false)
 }
-func TestShortNewlyForkedFastSyncingRepairWithSnapshots(t *testing.T) {
-testShortNewlyForkedFastSyncingRepair(t, true)
+func TestShortNewlyForkedSnapSyncingRepairWithSnapshots(t *testing.T) {
+testShortNewlyForkedSnapSyncingRepair(t, true)
 }

-func testShortNewlyForkedFastSyncingRepair(t *testing.T, snapshots bool) {
+func testShortNewlyForkedSnapSyncingRepair(t *testing.T, snapshots bool) {
 // Chain:
 // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 // └->S1->S2->S3->S4->S5->S6
@@ -475,14 +475,14 @@ func testShortReorgedRepair(t *testing.T, snapshots bool) {
 // the fast sync pivot point was already committed to disk and then the process
 // crashed. In this case we expect the canonical chain to be rolled back to the
 // committed block, but the chain data itself left in the database for replaying.
-func TestShortReorgedFastSyncedRepair(t *testing.T) {
-testShortReorgedFastSyncedRepair(t, false)
+func TestShortReorgedSnapSyncedRepair(t *testing.T) {
+testShortReorgedSnapSyncedRepair(t, false)
 }
-func TestShortReorgedFastSyncedRepairWithSnapshots(t *testing.T) {
-testShortReorgedFastSyncedRepair(t, true)
+func TestShortReorgedSnapSyncedRepairWithSnapshots(t *testing.T) {
+testShortReorgedSnapSyncedRepair(t, true)
 }

-func testShortReorgedFastSyncedRepair(t *testing.T, snapshots bool) {
+func testShortReorgedSnapSyncedRepair(t *testing.T, snapshots bool) {
 // Chain:
 // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
@@ -521,14 +521,14 @@ func testShortReorgedFastSyncedRepair(t *testing.T, snapshots bool) {
 // the fast sync pivot point was not yet committed, but the process crashed. In
 // this case we expect the chain to detect that it was fast syncing and not delete
 // anything, since we can just pick up directly where we left off.
-func TestShortReorgedFastSyncingRepair(t *testing.T) {
-testShortReorgedFastSyncingRepair(t, false)
+func TestShortReorgedSnapSyncingRepair(t *testing.T) {
+testShortReorgedSnapSyncingRepair(t, false)
 }
-func TestShortReorgedFastSyncingRepairWithSnapshots(t *testing.T) {
-testShortReorgedFastSyncingRepair(t, true)
+func TestShortReorgedSnapSyncingRepairWithSnapshots(t *testing.T) {
+testShortReorgedSnapSyncingRepair(t, true)
 }

-func testShortReorgedFastSyncingRepair(t *testing.T, snapshots bool) {
+func testShortReorgedSnapSyncingRepair(t *testing.T, snapshots bool) {
 // Chain:
 // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
@@ -656,14 +656,14 @@ func testLongDeepRepair(t *testing.T, snapshots bool) {
 // sync pivot point - newer than the ancient limit - was already committed, after
 // which the process crashed. In this case we expect the chain to be rolled back
 // to the committed block, with everything afterwads kept as fast sync data.
-func TestLongFastSyncedShallowRepair(t *testing.T) {
-testLongFastSyncedShallowRepair(t, false)
+func TestLongSnapSyncedShallowRepair(t *testing.T) {
+testLongSnapSyncedShallowRepair(t, false)
 }
-func TestLongFastSyncedShallowRepairWithSnapshots(t *testing.T) {
-testLongFastSyncedShallowRepair(t, true)
+func TestLongSnapSyncedShallowRepairWithSnapshots(t *testing.T) {
+testLongSnapSyncedShallowRepair(t, true)
 }

-func testLongFastSyncedShallowRepair(t *testing.T, snapshots bool) {
+func testLongSnapSyncedShallowRepair(t *testing.T, snapshots bool) {
 // Chain:
 // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 //
@@ -705,10 +705,10 @@ func testLongFastSyncedShallowRepair(t *testing.T, snapshots bool) {
 // sync pivot point - older than the ancient limit - was already committed, after
 // which the process crashed. In this case we expect the chain to be rolled back
 // to the committed block, with everything afterwads deleted.
-func TestLongFastSyncedDeepRepair(t *testing.T) { testLongFastSyncedDeepRepair(t, false) }
-func TestLongFastSyncedDeepRepairWithSnapshots(t *testing.T) { testLongFastSyncedDeepRepair(t, true) }
+func TestLongSnapSyncedDeepRepair(t *testing.T) { testLongSnapSyncedDeepRepair(t, false) }
+func TestLongSnapSyncedDeepRepairWithSnapshots(t *testing.T) { testLongSnapSyncedDeepRepair(t, true) }

-func testLongFastSyncedDeepRepair(t *testing.T, snapshots bool) {
+func testLongSnapSyncedDeepRepair(t *testing.T, snapshots bool) {
 // Chain:
 // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 //
@@ -750,14 +750,14 @@ func testLongFastSyncedDeepRepair(t *testing.T, snapshots bool) {
 // process crashed. In this case we expect the chain to detect that it was fast
 // syncing and not delete anything, since we can just pick up directly where we
 // left off.
-func TestLongFastSyncingShallowRepair(t *testing.T) {
-testLongFastSyncingShallowRepair(t, false)
+func TestLongSnapSyncingShallowRepair(t *testing.T) {
+testLongSnapSyncingShallowRepair(t, false)
 }
-func TestLongFastSyncingShallowRepairWithSnapshots(t *testing.T) {
-testLongFastSyncingShallowRepair(t, true)
+func TestLongSnapSyncingShallowRepairWithSnapshots(t *testing.T) {
+testLongSnapSyncingShallowRepair(t, true)
 }

-func testLongFastSyncingShallowRepair(t *testing.T, snapshots bool) {
+func testLongSnapSyncingShallowRepair(t *testing.T, snapshots bool) {
 // Chain:
 // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 //
@@ -800,10 +800,10 @@ func testLongFastSyncingShallowRepair(t *testing.T, snapshots bool) {
 // process crashed. In this case we expect the chain to detect that it was fast
 // syncing and not delete anything, since we can just pick up directly where we
 // left off.
-func TestLongFastSyncingDeepRepair(t *testing.T) { testLongFastSyncingDeepRepair(t, false) }
-func TestLongFastSyncingDeepRepairWithSnapshots(t *testing.T) { testLongFastSyncingDeepRepair(t, true) }
+func TestLongSnapSyncingDeepRepair(t *testing.T) { testLongSnapSyncingDeepRepair(t, false) }
+func TestLongSnapSyncingDeepRepairWithSnapshots(t *testing.T) { testLongSnapSyncingDeepRepair(t, true) }

-func testLongFastSyncingDeepRepair(t *testing.T, snapshots bool) {
+func testLongSnapSyncingDeepRepair(t *testing.T, snapshots bool) {
 // Chain:
 // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 //
@@ -946,14 +946,14 @@ func testLongOldForkedDeepRepair(t *testing.T, snapshots bool) {
 // the side chain is below the committed block. In this case we expect the chain
 // to be rolled back to the committed block, with everything afterwads kept as
 // fast sync data; the side chain completely nuked by the freezer.
-func TestLongOldForkedFastSyncedShallowRepair(t *testing.T) {
-testLongOldForkedFastSyncedShallowRepair(t, false)
+func TestLongOldForkedSnapSyncedShallowRepair(t *testing.T) {
+testLongOldForkedSnapSyncedShallowRepair(t, false)
 }
-func TestLongOldForkedFastSyncedShallowRepairWithSnapshots(t *testing.T) {
-testLongOldForkedFastSyncedShallowRepair(t, true)
+func TestLongOldForkedSnapSyncedShallowRepairWithSnapshots(t *testing.T) {
+testLongOldForkedSnapSyncedShallowRepair(t, true)
 }

-func testLongOldForkedFastSyncedShallowRepair(t *testing.T, snapshots bool) {
+func testLongOldForkedSnapSyncedShallowRepair(t *testing.T, snapshots bool) {
 // Chain:
 // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 // └->S1->S2->S3
@@ -998,14 +998,14 @@ func testLongOldForkedFastSyncedShallowRepair(t *testing.T, snapshots bool) {
 // the side chain is below the committed block. In this case we expect the canonical
 // chain to be rolled back to the committed block, with everything afterwads deleted;
 // the side chain completely nuked by the freezer.
-func TestLongOldForkedFastSyncedDeepRepair(t *testing.T) {
-testLongOldForkedFastSyncedDeepRepair(t, false)
+func TestLongOldForkedSnapSyncedDeepRepair(t *testing.T) {
+testLongOldForkedSnapSyncedDeepRepair(t, false)
 }
-func TestLongOldForkedFastSyncedDeepRepairWithSnapshots(t *testing.T) {
-testLongOldForkedFastSyncedDeepRepair(t, true)
+func TestLongOldForkedSnapSyncedDeepRepairWithSnapshots(t *testing.T) {
+testLongOldForkedSnapSyncedDeepRepair(t, true)
 }

-func testLongOldForkedFastSyncedDeepRepair(t *testing.T, snapshots bool) {
+func testLongOldForkedSnapSyncedDeepRepair(t *testing.T, snapshots bool) {
 // Chain:
 // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 // └->S1->S2->S3
@@ -1049,14 +1049,14 @@ func testLongOldForkedFastSyncedDeepRepair(t *testing.T, snapshots bool) {
 // chain is below the committed block. In this case we expect the chain to detect
 // that it was fast syncing and not delete anything. The side chain is completely
 // nuked by the freezer.
-func TestLongOldForkedFastSyncingShallowRepair(t *testing.T) {
-testLongOldForkedFastSyncingShallowRepair(t, false)
+func TestLongOldForkedSnapSyncingShallowRepair(t *testing.T) {
+testLongOldForkedSnapSyncingShallowRepair(t, false)
 }
-func TestLongOldForkedFastSyncingShallowRepairWithSnapshots(t *testing.T) {
-testLongOldForkedFastSyncingShallowRepair(t, true)
+func TestLongOldForkedSnapSyncingShallowRepairWithSnapshots(t *testing.T) {
+testLongOldForkedSnapSyncingShallowRepair(t, true)
 }

-func testLongOldForkedFastSyncingShallowRepair(t *testing.T, snapshots bool) {
+func testLongOldForkedSnapSyncingShallowRepair(t *testing.T, snapshots bool) {
 // Chain:
 // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 // └->S1->S2->S3
@@ -1101,14 +1101,14 @@ func testLongOldForkedFastSyncingShallowRepair(t *testing.T, snapshots bool) {
 // chain is below the committed block. In this case we expect the chain to detect
 // that it was fast syncing and not delete anything. The side chain is completely
 // nuked by the freezer.
-func TestLongOldForkedFastSyncingDeepRepair(t *testing.T) {
-testLongOldForkedFastSyncingDeepRepair(t, false)
+func TestLongOldForkedSnapSyncingDeepRepair(t *testing.T) {
+testLongOldForkedSnapSyncingDeepRepair(t, false)
 }
-func TestLongOldForkedFastSyncingDeepRepairWithSnapshots(t *testing.T) {
-testLongOldForkedFastSyncingDeepRepair(t, true)
+func TestLongOldForkedSnapSyncingDeepRepairWithSnapshots(t *testing.T) {
+testLongOldForkedSnapSyncingDeepRepair(t, true)
 }

-func testLongOldForkedFastSyncingDeepRepair(t *testing.T, snapshots bool) {
+func testLongOldForkedSnapSyncingDeepRepair(t *testing.T, snapshots bool) {
 // Chain:
 // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 // └->S1->S2->S3
@@ -1252,14 +1252,14 @@ func testLongNewerForkedDeepRepair(t *testing.T, snapshots bool) {
 // the side chain is above the committed block. In this case we expect the chain
 // to be rolled back to the committed block, with everything afterwads kept as fast
 // sync data; the side chain completely nuked by the freezer.
-func TestLongNewerForkedFastSyncedShallowRepair(t *testing.T) {
-testLongNewerForkedFastSyncedShallowRepair(t, false)
+func TestLongNewerForkedSnapSyncedShallowRepair(t *testing.T) {
+testLongNewerForkedSnapSyncedShallowRepair(t, false)
 }
-func TestLongNewerForkedFastSyncedShallowRepairWithSnapshots(t *testing.T) {
-testLongNewerForkedFastSyncedShallowRepair(t, true)
+func TestLongNewerForkedSnapSyncedShallowRepairWithSnapshots(t *testing.T) {
+testLongNewerForkedSnapSyncedShallowRepair(t, true)
 }

-func testLongNewerForkedFastSyncedShallowRepair(t *testing.T, snapshots bool) {
+func testLongNewerForkedSnapSyncedShallowRepair(t *testing.T, snapshots bool) {
 // Chain:
 // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1304,14 +1304,14 @@ func testLongNewerForkedFastSyncedShallowRepair(t *testing.T, snapshots bool) {
 // the side chain is above the committed block. In this case we expect the canonical
 // chain to be rolled back to the committed block, with everything afterwads deleted;
 // the side chain completely nuked by the freezer.
-func TestLongNewerForkedFastSyncedDeepRepair(t *testing.T) {
-testLongNewerForkedFastSyncedDeepRepair(t, false)
+func TestLongNewerForkedSnapSyncedDeepRepair(t *testing.T) {
+testLongNewerForkedSnapSyncedDeepRepair(t, false)
 }
-func TestLongNewerForkedFastSyncedDeepRepairWithSnapshots(t *testing.T) {
-testLongNewerForkedFastSyncedDeepRepair(t, true)
+func TestLongNewerForkedSnapSyncedDeepRepairWithSnapshots(t *testing.T) {
+testLongNewerForkedSnapSyncedDeepRepair(t, true)
 }

-func testLongNewerForkedFastSyncedDeepRepair(t *testing.T, snapshots bool) {
+func testLongNewerForkedSnapSyncedDeepRepair(t *testing.T, snapshots bool) {
 // Chain:
 // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1355,14 +1355,14 @@ func testLongNewerForkedFastSyncedDeepRepair(t *testing.T, snapshots bool) {
 // chain is above the committed block. In this case we expect the chain to detect
 // that it was fast syncing and not delete anything. The side chain is completely
 // nuked by the freezer.
-func TestLongNewerForkedFastSyncingShallowRepair(t *testing.T) {
-testLongNewerForkedFastSyncingShallowRepair(t, false)
+func TestLongNewerForkedSnapSyncingShallowRepair(t *testing.T) {
+testLongNewerForkedSnapSyncingShallowRepair(t, false)
 }
-func TestLongNewerForkedFastSyncingShallowRepairWithSnapshots(t *testing.T) {
-testLongNewerForkedFastSyncingShallowRepair(t, true)
+func TestLongNewerForkedSnapSyncingShallowRepairWithSnapshots(t *testing.T) {
+testLongNewerForkedSnapSyncingShallowRepair(t, true)
 }

-func testLongNewerForkedFastSyncingShallowRepair(t *testing.T, snapshots bool) {
+func testLongNewerForkedSnapSyncingShallowRepair(t *testing.T, snapshots bool) {
 // Chain:
 // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1407,14 +1407,14 @@ func testLongNewerForkedFastSyncingShallowRepair(t *testing.T, snapshots bool) {
 // chain is above the committed block. In this case we expect the chain to detect
 // that it was fast syncing and not delete anything. The side chain is completely
 // nuked by the freezer.
-func TestLongNewerForkedFastSyncingDeepRepair(t *testing.T) {
-testLongNewerForkedFastSyncingDeepRepair(t, false)
+func TestLongNewerForkedSnapSyncingDeepRepair(t *testing.T) {
+testLongNewerForkedSnapSyncingDeepRepair(t, false)
 }
-func TestLongNewerForkedFastSyncingDeepRepairWithSnapshots(t *testing.T) {
-testLongNewerForkedFastSyncingDeepRepair(t, true)
+func TestLongNewerForkedSnapSyncingDeepRepairWithSnapshots(t *testing.T) {
+testLongNewerForkedSnapSyncingDeepRepair(t, true)
 }

-func testLongNewerForkedFastSyncingDeepRepair(t *testing.T, snapshots bool) {
+func testLongNewerForkedSnapSyncingDeepRepair(t *testing.T, snapshots bool) {
 // Chain:
 // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1552,14 +1552,14 @@ func testLongReorgedDeepRepair(t *testing.T, snapshots bool) {
 // expect the chain to be rolled back to the committed block, with everything
 // afterwads kept as fast sync data. The side chain completely nuked by the
 // freezer.
-func TestLongReorgedFastSyncedShallowRepair(t *testing.T) {
-testLongReorgedFastSyncedShallowRepair(t, false)
+func TestLongReorgedSnapSyncedShallowRepair(t *testing.T) {
+testLongReorgedSnapSyncedShallowRepair(t, false)
 }
-func TestLongReorgedFastSyncedShallowRepairWithSnapshots(t *testing.T) {
-testLongReorgedFastSyncedShallowRepair(t, true)
+func TestLongReorgedSnapSyncedShallowRepairWithSnapshots(t *testing.T) {
+testLongReorgedSnapSyncedShallowRepair(t, true)
 }

-func testLongReorgedFastSyncedShallowRepair(t *testing.T, snapshots bool) {
+func testLongReorgedSnapSyncedShallowRepair(t *testing.T, snapshots bool) {
 // Chain:
 // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1603,14 +1603,14 @@ func testLongReorgedFastSyncedShallowRepair(t *testing.T, snapshots bool) {
 // was already committed to disk and then the process crashed. In this case we
 // expect the canonical chains to be rolled back to the committed block, with
 // everything afterwads deleted. The side chain completely nuked by the freezer.
-func TestLongReorgedFastSyncedDeepRepair(t *testing.T) {
-testLongReorgedFastSyncedDeepRepair(t, false)
+func TestLongReorgedSnapSyncedDeepRepair(t *testing.T) {
+testLongReorgedSnapSyncedDeepRepair(t, false)
 }
-func TestLongReorgedFastSyncedDeepRepairWithSnapshots(t *testing.T) {
-testLongReorgedFastSyncedDeepRepair(t, true)
+func TestLongReorgedSnapSyncedDeepRepairWithSnapshots(t *testing.T) {
+testLongReorgedSnapSyncedDeepRepair(t, true)
 }

-func testLongReorgedFastSyncedDeepRepair(t *testing.T, snapshots bool) {
+func testLongReorgedSnapSyncedDeepRepair(t *testing.T, snapshots bool) {
 // Chain:
 // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1653,14 +1653,14 @@ func testLongReorgedFastSyncedDeepRepair(t *testing.T, snapshots bool) {
 // was not yet committed, but the process crashed. In this case we expect the
 // chain to detect that it was fast syncing and not delete anything, since we
 // can just pick up directly where we left off.
-func TestLongReorgedFastSyncingShallowRepair(t *testing.T) {
-testLongReorgedFastSyncingShallowRepair(t, false)
+func TestLongReorgedSnapSyncingShallowRepair(t *testing.T) {
+testLongReorgedSnapSyncingShallowRepair(t, false)
 }
-func TestLongReorgedFastSyncingShallowRepairWithSnapshots(t *testing.T) {
-testLongReorgedFastSyncingShallowRepair(t, true)
+func TestLongReorgedSnapSyncingShallowRepairWithSnapshots(t *testing.T) {
+testLongReorgedSnapSyncingShallowRepair(t, true)
 }

-func testLongReorgedFastSyncingShallowRepair(t *testing.T, snapshots bool) {
+func testLongReorgedSnapSyncingShallowRepair(t *testing.T, snapshots bool) {
 // Chain:
 // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1704,14 +1704,14 @@ func testLongReorgedFastSyncingShallowRepair(t *testing.T, snapshots bool) {
 // was not yet committed, but the process crashed. In this case we expect the
 // chain to detect that it was fast syncing and not delete anything, since we
 // can just pick up directly where we left off.
-func TestLongReorgedFastSyncingDeepRepair(t *testing.T) {
-testLongReorgedFastSyncingDeepRepair(t, false)
+func TestLongReorgedSnapSyncingDeepRepair(t *testing.T) {
+testLongReorgedSnapSyncingDeepRepair(t, false)
 }
-func TestLongReorgedFastSyncingDeepRepairWithSnapshots(t *testing.T) {
-testLongReorgedFastSyncingDeepRepair(t, true)
+func TestLongReorgedSnapSyncingDeepRepairWithSnapshots(t *testing.T) {
+testLongReorgedSnapSyncingDeepRepair(t, true)
 }

-func testLongReorgedFastSyncingDeepRepair(t *testing.T, snapshots bool) {
+func testLongReorgedSnapSyncingDeepRepair(t *testing.T, snapshots bool) {
 // Chain:
 // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -194,10 +194,10 @@ func testShortSetHead(t *testing.T, snapshots bool) {
 // Everything above the sethead point should be deleted. In between the committed
 // block and the requested head the data can remain as "fast sync" data to avoid
 // redownloading it.
-func TestShortFastSyncedSetHead(t *testing.T) { testShortFastSyncedSetHead(t, false) }
-func TestShortFastSyncedSetHeadWithSnapshots(t *testing.T) { testShortFastSyncedSetHead(t, true) }
+func TestShortSnapSyncedSetHead(t *testing.T) { testShortSnapSyncedSetHead(t, false) }
+func TestShortSnapSyncedSetHeadWithSnapshots(t *testing.T) { testShortSnapSyncedSetHead(t, true) }

-func testShortFastSyncedSetHead(t *testing.T, snapshots bool) {
+func testShortSnapSyncedSetHead(t *testing.T, snapshots bool) {
 // Chain:
 // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 //
@@ -236,10 +236,10 @@ func testShortFastSyncedSetHead(t *testing.T, snapshots bool) {
 // detect that it was fast syncing and delete everything from the new head, since
 // we can just pick up fast syncing from there. The head full block should be set
 // to the genesis.
-func TestShortFastSyncingSetHead(t *testing.T) { testShortFastSyncingSetHead(t, false) }
-func TestShortFastSyncingSetHeadWithSnapshots(t *testing.T) { testShortFastSyncingSetHead(t, true) }
+func TestShortSnapSyncingSetHead(t *testing.T) { testShortSnapSyncingSetHead(t, false) }
+func TestShortSnapSyncingSetHeadWithSnapshots(t *testing.T) { testShortSnapSyncingSetHead(t, true) }

-func testShortFastSyncingSetHead(t *testing.T, snapshots bool) {
+func testShortSnapSyncingSetHead(t *testing.T, snapshots bool) {
 // Chain:
 // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 //
@@ -326,14 +326,14 @@ func testShortOldForkedSetHead(t *testing.T, snapshots bool) {
 // block. Everything above the sethead point should be deleted. In between the
 // committed block and the requested head the data can remain as "fast sync" data
 // to avoid redownloading it. The side chain should be left alone as it was shorter.
-func TestShortOldForkedFastSyncedSetHead(t *testing.T) {
-testShortOldForkedFastSyncedSetHead(t, false)
+func TestShortOldForkedSnapSyncedSetHead(t *testing.T) {
+testShortOldForkedSnapSyncedSetHead(t, false)
 }
-func TestShortOldForkedFastSyncedSetHeadWithSnapshots(t *testing.T) {
-testShortOldForkedFastSyncedSetHead(t, true)
+func TestShortOldForkedSnapSyncedSetHeadWithSnapshots(t *testing.T) {
+testShortOldForkedSnapSyncedSetHead(t, true)
 }

-func testShortOldForkedFastSyncedSetHead(t *testing.T, snapshots bool) {
+func testShortOldForkedSnapSyncedSetHead(t *testing.T, snapshots bool) {
 // Chain:
 // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 // └->S1->S2->S3
@@ -375,14 +375,14 @@ func testShortOldForkedFastSyncedSetHead(t *testing.T, snapshots bool) {
 // the chain to detect that it was fast syncing and delete everything from the new
 // head, since we can just pick up fast syncing from there. The head full block
 // should be set to the genesis.
-func TestShortOldForkedFastSyncingSetHead(t *testing.T) {
-testShortOldForkedFastSyncingSetHead(t, false)
+func TestShortOldForkedSnapSyncingSetHead(t *testing.T) {
+testShortOldForkedSnapSyncingSetHead(t, false)
 }
-func TestShortOldForkedFastSyncingSetHeadWithSnapshots(t *testing.T) {
-testShortOldForkedFastSyncingSetHead(t, true)
+func TestShortOldForkedSnapSyncingSetHeadWithSnapshots(t *testing.T) {
+testShortOldForkedSnapSyncingSetHead(t, true)
 }

-func testShortOldForkedFastSyncingSetHead(t *testing.T, snapshots bool) {
+func testShortOldForkedSnapSyncingSetHead(t *testing.T, snapshots bool) {
 // Chain:
 // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 // └->S1->S2->S3
@@ -478,14 +478,14 @@ func testShortNewlyForkedSetHead(t *testing.T, snapshots bool) {
 // The side chain could be left to be if the fork point was before the new head
 // we are deleting to, but it would be exceedingly hard to detect that case and
 // properly handle it, so we'll trade extra work in exchange for simpler code.
-func TestShortNewlyForkedFastSyncedSetHead(t *testing.T) {
-testShortNewlyForkedFastSyncedSetHead(t, false)
+func TestShortNewlyForkedSnapSyncedSetHead(t *testing.T) {
+testShortNewlyForkedSnapSyncedSetHead(t, false)
 }
-func TestShortNewlyForkedFastSyncedSetHeadWithSnapshots(t *testing.T) {
-testShortNewlyForkedFastSyncedSetHead(t, true)
+func TestShortNewlyForkedSnapSyncedSetHeadWithSnapshots(t *testing.T) {
+testShortNewlyForkedSnapSyncedSetHead(t, true)
 }

-func testShortNewlyForkedFastSyncedSetHead(t *testing.T, snapshots bool) {
+func testShortNewlyForkedSnapSyncedSetHead(t *testing.T, snapshots bool) {
 // Chain:
 // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD)
 // └->S1->S2->S3->S4->S5->S6->S7->S8
@@ -531,14 +531,14 @@ func testShortNewlyForkedFastSyncedSetHead(t *testing.T, snapshots bool) {
 // The side chain could be left to be if the fork point was before the new head
 // we are deleting to, but it would be exceedingly hard to detect that case and
 // properly handle it, so we'll trade extra work in exchange for simpler code.
-func TestShortNewlyForkedFastSyncingSetHead(t *testing.T) {
-testShortNewlyForkedFastSyncingSetHead(t, false)
+func TestShortNewlyForkedSnapSyncingSetHead(t *testing.T) {
+testShortNewlyForkedSnapSyncingSetHead(t, false)
 }
-func TestShortNewlyForkedFastSyncingSetHeadWithSnapshots(t *testing.T) {
-testShortNewlyForkedFastSyncingSetHead(t, true)
+func TestShortNewlyForkedSnapSyncingSetHeadWithSnapshots(t *testing.T) {
+testShortNewlyForkedSnapSyncingSetHead(t, true)
 }

-func testShortNewlyForkedFastSyncingSetHead(t *testing.T, snapshots bool) {
+func testShortNewlyForkedSnapSyncingSetHead(t *testing.T, snapshots bool) {
 // Chain:
 // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD)
 // └->S1->S2->S3->S4->S5->S6->S7->S8
@@ -634,14 +634,14 @@ func testShortReorgedSetHead(t *testing.T, snapshots bool) {
 // The side chain could be left to be if the fork point was before the new head
 // we are deleting to, but it would be exceedingly hard to detect that case and
 // properly handle it, so we'll trade extra work in exchange for simpler code.
-func TestShortReorgedFastSyncedSetHead(t *testing.T) {
-testShortReorgedFastSyncedSetHead(t, false)
+func TestShortReorgedSnapSyncedSetHead(t *testing.T) {
+testShortReorgedSnapSyncedSetHead(t, false)
 }
-func TestShortReorgedFastSyncedSetHeadWithSnapshots(t *testing.T) {
-testShortReorgedFastSyncedSetHead(t, true)
+func TestShortReorgedSnapSyncedSetHeadWithSnapshots(t *testing.T) {
+testShortReorgedSnapSyncedSetHead(t, true)
 }

-func testShortReorgedFastSyncedSetHead(t *testing.T, snapshots bool) {
+func testShortReorgedSnapSyncedSetHead(t *testing.T, snapshots bool) {
 // Chain:
 // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
@@ -686,14 +686,14 @@ func testShortReorgedFastSyncedSetHead(t *testing.T, snapshots bool) {
 // The side chain could be left to be if the fork point was before the new head
 // we are deleting to, but it would be exceedingly hard to detect that case and
 // properly handle it, so we'll trade extra work in exchange for simpler code.
-func TestShortReorgedFastSyncingSetHead(t *testing.T) {
-testShortReorgedFastSyncingSetHead(t, false)
+func TestShortReorgedSnapSyncingSetHead(t *testing.T) {
+testShortReorgedSnapSyncingSetHead(t, false)
 }
-func TestShortReorgedFastSyncingSetHeadWithSnapshots(t *testing.T) {
-testShortReorgedFastSyncingSetHead(t, true)
+func TestShortReorgedSnapSyncingSetHeadWithSnapshots(t *testing.T) {
+testShortReorgedSnapSyncingSetHead(t, true)
 }

-func testShortReorgedFastSyncingSetHead(t *testing.T, snapshots bool) {
+func testShortReorgedSnapSyncingSetHead(t *testing.T, snapshots bool) {
 // Chain:
 // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
@@ -829,14 +829,14 @@ func testLongDeepSetHead(t *testing.T, snapshots bool) {
 // back to the committed block. Everything above the sethead point should be
 // deleted. In between the committed block and the requested head the data can
 // remain as "fast sync" data to avoid redownloading it.
-func TestLongFastSyncedShallowSetHead(t *testing.T) {
-testLongFastSyncedShallowSetHead(t, false)
+func TestLongSnapSyncedShallowSetHead(t *testing.T) {
+testLongSnapSyncedShallowSetHead(t, false)
 }
-func TestLongFastSyncedShallowSetHeadWithSnapshots(t *testing.T) {
-testLongFastSyncedShallowSetHead(t, true)
+func TestLongSnapSyncedShallowSetHeadWithSnapshots(t *testing.T) {
+testLongSnapSyncedShallowSetHead(t, true)
 }

-func testLongFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
+func testLongSnapSyncedShallowSetHead(t *testing.T, snapshots bool) {
 // Chain:
 // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 //
@@ -880,10 +880,10 @@ func testLongFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
 // which sethead was called. In this case we expect the full chain to be rolled
 // back to the committed block. Since the ancient limit was underflown, everything
 // needs to be deleted onwards to avoid creating a gap.
-func TestLongFastSyncedDeepSetHead(t *testing.T) { testLongFastSyncedDeepSetHead(t, false) }
-func TestLongFastSyncedDeepSetHeadWithSnapshots(t *testing.T) { testLongFastSyncedDeepSetHead(t, true) }
+func TestLongSnapSyncedDeepSetHead(t *testing.T) { testLongSnapSyncedDeepSetHead(t, false) }
+func TestLongSnapSyncedDeepSetHeadWithSnapshots(t *testing.T) { testLongSnapSyncedDeepSetHead(t, true) }

-func testLongFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
+func testLongSnapSyncedDeepSetHead(t *testing.T, snapshots bool) {
 // Chain:
 // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 //
@@ -926,14 +926,14 @@ func testLongFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
 // sethead was called. In this case we expect the chain to detect that it was fast
 // syncing and delete everything from the new head, since we can just pick up fast
 // syncing from there.
-func TestLongFastSyncingShallowSetHead(t *testing.T) {
-testLongFastSyncingShallowSetHead(t, false)
+func TestLongSnapSyncingShallowSetHead(t *testing.T) {
+testLongSnapSyncingShallowSetHead(t, false)
 }
-func TestLongFastSyncingShallowSetHeadWithSnapshots(t *testing.T) {
-testLongFastSyncingShallowSetHead(t, true)
+func TestLongSnapSyncingShallowSetHeadWithSnapshots(t *testing.T) {
+testLongSnapSyncingShallowSetHead(t, true)
 }

-func testLongFastSyncingShallowSetHead(t *testing.T, snapshots bool) {
+func testLongSnapSyncingShallowSetHead(t *testing.T, snapshots bool) {
 // Chain:
 // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 //
@ -977,14 +977,14 @@ func testLongFastSyncingShallowSetHead(t *testing.T, snapshots bool) {
// sethead was called. In this case we expect the chain to detect that it was fast
// syncing and delete everything from the new head, since we can just pick up fast
// syncing from there.
-func TestLongFastSyncingDeepSetHead(t *testing.T) {
+func TestLongSnapSyncingDeepSetHead(t *testing.T) {
-	testLongFastSyncingDeepSetHead(t, false)
+	testLongSnapSyncingDeepSetHead(t, false)
}
-func TestLongFastSyncingDeepSetHeadWithSnapshots(t *testing.T) {
+func TestLongSnapSyncingDeepSetHeadWithSnapshots(t *testing.T) {
-	testLongFastSyncingDeepSetHead(t, true)
+	testLongSnapSyncingDeepSetHead(t, true)
}

-func testLongFastSyncingDeepSetHead(t *testing.T, snapshots bool) {
+func testLongSnapSyncingDeepSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
	//
@ -1132,14 +1132,14 @@ func testLongOldForkedDeepSetHead(t *testing.T, snapshots bool) {
// sethead point should be deleted. In between the committed block and the
// requested head the data can remain as "fast sync" data to avoid redownloading
// it. The side chain is nuked by the freezer.
-func TestLongOldForkedFastSyncedShallowSetHead(t *testing.T) {
+func TestLongOldForkedSnapSyncedShallowSetHead(t *testing.T) {
-	testLongOldForkedFastSyncedShallowSetHead(t, false)
+	testLongOldForkedSnapSyncedShallowSetHead(t, false)
}
-func TestLongOldForkedFastSyncedShallowSetHeadWithSnapshots(t *testing.T) {
+func TestLongOldForkedSnapSyncedShallowSetHeadWithSnapshots(t *testing.T) {
-	testLongOldForkedFastSyncedShallowSetHead(t, true)
+	testLongOldForkedSnapSyncedShallowSetHead(t, true)
}

-func testLongOldForkedFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
+func testLongOldForkedSnapSyncedShallowSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
	// └->S1->S2->S3
@ -1186,14 +1186,14 @@ func testLongOldForkedFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
// full chain to be rolled back to the committed block. Since the ancient limit was
// underflown, everything needs to be deleted onwards to avoid creating a gap. The
// side chain is nuked by the freezer.
-func TestLongOldForkedFastSyncedDeepSetHead(t *testing.T) {
+func TestLongOldForkedSnapSyncedDeepSetHead(t *testing.T) {
-	testLongOldForkedFastSyncedDeepSetHead(t, false)
+	testLongOldForkedSnapSyncedDeepSetHead(t, false)
}
-func TestLongOldForkedFastSyncedDeepSetHeadWithSnapshots(t *testing.T) {
+func TestLongOldForkedSnapSyncedDeepSetHeadWithSnapshots(t *testing.T) {
-	testLongOldForkedFastSyncedDeepSetHead(t, true)
+	testLongOldForkedSnapSyncedDeepSetHead(t, true)
}

-func testLongOldForkedFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
+func testLongOldForkedSnapSyncedDeepSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
	// └->S1->S2->S3
@ -1239,14 +1239,14 @@ func testLongOldForkedFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
// that it was fast syncing and delete everything from the new head, since we can
// just pick up fast syncing from there. The side chain is completely nuked by the
// freezer.
-func TestLongOldForkedFastSyncingShallowSetHead(t *testing.T) {
+func TestLongOldForkedSnapSyncingShallowSetHead(t *testing.T) {
-	testLongOldForkedFastSyncingShallowSetHead(t, false)
+	testLongOldForkedSnapSyncingShallowSetHead(t, false)
}
-func TestLongOldForkedFastSyncingShallowSetHeadWithSnapshots(t *testing.T) {
+func TestLongOldForkedSnapSyncingShallowSetHeadWithSnapshots(t *testing.T) {
-	testLongOldForkedFastSyncingShallowSetHead(t, true)
+	testLongOldForkedSnapSyncingShallowSetHead(t, true)
}

-func testLongOldForkedFastSyncingShallowSetHead(t *testing.T, snapshots bool) {
+func testLongOldForkedSnapSyncingShallowSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
	// └->S1->S2->S3
@ -1293,14 +1293,14 @@ func testLongOldForkedFastSyncingShallowSetHead(t *testing.T, snapshots bool) {
// that it was fast syncing and delete everything from the new head, since we can
// just pick up fast syncing from there. The side chain is completely nuked by the
// freezer.
-func TestLongOldForkedFastSyncingDeepSetHead(t *testing.T) {
+func TestLongOldForkedSnapSyncingDeepSetHead(t *testing.T) {
-	testLongOldForkedFastSyncingDeepSetHead(t, false)
+	testLongOldForkedSnapSyncingDeepSetHead(t, false)
}
-func TestLongOldForkedFastSyncingDeepSetHeadWithSnapshots(t *testing.T) {
+func TestLongOldForkedSnapSyncingDeepSetHeadWithSnapshots(t *testing.T) {
-	testLongOldForkedFastSyncingDeepSetHead(t, true)
+	testLongOldForkedSnapSyncingDeepSetHead(t, true)
}

-func testLongOldForkedFastSyncingDeepSetHead(t *testing.T, snapshots bool) {
+func testLongOldForkedSnapSyncingDeepSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
	// └->S1->S2->S3
@ -1446,15 +1446,15 @@ func testLongNewerForkedDeepSetHead(t *testing.T, snapshots bool) {
// side chain, where the fast sync pivot point - newer than the ancient limit -
// was already committed to disk and then sethead was called. In this test scenario
// the side chain is above the committed block. In this case the freezer will delete
-// the sidechain since it's dangling, reverting to TestLongFastSyncedShallowSetHead.
+// the sidechain since it's dangling, reverting to TestLongSnapSyncedShallowSetHead.
-func TestLongNewerForkedFastSyncedShallowSetHead(t *testing.T) {
+func TestLongNewerForkedSnapSyncedShallowSetHead(t *testing.T) {
-	testLongNewerForkedFastSyncedShallowSetHead(t, false)
+	testLongNewerForkedSnapSyncedShallowSetHead(t, false)
}
-func TestLongNewerForkedFastSyncedShallowSetHeadWithSnapshots(t *testing.T) {
+func TestLongNewerForkedSnapSyncedShallowSetHeadWithSnapshots(t *testing.T) {
-	testLongNewerForkedFastSyncedShallowSetHead(t, true)
+	testLongNewerForkedSnapSyncedShallowSetHead(t, true)
}

-func testLongNewerForkedFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
+func testLongNewerForkedSnapSyncedShallowSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
	// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@ -1498,15 +1498,15 @@ func testLongNewerForkedFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
// side chain, where the fast sync pivot point - older than the ancient limit -
// was already committed to disk and then sethead was called. In this test scenario
// the side chain is above the committed block. In this case the freezer will delete
-// the sidechain since it's dangling, reverting to TestLongFastSyncedDeepSetHead.
+// the sidechain since it's dangling, reverting to TestLongSnapSyncedDeepSetHead.
-func TestLongNewerForkedFastSyncedDeepSetHead(t *testing.T) {
+func TestLongNewerForkedSnapSyncedDeepSetHead(t *testing.T) {
-	testLongNewerForkedFastSyncedDeepSetHead(t, false)
+	testLongNewerForkedSnapSyncedDeepSetHead(t, false)
}
-func TestLongNewerForkedFastSyncedDeepSetHeadWithSnapshots(t *testing.T) {
+func TestLongNewerForkedSnapSyncedDeepSetHeadWithSnapshots(t *testing.T) {
-	testLongNewerForkedFastSyncedDeepSetHead(t, true)
+	testLongNewerForkedSnapSyncedDeepSetHead(t, true)
}

-func testLongNewerForkedFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
+func testLongNewerForkedSnapSyncedDeepSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
	// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@ -1549,15 +1549,15 @@ func testLongNewerForkedFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
// side chain, where the fast sync pivot point - newer than the ancient limit -
// was not yet committed, but sethead was called. In this test scenario the side
// chain is above the committed block. In this case the freezer will delete the
-// sidechain since it's dangling, reverting to TestLongFastSyncinghallowSetHead.
+// sidechain since it's dangling, reverting to TestLongSnapSyncinghallowSetHead.
-func TestLongNewerForkedFastSyncingShallowSetHead(t *testing.T) {
+func TestLongNewerForkedSnapSyncingShallowSetHead(t *testing.T) {
-	testLongNewerForkedFastSyncingShallowSetHead(t, false)
+	testLongNewerForkedSnapSyncingShallowSetHead(t, false)
}
-func TestLongNewerForkedFastSyncingShallowSetHeadWithSnapshots(t *testing.T) {
+func TestLongNewerForkedSnapSyncingShallowSetHeadWithSnapshots(t *testing.T) {
-	testLongNewerForkedFastSyncingShallowSetHead(t, true)
+	testLongNewerForkedSnapSyncingShallowSetHead(t, true)
}

-func testLongNewerForkedFastSyncingShallowSetHead(t *testing.T, snapshots bool) {
+func testLongNewerForkedSnapSyncingShallowSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
	// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@ -1601,15 +1601,15 @@ func testLongNewerForkedFastSyncingShallowSetHead(t *testing.T, snapshots bool)
// side chain, where the fast sync pivot point - older than the ancient limit -
// was not yet committed, but sethead was called. In this test scenario the side
// chain is above the committed block. In this case the freezer will delete the
-// sidechain since it's dangling, reverting to TestLongFastSyncingDeepSetHead.
+// sidechain since it's dangling, reverting to TestLongSnapSyncingDeepSetHead.
-func TestLongNewerForkedFastSyncingDeepSetHead(t *testing.T) {
+func TestLongNewerForkedSnapSyncingDeepSetHead(t *testing.T) {
-	testLongNewerForkedFastSyncingDeepSetHead(t, false)
+	testLongNewerForkedSnapSyncingDeepSetHead(t, false)
}
-func TestLongNewerForkedFastSyncingDeepSetHeadWithSnapshots(t *testing.T) {
+func TestLongNewerForkedSnapSyncingDeepSetHeadWithSnapshots(t *testing.T) {
-	testLongNewerForkedFastSyncingDeepSetHead(t, true)
+	testLongNewerForkedSnapSyncingDeepSetHead(t, true)
}

-func testLongNewerForkedFastSyncingDeepSetHead(t *testing.T, snapshots bool) {
+func testLongNewerForkedSnapSyncingDeepSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
	// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@ -1745,15 +1745,15 @@ func testLongReorgedDeepSetHead(t *testing.T, snapshots bool) {
// side chain, where the fast sync pivot point - newer than the ancient limit -
// was already committed to disk and then sethead was called. In this case the
// freezer will delete the sidechain since it's dangling, reverting to
-// TestLongFastSyncedShallowSetHead.
+// TestLongSnapSyncedShallowSetHead.
-func TestLongReorgedFastSyncedShallowSetHead(t *testing.T) {
+func TestLongReorgedSnapSyncedShallowSetHead(t *testing.T) {
-	testLongReorgedFastSyncedShallowSetHead(t, false)
+	testLongReorgedSnapSyncedShallowSetHead(t, false)
}
-func TestLongReorgedFastSyncedShallowSetHeadWithSnapshots(t *testing.T) {
+func TestLongReorgedSnapSyncedShallowSetHeadWithSnapshots(t *testing.T) {
-	testLongReorgedFastSyncedShallowSetHead(t, true)
+	testLongReorgedSnapSyncedShallowSetHead(t, true)
}

-func testLongReorgedFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
+func testLongReorgedSnapSyncedShallowSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
	// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@ -1797,15 +1797,15 @@ func testLongReorgedFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
// side chain, where the fast sync pivot point - older than the ancient limit -
// was already committed to disk and then sethead was called. In this case the
// freezer will delete the sidechain since it's dangling, reverting to
-// TestLongFastSyncedDeepSetHead.
+// TestLongSnapSyncedDeepSetHead.
-func TestLongReorgedFastSyncedDeepSetHead(t *testing.T) {
+func TestLongReorgedSnapSyncedDeepSetHead(t *testing.T) {
-	testLongReorgedFastSyncedDeepSetHead(t, false)
+	testLongReorgedSnapSyncedDeepSetHead(t, false)
}
-func TestLongReorgedFastSyncedDeepSetHeadWithSnapshots(t *testing.T) {
+func TestLongReorgedSnapSyncedDeepSetHeadWithSnapshots(t *testing.T) {
-	testLongReorgedFastSyncedDeepSetHead(t, true)
+	testLongReorgedSnapSyncedDeepSetHead(t, true)
}

-func testLongReorgedFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
+func testLongReorgedSnapSyncedDeepSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
	// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@ -1850,14 +1850,14 @@ func testLongReorgedFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
// chain to detect that it was fast syncing and delete everything from the new
// head, since we can just pick up fast syncing from there. The side chain is
// completely nuked by the freezer.
-func TestLongReorgedFastSyncingShallowSetHead(t *testing.T) {
+func TestLongReorgedSnapSyncingShallowSetHead(t *testing.T) {
-	testLongReorgedFastSyncingShallowSetHead(t, false)
+	testLongReorgedSnapSyncingShallowSetHead(t, false)
}
-func TestLongReorgedFastSyncingShallowSetHeadWithSnapshots(t *testing.T) {
+func TestLongReorgedSnapSyncingShallowSetHeadWithSnapshots(t *testing.T) {
-	testLongReorgedFastSyncingShallowSetHead(t, true)
+	testLongReorgedSnapSyncingShallowSetHead(t, true)
}

-func testLongReorgedFastSyncingShallowSetHead(t *testing.T, snapshots bool) {
+func testLongReorgedSnapSyncingShallowSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
	// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@ -1903,14 +1903,14 @@ func testLongReorgedFastSyncingShallowSetHead(t *testing.T, snapshots bool) {
// chain to detect that it was fast syncing and delete everything from the new
// head, since we can just pick up fast syncing from there. The side chain is
// completely nuked by the freezer.
-func TestLongReorgedFastSyncingDeepSetHead(t *testing.T) {
+func TestLongReorgedSnapSyncingDeepSetHead(t *testing.T) {
-	testLongReorgedFastSyncingDeepSetHead(t, false)
+	testLongReorgedSnapSyncingDeepSetHead(t, false)
}
-func TestLongReorgedFastSyncingDeepSetHeadWithSnapshots(t *testing.T) {
+func TestLongReorgedSnapSyncingDeepSetHeadWithSnapshots(t *testing.T) {
-	testLongReorgedFastSyncingDeepSetHead(t, true)
+	testLongReorgedSnapSyncingDeepSetHead(t, true)
}

-func testLongReorgedFastSyncingDeepSetHead(t *testing.T, snapshots bool) {
+func testLongReorgedSnapSyncingDeepSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
	// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@ -2637,7 +2637,7 @@ func TestTransactionIndices(t *testing.T) {
	}
}

-func TestSkipStaleTxIndicesInFastSync(t *testing.T) {
+func TestSkipStaleTxIndicesInSnapSync(t *testing.T) {
	// Configure and generate a sample block chain
	var (
		gendb = rawdb.NewMemoryDatabase()
@ -155,6 +155,28 @@ func (b *BlockGen) TxNonce(addr common.Address) uint64 {

// AddUncle adds an uncle header to the generated block.
func (b *BlockGen) AddUncle(h *types.Header) {
+	// The uncle will have the same timestamp and auto-generated difficulty
+	h.Time = b.header.Time
+
+	var parent *types.Header
+	for i := b.i - 1; i >= 0; i-- {
+		if b.chain[i].Hash() == h.ParentHash {
+			parent = b.chain[i].Header()
+			break
+		}
+	}
+	chainreader := &fakeChainReader{config: b.config}
+	h.Difficulty = b.engine.CalcDifficulty(chainreader, b.header.Time, parent)
+
+	// The gas limit and price should be derived from the parent
+	h.GasLimit = parent.GasLimit
+	if b.config.IsLondon(h.Number) {
+		h.BaseFee = misc.CalcBaseFee(b.config, parent)
+		if !b.config.IsLondon(parent.Number) {
+			parentGasLimit := parent.GasLimit * params.ElasticityMultiplier
+			h.GasLimit = CalcGasLimit(parentGasLimit, parentGasLimit)
+		}
+	}
	b.uncles = append(b.uncles, h)
}
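A tiny, self-contained sketch (not part of the diff) of the gas-limit handling in the new AddUncle branch above: when the uncle header is the first London block, the parent's pre-London gas limit is scaled by the elasticity multiplier before being fed to CalcGasLimit; otherwise the parent limit is taken over directly. The constant is hard-coded here instead of reading params.ElasticityMultiplier, so treat the value as an assumption.

package main

import "fmt"

const elasticityMultiplier = 2 // assumed value of params.ElasticityMultiplier

// parentLimitForUncle returns the gas limit the uncle derives from its parent,
// assuming the uncle itself is already a London block: the limit is doubled at
// the transition (the old limit becomes the gas target), unchanged afterwards.
func parentLimitForUncle(parentGasLimit uint64, parentAlreadyLondon bool) uint64 {
	if !parentAlreadyLondon {
		return parentGasLimit * elasticityMultiplier
	}
	return parentGasLimit
}

func main() {
	fmt.Println(parentLimitForUncle(15_000_000, false)) // 30000000 at the fork boundary
	fmt.Println(parentLimitForUncle(30_000_000, true))  // 30000000 afterwards
}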
@ -242,24 +242,6 @@ func WriteLastPivotNumber(db ethdb.KeyValueWriter, pivot uint64) {
	}
}

-// ReadFastTrieProgress retrieves the number of tries nodes fast synced to allow
-// reporting correct numbers across restarts.
-func ReadFastTrieProgress(db ethdb.KeyValueReader) uint64 {
-	data, _ := db.Get(fastTrieProgressKey)
-	if len(data) == 0 {
-		return 0
-	}
-	return new(big.Int).SetBytes(data).Uint64()
-}
-
-// WriteFastTrieProgress stores the fast sync trie process counter to support
-// retrieving it across restarts.
-func WriteFastTrieProgress(db ethdb.KeyValueWriter, count uint64) {
-	if err := db.Put(fastTrieProgressKey, new(big.Int).SetUint64(count).Bytes()); err != nil {
-		log.Crit("Failed to store fast sync trie progress", "err", err)
-	}
-}

// ReadTxIndexTail retrieves the number of oldest indexed block
// whose transaction indices has been indexed. If the corresponding entry
// is non-existent in database it means the indexing has been finished.
@ -208,11 +208,3 @@ func WriteSnapshotSyncStatus(db ethdb.KeyValueWriter, status []byte) {
		log.Crit("Failed to store snapshot sync status", "err", err)
	}
}

-// DeleteSnapshotSyncStatus deletes the serialized sync status saved at the last
-// shutdown
-func DeleteSnapshotSyncStatus(db ethdb.KeyValueWriter) {
-	if err := db.Delete(snapshotSyncStatusKey); err != nil {
-		log.Crit("Failed to remove snapshot sync status", "err", err)
-	}
-}
File diff suppressed because it is too large

115	eth/downloader/fetchers.go	Normal file
@ -0,0 +1,115 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/protocols/eth"
)

// fetchHeadersByHash is a blocking version of Peer.RequestHeadersByHash which
// handles all the cancellation, interruption and timeout mechanisms of a data
// retrieval to allow blocking API calls.
func (d *Downloader) fetchHeadersByHash(p *peerConnection, hash common.Hash, amount int, skip int, reverse bool) ([]*types.Header, error) {
	// Create the response sink and send the network request
	start := time.Now()
	resCh := make(chan *eth.Response)

	req, err := p.peer.RequestHeadersByHash(hash, amount, skip, reverse, resCh)
	if err != nil {
		return nil, err
	}
	defer req.Close()

	// Wait until the response arrives, the request is cancelled or times out
	ttl := d.peers.rates.TargetTimeout()

	timeoutTimer := time.NewTimer(ttl)
	defer timeoutTimer.Stop()

	select {
	case <-d.cancelCh:
		return nil, errCanceled

	case <-timeoutTimer.C:
		// Header retrieval timed out, update the metrics
		p.log.Debug("Header request timed out", "elapsed", ttl)
		headerTimeoutMeter.Mark(1)

		return nil, errTimeout

	case res := <-resCh:
		// Headers successfully retrieved, update the metrics
		headerReqTimer.Update(time.Since(start))
		headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersPacket))))

		// Don't reject the packet even if it turns out to be bad, downloader will
		// disconnect the peer on its own terms. Simply delivery the headers to
		// be processed by the caller
		res.Done <- nil

		return *res.Res.(*eth.BlockHeadersPacket), nil
	}
}

// fetchHeadersByNumber is a blocking version of Peer.RequestHeadersByNumber which
// handles all the cancellation, interruption and timeout mechanisms of a data
// retrieval to allow blocking API calls.
func (d *Downloader) fetchHeadersByNumber(p *peerConnection, number uint64, amount int, skip int, reverse bool) ([]*types.Header, error) {
	// Create the response sink and send the network request
	start := time.Now()
	resCh := make(chan *eth.Response)

	req, err := p.peer.RequestHeadersByNumber(number, amount, skip, reverse, resCh)
	if err != nil {
		return nil, err
	}
	defer req.Close()

	// Wait until the response arrives, the request is cancelled or times out
	ttl := d.peers.rates.TargetTimeout()

	timeoutTimer := time.NewTimer(ttl)
	defer timeoutTimer.Stop()

	select {
	case <-d.cancelCh:
		return nil, errCanceled

	case <-timeoutTimer.C:
		// Header retrieval timed out, update the metrics
		p.log.Debug("Header request timed out", "elapsed", ttl)
		headerTimeoutMeter.Mark(1)

		return nil, errTimeout

	case res := <-resCh:
		// Headers successfully retrieved, update the metrics
		headerReqTimer.Update(time.Since(start))
		headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersPacket))))

		// Don't reject the packet even if it turns out to be bad, downloader will
		// disconnect the peer on its own terms. Simply delivery the headers to
		// be processed by the caller
		res.Done <- nil

		return *res.Res.(*eth.BlockHeadersPacket), nil
	}
}
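The two helpers above turn the request-ID based dispatcher into plain blocking calls: issue the request with a response sink, then select on cancellation, timeout, or delivery. As a rough illustration only (this wrapper is not part of the diff; errBadPeer is assumed to be the package's existing sentinel error), a caller inside the downloader package might use them like this:

// fetchSingleHeader is a hypothetical convenience wrapper around the blocking
// helper above: it asks one peer for exactly one header by hash and validates
// the returned count. The real downloader performs more validation than this.
func (d *Downloader) fetchSingleHeader(p *peerConnection, hash common.Hash) (*types.Header, error) {
	headers, err := d.fetchHeadersByHash(p, hash, 1, 0, false)
	if err != nil {
		return nil, err
	}
	if len(headers) != 1 {
		p.log.Debug("Unexpected header count", "wanted", 1, "got", len(headers))
		return nil, errBadPeer // assumed: existing error value in this package
	}
	return headers[0], nil
}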
381	eth/downloader/fetchers_concurrent.go	Normal file
@ -0,0 +1,381 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"errors"
	"sort"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/prque"
	"github.com/ethereum/go-ethereum/eth/protocols/eth"
	"github.com/ethereum/go-ethereum/log"
)

// timeoutGracePeriod is the amount of time to allow for a peer to deliver a
// response to a locally already timed out request. Timeouts are not penalized
// as a peer might be temporarily overloaded, however, they still must reply
// to each request. Failing to do so is considered a protocol violation.
var timeoutGracePeriod = 2 * time.Minute

// typedQueue is an interface defining the adaptor needed to translate the type
// specific downloader/queue schedulers into the type-agnostic general concurrent
// fetcher algorithm calls.
type typedQueue interface {
	// waker returns a notification channel that gets pinged in case more fetches
	// have been queued up, so the fetcher might assign it to idle peers.
	waker() chan bool

	// pending returns the number of wrapped items that are currently queued for
	// fetching by the concurrent downloader.
	pending() int

	// capacity is responsible for calculating how many items of the abstracted
	// type a particular peer is estimated to be able to retrieve within the
	// alloted round trip time.
	capacity(peer *peerConnection, rtt time.Duration) int

	// updateCapacity is responsible for updating how many items of the abstracted
	// type a particular peer is estimated to be able to retrieve in a unit time.
	updateCapacity(peer *peerConnection, items int, elapsed time.Duration)

	// reserve is responsible for allocating a requested number of pending items
	// from the download queue to the specified peer.
	reserve(peer *peerConnection, items int) (*fetchRequest, bool, bool)

	// unreserve is resposible for removing the current retrieval allocation
	// assigned to a specific peer and placing it back into the pool to allow
	// reassigning to some other peer.
	unreserve(peer string) int

	// request is responsible for converting a generic fetch request into a typed
	// one and sending it to the remote peer for fulfillment.
	request(peer *peerConnection, req *fetchRequest, resCh chan *eth.Response) (*eth.Request, error)

	// deliver is responsible for taking a generic response packet from the
	// concurrent fetcher, unpacking the type specific data and delivering
	// it to the downloader's queue.
	deliver(peer *peerConnection, packet *eth.Response) (int, error)
}

// concurrentFetch iteratively downloads scheduled block parts, taking available
// peers, reserving a chunk of fetch requests for each and waiting for delivery
// or timeouts.
func (d *Downloader) concurrentFetch(queue typedQueue) error {
	// Create a delivery channel to accept responses from all peers
	responses := make(chan *eth.Response)

	// Track the currently active requests and their timeout order
	pending := make(map[string]*eth.Request)
	defer func() {
		// Abort all requests on sync cycle cancellation. The requests may still
		// be fulfilled by the remote side, but the dispatcher will not wait to
		// deliver them since nobody's going to be listening.
		for _, req := range pending {
			req.Close()
		}
	}()
	ordering := make(map[*eth.Request]int)
	timeouts := prque.New(func(data interface{}, index int) {
		ordering[data.(*eth.Request)] = index
	})

	timeout := time.NewTimer(0)
	if !timeout.Stop() {
		<-timeout.C
	}
	defer timeout.Stop()

	// Track the timed-out but not-yet-answered requests separately. We want to
	// keep tracking which peers are busy (potentially overloaded), so removing
	// all trace of a timed out request is not good. We also can't just cancel
	// the pending request altogether as that would prevent a late response from
	// being delivered, thus never unblocking the peer.
	stales := make(map[string]*eth.Request)
	defer func() {
		// Abort all requests on sync cycle cancellation. The requests may still
		// be fulfilled by the remote side, but the dispatcher will not wait to
		// deliver them since nobody's going to be listening.
		for _, req := range stales {
			req.Close()
		}
	}()
	// Subscribe to peer lifecycle events to schedule tasks to new joiners and
	// reschedule tasks upon disconnections. We don't care which event happened
	// for simplicity, so just use a single channel.
	peering := make(chan *peeringEvent, 64) // arbitrary buffer, just some burst protection

	peeringSub := d.peers.SubscribeEvents(peering)
	defer peeringSub.Unsubscribe()

	// Prepare the queue and fetch block parts until the block header fetcher's done
	finished := false
	for {
		// Short circuit if we lost all our peers
		if d.peers.Len() == 0 {
			return errNoPeers
		}
		// If there's nothing more to fetch, wait or terminate
		if queue.pending() == 0 {
			if len(pending) == 0 && finished {
				return nil
			}
		} else {
			// Send a download request to all idle peers, until throttled
			var (
				idles []*peerConnection
				caps  []int
			)
			for _, peer := range d.peers.AllPeers() {
				pending, stale := pending[peer.id], stales[peer.id]
				if pending == nil && stale == nil {
					idles = append(idles, peer)
					caps = append(caps, queue.capacity(peer, time.Second))
				} else if stale != nil {
					if waited := time.Since(stale.Sent); waited > timeoutGracePeriod {
						// Request has been in flight longer than the grace period
						// permitted it, consider the peer malicious attempting to
						// stall the sync.
						peer.log.Warn("Peer stalling, dropping", "waited", common.PrettyDuration(waited))
						d.dropPeer(peer.id)
					}
				}
			}
			sort.Sort(&peerCapacitySort{idles, caps})

			var (
				progressed bool
				throttled  bool
				queued     = queue.pending()
			)
			for _, peer := range idles {
				// Short circuit if throttling activated or there are no more
				// queued tasks to be retrieved
				if throttled {
					break
				}
				if queued = queue.pending(); queued == 0 {
					break
				}
				// Reserve a chunk of fetches for a peer. A nil can mean either that
				// no more headers are available, or that the peer is known not to
				// have them.
				request, progress, throttle := queue.reserve(peer, queue.capacity(peer, d.peers.rates.TargetRoundTrip()))
				if progress {
					progressed = true
				}
				if throttle {
					throttled = true
					throttleCounter.Inc(1)
				}
				if request == nil {
					continue
				}
				// Fetch the chunk and make sure any errors return the hashes to the queue
				req, err := queue.request(peer, request, responses)
				if err != nil {
					// Sending the request failed, which generally means the peer
					// was diconnected in between assignment and network send.
					// Although all peer removal operations return allocated tasks
					// to the queue, that is async, and we can do better here by
					// immediately pushing the unfulfilled requests.
					queue.unreserve(peer.id) // TODO(karalabe): This needs a non-expiration method
					continue
				}
				pending[peer.id] = req

				ttl := d.peers.rates.TargetTimeout()
				ordering[req] = timeouts.Size()

				timeouts.Push(req, -time.Now().Add(ttl).UnixNano())
				if timeouts.Size() == 1 {
					timeout.Reset(ttl)
				}
			}
			// Make sure that we have peers available for fetching. If all peers have been tried
			// and all failed throw an error
			if !progressed && !throttled && len(pending) == 0 && len(idles) == d.peers.Len() && queued > 0 {
				return errPeersUnavailable
			}
		}
		// Wait for something to happen
		select {
		case <-d.cancelCh:
			// If sync was cancelled, tear down the parallel retriever. Pending
			// requests will be cancelled locally, and the remote responses will
			// be dropped when they arrive
			return errCanceled

		case event := <-peering:
			// A peer joined or left, the tasks queue and allocations need to be
			// checked for potential assignment or reassignment
			peerid := event.peer.id

			if event.join {
				// Sanity check the internal state; this can be dropped later
				if _, ok := pending[peerid]; ok {
					event.peer.log.Error("Pending request exists for joining peer")
				}
				if _, ok := stales[peerid]; ok {
					event.peer.log.Error("Stale request exists for joining peer")
				}
				// Loop back to the entry point for task assignment
				continue
			}
			// A peer left, any existing requests need to be untracked, pending
			// tasks returned and possible reassignment checked
			if req, ok := pending[peerid]; ok {
				queue.unreserve(peerid) // TODO(karalabe): This needs a non-expiration method
				delete(pending, peerid)
				req.Close()

				if index, live := ordering[req]; live {
					timeouts.Remove(index)
					if index == 0 {
						if !timeout.Stop() {
							<-timeout.C
						}
						if timeouts.Size() > 0 {
							_, exp := timeouts.Peek()
							timeout.Reset(time.Until(time.Unix(0, -exp)))
						}
					}
					delete(ordering, req)
				}
			}
			if req, ok := stales[peerid]; ok {
				delete(stales, peerid)
				req.Close()
			}

		case <-timeout.C:
			// Retrieve the next request which should have timed out. The check
			// below is purely for to catch programming errors, given the correct
			// code, there's no possible order of events that should result in a
			// timeout firing for a non-existent event.
			item, exp := timeouts.Peek()
			if now, at := time.Now(), time.Unix(0, -exp); now.Before(at) {
				log.Error("Timeout triggered but not reached", "left", at.Sub(now))
				timeout.Reset(at.Sub(now))
				continue
			}
			req := item.(*eth.Request)

			// Stop tracking the timed out request from a timing perspective,
			// cancel it, so it's not considered in-flight anymore, but keep
			// the peer marked busy to prevent assigning a second request and
			// overloading it further.
			delete(pending, req.Peer)
			stales[req.Peer] = req
			delete(ordering, req)

			timeouts.Pop()
			if timeouts.Size() > 0 {
				_, exp := timeouts.Peek()
				timeout.Reset(time.Until(time.Unix(0, -exp)))
			}
			// New timeout potentially set if there are more requests pending,
			// reschedule the failed one to a free peer
			fails := queue.unreserve(req.Peer)

			// Finally, update the peer's retrieval capacity, or if it's already
			// below the minimum allowance, drop the peer. If a lot of retrieval
			// elements expired, we might have overestimated the remote peer or
			// perhaps ourselves. Only reset to minimal throughput but don't drop
			// just yet.
			//
			// The reason the minimum threshold is 2 is that the downloader tries
			// to estimate the bandwidth and latency of a peer separately, which
			// requires pushing the measured capacity a bit and seeing how response
			// times reacts, to it always requests one more than the minimum (i.e.
			// min 2).
			peer := d.peers.Peer(req.Peer)
			if peer == nil {
				// If the peer got disconnected in between, we should really have
				// short-circuited it already. Just in case there's some strange
				// codepath, leave this check in not to crash.
				log.Error("Delivery timeout from unknown peer", "peer", req.Peer)
				continue
			}
			if fails > 2 {
				queue.updateCapacity(peer, 0, 0)
			} else {
				d.dropPeer(peer.id)

				// If this peer was the master peer, abort sync immediately
				d.cancelLock.RLock()
				master := peer.id == d.cancelPeer
				d.cancelLock.RUnlock()

				if master {
					d.cancel()
					return errTimeout
				}
			}

		case res := <-responses:
			// Response arrived, it may be for an existing or an already timed
			// out request. If the former, update the timeout heap and perhaps
			// reschedule the timeout timer.
			index, live := ordering[res.Req]
			if live {
				timeouts.Remove(index)
				if index == 0 {
					if !timeout.Stop() {
						<-timeout.C
					}
					if timeouts.Size() > 0 {
						_, exp := timeouts.Peek()
						timeout.Reset(time.Until(time.Unix(0, -exp)))
					}
				}
				delete(ordering, res.Req)
			}
			// Delete the pending request (if it still exists) and mark the peer idle
			delete(pending, res.Req.Peer)
			delete(stales, res.Req.Peer)

			// Signal the dispatcher that the round trip is done. We'll drop the
			// peer if the data turns out to be junk.
			res.Done <- nil
			res.Req.Close()

			// If the peer was previously banned and failed to deliver its pack
			// in a reasonable time frame, ignore its message.
			if peer := d.peers.Peer(res.Req.Peer); peer != nil {
				// Deliver the received chunk of data and check chain validity
				accepted, err := queue.deliver(peer, res)
				if errors.Is(err, errInvalidChain) {
					return err
				}
				// Unless a peer delivered something completely else than requested (usually
				// caused by a timed out request which came through in the end), set it to
				// idle. If the delivery's stale, the peer should have already been idled.
				if !errors.Is(err, errStaleDelivery) {
					queue.updateCapacity(peer, accepted, res.Time)
				}
			}

		case cont := <-queue.waker():
			// The header fetcher sent a continuation flag, check if it's done
			if !cont {
				finished = true
			}
		}
	}
}
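One detail worth calling out in concurrentFetch above is the timeout bookkeeping: expiry times are pushed into the priority queue as negative UnixNano values, so the soonest deadline carries the highest priority and is the one returned by Peek. A standalone sketch of just that trick, assuming the non-generic common/prque API used in the diff:

package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/common/prque"
)

func main() {
	// No index callback is needed for this demo, so pass nil.
	q := prque.New(nil)

	// Push three fake requests with different TTLs; the priority is the
	// negated absolute expiry time of each request.
	for i, ttl := range []time.Duration{3 * time.Second, time.Second, 2 * time.Second} {
		q.Push(fmt.Sprintf("req-%d", i), -time.Now().Add(ttl).UnixNano())
	}
	// Peek returns the highest-priority entry, i.e. the earliest expiry once
	// the sign is flipped back; here that is req-1 (the one-second TTL).
	item, exp := q.Peek()
	fmt.Println("next to expire:", item, "at", time.Unix(0, -exp))
}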
104	eth/downloader/fetchers_concurrent_bodies.go	Normal file
@ -0,0 +1,104 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/eth/protocols/eth"
	"github.com/ethereum/go-ethereum/log"
)

// bodyQueue implements typedQueue and is a type adapter between the generic
// concurrent fetcher and the downloader.
type bodyQueue Downloader

// waker returns a notification channel that gets pinged in case more body
// fetches have been queued up, so the fetcher might assign it to idle peers.
func (q *bodyQueue) waker() chan bool {
	return q.queue.blockWakeCh
}

// pending returns the number of bodies that are currently queued for fetching
// by the concurrent downloader.
func (q *bodyQueue) pending() int {
	return q.queue.PendingBodies()
}

// capacity is responsible for calculating how many bodies a particular peer is
// estimated to be able to retrieve within the alloted round trip time.
func (q *bodyQueue) capacity(peer *peerConnection, rtt time.Duration) int {
	return peer.BodyCapacity(rtt)
}

// updateCapacity is responsible for updating how many bodies a particular peer
// is estimated to be able to retrieve in a unit time.
func (q *bodyQueue) updateCapacity(peer *peerConnection, items int, span time.Duration) {
	peer.UpdateBodyRate(items, span)
}

// reserve is responsible for allocating a requested number of pending bodies
// from the download queue to the specified peer.
func (q *bodyQueue) reserve(peer *peerConnection, items int) (*fetchRequest, bool, bool) {
	return q.queue.ReserveBodies(peer, items)
}

// unreserve is resposible for removing the current body retrieval allocation
// assigned to a specific peer and placing it back into the pool to allow
// reassigning to some other peer.
func (q *bodyQueue) unreserve(peer string) int {
	fails := q.queue.ExpireBodies(peer)
	if fails > 2 {
		log.Trace("Body delivery timed out", "peer", peer)
	} else {
		log.Debug("Body delivery stalling", "peer", peer)
	}
	return fails
}

// request is responsible for converting a generic fetch request into a body
// one and sending it to the remote peer for fulfillment.
func (q *bodyQueue) request(peer *peerConnection, req *fetchRequest, resCh chan *eth.Response) (*eth.Request, error) {
	peer.log.Trace("Requesting new batch of bodies", "count", len(req.Headers), "from", req.Headers[0].Number)
	if q.bodyFetchHook != nil {
		q.bodyFetchHook(req.Headers)
	}

	hashes := make([]common.Hash, 0, len(req.Headers))
	for _, header := range req.Headers {
		hashes = append(hashes, header.Hash())
	}
	return peer.peer.RequestBodies(hashes, resCh)
}

// deliver is responsible for taking a generic response packet from the concurrent
// fetcher, unpacking the body data and delivering it to the downloader's queue.
func (q *bodyQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) {
	txs, uncles := packet.Res.(*eth.BlockBodiesPacket).Unpack()

	accepted, err := q.queue.DeliverBodies(peer.id, txs, uncles)
	switch {
	case err == nil && len(txs) == 0:
		peer.log.Trace("Requested bodies delivered")
	case err == nil:
		peer.log.Trace("Delivered new batch of bodies", "count", len(txs), "accepted", accepted)
	default:
		peer.log.Debug("Failed to deliver retrieved bodies", "err", err)
	}
	return accepted, err
}
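bodyQueue and the header/receipt adapters below are thin shims over the downloader's own queue, which lets a single concurrentFetch loop drive every data type. The actual call sites live in downloader.go, whose diff is suppressed above, so the wiring shown here is an assumption and the method names are illustrative only:

// Hypothetical wrappers sketching how the adapters are expected to be plugged
// in: the Downloader value itself is converted to the adapter type, which
// re-exposes the same queue with type-specific behaviour.
func (d *Downloader) fetchBodiesExample() error {
	return d.concurrentFetch((*bodyQueue)(d))
}

func (d *Downloader) fetchHeadersExample() error {
	return d.concurrentFetch((*headerQueue)(d))
}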
95	eth/downloader/fetchers_concurrent_headers.go	Normal file
@ -0,0 +1,95 @@
// Copyright 2021 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package downloader
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/eth/protocols/eth"
|
||||||
|
"github.com/ethereum/go-ethereum/log"
|
||||||
|
)
|
||||||
|
|
||||||
|
// headerQueue implements typedQueue and is a type adapter between the generic
|
||||||
|
// concurrent fetcher and the downloader.
|
||||||
|
type headerQueue Downloader
|
||||||
|
|
||||||
|
// waker returns a notification channel that gets pinged in case more header
|
||||||
|
// fetches have been queued up, so the fetcher might assign it to idle peers.
|
||||||
|
func (q *headerQueue) waker() chan bool {
|
||||||
|
return q.queue.headerContCh
|
||||||
|
}
|
||||||
|
|
||||||
|
// pending returns the number of headers that are currently queued for fetching
|
||||||
|
// by the concurrent downloader.
|
||||||
|
func (q *headerQueue) pending() int {
|
||||||
|
return q.queue.PendingHeaders()
|
||||||
|
}
|
||||||
|
|
||||||
|
// capacity is responsible for calculating how many headers a particular peer is
|
||||||
|
// estimated to be able to retrieve within the alloted round trip time.
|
||||||
|
func (q *headerQueue) capacity(peer *peerConnection, rtt time.Duration) int {
|
||||||
|
return peer.HeaderCapacity(rtt)
|
||||||
|
}
|
||||||
|
|
||||||
|
// updateCapacity is responsible for updating how many headers a particular peer
|
||||||
|
// is estimated to be able to retrieve in a unit time.
|
||||||
|
func (q *headerQueue) updateCapacity(peer *peerConnection, items int, span time.Duration) {
|
||||||
|
peer.UpdateHeaderRate(items, span)
|
||||||
|
}
|
||||||
|
|
||||||
|
// reserve is responsible for allocating a requested number of pending headers
|
||||||
|
// from the download queue to the specified peer.
|
||||||
|
func (q *headerQueue) reserve(peer *peerConnection, items int) (*fetchRequest, bool, bool) {
|
||||||
|
return q.queue.ReserveHeaders(peer, items), false, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// unreserve is resposible for removing the current header retrieval allocation
|
||||||
|
// assigned to a specific peer and placing it back into the pool to allow
|
||||||
|
// reassigning to some other peer.
|
||||||
|
func (q *headerQueue) unreserve(peer string) int {
|
||||||
|
fails := q.queue.ExpireHeaders(peer)
|
||||||
|
if fails > 2 {
|
||||||
|
log.Trace("Header delivery timed out", "peer", peer)
|
||||||
|
} else {
|
||||||
|
log.Debug("Header delivery stalling", "peer", peer)
|
||||||
|
}
|
||||||
|
return fails
|
||||||
|
}
|
||||||
|
|
||||||
|
// request is responsible for converting a generic fetch request into a header
|
||||||
|
// one and sending it to the remote peer for fulfillment.
|
||||||
|
func (q *headerQueue) request(peer *peerConnection, req *fetchRequest, resCh chan *eth.Response) (*eth.Request, error) {
|
||||||
|
peer.log.Trace("Requesting new batch of headers", "from", req.From)
|
||||||
|
return peer.peer.RequestHeadersByNumber(req.From, MaxHeaderFetch, 0, false, resCh)
|
||||||
|
}
|
||||||
|
|
||||||
|
// deliver is responsible for taking a generic response packet from the concurrent
|
||||||
|
// fetcher, unpacking the header data and delivering it to the downloader's queue.
|
||||||
|
func (q *headerQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) {
|
||||||
|
headers := *packet.Res.(*eth.BlockHeadersPacket)
|
||||||
|
|
||||||
|
accepted, err := q.queue.DeliverHeaders(peer.id, headers, q.headerProcCh)
|
||||||
|
switch {
|
||||||
|
case err == nil && len(headers) == 0:
|
||||||
|
peer.log.Trace("Requested headers delivered")
|
||||||
|
case err == nil:
|
||||||
|
peer.log.Trace("Delivered new batch of headers", "count", len(headers), "accepted", accepted)
|
||||||
|
default:
|
||||||
|
peer.log.Debug("Failed to deliver retrieved headers", "err", err)
|
||||||
|
}
|
||||||
|
return accepted, err
|
||||||
|
}
|
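The adapters in this commit (bodyQueue above, headerQueue here, receiptQueue below) all expose the same small surface: waker, pending, capacity, reserve, request and deliver. That shared shape is what lets a single concurrent fetcher loop drive every data type. The standalone sketch below is illustrative only: itemQueue, countQueue and runFetcher are made-up names that mirror the shape of these adapters, with none of the real networking or queue machinery.

// Illustrative sketch of the typedQueue-style adapter pattern (hypothetical names).
package main

import (
	"fmt"
	"time"
)

// itemQueue is the minimal adapter surface a generic fetcher loop needs.
type itemQueue interface {
	pending() int                     // tasks still queued
	capacity(rtt time.Duration) int   // how many items to ask a peer for
	request(items int) []int          // issue a request, return the reserved batch
	deliver(batch []int) (int, error) // hand results back to the queue
}

// countQueue is a toy implementation that simply counts down remaining tasks.
type countQueue struct{ remaining int }

func (q *countQueue) pending() int               { return q.remaining }
func (q *countQueue) capacity(time.Duration) int { return 4 }

func (q *countQueue) request(items int) []int {
	if items > q.remaining {
		items = q.remaining
	}
	q.remaining -= items
	return make([]int, items)
}

func (q *countQueue) deliver(batch []int) (int, error) { return len(batch), nil }

// runFetcher is the generic loop: while tasks are pending, reserve a batch sized
// to the peer's estimated capacity, request it and deliver the response.
func runFetcher(q itemQueue) {
	for q.pending() > 0 {
		batch := q.request(q.capacity(time.Second))
		accepted, _ := q.deliver(batch)
		fmt.Println("delivered", accepted, "items,", q.pending(), "left")
	}
}

func main() {
	runFetcher(&countQueue{remaining: 10})
}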
eth/downloader/fetchers_concurrent_receipts.go (new file, 103 lines)
@ -0,0 +1,103 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/eth/protocols/eth"
	"github.com/ethereum/go-ethereum/log"
)

// receiptQueue implements typedQueue and is a type adapter between the generic
// concurrent fetcher and the downloader.
type receiptQueue Downloader

// waker returns a notification channel that gets pinged in case more receipt
// fetches have been queued up, so the fetcher might assign it to idle peers.
func (q *receiptQueue) waker() chan bool {
	return q.queue.receiptWakeCh
}

// pending returns the number of receipts that are currently queued for fetching
// by the concurrent downloader.
func (q *receiptQueue) pending() int {
	return q.queue.PendingReceipts()
}

// capacity is responsible for calculating how many receipts a particular peer is
// estimated to be able to retrieve within the allotted round trip time.
func (q *receiptQueue) capacity(peer *peerConnection, rtt time.Duration) int {
	return peer.ReceiptCapacity(rtt)
}

// updateCapacity is responsible for updating how many receipts a particular peer
// is estimated to be able to retrieve in a unit time.
func (q *receiptQueue) updateCapacity(peer *peerConnection, items int, span time.Duration) {
	peer.UpdateReceiptRate(items, span)
}

// reserve is responsible for allocating a requested number of pending receipts
// from the download queue to the specified peer.
func (q *receiptQueue) reserve(peer *peerConnection, items int) (*fetchRequest, bool, bool) {
	return q.queue.ReserveReceipts(peer, items)
}

// unreserve is responsible for removing the current receipt retrieval allocation
// assigned to a specific peer and placing it back into the pool to allow
// reassigning to some other peer.
func (q *receiptQueue) unreserve(peer string) int {
	fails := q.queue.ExpireReceipts(peer)
	if fails > 2 {
		log.Trace("Receipt delivery timed out", "peer", peer)
	} else {
		log.Debug("Receipt delivery stalling", "peer", peer)
	}
	return fails
}

// request is responsible for converting a generic fetch request into a receipt
// one and sending it to the remote peer for fulfillment.
func (q *receiptQueue) request(peer *peerConnection, req *fetchRequest, resCh chan *eth.Response) (*eth.Request, error) {
	peer.log.Trace("Requesting new batch of receipts", "count", len(req.Headers), "from", req.Headers[0].Number)
	if q.receiptFetchHook != nil {
		q.receiptFetchHook(req.Headers)
	}
	hashes := make([]common.Hash, 0, len(req.Headers))
	for _, header := range req.Headers {
		hashes = append(hashes, header.Hash())
	}
	return peer.peer.RequestReceipts(hashes, resCh)
}

// deliver is responsible for taking a generic response packet from the concurrent
// fetcher, unpacking the receipt data and delivering it to the downloader's queue.
func (q *receiptQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) {
	receipts := *packet.Res.(*eth.ReceiptsPacket)

	accepted, err := q.queue.DeliverReceipts(peer.id, receipts)
	switch {
	case err == nil && len(receipts) == 0:
		peer.log.Trace("Requested receipts delivered")
	case err == nil:
		peer.log.Trace("Delivered new batch of receipts", "count", len(receipts), "accepted", accepted)
	default:
		peer.log.Debug("Failed to deliver retrieved receipts", "err", err)
	}
	return accepted, err
}
@ -38,8 +38,5 @@ var (
 	receiptDropMeter    = metrics.NewRegisteredMeter("eth/downloader/receipts/drop", nil)
 	receiptTimeoutMeter = metrics.NewRegisteredMeter("eth/downloader/receipts/timeout", nil)

-	stateInMeter   = metrics.NewRegisteredMeter("eth/downloader/states/in", nil)
-	stateDropMeter = metrics.NewRegisteredMeter("eth/downloader/states/drop", nil)
-
 	throttleCounter = metrics.NewRegisteredCounter("eth/downloader/throttle", nil)
 )
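For context, the meters touched in this hunk come from go-ethereum's metrics package. The snippet below is a minimal, hedged illustration of the two calls visible in the diff (NewRegisteredMeter and Mark); the meter name is invented for the example and is not one the downloader registers.

// Minimal illustration (not part of the commit) of the meter API used above.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/metrics"
)

func main() {
	// Register a meter on the default registry (nil), as the downloader does.
	// The "example/..." name is made up for this sketch.
	dropMeter := metrics.NewRegisteredMeter("example/downloader/items/drop", nil)

	// Mark items as they are dropped; rates and counts accumulate internally.
	dropMeter.Mark(3)
	dropMeter.Mark(2)

	// Note: if metrics collection is disabled, a no-op meter is returned and
	// this prints 0.
	fmt.Println("dropped so far:", dropMeter.Count())
}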
@ -24,7 +24,6 @@ type SyncMode uint32

 const (
 	FullSync  SyncMode = iota // Synchronise the entire blockchain history from full blocks
-	FastSync                  // Quickly download the headers, full sync only at the chain
 	SnapSync                  // Download the chain and the state via compact snapshots
 	LightSync                 // Download only the headers and terminate afterwards
 )
@ -38,8 +37,6 @@ func (mode SyncMode) String() string {
 	switch mode {
 	case FullSync:
 		return "full"
-	case FastSync:
-		return "fast"
 	case SnapSync:
 		return "snap"
 	case LightSync:
@ -53,8 +50,6 @@ func (mode SyncMode) MarshalText() ([]byte, error) {
 	switch mode {
 	case FullSync:
 		return []byte("full"), nil
-	case FastSync:
-		return []byte("fast"), nil
 	case SnapSync:
 		return []byte("snap"), nil
 	case LightSync:
@ -68,14 +63,12 @@ func (mode *SyncMode) UnmarshalText(text []byte) error {
 	switch string(text) {
 	case "full":
 		*mode = FullSync
-	case "fast":
-		*mode = FastSync
 	case "snap":
 		*mode = SnapSync
 	case "light":
 		*mode = LightSync
 	default:
-		return fmt.Errorf(`unknown sync mode %q, want "full", "fast" or "light"`, text)
+		return fmt.Errorf(`unknown sync mode %q, want "full", "snap" or "light"`, text)
 	}
 	return nil
 }
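A small, hedged sketch (not from the commit) of what the change above means for callers of the TextMarshaler interface: "snap" and "full" still round-trip, while "fast" is now rejected with the updated error message.

// Exercising SyncMode's text round trip after the removal of FastSync.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/eth/downloader"
)

func main() {
	var mode downloader.SyncMode

	// "snap" parses into SnapSync and marshals back to "snap".
	if err := mode.UnmarshalText([]byte("snap")); err != nil {
		panic(err)
	}
	text, _ := mode.MarshalText()
	fmt.Printf("parsed %v, marshals back to %q\n", mode, text)

	// "fast" is no longer a valid mode and surfaces the new error string.
	if err := mode.UnmarshalText([]byte("fast")); err != nil {
		fmt.Println("rejected:", err)
	}
}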
@ -22,9 +22,7 @@ package downloader
 import (
 	"errors"
 	"math/big"
-	"sort"
 	"sync"
-	"sync/atomic"
 	"time"

 	"github.com/ethereum/go-ethereum/common"
@ -39,7 +37,6 @@ const (
 )

 var (
-	errAlreadyFetching   = errors.New("already fetching blocks from peer")
 	errAlreadyRegistered = errors.New("peer is already registered")
 	errNotRegistered     = errors.New("peer is not registered")
 )
@ -48,16 +45,6 @@ var (
 type peerConnection struct {
 	id string // Unique identifier of the peer

-	headerIdle  int32 // Current header activity state of the peer (idle = 0, active = 1)
-	blockIdle   int32 // Current block activity state of the peer (idle = 0, active = 1)
-	receiptIdle int32 // Current receipt activity state of the peer (idle = 0, active = 1)
-	stateIdle   int32 // Current node data activity state of the peer (idle = 0, active = 1)
-
-	headerStarted  time.Time // Time instance when the last header fetch was started
-	blockStarted   time.Time // Time instance when the last block (body) fetch was started
-	receiptStarted time.Time // Time instance when the last receipt fetch was started
-	stateStarted   time.Time // Time instance when the last node data fetch was started
-
 	rates   *msgrate.Tracker         // Tracker to hone in on the number of items retrievable per second
 	lacking map[common.Hash]struct{} // Set of hashes not to request (didn't have previously)
@ -71,16 +58,15 @@ type peerConnection struct {
 // LightPeer encapsulates the methods required to synchronise with a remote light peer.
 type LightPeer interface {
 	Head() (common.Hash, *big.Int)
-	RequestHeadersByHash(common.Hash, int, int, bool) error
-	RequestHeadersByNumber(uint64, int, int, bool) error
+	RequestHeadersByHash(common.Hash, int, int, bool, chan *eth.Response) (*eth.Request, error)
+	RequestHeadersByNumber(uint64, int, int, bool, chan *eth.Response) (*eth.Request, error)
 }

 // Peer encapsulates the methods required to synchronise with a remote full peer.
 type Peer interface {
 	LightPeer
-	RequestBodies([]common.Hash) error
-	RequestReceipts([]common.Hash) error
-	RequestNodeData([]common.Hash) error
+	RequestBodies([]common.Hash, chan *eth.Response) (*eth.Request, error)
+	RequestReceipts([]common.Hash, chan *eth.Response) (*eth.Request, error)
 }

 // lightPeerWrapper wraps a LightPeer struct, stubbing out the Peer-only methods.
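The interface change above is the heart of the commit: instead of fire-and-forget Request* calls answered through a shared delivery path, each request now carries its own response sink channel and returns a handle. The toy program below is a self-contained illustration of that request-ID/sink-channel pattern; none of its names come from go-ethereum.

// Standalone toy (invented names) of request-id dispatch: each request registers
// a per-request sink channel keyed by id, and the dispatcher routes replies
// directly to the requester instead of a shared delivery queue.
package main

import (
	"fmt"
	"sync"
)

type response struct {
	id   uint64
	data string
}

type dispatcher struct {
	mu    sync.Mutex
	next  uint64
	sinks map[uint64]chan *response
}

func newDispatcher() *dispatcher {
	return &dispatcher{sinks: make(map[uint64]chan *response)}
}

// request allocates an id, remembers the caller's sink and "sends" the request.
func (d *dispatcher) request(payload string, sink chan *response) uint64 {
	d.mu.Lock()
	d.next++
	id := d.next
	d.sinks[id] = sink
	d.mu.Unlock()

	// Simulate the remote peer answering asynchronously.
	go d.deliver(&response{id: id, data: "reply to " + payload})
	return id
}

// deliver looks up the sink registered for the response id and forwards it.
func (d *dispatcher) deliver(res *response) {
	d.mu.Lock()
	sink, ok := d.sinks[res.id]
	delete(d.sinks, res.id)
	d.mu.Unlock()
	if ok {
		sink <- res
	}
}

func main() {
	d := newDispatcher()
	sink := make(chan *response, 1)
	id := d.request("GetBlockHeaders", sink)

	res := <-sink
	fmt.Printf("request %d answered: %s\n", id, res.data)
}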
@ -89,21 +75,18 @@ type lightPeerWrapper struct {
 }

 func (w *lightPeerWrapper) Head() (common.Hash, *big.Int) { return w.peer.Head() }
-func (w *lightPeerWrapper) RequestHeadersByHash(h common.Hash, amount int, skip int, reverse bool) error {
-	return w.peer.RequestHeadersByHash(h, amount, skip, reverse)
+func (w *lightPeerWrapper) RequestHeadersByHash(h common.Hash, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
+	return w.peer.RequestHeadersByHash(h, amount, skip, reverse, sink)
 }
-func (w *lightPeerWrapper) RequestHeadersByNumber(i uint64, amount int, skip int, reverse bool) error {
-	return w.peer.RequestHeadersByNumber(i, amount, skip, reverse)
+func (w *lightPeerWrapper) RequestHeadersByNumber(i uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
+	return w.peer.RequestHeadersByNumber(i, amount, skip, reverse, sink)
 }
-func (w *lightPeerWrapper) RequestBodies([]common.Hash) error {
+func (w *lightPeerWrapper) RequestBodies([]common.Hash, chan *eth.Response) (*eth.Request, error) {
 	panic("RequestBodies not supported in light client mode sync")
 }
-func (w *lightPeerWrapper) RequestReceipts([]common.Hash) error {
+func (w *lightPeerWrapper) RequestReceipts([]common.Hash, chan *eth.Response) (*eth.Request, error) {
 	panic("RequestReceipts not supported in light client mode sync")
 }
-func (w *lightPeerWrapper) RequestNodeData([]common.Hash) error {
-	panic("RequestNodeData not supported in light client mode sync")
-}

 // newPeerConnection creates a new downloader peer.
 func newPeerConnection(id string, version uint, peer Peer, logger log.Logger) *peerConnection {
@ -121,114 +104,28 @@ func (p *peerConnection) Reset() {
 	p.lock.Lock()
 	defer p.lock.Unlock()

-	atomic.StoreInt32(&p.headerIdle, 0)
-	atomic.StoreInt32(&p.blockIdle, 0)
-	atomic.StoreInt32(&p.receiptIdle, 0)
-	atomic.StoreInt32(&p.stateIdle, 0)
-
 	p.lacking = make(map[common.Hash]struct{})
 }

-// FetchHeaders sends a header retrieval request to the remote peer.
-func (p *peerConnection) FetchHeaders(from uint64, count int) error {
-	// Short circuit if the peer is already fetching
-	if !atomic.CompareAndSwapInt32(&p.headerIdle, 0, 1) {
-		return errAlreadyFetching
-	}
-	p.headerStarted = time.Now()
-
-	// Issue the header retrieval request (absolute upwards without gaps)
-	go p.peer.RequestHeadersByNumber(from, count, 0, false)
-
-	return nil
+// UpdateHeaderRate updates the peer's estimated header retrieval throughput with
+// the current measurement.
+func (p *peerConnection) UpdateHeaderRate(delivered int, elapsed time.Duration) {
+	p.rates.Update(eth.BlockHeadersMsg, elapsed, delivered)
 }

-// FetchBodies sends a block body retrieval request to the remote peer.
-func (p *peerConnection) FetchBodies(request *fetchRequest) error {
-	// Short circuit if the peer is already fetching
-	if !atomic.CompareAndSwapInt32(&p.blockIdle, 0, 1) {
-		return errAlreadyFetching
-	}
-	p.blockStarted = time.Now()
-
-	go func() {
-		// Convert the header set to a retrievable slice
-		hashes := make([]common.Hash, 0, len(request.Headers))
-		for _, header := range request.Headers {
-			hashes = append(hashes, header.Hash())
-		}
-		p.peer.RequestBodies(hashes)
-	}()
-
-	return nil
+// UpdateBodyRate updates the peer's estimated body retrieval throughput with the
+// current measurement.
+func (p *peerConnection) UpdateBodyRate(delivered int, elapsed time.Duration) {
+	p.rates.Update(eth.BlockBodiesMsg, elapsed, delivered)
 }

-// FetchReceipts sends a receipt retrieval request to the remote peer.
-func (p *peerConnection) FetchReceipts(request *fetchRequest) error {
-	// Short circuit if the peer is already fetching
-	if !atomic.CompareAndSwapInt32(&p.receiptIdle, 0, 1) {
-		return errAlreadyFetching
-	}
-	p.receiptStarted = time.Now()
-
-	go func() {
-		// Convert the header set to a retrievable slice
-		hashes := make([]common.Hash, 0, len(request.Headers))
-		for _, header := range request.Headers {
-			hashes = append(hashes, header.Hash())
-		}
-		p.peer.RequestReceipts(hashes)
-	}()
-
-	return nil
+// UpdateReceiptRate updates the peer's estimated receipt retrieval throughput
+// with the current measurement.
+func (p *peerConnection) UpdateReceiptRate(delivered int, elapsed time.Duration) {
+	p.rates.Update(eth.ReceiptsMsg, elapsed, delivered)
 }

-// FetchNodeData sends a node state data retrieval request to the remote peer.
-func (p *peerConnection) FetchNodeData(hashes []common.Hash) error {
-	// Short circuit if the peer is already fetching
-	if !atomic.CompareAndSwapInt32(&p.stateIdle, 0, 1) {
-		return errAlreadyFetching
-	}
-	p.stateStarted = time.Now()
-
-	go p.peer.RequestNodeData(hashes)
-
-	return nil
-}
-
-// SetHeadersIdle sets the peer to idle, allowing it to execute new header retrieval
-// requests. Its estimated header retrieval throughput is updated with that measured
-// just now.
-func (p *peerConnection) SetHeadersIdle(delivered int, deliveryTime time.Time) {
-	p.rates.Update(eth.BlockHeadersMsg, deliveryTime.Sub(p.headerStarted), delivered)
-	atomic.StoreInt32(&p.headerIdle, 0)
-}
-
-// SetBodiesIdle sets the peer to idle, allowing it to execute block body retrieval
-// requests. Its estimated body retrieval throughput is updated with that measured
-// just now.
-func (p *peerConnection) SetBodiesIdle(delivered int, deliveryTime time.Time) {
-	p.rates.Update(eth.BlockBodiesMsg, deliveryTime.Sub(p.blockStarted), delivered)
-	atomic.StoreInt32(&p.blockIdle, 0)
-}
-
-// SetReceiptsIdle sets the peer to idle, allowing it to execute new receipt
-// retrieval requests. Its estimated receipt retrieval throughput is updated
-// with that measured just now.
-func (p *peerConnection) SetReceiptsIdle(delivered int, deliveryTime time.Time) {
-	p.rates.Update(eth.ReceiptsMsg, deliveryTime.Sub(p.receiptStarted), delivered)
-	atomic.StoreInt32(&p.receiptIdle, 0)
-}
-
-// SetNodeDataIdle sets the peer to idle, allowing it to execute new state trie
-// data retrieval requests. Its estimated state retrieval throughput is updated
-// with that measured just now.
-func (p *peerConnection) SetNodeDataIdle(delivered int, deliveryTime time.Time) {
-	p.rates.Update(eth.NodeDataMsg, deliveryTime.Sub(p.stateStarted), delivered)
-	atomic.StoreInt32(&p.stateIdle, 0)
-}
-
-// HeaderCapacity retrieves the peers header download allowance based on its
+// HeaderCapacity retrieves the peer's header download allowance based on its
 // previously discovered throughput.
 func (p *peerConnection) HeaderCapacity(targetRTT time.Duration) int {
 	cap := p.rates.Capacity(eth.BlockHeadersMsg, targetRTT)
@ -238,9 +135,9 @@ func (p *peerConnection) HeaderCapacity(targetRTT time.Duration) int {
 	return cap
 }

-// BlockCapacity retrieves the peers block download allowance based on its
+// BodyCapacity retrieves the peer's body download allowance based on its
 // previously discovered throughput.
-func (p *peerConnection) BlockCapacity(targetRTT time.Duration) int {
+func (p *peerConnection) BodyCapacity(targetRTT time.Duration) int {
 	cap := p.rates.Capacity(eth.BlockBodiesMsg, targetRTT)
 	if cap > MaxBlockFetch {
 		cap = MaxBlockFetch
@ -258,16 +155,6 @@ func (p *peerConnection) ReceiptCapacity(targetRTT time.Duration) int {
 	return cap
 }

-// NodeDataCapacity retrieves the peers state download allowance based on its
-// previously discovered throughput.
-func (p *peerConnection) NodeDataCapacity(targetRTT time.Duration) int {
-	cap := p.rates.Capacity(eth.NodeDataMsg, targetRTT)
-	if cap > MaxStateFetch {
-		cap = MaxStateFetch
-	}
-	return cap
-}
-
 // MarkLacking appends a new entity to the set of items (blocks, receipts, states)
 // that a peer is known not to have (i.e. have been requested before). If the
 // set reaches its maximum allowed capacity, items are randomly dropped off.
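The Update*Rate and *Capacity methods above delegate to the msgrate tracker: deliveries are folded into a per-peer throughput estimate, and capacities for a target round trip time are derived from it. The toy below only mirrors that idea with a simple exponentially weighted average; the real estimator lives in p2p/msgrate and is considerably more elaborate.

// Standalone, illustrative sketch of throughput tracking and capacity estimation.
package main

import (
	"fmt"
	"time"
)

type rateTracker struct {
	itemsPerSec float64 // smoothed throughput estimate
}

// update folds a new measurement into the estimate (simple EWMA).
func (t *rateTracker) update(delivered int, elapsed time.Duration) {
	measured := float64(delivered) / elapsed.Seconds()
	t.itemsPerSec = 0.9*t.itemsPerSec + 0.1*measured
}

// capacity estimates how many items fit into the target round trip time.
func (t *rateTracker) capacity(targetRTT time.Duration) int {
	return int(t.itemsPerSec * targetRTT.Seconds())
}

func main() {
	t := &rateTracker{itemsPerSec: 64}
	t.update(192, 1500*time.Millisecond) // 128 items/s measured
	fmt.Println("capacity for 1s RTT:", t.capacity(time.Second))
}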
@ -294,14 +181,19 @@ func (p *peerConnection) Lacks(hash common.Hash) bool {
 	return ok
 }

+// peeringEvent is sent on the peer event feed when a remote peer connects or
+// disconnects.
+type peeringEvent struct {
+	peer *peerConnection
+	join bool
+}
+
 // peerSet represents the collection of active peer participating in the chain
 // download procedure.
 type peerSet struct {
 	peers map[string]*peerConnection
 	rates *msgrate.Trackers // Set of rate trackers to give the sync a common beat
-
-	newPeerFeed  event.Feed
-	peerDropFeed event.Feed
+	events event.Feed        // Feed to publish peer lifecycle events on

 	lock sync.RWMutex
 }
@ -314,14 +206,9 @@ func newPeerSet() *peerSet {
 	}
 }

-// SubscribeNewPeers subscribes to peer arrival events.
-func (ps *peerSet) SubscribeNewPeers(ch chan<- *peerConnection) event.Subscription {
-	return ps.newPeerFeed.Subscribe(ch)
-}
-
-// SubscribePeerDrops subscribes to peer departure events.
-func (ps *peerSet) SubscribePeerDrops(ch chan<- *peerConnection) event.Subscription {
-	return ps.peerDropFeed.Subscribe(ch)
+// SubscribeEvents subscribes to peer arrival and departure events.
+func (ps *peerSet) SubscribeEvents(ch chan<- *peeringEvent) event.Subscription {
+	return ps.events.Subscribe(ch)
 }

 // Reset iterates over the current peer set, and resets each of the known peers
@ -355,7 +242,7 @@ func (ps *peerSet) Register(p *peerConnection) error {
 	ps.peers[p.id] = p
 	ps.lock.Unlock()

-	ps.newPeerFeed.Send(p)
+	ps.events.Send(&peeringEvent{peer: p, join: true})
 	return nil
 }

@ -372,7 +259,7 @@ func (ps *peerSet) Unregister(id string) error {
 	ps.rates.Untrack(id)
 	ps.lock.Unlock()

-	ps.peerDropFeed.Send(p)
+	ps.events.Send(&peeringEvent{peer: p, join: false})
 	return nil
 }

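The two separate arrival/departure feeds collapse into one lifecycle feed above, so a consumer keeps a single subscription and branches on the join flag. The sketch below uses go-ethereum's event.Feed directly to show that pattern; peerEvent is a stand-in for the unexported peeringEvent type and not part of the library.

// Self-contained sketch of a unified lifecycle feed using event.Feed.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/event"
)

type peerEvent struct {
	id   string
	join bool
}

func main() {
	var feed event.Feed

	events := make(chan *peerEvent, 8)
	sub := feed.Subscribe(events)
	defer sub.Unsubscribe()

	// One feed now carries both arrivals and departures, so consumers keep a
	// single select case instead of two subscriptions.
	feed.Send(&peerEvent{id: "peer-1", join: true})
	feed.Send(&peerEvent{id: "peer-1", join: false})

	for i := 0; i < 2; i++ {
		ev := <-events
		if ev.join {
			fmt.Println(ev.id, "joined")
		} else {
			fmt.Println(ev.id, "left")
		}
	}
}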
@ -404,82 +291,6 @@ func (ps *peerSet) AllPeers() []*peerConnection {
 	return list
 }

-// HeaderIdlePeers retrieves a flat list of all the currently header-idle peers
-// within the active peer set, ordered by their reputation.
-func (ps *peerSet) HeaderIdlePeers() ([]*peerConnection, int) {
-	idle := func(p *peerConnection) bool {
-		return atomic.LoadInt32(&p.headerIdle) == 0
-	}
-	throughput := func(p *peerConnection) int {
-		return p.rates.Capacity(eth.BlockHeadersMsg, time.Second)
-	}
-	return ps.idlePeers(eth.ETH66, eth.ETH66, idle, throughput)
-}
-
-// BodyIdlePeers retrieves a flat list of all the currently body-idle peers within
-// the active peer set, ordered by their reputation.
-func (ps *peerSet) BodyIdlePeers() ([]*peerConnection, int) {
-	idle := func(p *peerConnection) bool {
-		return atomic.LoadInt32(&p.blockIdle) == 0
-	}
-	throughput := func(p *peerConnection) int {
-		return p.rates.Capacity(eth.BlockBodiesMsg, time.Second)
-	}
-	return ps.idlePeers(eth.ETH66, eth.ETH66, idle, throughput)
-}
-
-// ReceiptIdlePeers retrieves a flat list of all the currently receipt-idle peers
-// within the active peer set, ordered by their reputation.
-func (ps *peerSet) ReceiptIdlePeers() ([]*peerConnection, int) {
-	idle := func(p *peerConnection) bool {
-		return atomic.LoadInt32(&p.receiptIdle) == 0
-	}
-	throughput := func(p *peerConnection) int {
-		return p.rates.Capacity(eth.ReceiptsMsg, time.Second)
-	}
-	return ps.idlePeers(eth.ETH66, eth.ETH66, idle, throughput)
-}
-
-// NodeDataIdlePeers retrieves a flat list of all the currently node-data-idle
-// peers within the active peer set, ordered by their reputation.
-func (ps *peerSet) NodeDataIdlePeers() ([]*peerConnection, int) {
-	idle := func(p *peerConnection) bool {
-		return atomic.LoadInt32(&p.stateIdle) == 0
-	}
-	throughput := func(p *peerConnection) int {
-		return p.rates.Capacity(eth.NodeDataMsg, time.Second)
-	}
-	return ps.idlePeers(eth.ETH66, eth.ETH66, idle, throughput)
-}
-
-// idlePeers retrieves a flat list of all currently idle peers satisfying the
-// protocol version constraints, using the provided function to check idleness.
-// The resulting set of peers are sorted by their capacity.
-func (ps *peerSet) idlePeers(minProtocol, maxProtocol uint, idleCheck func(*peerConnection) bool, capacity func(*peerConnection) int) ([]*peerConnection, int) {
-	ps.lock.RLock()
-	defer ps.lock.RUnlock()
-
-	var (
-		total = 0
-		idle  = make([]*peerConnection, 0, len(ps.peers))
-		tps   = make([]int, 0, len(ps.peers))
-	)
-	for _, p := range ps.peers {
-		if p.version >= minProtocol && p.version <= maxProtocol {
-			if idleCheck(p) {
-				idle = append(idle, p)
-				tps = append(tps, capacity(p))
-			}
-			total++
-		}
-	}
-	// And sort them
-	sortPeers := &peerCapacitySort{idle, tps}
-	sort.Sort(sortPeers)
-	return sortPeers.p, total
-}
-
 // peerCapacitySort implements sort.Interface.
 // It sorts peer connections by capacity (descending).
 type peerCapacitySort struct {
@ -54,8 +54,8 @@ var (
 // fetchRequest is a currently running data retrieval operation.
 type fetchRequest struct {
 	Peer    *peerConnection // Peer to which the request was sent
-	From    uint64          // [eth/62] Requested chain element index (used for skeleton fills only)
-	Headers []*types.Header // [eth/62] Requested headers, sorted by request order
+	From    uint64          // Requested chain element index (used for skeleton fills only)
+	Headers []*types.Header // Requested headers, sorted by request order
 	Time    time.Time       // Time when the request was made
 }

@ -127,10 +127,12 @@ type queue struct {
 	blockTaskPool  map[common.Hash]*types.Header // Pending block (body) retrieval tasks, mapping hashes to headers
 	blockTaskQueue *prque.Prque                  // Priority queue of the headers to fetch the blocks (bodies) for
 	blockPendPool  map[string]*fetchRequest      // Currently pending block (body) retrieval operations
+	blockWakeCh    chan bool                     // Channel to notify the block fetcher of new tasks

 	receiptTaskPool  map[common.Hash]*types.Header // Pending receipt retrieval tasks, mapping hashes to headers
 	receiptTaskQueue *prque.Prque                  // Priority queue of the headers to fetch the receipts for
 	receiptPendPool  map[string]*fetchRequest      // Currently pending receipt retrieval operations
+	receiptWakeCh    chan bool                     // Channel to notify the receipt fetcher of new tasks

 	resultCache  *resultStore       // Downloaded but not yet delivered fetch results
 	resultSize   common.StorageSize // Approximate size of a block (exponential moving average)
@ -146,9 +148,11 @@ type queue struct {
 func newQueue(blockCacheLimit int, thresholdInitialSize int) *queue {
 	lock := new(sync.RWMutex)
 	q := &queue{
-		headerContCh:     make(chan bool),
+		headerContCh:     make(chan bool, 1),
 		blockTaskQueue:   prque.New(nil),
+		blockWakeCh:      make(chan bool, 1),
 		receiptTaskQueue: prque.New(nil),
+		receiptWakeCh:    make(chan bool, 1),
 		active:           sync.NewCond(lock),
 		lock:             lock,
 	}
@ -196,8 +200,8 @@ func (q *queue) PendingHeaders() int {
 	return q.headerTaskQueue.Size()
 }

-// PendingBlocks retrieves the number of block (body) requests pending for retrieval.
-func (q *queue) PendingBlocks() int {
+// PendingBodies retrieves the number of block body requests pending for retrieval.
+func (q *queue) PendingBodies() int {
 	q.lock.Lock()
 	defer q.lock.Unlock()

@ -212,15 +216,6 @@ func (q *queue) PendingReceipts() int {
 	return q.receiptTaskQueue.Size()
 }

-// InFlightHeaders retrieves whether there are header fetch requests currently
-// in flight.
-func (q *queue) InFlightHeaders() bool {
-	q.lock.Lock()
-	defer q.lock.Unlock()
-
-	return len(q.headerPendPool) > 0
-}
-
 // InFlightBlocks retrieves whether there are block fetch requests currently in
 // flight.
 func (q *queue) InFlightBlocks() bool {
@ -318,7 +313,7 @@ func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
 			q.blockTaskQueue.Push(header, -int64(header.Number.Uint64()))
 		}
 		// Queue for receipt retrieval
-		if q.mode == FastSync && !header.EmptyReceipts() {
+		if q.mode == SnapSync && !header.EmptyReceipts() {
 			if _, ok := q.receiptTaskPool[hash]; ok {
 				log.Warn("Header already scheduled for receipt fetch", "number", header.Number, "hash", hash)
 			} else {
@ -383,6 +378,13 @@ func (q *queue) Results(block bool) []*fetchResult {
 		throttleThreshold := uint64((common.StorageSize(blockCacheMemory) + q.resultSize - 1) / q.resultSize)
 		throttleThreshold = q.resultCache.SetThrottleThreshold(throttleThreshold)

+		// With results removed from the cache, wake throttled fetchers
+		for _, ch := range []chan bool{q.blockWakeCh, q.receiptWakeCh} {
+			select {
+			case ch <- true:
+			default:
+			}
+		}
 		// Log some info at certain times
 		if time.Since(q.lastStatLog) > 60*time.Second {
 			q.lastStatLog = time.Now()
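The non-blocking send above is the other half of the wake-channel mechanism added to the queue: a one-slot buffered channel coalesces any number of "new work" signals into at most one pending wakeup, and the sender never blocks. A standalone toy version of the pattern:

// Illustrative sketch of the buffered wake-channel pattern (not downloader code).
package main

import (
	"fmt"
	"time"
)

func main() {
	wake := make(chan bool, 1) // mirrors blockWakeCh / receiptWakeCh

	// Producer: non-blocking notify, dropping the signal if one is already pending.
	notify := func() {
		select {
		case wake <- true:
		default:
		}
	}

	// Consumer: a fetcher-style loop that waits for wakeups.
	go func() {
		for range wake {
			fmt.Println("fetcher woken up, checking for queued tasks")
		}
	}()

	for i := 0; i < 3; i++ {
		notify() // bursts coalesce into a single pending wakeup
	}
	time.Sleep(100 * time.Millisecond)
	close(wake)
}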
@ -503,7 +505,7 @@ func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common
 		// we can ask the resultcache if this header is within the
 		// "prioritized" segment of blocks. If it is not, we need to throttle

-		stale, throttle, item, err := q.resultCache.AddFetch(header, q.mode == FastSync)
+		stale, throttle, item, err := q.resultCache.AddFetch(header, q.mode == SnapSync)
 		if stale {
 			// Don't put back in the task queue, this item has already been
 			// delivered upstream
@ -566,40 +568,6 @@ func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common
 	return request, progress, throttled
 }

-// CancelHeaders aborts a fetch request, returning all pending skeleton indexes to the queue.
-func (q *queue) CancelHeaders(request *fetchRequest) {
-	q.lock.Lock()
-	defer q.lock.Unlock()
-	q.cancel(request, q.headerTaskQueue, q.headerPendPool)
-}
-
-// CancelBodies aborts a body fetch request, returning all pending headers to the
-// task queue.
-func (q *queue) CancelBodies(request *fetchRequest) {
-	q.lock.Lock()
-	defer q.lock.Unlock()
-	q.cancel(request, q.blockTaskQueue, q.blockPendPool)
-}
-
-// CancelReceipts aborts a body fetch request, returning all pending headers to
-// the task queue.
-func (q *queue) CancelReceipts(request *fetchRequest) {
-	q.lock.Lock()
-	defer q.lock.Unlock()
-	q.cancel(request, q.receiptTaskQueue, q.receiptPendPool)
-}
-
-// Cancel aborts a fetch request, returning all pending hashes to the task queue.
-func (q *queue) cancel(request *fetchRequest, taskQueue *prque.Prque, pendPool map[string]*fetchRequest) {
-	if request.From > 0 {
-		taskQueue.Push(request.From, -int64(request.From))
-	}
-	for _, header := range request.Headers {
-		taskQueue.Push(header, -int64(header.Number.Uint64()))
-	}
-	delete(pendPool, request.Peer.id)
-}
-
 // Revoke cancels all pending requests belonging to a given peer. This method is
 // meant to be called during a peer drop to quickly reassign owned data fetches
 // to remaining nodes.
@ -607,6 +575,10 @@ func (q *queue) Revoke(peerID string) {
 	q.lock.Lock()
 	defer q.lock.Unlock()

+	if request, ok := q.headerPendPool[peerID]; ok {
+		q.headerTaskQueue.Push(request.From, -int64(request.From))
+		delete(q.headerPendPool, peerID)
+	}
 	if request, ok := q.blockPendPool[peerID]; ok {
 		for _, header := range request.Headers {
 			q.blockTaskQueue.Push(header, -int64(header.Number.Uint64()))
@ -621,62 +593,60 @@ func (q *queue) Revoke(peerID string) {
 	}
 }

-// ExpireHeaders checks for in flight requests that exceeded a timeout allowance,
-// canceling them and returning the responsible peers for penalisation.
-func (q *queue) ExpireHeaders(timeout time.Duration) map[string]int {
+// ExpireHeaders cancels a request that timed out and moves the pending fetch
+// task back into the queue for rescheduling.
+func (q *queue) ExpireHeaders(peer string) int {
 	q.lock.Lock()
 	defer q.lock.Unlock()

-	return q.expire(timeout, q.headerPendPool, q.headerTaskQueue, headerTimeoutMeter)
+	headerTimeoutMeter.Mark(1)
+	return q.expire(peer, q.headerPendPool, q.headerTaskQueue)
 }

 // ExpireBodies checks for in flight block body requests that exceeded a timeout
 // allowance, canceling them and returning the responsible peers for penalisation.
-func (q *queue) ExpireBodies(timeout time.Duration) map[string]int {
+func (q *queue) ExpireBodies(peer string) int {
 	q.lock.Lock()
 	defer q.lock.Unlock()

-	return q.expire(timeout, q.blockPendPool, q.blockTaskQueue, bodyTimeoutMeter)
+	bodyTimeoutMeter.Mark(1)
+	return q.expire(peer, q.blockPendPool, q.blockTaskQueue)
 }

 // ExpireReceipts checks for in flight receipt requests that exceeded a timeout
 // allowance, canceling them and returning the responsible peers for penalisation.
-func (q *queue) ExpireReceipts(timeout time.Duration) map[string]int {
+func (q *queue) ExpireReceipts(peer string) int {
 	q.lock.Lock()
 	defer q.lock.Unlock()

-	return q.expire(timeout, q.receiptPendPool, q.receiptTaskQueue, receiptTimeoutMeter)
+	receiptTimeoutMeter.Mark(1)
+	return q.expire(peer, q.receiptPendPool, q.receiptTaskQueue)
 }

-// expire is the generic check that move expired tasks from a pending pool back
-// into a task pool, returning all entities caught with expired tasks.
+// expire is the generic check that moves a specific expired task from a pending
+// pool back into a task pool.
 //
-// Note, this method expects the queue lock to be already held. The
-// reason the lock is not obtained in here is because the parameters already need
-// to access the queue, so they already need a lock anyway.
-func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest, taskQueue *prque.Prque, timeoutMeter metrics.Meter) map[string]int {
-	// Iterate over the expired requests and return each to the queue
-	expiries := make(map[string]int)
-	for id, request := range pendPool {
-		if time.Since(request.Time) > timeout {
-			// Update the metrics with the timeout
-			timeoutMeter.Mark(1)
-
-			// Return any non satisfied requests to the pool
-			if request.From > 0 {
-				taskQueue.Push(request.From, -int64(request.From))
-			}
-			for _, header := range request.Headers {
-				taskQueue.Push(header, -int64(header.Number.Uint64()))
-			}
-			// Add the peer to the expiry report along the number of failed requests
-			expiries[id] = len(request.Headers)
-
-			// Remove the expired requests from the pending pool directly
-			delete(pendPool, id)
-		}
-	}
-	return expiries
+// Note, this method expects the queue lock to be already held. The reason the
+// lock is not obtained in here is that the parameters already need to access
+// the queue, so they already need a lock anyway.
+func (q *queue) expire(peer string, pendPool map[string]*fetchRequest, taskQueue *prque.Prque) int {
+	// Retrieve the request being expired and log an error if it's non-existent,
+	// as there's no order of events that should lead to such expirations.
+	req := pendPool[peer]
+	if req == nil {
+		log.Error("Expired request does not exist", "peer", peer)
+		return 0
+	}
+	delete(pendPool, peer)
+
+	// Return any non-satisfied requests to the pool
+	if req.From > 0 {
+		taskQueue.Push(req.From, -int64(req.From))
+	}
+	for _, header := range req.Headers {
+		taskQueue.Push(header, -int64(header.Number.Uint64()))
+	}
+	return len(req.Headers)
 }

 // DeliverHeaders injects a header retrieval response into the header results
@ -684,7 +654,7 @@ func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest,
 // if they do not map correctly to the skeleton.
 //
 // If the headers are accepted, the method makes an attempt to deliver the set
-// of ready headers to the processor to keep the pipeline full. However it will
+// of ready headers to the processor to keep the pipeline full. However, it will
 // not block to prevent stalling other pending deliveries.
 func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh chan []*types.Header) (int, error) {
 	q.lock.Lock()
@ -700,11 +670,14 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh
 	// Short circuit if the data was never requested
 	request := q.headerPendPool[id]
 	if request == nil {
+		headerDropMeter.Mark(int64(len(headers)))
 		return 0, errNoFetchesPending
 	}
-	headerReqTimer.UpdateSince(request.Time)
 	delete(q.headerPendPool, id)

+	headerReqTimer.UpdateSince(request.Time)
+	headerInMeter.Mark(int64(len(headers)))
+
 	// Ensure headers can be mapped onto the skeleton chain
 	target := q.headerTaskPool[request.From].Hash()

@ -739,6 +712,7 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh
 	// If the batch of headers wasn't accepted, mark as unavailable
 	if !accepted {
 		logger.Trace("Skeleton filling not accepted", "from", request.From)
+		headerDropMeter.Mark(int64(len(headers)))

 		miss := q.headerPeerMiss[id]
 		if miss == nil {
@ -783,6 +757,7 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh
 func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLists [][]*types.Header) (int, error) {
 	q.lock.Lock()
 	defer q.lock.Unlock()
+
 	trieHasher := trie.NewStackTrie(nil)
 	validate := func(index int, header *types.Header) error {
 		if types.DeriveSha(types.Transactions(txLists[index]), trieHasher) != header.TxHash {
@ -800,7 +775,7 @@ func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLi
 		result.SetBodyDone()
 	}
 	return q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool,
-		bodyReqTimer, len(txLists), validate, reconstruct)
+		bodyReqTimer, bodyInMeter, bodyDropMeter, len(txLists), validate, reconstruct)
 }

 // DeliverReceipts injects a receipt retrieval response into the results queue.
@ -809,6 +784,7 @@ func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLi
 func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) (int, error) {
 	q.lock.Lock()
 	defer q.lock.Unlock()
+
 	trieHasher := trie.NewStackTrie(nil)
 	validate := func(index int, header *types.Header) error {
 		if types.DeriveSha(types.Receipts(receiptList[index]), trieHasher) != header.ReceiptHash {
@ -821,7 +797,7 @@ func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) (int,
 		result.SetReceiptsDone()
 	}
 	return q.deliver(id, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool,
-		receiptReqTimer, len(receiptList), validate, reconstruct)
+		receiptReqTimer, receiptInMeter, receiptDropMeter, len(receiptList), validate, reconstruct)
 }

 // deliver injects a data retrieval response into the results queue.
@ -830,18 +806,22 @@ func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) (int,
 // reason this lock is not obtained in here is because the parameters already need
 // to access the queue, so they already need a lock anyway.
 func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header,
-	taskQueue *prque.Prque, pendPool map[string]*fetchRequest, reqTimer metrics.Timer,
+	taskQueue *prque.Prque, pendPool map[string]*fetchRequest,
+	reqTimer metrics.Timer, resInMeter metrics.Meter, resDropMeter metrics.Meter,
 	results int, validate func(index int, header *types.Header) error,
 	reconstruct func(index int, result *fetchResult)) (int, error) {

 	// Short circuit if the data was never requested
 	request := pendPool[id]
 	if request == nil {
+		resDropMeter.Mark(int64(results))
 		return 0, errNoFetchesPending
 	}
-	reqTimer.UpdateSince(request.Time)
 	delete(pendPool, id)

+	reqTimer.UpdateSince(request.Time)
+	resInMeter.Mark(int64(results))
+
 	// If no data items were retrieved, mark them as unavailable for the origin peer
 	if results == 0 {
 		for _, header := range request.Headers {
@ -883,6 +863,8 @@ func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header,
 		delete(taskPool, hashes[accepted])
 		accepted++
 	}
+	resDropMeter.Mark(int64(results - accepted))
+
 	// Return all failed or missing fetches to the queue
 	for _, header := range request.Headers[accepted:] {
 		taskQueue.Push(header, -int64(header.Number.Uint64()))
@ -104,7 +104,7 @@ func TestBasics(t *testing.T) {
|
|||||||
if !q.Idle() {
|
if !q.Idle() {
|
||||||
t.Errorf("new queue should be idle")
|
t.Errorf("new queue should be idle")
|
||||||
}
|
}
|
||||||
q.Prepare(1, FastSync)
|
q.Prepare(1, SnapSync)
|
||||||
if res := q.Results(false); len(res) != 0 {
|
if res := q.Results(false); len(res) != 0 {
|
||||||
t.Fatal("new queue should have 0 results")
|
t.Fatal("new queue should have 0 results")
|
||||||
}
|
}
|
||||||
@ -114,7 +114,7 @@ func TestBasics(t *testing.T) {
|
|||||||
if q.Idle() {
|
if q.Idle() {
|
||||||
t.Errorf("queue should not be idle")
|
t.Errorf("queue should not be idle")
|
||||||
}
|
}
|
||||||
if got, exp := q.PendingBlocks(), chain.Len(); got != exp {
|
if got, exp := q.PendingBodies(), chain.Len(); got != exp {
|
||||||
t.Errorf("wrong pending block count, got %d, exp %d", got, exp)
|
t.Errorf("wrong pending block count, got %d, exp %d", got, exp)
|
||||||
}
|
}
|
||||||
// Only non-empty receipts get added to task-queue
|
// Only non-empty receipts get added to task-queue
|
||||||
@ -197,13 +197,13 @@ func TestEmptyBlocks(t *testing.T) {
|
|||||||
|
|
||||||
q := newQueue(10, 10)
|
q := newQueue(10, 10)
|
||||||
|
|
||||||
q.Prepare(1, FastSync)
|
q.Prepare(1, SnapSync)
|
||||||
// Schedule a batch of headers
|
// Schedule a batch of headers
|
||||||
q.Schedule(emptyChain.headers(), 1)
|
q.Schedule(emptyChain.headers(), 1)
|
||||||
if q.Idle() {
|
if q.Idle() {
|
||||||
t.Errorf("queue should not be idle")
|
t.Errorf("queue should not be idle")
|
||||||
}
|
}
|
||||||
if got, exp := q.PendingBlocks(), len(emptyChain.blocks); got != exp {
|
if got, exp := q.PendingBodies(), len(emptyChain.blocks); got != exp {
|
||||||
t.Errorf("wrong pending block count, got %d, exp %d", got, exp)
|
t.Errorf("wrong pending block count, got %d, exp %d", got, exp)
|
||||||
}
|
}
|
||||||
if got, exp := q.PendingReceipts(), 0; got != exp {
|
if got, exp := q.PendingReceipts(), 0; got != exp {
|
||||||
@ -272,7 +272,7 @@ func XTestDelivery(t *testing.T) {
|
|||||||
}
|
}
|
||||||
q := newQueue(10, 10)
|
q := newQueue(10, 10)
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
q.Prepare(1, FastSync)
|
q.Prepare(1, SnapSync)
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
go func() {
|
go func() {
|
||||||
// deliver headers
|
// deliver headers
|
||||||
eth/downloader/statesync.go
@@ -17,48 +17,12 @@
package downloader

import (
- "fmt"
"sync"
- "time"

"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/state"
- "github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
- "github.com/ethereum/go-ethereum/trie"
- "golang.org/x/crypto/sha3"
)

- // stateReq represents a batch of state fetch requests grouped together into
- // a single data retrieval network packet.
- type stateReq struct {
- nItems uint16 // Number of items requested for download (max is 384, so uint16 is sufficient)
- trieTasks map[common.Hash]*trieTask // Trie node download tasks to track previous attempts
- codeTasks map[common.Hash]*codeTask // Byte code download tasks to track previous attempts
- timeout time.Duration // Maximum round trip time for this to complete
- timer *time.Timer // Timer to fire when the RTT timeout expires
- peer *peerConnection // Peer that we're requesting from
- delivered time.Time // Time when the packet was delivered (independent when we process it)
- response [][]byte // Response data of the peer (nil for timeouts)
- dropped bool // Flag whether the peer dropped off early
- }
-
- // timedOut returns if this request timed out.
- func (req *stateReq) timedOut() bool {
- return req.response == nil
- }
-
- // stateSyncStats is a collection of progress stats to report during a state trie
- // sync to RPC requests as well as to display in user logs.
- type stateSyncStats struct {
- processed uint64 // Number of state entries processed
- duplicate uint64 // Number of state entries downloaded twice
- unexpected uint64 // Number of non-requested state entries received
- pending uint64 // Number of still pending state entries
- }
-
// syncState starts downloading state with the given root hash.
func (d *Downloader) syncState(root common.Hash) *stateSync {
// Create the state sync
@@ -85,8 +49,6 @@ func (d *Downloader) stateFetcher() {
for next := s; next != nil; {
next = d.runStateSync(next)
}
- case <-d.stateCh:
- // Ignore state responses while no sync is running.
case <-d.quitCh:
return
}
@@ -96,162 +58,19 @@ func (d *Downloader) stateFetcher() {
// runStateSync runs a state synchronisation until it completes or another root
// hash is requested to be switched over to.
func (d *Downloader) runStateSync(s *stateSync) *stateSync {
- var (
- active = make(map[string]*stateReq) // Currently in-flight requests
- finished []*stateReq // Completed or failed requests
- timeout = make(chan *stateReq) // Timed out active requests
- )
log.Trace("State sync starting", "root", s.root)

- defer func() {
- // Cancel active request timers on exit. Also set peers to idle so they're
- // available for the next sync.
- for _, req := range active {
- req.timer.Stop()
- req.peer.SetNodeDataIdle(int(req.nItems), time.Now())
- }
- }()
go s.run()
defer s.Cancel()

- // Listen for peer departure events to cancel assigned tasks
- peerDrop := make(chan *peerConnection, 1024)
- peerSub := s.d.peers.SubscribePeerDrops(peerDrop)
- defer peerSub.Unsubscribe()
-
for {
- // Enable sending of the first buffered element if there is one.
- var (
- deliverReq *stateReq
- deliverReqCh chan *stateReq
- )
- if len(finished) > 0 {
- deliverReq = finished[0]
- deliverReqCh = s.deliver
- }
-
select {
- // The stateSync lifecycle:
case next := <-d.stateSyncStart:
- d.spindownStateSync(active, finished, timeout, peerDrop)
return next

case <-s.done:
- d.spindownStateSync(active, finished, timeout, peerDrop)
return nil

- // Send the next finished request to the current sync:
- case deliverReqCh <- deliverReq:
- // Shift out the first request, but also set the emptied slot to nil for GC
- copy(finished, finished[1:])
- finished[len(finished)-1] = nil
- finished = finished[:len(finished)-1]
-
- // Handle incoming state packs:
- case pack := <-d.stateCh:
- // Discard any data not requested (or previously timed out)
- req := active[pack.PeerId()]
- if req == nil {
- log.Debug("Unrequested node data", "peer", pack.PeerId(), "len", pack.Items())
- continue
}
- // Finalize the request and queue up for processing
- req.timer.Stop()
- req.response = pack.(*statePack).states
- req.delivered = time.Now()
-
- finished = append(finished, req)
- delete(active, pack.PeerId())
-
- // Handle dropped peer connections:
- case p := <-peerDrop:
- // Skip if no request is currently pending
- req := active[p.id]
- if req == nil {
- continue
- }
- // Finalize the request and queue up for processing
- req.timer.Stop()
- req.dropped = true
- req.delivered = time.Now()
-
- finished = append(finished, req)
- delete(active, p.id)
-
- // Handle timed-out requests:
- case req := <-timeout:
- // If the peer is already requesting something else, ignore the stale timeout.
- // This can happen when the timeout and the delivery happens simultaneously,
- // causing both pathways to trigger.
- if active[req.peer.id] != req {
- continue
- }
- req.delivered = time.Now()
- // Move the timed out data back into the download queue
- finished = append(finished, req)
- delete(active, req.peer.id)
-
- // Track outgoing state requests:
- case req := <-d.trackStateReq:
- // If an active request already exists for this peer, we have a problem. In
- // theory the trie node schedule must never assign two requests to the same
- // peer. In practice however, a peer might receive a request, disconnect and
- // immediately reconnect before the previous times out. In this case the first
- // request is never honored, alas we must not silently overwrite it, as that
- // causes valid requests to go missing and sync to get stuck.
- if old := active[req.peer.id]; old != nil {
- log.Warn("Busy peer assigned new state fetch", "peer", old.peer.id)
- // Move the previous request to the finished set
- old.timer.Stop()
- old.dropped = true
- old.delivered = time.Now()
- finished = append(finished, old)
- }
- // Start a timer to notify the sync loop if the peer stalled.
- req.timer = time.AfterFunc(req.timeout, func() {
- timeout <- req
- })
- active[req.peer.id] = req
- }
- }
- }
-
- // spindownStateSync 'drains' the outstanding requests; some will be delivered and other
- // will time out. This is to ensure that when the next stateSync starts working, all peers
- // are marked as idle and de facto _are_ idle.
- func (d *Downloader) spindownStateSync(active map[string]*stateReq, finished []*stateReq, timeout chan *stateReq, peerDrop chan *peerConnection) {
- log.Trace("State sync spinning down", "active", len(active), "finished", len(finished))
- for len(active) > 0 {
- var (
- req *stateReq
- reason string
- )
- select {
- // Handle (drop) incoming state packs:
- case pack := <-d.stateCh:
- req = active[pack.PeerId()]
- reason = "delivered"
- // Handle dropped peer connections:
- case p := <-peerDrop:
- req = active[p.id]
- reason = "peerdrop"
- // Handle timed-out requests:
- case req = <-timeout:
- reason = "timeout"
- }
- if req == nil {
- continue
- }
- req.peer.log.Trace("State peer marked idle (spindown)", "req.items", int(req.nItems), "reason", reason)
- req.timer.Stop()
- delete(active, req.peer.id)
- req.peer.SetNodeDataIdle(int(req.nItems), time.Now())
- }
- // The 'finished' set contains deliveries that we were going to pass to processing.
- // Those are now moot, but we still need to set those peers as idle, which would
- // otherwise have been done after processing
- for _, req := range finished {
- req.peer.SetNodeDataIdle(int(req.nItems), time.Now())
}
}

@@ -259,50 +78,21 @@ func (d *Downloader) spindownStateSync(active map[string]*stateReq, finished []*
// by a given state root.
type stateSync struct {
d *Downloader // Downloader instance to access and manage current peerset

root common.Hash // State root currently being synced
- sched *trie.Sync // State trie sync scheduler defining the tasks
- keccak crypto.KeccakState // Keccak256 hasher to verify deliveries with
-
- trieTasks map[common.Hash]*trieTask // Set of trie node tasks currently queued for retrieval
- codeTasks map[common.Hash]*codeTask // Set of byte code tasks currently queued for retrieval
-
- numUncommitted int
- bytesUncommitted int
-
started chan struct{} // Started is signalled once the sync loop starts

- deliver chan *stateReq // Delivery channel multiplexing peer responses
cancel chan struct{} // Channel to signal a termination request
cancelOnce sync.Once // Ensures cancel only ever gets called once
done chan struct{} // Channel to signal termination completion
err error // Any error hit during sync (set before completion)
}

- // trieTask represents a single trie node download task, containing a set of
- // peers already attempted retrieval from to detect stalled syncs and abort.
- type trieTask struct {
- path [][]byte
- attempts map[string]struct{}
- }
-
- // codeTask represents a single byte code download task, containing a set of
- // peers already attempted retrieval from to detect stalled syncs and abort.
- type codeTask struct {
- attempts map[string]struct{}
- }
-
// newStateSync creates a new state trie download scheduler. This method does not
// yet start the sync. The user needs to call run to initiate.
func newStateSync(d *Downloader, root common.Hash) *stateSync {
return &stateSync{
d: d,
root: root,
- sched: state.NewStateSync(root, d.stateDB, d.stateBloom, nil),
- keccak: sha3.NewLegacyKeccak256().(crypto.KeccakState),
- trieTasks: make(map[common.Hash]*trieTask),
- codeTasks: make(map[common.Hash]*codeTask),
- deliver: make(chan *stateReq),
cancel: make(chan struct{}),
done: make(chan struct{}),
started: make(chan struct{}),
@@ -314,11 +104,7 @@ func newStateSync(d *Downloader, root common.Hash) *stateSync {
// finish.
func (s *stateSync) run() {
close(s.started)
- if s.d.snapSync {
s.err = s.d.SnapSyncer.Sync(s.root, s.cancel)
- } else {
- s.err = s.loop()
- }
close(s.done)
}

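With the legacy loop gone, run simply signals start, delegates the whole download to the snap syncer, records the error and signals completion. A small, self-contained sketch of that started/done/err lifecycle with a once-guarded cancel, using placeholder types rather than the real downloader API:

package main

import (
	"errors"
	"fmt"
	"sync"
)

// syncer is a placeholder for the component that does the actual work
// (the snap syncer in the real code); it only has to honour the cancel channel.
type syncer func(cancel <-chan struct{}) error

type stateSyncLite struct {
	started    chan struct{} // closed once the sync loop starts
	cancel     chan struct{} // closed to request termination
	cancelOnce sync.Once     // ensures cancel is only closed once
	done       chan struct{} // closed when the sync has terminated
	err        error         // error recorded before done is closed
}

func newStateSyncLite() *stateSyncLite {
	return &stateSyncLite{
		started: make(chan struct{}),
		cancel:  make(chan struct{}),
		done:    make(chan struct{}),
	}
}

func (s *stateSyncLite) run(do syncer) {
	close(s.started)
	s.err = do(s.cancel) // everything is delegated to the syncer
	close(s.done)
}

func (s *stateSyncLite) Cancel() error {
	s.cancelOnce.Do(func() { close(s.cancel) })
	<-s.done
	return s.err
}

func main() {
	s := newStateSyncLite()
	go s.run(func(cancel <-chan struct{}) error {
		<-cancel // pretend to work until cancelled
		return errors.New("cancelled")
	})
	fmt.Println(s.Cancel())
}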
@@ -335,281 +121,3 @@ func (s *stateSync) Cancel() error {
})
return s.Wait()
}
-
- // loop is the main event loop of a state trie sync. It it responsible for the
- // assignment of new tasks to peers (including sending it to them) as well as
- // for the processing of inbound data. Note, that the loop does not directly
- // receive data from peers, rather those are buffered up in the downloader and
- // pushed here async. The reason is to decouple processing from data receipt
- // and timeouts.
- func (s *stateSync) loop() (err error) {
- // Listen for new peer events to assign tasks to them
- newPeer := make(chan *peerConnection, 1024)
- peerSub := s.d.peers.SubscribeNewPeers(newPeer)
- defer peerSub.Unsubscribe()
- defer func() {
- cerr := s.commit(true)
- if err == nil {
- err = cerr
- }
- }()
-
- // Keep assigning new tasks until the sync completes or aborts
- for s.sched.Pending() > 0 {
- if err = s.commit(false); err != nil {
- return err
- }
- s.assignTasks()
- // Tasks assigned, wait for something to happen
- select {
- case <-newPeer:
- // New peer arrived, try to assign it download tasks
-
- case <-s.cancel:
- return errCancelStateFetch
-
- case <-s.d.cancelCh:
- return errCanceled
-
- case req := <-s.deliver:
- // Response, disconnect or timeout triggered, drop the peer if stalling
- log.Trace("Received node data response", "peer", req.peer.id, "count", len(req.response), "dropped", req.dropped, "timeout", !req.dropped && req.timedOut())
- if req.nItems <= 2 && !req.dropped && req.timedOut() {
- // 2 items are the minimum requested, if even that times out, we've no use of
- // this peer at the moment.
- log.Warn("Stalling state sync, dropping peer", "peer", req.peer.id)
- if s.d.dropPeer == nil {
- // The dropPeer method is nil when `--copydb` is used for a local copy.
- // Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
- req.peer.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", req.peer.id)
- } else {
- s.d.dropPeer(req.peer.id)
-
- // If this peer was the master peer, abort sync immediately
- s.d.cancelLock.RLock()
- master := req.peer.id == s.d.cancelPeer
- s.d.cancelLock.RUnlock()
-
- if master {
- s.d.cancel()
- return errTimeout
- }
- }
- }
- // Process all the received blobs and check for stale delivery
- delivered, err := s.process(req)
- req.peer.SetNodeDataIdle(delivered, req.delivered)
- if err != nil {
- log.Warn("Node data write error", "err", err)
- return err
- }
- }
- }
- return nil
- }
-
- func (s *stateSync) commit(force bool) error {
- if !force && s.bytesUncommitted < ethdb.IdealBatchSize {
- return nil
- }
- start := time.Now()
- b := s.d.stateDB.NewBatch()
- if err := s.sched.Commit(b); err != nil {
- return err
- }
- if err := b.Write(); err != nil {
- return fmt.Errorf("DB write error: %v", err)
- }
- s.updateStats(s.numUncommitted, 0, 0, time.Since(start))
- s.numUncommitted = 0
- s.bytesUncommitted = 0
- return nil
- }
-
- // assignTasks attempts to assign new tasks to all idle peers, either from the
- // batch currently being retried, or fetching new data from the trie sync itself.
- func (s *stateSync) assignTasks() {
- // Iterate over all idle peers and try to assign them state fetches
- peers, _ := s.d.peers.NodeDataIdlePeers()
- for _, p := range peers {
- // Assign a batch of fetches proportional to the estimated latency/bandwidth
- cap := p.NodeDataCapacity(s.d.peers.rates.TargetRoundTrip())
- req := &stateReq{peer: p, timeout: s.d.peers.rates.TargetTimeout()}
-
- nodes, _, codes := s.fillTasks(cap, req)
-
- // If the peer was assigned tasks to fetch, send the network request
- if len(nodes)+len(codes) > 0 {
- req.peer.log.Trace("Requesting batch of state data", "nodes", len(nodes), "codes", len(codes), "root", s.root)
- select {
- case s.d.trackStateReq <- req:
- req.peer.FetchNodeData(append(nodes, codes...)) // Unified retrieval under eth/6x
- case <-s.cancel:
- case <-s.d.cancelCh:
- }
- }
- }
- }
-
- // fillTasks fills the given request object with a maximum of n state download
- // tasks to send to the remote peer.
- func (s *stateSync) fillTasks(n int, req *stateReq) (nodes []common.Hash, paths []trie.SyncPath, codes []common.Hash) {
- // Refill available tasks from the scheduler.
- if fill := n - (len(s.trieTasks) + len(s.codeTasks)); fill > 0 {
- nodes, paths, codes := s.sched.Missing(fill)
- for i, hash := range nodes {
- s.trieTasks[hash] = &trieTask{
- path: paths[i],
- attempts: make(map[string]struct{}),
- }
- }
- for _, hash := range codes {
- s.codeTasks[hash] = &codeTask{
- attempts: make(map[string]struct{}),
- }
- }
- }
- // Find tasks that haven't been tried with the request's peer. Prefer code
- // over trie nodes as those can be written to disk and forgotten about.
- nodes = make([]common.Hash, 0, n)
- paths = make([]trie.SyncPath, 0, n)
- codes = make([]common.Hash, 0, n)
-
- req.trieTasks = make(map[common.Hash]*trieTask, n)
- req.codeTasks = make(map[common.Hash]*codeTask, n)
-
- for hash, t := range s.codeTasks {
- // Stop when we've gathered enough requests
- if len(nodes)+len(codes) == n {
- break
- }
- // Skip any requests we've already tried from this peer
- if _, ok := t.attempts[req.peer.id]; ok {
- continue
- }
- // Assign the request to this peer
- t.attempts[req.peer.id] = struct{}{}
- codes = append(codes, hash)
- req.codeTasks[hash] = t
- delete(s.codeTasks, hash)
- }
- for hash, t := range s.trieTasks {
- // Stop when we've gathered enough requests
- if len(nodes)+len(codes) == n {
- break
- }
- // Skip any requests we've already tried from this peer
- if _, ok := t.attempts[req.peer.id]; ok {
- continue
- }
- // Assign the request to this peer
- t.attempts[req.peer.id] = struct{}{}
-
- nodes = append(nodes, hash)
- paths = append(paths, t.path)
-
- req.trieTasks[hash] = t
- delete(s.trieTasks, hash)
- }
- req.nItems = uint16(len(nodes) + len(codes))
- return nodes, paths, codes
- }
-
- // process iterates over a batch of delivered state data, injecting each item
- // into a running state sync, re-queuing any items that were requested but not
- // delivered. Returns whether the peer actually managed to deliver anything of
- // value, and any error that occurred.
- func (s *stateSync) process(req *stateReq) (int, error) {
- // Collect processing stats and update progress if valid data was received
- duplicate, unexpected, successful := 0, 0, 0
-
- defer func(start time.Time) {
- if duplicate > 0 || unexpected > 0 {
- s.updateStats(0, duplicate, unexpected, time.Since(start))
- }
- }(time.Now())
-
- // Iterate over all the delivered data and inject one-by-one into the trie
- for _, blob := range req.response {
- hash, err := s.processNodeData(blob)
- switch err {
- case nil:
- s.numUncommitted++
- s.bytesUncommitted += len(blob)
- successful++
- case trie.ErrNotRequested:
- unexpected++
- case trie.ErrAlreadyProcessed:
- duplicate++
- default:
- return successful, fmt.Errorf("invalid state node %s: %v", hash.TerminalString(), err)
- }
- // Delete from both queues (one delivery is enough for the syncer)
- delete(req.trieTasks, hash)
- delete(req.codeTasks, hash)
- }
- // Put unfulfilled tasks back into the retry queue
- npeers := s.d.peers.Len()
- for hash, task := range req.trieTasks {
- // If the node did deliver something, missing items may be due to a protocol
- // limit or a previous timeout + delayed delivery. Both cases should permit
- // the node to retry the missing items (to avoid single-peer stalls).
- if len(req.response) > 0 || req.timedOut() {
- delete(task.attempts, req.peer.id)
- }
- // If we've requested the node too many times already, it may be a malicious
- // sync where nobody has the right data. Abort.
- if len(task.attempts) >= npeers {
- return successful, fmt.Errorf("trie node %s failed with all peers (%d tries, %d peers)", hash.TerminalString(), len(task.attempts), npeers)
- }
- // Missing item, place into the retry queue.
- s.trieTasks[hash] = task
- }
- for hash, task := range req.codeTasks {
- // If the node did deliver something, missing items may be due to a protocol
- // limit or a previous timeout + delayed delivery. Both cases should permit
- // the node to retry the missing items (to avoid single-peer stalls).
- if len(req.response) > 0 || req.timedOut() {
- delete(task.attempts, req.peer.id)
- }
- // If we've requested the node too many times already, it may be a malicious
- // sync where nobody has the right data. Abort.
- if len(task.attempts) >= npeers {
- return successful, fmt.Errorf("byte code %s failed with all peers (%d tries, %d peers)", hash.TerminalString(), len(task.attempts), npeers)
- }
- // Missing item, place into the retry queue.
- s.codeTasks[hash] = task
- }
- return successful, nil
- }
-
- // processNodeData tries to inject a trie node data blob delivered from a remote
- // peer into the state trie, returning whether anything useful was written or any
- // error occurred.
- func (s *stateSync) processNodeData(blob []byte) (common.Hash, error) {
- res := trie.SyncResult{Data: blob}
- s.keccak.Reset()
- s.keccak.Write(blob)
- s.keccak.Read(res.Hash[:])
- err := s.sched.Process(res)
- return res.Hash, err
- }
-
- // updateStats bumps the various state sync progress counters and displays a log
- // message for the user to see.
- func (s *stateSync) updateStats(written, duplicate, unexpected int, duration time.Duration) {
- s.d.syncStatsLock.Lock()
- defer s.d.syncStatsLock.Unlock()
-
- s.d.syncStatsState.pending = uint64(s.sched.Pending())
- s.d.syncStatsState.processed += uint64(written)
- s.d.syncStatsState.duplicate += uint64(duplicate)
- s.d.syncStatsState.unexpected += uint64(unexpected)
-
- if written > 0 || duplicate > 0 || unexpected > 0 {
- log.Info("Imported new state entries", "count", written, "elapsed", common.PrettyDuration(duration), "processed", s.d.syncStatsState.processed, "pending", s.d.syncStatsState.pending, "trieretry", len(s.trieTasks), "coderetry", len(s.codeTasks), "duplicate", s.d.syncStatsState.duplicate, "unexpected", s.d.syncStatsState.unexpected)
- }
- if written > 0 {
- rawdb.WriteFastTrieProgress(s.d.stateDB, s.d.syncStatsState.processed)
- }
- }
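The scheduling that used to live here (assignTasks/fillTasks/process) disappears because peers are now asked directly and each reply is routed back to its requester by request ID. The following is only an illustrative, self-contained sketch of such a request-ID dispatcher, with made-up types rather than the real eth/downloader API, to show the shape of the pattern:

package main

import (
	"errors"
	"fmt"
	"sync"
)

// response is a stand-in for a protocol reply carrying the ID of the request
// it answers.
type response struct {
	id   uint64
	data string
}

// dispatcher routes each incoming response to the channel that was registered
// when the matching request was sent.
type dispatcher struct {
	mu      sync.Mutex
	nextID  uint64
	pending map[uint64]chan response
}

func newDispatcher() *dispatcher {
	return &dispatcher{pending: make(map[uint64]chan response)}
}

// send registers a sink for the reply and returns the request ID that the
// (hypothetical) wire message would carry.
func (d *dispatcher) send(sink chan response) uint64 {
	d.mu.Lock()
	defer d.mu.Unlock()
	d.nextID++
	d.pending[d.nextID] = sink
	return d.nextID
}

// deliver hands a reply to whoever is waiting for it; unsolicited replies are
// rejected instead of being silently dropped.
func (d *dispatcher) deliver(res response) error {
	d.mu.Lock()
	sink, ok := d.pending[res.id]
	delete(d.pending, res.id)
	d.mu.Unlock()
	if !ok {
		return errors.New("unsolicited response")
	}
	sink <- res
	return nil
}

func main() {
	d := newDispatcher()
	sink := make(chan response, 1)
	id := d.send(sink)
	_ = d.deliver(response{id: id, data: "block headers"})
	fmt.Println((<-sink).data)
}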
eth/downloader/testchain_test.go
@@ -20,12 +20,14 @@ import (
"fmt"
"math/big"
"sync"
+ "time"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
)
@@ -39,73 +41,110 @@ var (
)

// The common prefix of all test chains:
- var testChainBase = newTestChain(blockCacheMaxItems+200, testGenesis)
+ var testChainBase *testChain

// Different forks on top of the base chain:
var testChainForkLightA, testChainForkLightB, testChainForkHeavy *testChain

+ var pregenerated bool

func init() {
+ // Reduce some of the parameters to make the tester faster
+ fullMaxForkAncestry = 10000
+ lightMaxForkAncestry = 10000
+ blockCacheMaxItems = 1024
+ fsHeaderSafetyNet = 256
+ fsHeaderContCheck = 500 * time.Millisecond
+
+ testChainBase = newTestChain(blockCacheMaxItems+200, testGenesis)

var forkLen = int(fullMaxForkAncestry + 50)
var wg sync.WaitGroup

+ // Generate the test chains to seed the peers with
wg.Add(3)
go func() { testChainForkLightA = testChainBase.makeFork(forkLen, false, 1); wg.Done() }()
go func() { testChainForkLightB = testChainBase.makeFork(forkLen, false, 2); wg.Done() }()
go func() { testChainForkHeavy = testChainBase.makeFork(forkLen, true, 3); wg.Done() }()
wg.Wait()

+ // Generate the test peers used by the tests to avoid overloading during testing.
+ // These seemingly random chains are used in various downloader tests. We're just
+ // pre-generating them here.
+ chains := []*testChain{
+ testChainBase,
+ testChainForkLightA,
+ testChainForkLightB,
+ testChainForkHeavy,
+ testChainBase.shorten(1),
+ testChainBase.shorten(blockCacheMaxItems - 15),
+ testChainBase.shorten((blockCacheMaxItems - 15) / 2),
+ testChainBase.shorten(blockCacheMaxItems - 15 - 5),
+ testChainBase.shorten(MaxHeaderFetch),
+ testChainBase.shorten(800),
+ testChainBase.shorten(800 / 2),
+ testChainBase.shorten(800 / 3),
+ testChainBase.shorten(800 / 4),
+ testChainBase.shorten(800 / 5),
+ testChainBase.shorten(800 / 6),
+ testChainBase.shorten(800 / 7),
+ testChainBase.shorten(800 / 8),
+ testChainBase.shorten(3*fsHeaderSafetyNet + 256 + fsMinFullBlocks),
+ testChainBase.shorten(fsMinFullBlocks + 256 - 1),
+ testChainForkLightA.shorten(len(testChainBase.blocks) + 80),
+ testChainForkLightB.shorten(len(testChainBase.blocks) + 81),
+ testChainForkLightA.shorten(len(testChainBase.blocks) + MaxHeaderFetch),
+ testChainForkLightB.shorten(len(testChainBase.blocks) + MaxHeaderFetch),
+ testChainForkHeavy.shorten(len(testChainBase.blocks) + 79),
+ }
+ wg.Add(len(chains))
+ for _, chain := range chains {
+ go func(blocks []*types.Block) {
+ newTestBlockchain(blocks)
+ wg.Done()
+ }(chain.blocks[1:])
+ }
+ wg.Wait()
+
+ // Mark the chains pregenerated. Generating a new one will lead to a panic.
+ pregenerated = true
}

type testChain struct {
- genesis *types.Block
+ blocks []*types.Block
- chain []common.Hash
- headerm map[common.Hash]*types.Header
- blockm map[common.Hash]*types.Block
- receiptm map[common.Hash][]*types.Receipt
- tdm map[common.Hash]*big.Int
}

// newTestChain creates a blockchain of the given length.
func newTestChain(length int, genesis *types.Block) *testChain {
- tc := new(testChain).copy(length)
+ tc := &testChain{
- tc.genesis = genesis
+ blocks: []*types.Block{genesis},
- tc.chain = append(tc.chain, genesis.Hash())
+ }
- tc.headerm[tc.genesis.Hash()] = tc.genesis.Header()
- tc.tdm[tc.genesis.Hash()] = tc.genesis.Difficulty()
- tc.blockm[tc.genesis.Hash()] = tc.genesis
tc.generate(length-1, 0, genesis, false)
return tc
}

// makeFork creates a fork on top of the test chain.
func (tc *testChain) makeFork(length int, heavy bool, seed byte) *testChain {
- fork := tc.copy(tc.len() + length)
+ fork := tc.copy(len(tc.blocks) + length)
- fork.generate(length, seed, tc.headBlock(), heavy)
+ fork.generate(length, seed, tc.blocks[len(tc.blocks)-1], heavy)
return fork
}

// shorten creates a copy of the chain with the given length. It panics if the
// length is longer than the number of available blocks.
func (tc *testChain) shorten(length int) *testChain {
- if length > tc.len() {
+ if length > len(tc.blocks) {
- panic(fmt.Errorf("can't shorten test chain to %d blocks, it's only %d blocks long", length, tc.len()))
+ panic(fmt.Errorf("can't shorten test chain to %d blocks, it's only %d blocks long", length, len(tc.blocks)))
}
return tc.copy(length)
}

func (tc *testChain) copy(newlen int) *testChain {
- cpy := &testChain{
+ if newlen > len(tc.blocks) {
- genesis: tc.genesis,
+ newlen = len(tc.blocks)
- headerm: make(map[common.Hash]*types.Header, newlen),
- blockm: make(map[common.Hash]*types.Block, newlen),
- receiptm: make(map[common.Hash][]*types.Receipt, newlen),
- tdm: make(map[common.Hash]*big.Int, newlen),
}
- for i := 0; i < len(tc.chain) && i < newlen; i++ {
+ cpy := &testChain{
- hash := tc.chain[i]
+ blocks: append([]*types.Block{}, tc.blocks[:newlen]...),
- cpy.chain = append(cpy.chain, tc.chain[i])
- cpy.tdm[hash] = tc.tdm[hash]
- cpy.blockm[hash] = tc.blockm[hash]
- cpy.headerm[hash] = tc.headerm[hash]
- cpy.receiptm[hash] = tc.receiptm[hash]
}
return cpy
}
@@ -115,17 +154,14 @@ func (tc *testChain) copy(newlen int) *testChain {
// contains a transaction and every 5th an uncle to allow testing correct block
// reassembly.
func (tc *testChain) generate(n int, seed byte, parent *types.Block, heavy bool) {
- // start := time.Now()
+ blocks, _ := core.GenerateChain(params.TestChainConfig, parent, ethash.NewFaker(), testDB, n, func(i int, block *core.BlockGen) {
- // defer func() { fmt.Printf("test chain generated in %v\n", time.Since(start)) }()
-
- blocks, receipts := core.GenerateChain(params.TestChainConfig, parent, ethash.NewFaker(), testDB, n, func(i int, block *core.BlockGen) {
block.SetCoinbase(common.Address{seed})
// If a heavy chain is requested, delay blocks to raise difficulty
if heavy {
- block.OffsetTime(-1)
+ block.OffsetTime(-9)
}
// Include transactions to the miner to make blocks more interesting.
- if parent == tc.genesis && i%22 == 0 {
+ if parent == tc.blocks[0] && i%22 == 0 {
signer := types.MakeSigner(params.TestChainConfig, block.Number())
tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, block.BaseFee(), nil), signer, testKey)
if err != nil {
@@ -136,95 +172,56 @@ func (tc *testChain) generate(n int, seed byte, parent *types.Block, heavy bool)
// if the block number is a multiple of 5, add a bonus uncle to the block
if i > 0 && i%5 == 0 {
block.AddUncle(&types.Header{
- ParentHash: block.PrevBlock(i - 1).Hash(),
+ ParentHash: block.PrevBlock(i - 2).Hash(),
Number: big.NewInt(block.Number().Int64() - 1),
})
}
})
+ tc.blocks = append(tc.blocks, blocks...)
- // Convert the block-chain into a hash-chain and header/block maps
- td := new(big.Int).Set(tc.td(parent.Hash()))
- for i, b := range blocks {
- td := td.Add(td, b.Difficulty())
- hash := b.Hash()
- tc.chain = append(tc.chain, hash)
- tc.blockm[hash] = b
- tc.headerm[hash] = b.Header()
- tc.receiptm[hash] = receipts[i]
- tc.tdm[hash] = new(big.Int).Set(td)
- }
}

- // len returns the total number of blocks in the chain.
+ var (
- func (tc *testChain) len() int {
+ testBlockchains = make(map[common.Hash]*testBlockchain)
- return len(tc.chain)
+ testBlockchainsLock sync.Mutex
+ )

+ type testBlockchain struct {
+ chain *core.BlockChain
+ gen sync.Once
}

- // headBlock returns the head of the chain.
+ // newTestBlockchain creates a blockchain database built by running the given blocks,
- func (tc *testChain) headBlock() *types.Block {
+ // either actually running them, or reusing a previously created one. The returned
- return tc.blockm[tc.chain[len(tc.chain)-1]]
+ // chains are *shared*, so *do not* mutate them.
+ func newTestBlockchain(blocks []*types.Block) *core.BlockChain {
+ // Retrieve an existing database, or create a new one
+ head := testGenesis.Hash()
+ if len(blocks) > 0 {
+ head = blocks[len(blocks)-1].Hash()
}
+ testBlockchainsLock.Lock()
+ if _, ok := testBlockchains[head]; !ok {
+ testBlockchains[head] = new(testBlockchain)
+ }
+ tbc := testBlockchains[head]
+ testBlockchainsLock.Unlock()

- // td returns the total difficulty of the given block.
+ // Ensure that the database is generated
- func (tc *testChain) td(hash common.Hash) *big.Int {
+ tbc.gen.Do(func() {
- return tc.tdm[hash]
+ if pregenerated {
+ panic("Requested chain generation outside of init")
}
+ db := rawdb.NewMemoryDatabase()
+ core.GenesisBlockForTesting(db, testAddress, big.NewInt(1000000000000000))

- // headersByHash returns headers in order from the given hash.
+ chain, err := core.NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil)
- func (tc *testChain) headersByHash(origin common.Hash, amount int, skip int, reverse bool) []*types.Header {
+ if err != nil {
- num, _ := tc.hashToNumber(origin)
+ panic(err)
- return tc.headersByNumber(num, amount, skip, reverse)
}
+ if n, err := chain.InsertChain(blocks); err != nil {
- // headersByNumber returns headers from the given number.
+ panic(fmt.Sprintf("block %d: %v", n, err))
- func (tc *testChain) headersByNumber(origin uint64, amount int, skip int, reverse bool) []*types.Header {
- result := make([]*types.Header, 0, amount)
-
- if !reverse {
- for num := origin; num < uint64(len(tc.chain)) && len(result) < amount; num += uint64(skip) + 1 {
- if header, ok := tc.headerm[tc.chain[int(num)]]; ok {
- result = append(result, header)
}
- }
+ tbc.chain = chain
- } else {
+ })
- for num := int64(origin); num >= 0 && len(result) < amount; num -= int64(skip) + 1 {
+ return tbc.chain
- if header, ok := tc.headerm[tc.chain[int(num)]]; ok {
- result = append(result, header)
- }
- }
- }
- return result
- }
-
- // receipts returns the receipts of the given block hashes.
- func (tc *testChain) receipts(hashes []common.Hash) [][]*types.Receipt {
- results := make([][]*types.Receipt, 0, len(hashes))
- for _, hash := range hashes {
- if receipt, ok := tc.receiptm[hash]; ok {
- results = append(results, receipt)
- }
- }
- return results
- }
-
- // bodies returns the block bodies of the given block hashes.
- func (tc *testChain) bodies(hashes []common.Hash) ([][]*types.Transaction, [][]*types.Header) {
- transactions := make([][]*types.Transaction, 0, len(hashes))
- uncles := make([][]*types.Header, 0, len(hashes))
- for _, hash := range hashes {
- if block, ok := tc.blockm[hash]; ok {
- transactions = append(transactions, block.Transactions())
- uncles = append(uncles, block.Uncles())
- }
- }
- return transactions, uncles
- }
-
- func (tc *testChain) hashToNumber(target common.Hash) (uint64, bool) {
- for num, hash := range tc.chain {
- if hash == target {
- return uint64(num), true
- }
- }
- return 0, false
}
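The test harness above now pre-generates the chains once and shares one cached blockchain per head hash, guarded by a sync.Once so each database is only built a single time even when tests request it concurrently. A stripped-down, self-contained sketch of that caching pattern, with a generic payload standing in for the real *core.BlockChain:

package main

import (
	"fmt"
	"sync"
)

// entry holds one lazily built value plus the once guard that builds it.
type entry struct {
	build sync.Once
	value string
}

var (
	cache     = make(map[string]*entry)
	cacheLock sync.Mutex
)

// get returns the cached value for key, building it at most once even when
// called from many goroutines at the same time.
func get(key string, build func() string) string {
	cacheLock.Lock()
	e, ok := cache[key]
	if !ok {
		e = new(entry)
		cache[key] = e
	}
	cacheLock.Unlock()

	e.build.Do(func() { e.value = build() })
	return e.value
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			_ = get("head-hash", func() string { return "pregenerated chain" })
		}()
	}
	wg.Wait()
	fmt.Println(get("head-hash", func() string { return "never built twice" }))
}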
eth/downloader/types.go (deleted)
@@ -1,79 +0,0 @@
- // Copyright 2015 The go-ethereum Authors
- // This file is part of the go-ethereum library.
- //
- // The go-ethereum library is free software: you can redistribute it and/or modify
- // it under the terms of the GNU Lesser General Public License as published by
- // the Free Software Foundation, either version 3 of the License, or
- // (at your option) any later version.
- //
- // The go-ethereum library is distributed in the hope that it will be useful,
- // but WITHOUT ANY WARRANTY; without even the implied warranty of
- // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- // GNU Lesser General Public License for more details.
- //
- // You should have received a copy of the GNU Lesser General Public License
- // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
- package downloader
-
- import (
- "fmt"
-
- "github.com/ethereum/go-ethereum/core/types"
- )
-
- // peerDropFn is a callback type for dropping a peer detected as malicious.
- type peerDropFn func(id string)
-
- // dataPack is a data message returned by a peer for some query.
- type dataPack interface {
- PeerId() string
- Items() int
- Stats() string
- }
-
- // headerPack is a batch of block headers returned by a peer.
- type headerPack struct {
- peerID string
- headers []*types.Header
- }
-
- func (p *headerPack) PeerId() string { return p.peerID }
- func (p *headerPack) Items() int { return len(p.headers) }
- func (p *headerPack) Stats() string { return fmt.Sprintf("%d", len(p.headers)) }
-
- // bodyPack is a batch of block bodies returned by a peer.
- type bodyPack struct {
- peerID string
- transactions [][]*types.Transaction
- uncles [][]*types.Header
- }
-
- func (p *bodyPack) PeerId() string { return p.peerID }
- func (p *bodyPack) Items() int {
- if len(p.transactions) <= len(p.uncles) {
- return len(p.transactions)
- }
- return len(p.uncles)
- }
- func (p *bodyPack) Stats() string { return fmt.Sprintf("%d:%d", len(p.transactions), len(p.uncles)) }
-
- // receiptPack is a batch of receipts returned by a peer.
- type receiptPack struct {
- peerID string
- receipts [][]*types.Receipt
- }
-
- func (p *receiptPack) PeerId() string { return p.peerID }
- func (p *receiptPack) Items() int { return len(p.receipts) }
- func (p *receiptPack) Stats() string { return fmt.Sprintf("%d", len(p.receipts)) }
-
- // statePack is a batch of states returned by a peer.
- type statePack struct {
- peerID string
- states [][]byte
- }
-
- func (p *statePack) PeerId() string { return p.peerID }
- func (p *statePack) Items() int { return len(p.states) }
- func (p *statePack) Stats() string { return fmt.Sprintf("%d", len(p.states)) }
eth/fetcher/block_fetcher.go
@@ -26,6 +26,7 @@ import (
"github.com/ethereum/go-ethereum/common/prque"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/eth/protocols/eth"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/trie"
@@ -74,10 +75,10 @@ type HeaderRetrievalFn func(common.Hash) *types.Header
type blockRetrievalFn func(common.Hash) *types.Block

// headerRequesterFn is a callback type for sending a header retrieval request.
- type headerRequesterFn func(common.Hash) error
+ type headerRequesterFn func(common.Hash, chan *eth.Response) (*eth.Request, error)

// bodyRequesterFn is a callback type for sending a body retrieval request.
- type bodyRequesterFn func([]common.Hash) error
+ type bodyRequesterFn func([]common.Hash, chan *eth.Response) (*eth.Request, error)

// headerVerifierFn is a callback type to verify a block's header for fast propagation.
type headerVerifierFn func(header *types.Header) error
@@ -461,15 +462,28 @@ func (f *BlockFetcher) loop() {

// Create a closure of the fetch and schedule in on a new thread
fetchHeader, hashes := f.fetching[hashes[0]].fetchHeader, hashes
- go func() {
+ go func(peer string) {
if f.fetchingHook != nil {
f.fetchingHook(hashes)
}
for _, hash := range hashes {
headerFetchMeter.Mark(1)
- fetchHeader(hash) // Suboptimal, but protocol doesn't allow batch header retrievals
+ go func(hash common.Hash) {
+ resCh := make(chan *eth.Response)
+
+ req, err := fetchHeader(hash, resCh)
+ if err != nil {
+ return // Legacy code, yolo
}
- }()
+ defer req.Close()
+
+ res := <-resCh
+ res.Done <- nil
+
+ f.FilterHeaders(peer, *res.Res.(*eth.BlockHeadersPacket), time.Now().Add(res.Time))
+ }(hash)
+ }
+ }(peer)
}
// Schedule the next fetch if blocks are still pending
f.rescheduleFetch(fetchTimer)
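The fetch path above follows a request/reply pattern: the requester passes a sink channel, gets back a cancellable request handle, waits for the response on the sink, acknowledges it via Done, and then filters the payload. A self-contained sketch of that flow with placeholder Request/Response types (not the real eth/protocols/eth ones):

package main

import "fmt"

// Request and Response are placeholders that mirror the shape used above:
// a cancellable handle plus a reply that must be acknowledged via Done.
type Request struct{ Peer string }

func (r *Request) Close() {} // would cancel the in-flight request

type Response struct {
	Req  *Request
	Res  interface{}
	Done chan error
}

// fetch stands in for a requester callback: it "sends" the request and
// arranges for the reply to show up on the sink channel.
func fetch(hash string, sink chan *Response) (*Request, error) {
	req := &Request{Peer: "peer-1"}
	go func() {
		sink <- &Response{Req: req, Res: []string{"header for " + hash}, Done: make(chan error, 1)}
	}()
	return req, nil
}

func main() {
	resCh := make(chan *Response)
	req, err := fetch("0xabc", resCh)
	if err != nil {
		return
	}
	defer req.Close()

	res := <-resCh
	res.Done <- nil // acknowledge delivery so the sender can account for it

	fmt.Println(res.Res.([]string)[0])
}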
@@ -497,8 +511,24 @@ func (f *BlockFetcher) loop() {
if f.completingHook != nil {
f.completingHook(hashes)
}
+ fetchBodies := f.completing[hashes[0]].fetchBodies
bodyFetchMeter.Mark(int64(len(hashes)))
- go f.completing[hashes[0]].fetchBodies(hashes)
+ go func(peer string, hashes []common.Hash) {
+ resCh := make(chan *eth.Response)
+
+ req, err := fetchBodies(hashes, resCh)
+ if err != nil {
+ return // Legacy code, yolo
+ }
+ defer req.Close()
+
+ res := <-resCh
+ res.Done <- nil
+
+ txs, uncles := res.Res.(*eth.BlockBodiesPacket).Unpack()
+ f.FilterBodies(peer, txs, uncles, time.Now())
+ }(peer, hashes)
}
// Schedule the next fetch if blocks are still pending
f.rescheduleComplete(completeTimer)
eth/fetcher/block_fetcher_test.go
@@ -30,6 +30,7 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/eth/protocols/eth"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
)
@@ -60,8 +61,8 @@ func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common
block.AddTx(tx)
}
// If the block number is a multiple of 5, add a bonus uncle to the block
- if i%5 == 0 {
+ if i > 0 && i%5 == 0 {
- block.AddUncle(&types.Header{ParentHash: block.PrevBlock(i - 1).Hash(), Number: big.NewInt(int64(i - 1))})
+ block.AddUncle(&types.Header{ParentHash: block.PrevBlock(i - 2).Hash(), Number: big.NewInt(int64(i - 1))})
}
})
hashes := make([]common.Hash, n+1)
@@ -195,16 +196,26 @@ func (f *fetcherTester) makeHeaderFetcher(peer string, blocks map[common.Hash]*t
closure[hash] = block
}
// Create a function that return a header from the closure
- return func(hash common.Hash) error {
+ return func(hash common.Hash, sink chan *eth.Response) (*eth.Request, error) {
// Gather the blocks to return
headers := make([]*types.Header, 0, 1)
if block, ok := closure[hash]; ok {
headers = append(headers, block.Header())
}
// Return on a new thread
- go f.fetcher.FilterHeaders(peer, headers, time.Now().Add(drift))
+ req := &eth.Request{
+ Peer: peer,
- return nil
+ }
+ res := &eth.Response{
+ Req: req,
+ Res: (*eth.BlockHeadersPacket)(&headers),
+ Time: drift,
+ Done: make(chan error, 1), // Ignore the returned status
+ }
+ go func() {
+ sink <- res
+ }()
+ return req, nil
}
}

@@ -215,7 +226,7 @@ func (f *fetcherTester) makeBodyFetcher(peer string, blocks map[common.Hash]*typ
closure[hash] = block
}
// Create a function that returns blocks from the closure
- return func(hashes []common.Hash) error {
+ return func(hashes []common.Hash, sink chan *eth.Response) (*eth.Request, error) {
// Gather the block bodies to return
transactions := make([][]*types.Transaction, 0, len(hashes))
uncles := make([][]*types.Header, 0, len(hashes))
@@ -227,14 +238,33 @@ func (f *fetcherTester) makeBodyFetcher(peer string, blocks map[common.Hash]*typ
}
}
// Return on a new thread
- go f.fetcher.FilterBodies(peer, transactions, uncles, time.Now().Add(drift))
+ bodies := make([]*eth.BlockBody, len(transactions))
+ for i, txs := range transactions {
- return nil
+ bodies[i] = &eth.BlockBody{
+ Transactions: txs,
+ Uncles: uncles[i],
+ }
+ }
+ req := &eth.Request{
+ Peer: peer,
+ }
+ res := &eth.Response{
+ Req: req,
+ Res: (*eth.BlockBodiesPacket)(&bodies),
+ Time: drift,
+ Done: make(chan error, 1), // Ignore the returned status
+ }
+ go func() {
+ sink <- res
+ }()
+ return req, nil
}
}

// verifyFetchingEvent verifies that one single event arrive on a fetching channel.
func verifyFetchingEvent(t *testing.T, fetching chan []common.Hash, arrive bool) {
+ t.Helper()
+
if arrive {
select {
case <-fetching:
@@ -252,6 +282,8 @@ func verifyFetchingEvent(t *testing.T, fetching chan []common.Hash, arrive bool)

// verifyCompletingEvent verifies that one single event arrive on an completing channel.
func verifyCompletingEvent(t *testing.T, completing chan []common.Hash, arrive bool) {
+ t.Helper()
+
if arrive {
select {
case <-completing:
@@ -269,6 +301,8 @@ func verifyCompletingEvent(t *testing.T, completing chan []common.Hash, arrive b

// verifyImportEvent verifies that one single event arrive on an import channel.
func verifyImportEvent(t *testing.T, imported chan interface{}, arrive bool) {
+ t.Helper()
+
if arrive {
select {
case <-imported:
@@ -287,6 +321,8 @@ func verifyImportEvent(t *testing.T, imported chan interface{}, arrive bool) {
// verifyImportCount verifies that exactly count number of events arrive on an
// import hook channel.
func verifyImportCount(t *testing.T, imported chan interface{}, count int) {
+ t.Helper()
+
for i := 0; i < count; i++ {
select {
case <-imported:
@@ -299,6 +335,8 @@ func verifyImportCount(t *testing.T, imported chan interface{}, count int) {

// verifyImportDone verifies that no more events are arriving on an import channel.
func verifyImportDone(t *testing.T, imported chan interface{}) {
+ t.Helper()
+
select {
case <-imported:
t.Fatalf("extra block imported")
@@ -308,6 +346,8 @@ func verifyImportDone(t *testing.T, imported chan interface{}) {

// verifyChainHeight verifies the chain height is as expected.
func verifyChainHeight(t *testing.T, fetcher *fetcherTester, height uint64) {
+ t.Helper()
+
if fetcher.chainHeight() != height {
t.Fatalf("chain height mismatch, got %d, want %d", fetcher.chainHeight(), height)
}
@ -368,13 +408,13 @@ func testConcurrentAnnouncements(t *testing.T, light bool) {
|
|||||||
secondBodyFetcher := tester.makeBodyFetcher("second", blocks, 0)
|
secondBodyFetcher := tester.makeBodyFetcher("second", blocks, 0)
|
||||||
|
|
||||||
counter := uint32(0)
|
counter := uint32(0)
|
||||||
firstHeaderWrapper := func(hash common.Hash) error {
|
firstHeaderWrapper := func(hash common.Hash, sink chan *eth.Response) (*eth.Request, error) {
|
||||||
atomic.AddUint32(&counter, 1)
|
atomic.AddUint32(&counter, 1)
|
||||||
return firstHeaderFetcher(hash)
|
return firstHeaderFetcher(hash, sink)
|
||||||
}
|
}
|
||||||
secondHeaderWrapper := func(hash common.Hash) error {
|
secondHeaderWrapper := func(hash common.Hash, sink chan *eth.Response) (*eth.Request, error) {
|
||||||
atomic.AddUint32(&counter, 1)
|
atomic.AddUint32(&counter, 1)
|
||||||
return secondHeaderFetcher(hash)
|
return secondHeaderFetcher(hash, sink)
|
||||||
}
|
}
|
||||||
// Iteratively announce blocks until all are imported
|
// Iteratively announce blocks until all are imported
|
||||||
imported := make(chan interface{})
|
imported := make(chan interface{})
|
||||||
@ -468,15 +508,20 @@ func testPendingDeduplication(t *testing.T, light bool) {
|
|||||||
|
|
||||||
delay := 50 * time.Millisecond
|
delay := 50 * time.Millisecond
|
||||||
counter := uint32(0)
|
counter := uint32(0)
|
||||||
headerWrapper := func(hash common.Hash) error {
|
headerWrapper := func(hash common.Hash, sink chan *eth.Response) (*eth.Request, error) {
|
||||||
atomic.AddUint32(&counter, 1)
|
atomic.AddUint32(&counter, 1)
|
||||||
|
|
||||||
// Simulate a long running fetch
|
// Simulate a long running fetch
|
||||||
|
resink := make(chan *eth.Response)
|
||||||
|
req, err := headerFetcher(hash, resink)
|
||||||
|
if err == nil {
|
||||||
go func() {
|
go func() {
|
||||||
|
res := <-resink
|
||||||
time.Sleep(delay)
|
time.Sleep(delay)
|
||||||
headerFetcher(hash)
|
sink <- res
|
||||||
}()
|
}()
|
||||||
return nil
|
}
|
||||||
|
return req, err
|
||||||
}
|
}
|
||||||
checkNonExist := func() bool {
|
checkNonExist := func() bool {
|
||||||
return tester.getBlock(hashes[0]) == nil
|
return tester.getBlock(hashes[0]) == nil
|
||||||
|
125	eth/handler.go
@ -83,8 +83,8 @@ type handlerConfig struct {
|
|||||||
TxPool txPool // Transaction pool to propagate from
|
TxPool txPool // Transaction pool to propagate from
|
||||||
Merger *consensus.Merger // The manager for eth1/2 transition
|
Merger *consensus.Merger // The manager for eth1/2 transition
|
||||||
Network uint64 // Network identifier to advertise
|
Network uint64 // Network identifier to advertise
|
||||||
Sync downloader.SyncMode // Whether to fast or full sync
|
Sync downloader.SyncMode // Whether to snap or full sync
|
||||||
BloomCache uint64 // Megabytes to alloc for fast sync bloom
|
BloomCache uint64 // Megabytes to alloc for snap sync bloom
|
||||||
EventMux *event.TypeMux // Legacy event mux, deprecate for `feed`
|
EventMux *event.TypeMux // Legacy event mux, deprecate for `feed`
|
||||||
Checkpoint *params.TrustedCheckpoint // Hard coded checkpoint for sync challenges
|
Checkpoint *params.TrustedCheckpoint // Hard coded checkpoint for sync challenges
|
||||||
Whitelist map[uint64]common.Hash // Hard coded whitelist for sync challenges
|
Whitelist map[uint64]common.Hash // Hard coded whitelist for sync challenges
|
||||||
@ -94,8 +94,7 @@ type handler struct {
|
|||||||
networkID uint64
|
networkID uint64
|
||||||
forkFilter forkid.Filter // Fork ID filter, constant across the lifetime of the node
|
forkFilter forkid.Filter // Fork ID filter, constant across the lifetime of the node
|
||||||
|
|
||||||
fastSync uint32 // Flag whether fast sync is enabled (gets disabled if we already have blocks)
|
snapSync uint32 // Flag whether snap sync is enabled (gets disabled if we already have blocks)
|
||||||
snapSync uint32 // Flag whether fast sync should operate on top of the snap protocol
|
|
||||||
acceptTxs uint32 // Flag whether we're considered synchronised (enables transaction processing)
|
acceptTxs uint32 // Flag whether we're considered synchronised (enables transaction processing)
|
||||||
|
|
||||||
checkpointNumber uint64 // Block number for the sync progress validator to cross reference
|
checkpointNumber uint64 // Block number for the sync progress validator to cross reference
|
||||||
@ -147,44 +146,41 @@ func newHandler(config *handlerConfig) (*handler, error) {
|
|||||||
quitSync: make(chan struct{}),
|
quitSync: make(chan struct{}),
|
||||||
}
|
}
|
||||||
if config.Sync == downloader.FullSync {
|
if config.Sync == downloader.FullSync {
|
||||||
// The database seems empty as the current block is the genesis. Yet the fast
|
// The database seems empty as the current block is the genesis. Yet the snap
|
||||||
// block is ahead, so fast sync was enabled for this node at a certain point.
|
// block is ahead, so snap sync was enabled for this node at a certain point.
|
||||||
// The scenarios where this can happen are
|
// The scenarios where this can happen are
|
||||||
// * if the user manually (or via a bad block) rolled back a fast sync node
|
// * if the user manually (or via a bad block) rolled back a snap sync node
|
||||||
// below the sync point.
|
// below the sync point.
|
||||||
// * the last fast sync is not finished while user specifies a full sync this
|
// * the last snap sync is not finished while user specifies a full sync this
|
||||||
// time. But we don't have any recent state for full sync.
|
// time. But we don't have any recent state for full sync.
|
||||||
// In these cases however it's safe to reenable fast sync.
|
// In these cases however it's safe to reenable snap sync.
|
||||||
fullBlock, fastBlock := h.chain.CurrentBlock(), h.chain.CurrentFastBlock()
|
fullBlock, fastBlock := h.chain.CurrentBlock(), h.chain.CurrentFastBlock()
|
||||||
if fullBlock.NumberU64() == 0 && fastBlock.NumberU64() > 0 {
|
if fullBlock.NumberU64() == 0 && fastBlock.NumberU64() > 0 {
|
||||||
h.fastSync = uint32(1)
|
h.snapSync = uint32(1)
|
||||||
log.Warn("Switch sync mode from full sync to fast sync")
|
log.Warn("Switch sync mode from full sync to snap sync")
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
if h.chain.CurrentBlock().NumberU64() > 0 {
|
if h.chain.CurrentBlock().NumberU64() > 0 {
|
||||||
// Print warning log if database is not empty to run fast sync.
|
// Print warning log if database is not empty to run snap sync.
|
||||||
log.Warn("Switch sync mode from fast sync to full sync")
|
log.Warn("Switch sync mode from snap sync to full sync")
|
||||||
} else {
|
} else {
|
||||||
// If fast sync was requested and our database is empty, grant it
|
// If snap sync was requested and our database is empty, grant it
|
||||||
h.fastSync = uint32(1)
|
|
||||||
if config.Sync == downloader.SnapSync {
|
|
||||||
h.snapSync = uint32(1)
|
h.snapSync = uint32(1)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
// If we have trusted checkpoints, enforce them on the chain
|
// If we have trusted checkpoints, enforce them on the chain
|
||||||
if config.Checkpoint != nil {
|
if config.Checkpoint != nil {
|
||||||
h.checkpointNumber = (config.Checkpoint.SectionIndex+1)*params.CHTFrequency - 1
|
h.checkpointNumber = (config.Checkpoint.SectionIndex+1)*params.CHTFrequency - 1
|
||||||
h.checkpointHash = config.Checkpoint.SectionHead
|
h.checkpointHash = config.Checkpoint.SectionHead
|
||||||
}
|
}
|
||||||
// Construct the downloader (long sync) and its backing state bloom if fast
|
// Construct the downloader (long sync) and its backing state bloom if snap
|
||||||
// sync is requested. The downloader is responsible for deallocating the state
|
// sync is requested. The downloader is responsible for deallocating the state
|
||||||
// bloom when it's done.
|
// bloom when it's done.
|
||||||
// Note: we don't enable it if snap-sync is performed, since it's very heavy
|
// Note: we don't enable it if snap-sync is performed, since it's very heavy
|
||||||
// and the heal-portion of the snap sync is much lighter than fast. What we particularly
|
// and the heal-portion of the snap sync is much lighter than a full state download. What we particularly
|
||||||
// want to avoid, is a 90%-finished (but restarted) snap-sync to begin
|
// want to avoid, is a 90%-finished (but restarted) snap-sync to begin
|
||||||
// indexing the entire trie
|
// indexing the entire trie
|
||||||
if atomic.LoadUint32(&h.fastSync) == 1 && atomic.LoadUint32(&h.snapSync) == 0 {
|
if atomic.LoadUint32(&h.snapSync) == 1 && atomic.LoadUint32(&h.snapSync) == 0 {
|
||||||
h.stateBloom = trie.NewSyncBloom(config.BloomCache, config.Database)
|
h.stateBloom = trie.NewSyncBloom(config.BloomCache, config.Database)
|
||||||
}
|
}
|
||||||
h.downloader = downloader.New(h.checkpointNumber, config.Database, h.stateBloom, h.eventMux, h.chain, nil, h.removePeer)
|
h.downloader = downloader.New(h.checkpointNumber, config.Database, h.stateBloom, h.eventMux, h.chain, nil, h.removePeer)
|
||||||
@ -236,12 +232,12 @@ func newHandler(config *handlerConfig) (*handler, error) {
|
|||||||
log.Warn("Unsynced yet, discarded propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash())
|
log.Warn("Unsynced yet, discarded propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash())
|
||||||
return 0, nil
|
return 0, nil
|
||||||
}
|
}
|
||||||
// If fast sync is running, deny importing weird blocks. This is a problematic
|
// If snap sync is running, deny importing weird blocks. This is a problematic
|
||||||
// clause when starting up a new network, because fast-syncing miners might not
|
// clause when starting up a new network, because snap-syncing miners might not
|
||||||
// accept each others' blocks until a restart. Unfortunately we haven't figured
|
// accept each others' blocks until a restart. Unfortunately we haven't figured
|
||||||
// out a way yet where nodes can decide unilaterally whether the network is new
|
// out a way yet where nodes can decide unilaterally whether the network is new
|
||||||
// or not. This should be fixed if we figure out a solution.
|
// or not. This should be fixed if we figure out a solution.
|
||||||
if atomic.LoadUint32(&h.fastSync) == 1 {
|
if atomic.LoadUint32(&h.snapSync) == 1 {
|
||||||
log.Warn("Fast syncing, discarded propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash())
|
log.Warn("Fast syncing, discarded propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash())
|
||||||
return 0, nil
|
return 0, nil
|
||||||
}
|
}
|
||||||
@ -365,30 +361,93 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error {
|
|||||||
// after this will be sent via broadcasts.
|
// after this will be sent via broadcasts.
|
||||||
h.syncTransactions(peer)
|
h.syncTransactions(peer)
|
||||||
|
|
||||||
|
// Create a notification channel for pending requests if the peer goes down
|
||||||
|
dead := make(chan struct{})
|
||||||
|
defer close(dead)
|
||||||
|
|
||||||
// If we have a trusted CHT, reject all peers below that (avoid fast sync eclipse)
|
// If we have a trusted CHT, reject all peers below that (avoid fast sync eclipse)
|
||||||
if h.checkpointHash != (common.Hash{}) {
|
if h.checkpointHash != (common.Hash{}) {
|
||||||
// Request the peer's checkpoint header for chain height/weight validation
|
// Request the peer's checkpoint header for chain height/weight validation
|
||||||
if err := peer.RequestHeadersByNumber(h.checkpointNumber, 1, 0, false); err != nil {
|
resCh := make(chan *eth.Response)
|
||||||
|
if _, err := peer.RequestHeadersByNumber(h.checkpointNumber, 1, 0, false, resCh); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
// Start a timer to disconnect if the peer doesn't reply in time
|
// Start a timer to disconnect if the peer doesn't reply in time
|
||||||
p.syncDrop = time.AfterFunc(syncChallengeTimeout, func() {
|
go func() {
|
||||||
|
timeout := time.NewTimer(syncChallengeTimeout)
|
||||||
|
defer timeout.Stop()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case res := <-resCh:
|
||||||
|
headers := ([]*types.Header)(*res.Res.(*eth.BlockHeadersPacket))
|
||||||
|
if len(headers) == 0 {
|
||||||
|
// If we're doing a snap sync, we must enforce the checkpoint
|
||||||
|
// block to avoid eclipse attacks. Unsynced nodes are welcome
|
||||||
|
// to connect after we're done joining the network.
|
||||||
|
if atomic.LoadUint32(&h.snapSync) == 1 {
|
||||||
|
peer.Log().Warn("Dropping unsynced node during sync", "addr", peer.RemoteAddr(), "type", peer.Name())
|
||||||
|
res.Done <- errors.New("unsynced node cannot serve sync")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
res.Done <- nil
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Validate the header and either drop the peer or continue
|
||||||
|
if len(headers) > 1 {
|
||||||
|
res.Done <- errors.New("too many headers in checkpoint response")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if headers[0].Hash() != h.checkpointHash {
|
||||||
|
res.Done <- errors.New("checkpoint hash mismatch")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
res.Done <- nil
|
||||||
|
|
||||||
|
case <-timeout.C:
|
||||||
peer.Log().Warn("Checkpoint challenge timed out, dropping", "addr", peer.RemoteAddr(), "type", peer.Name())
|
peer.Log().Warn("Checkpoint challenge timed out, dropping", "addr", peer.RemoteAddr(), "type", peer.Name())
|
||||||
h.removePeer(peer.ID())
|
h.removePeer(peer.ID())
|
||||||
})
|
|
||||||
// Make sure it's cleaned up if the peer dies off
|
case <-dead:
|
||||||
defer func() {
|
// Peer handler terminated, abort all goroutines
|
||||||
if p.syncDrop != nil {
|
|
||||||
p.syncDrop.Stop()
|
|
||||||
p.syncDrop = nil
|
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
// If we have any explicit whitelist block hashes, request them
|
// If we have any explicit whitelist block hashes, request them
|
||||||
for number := range h.whitelist {
|
for number, hash := range h.whitelist {
|
||||||
if err := peer.RequestHeadersByNumber(number, 1, 0, false); err != nil {
|
resCh := make(chan *eth.Response)
|
||||||
|
if _, err := peer.RequestHeadersByNumber(number, 1, 0, false, resCh); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
go func(number uint64, hash common.Hash) {
|
||||||
|
timeout := time.NewTimer(syncChallengeTimeout)
|
||||||
|
defer timeout.Stop()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case res := <-resCh:
|
||||||
|
headers := ([]*types.Header)(*res.Res.(*eth.BlockHeadersPacket))
|
||||||
|
if len(headers) == 0 {
|
||||||
|
// Whitelisted blocks are allowed to be missing if the remote
|
||||||
|
// node is not yet synced
|
||||||
|
res.Done <- nil
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Validate the header and either drop the peer or continue
|
||||||
|
if len(headers) > 1 {
|
||||||
|
res.Done <- errors.New("too many headers in whitelist response")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if headers[0].Number.Uint64() != number || headers[0].Hash() != hash {
|
||||||
|
peer.Log().Info("Whitelist mismatch, dropping peer", "number", number, "hash", headers[0].Hash(), "want", hash)
|
||||||
|
res.Done <- errors.New("whitelist block mismatch")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
peer.Log().Debug("Whitelist block verified", "number", number, "hash", hash)
|
||||||
|
|
||||||
|
case <-timeout.C:
|
||||||
|
peer.Log().Warn("Whitelist challenge timed out, dropping", "addr", peer.RemoteAddr(), "type", peer.Name())
|
||||||
|
h.removePeer(peer.ID())
|
||||||
|
}
|
||||||
|
}(number, hash)
|
||||||
}
|
}
|
||||||
// Handle incoming messages until the connection is torn down
|
// Handle incoming messages until the connection is torn down
|
||||||
return handler(peer)
|
return handler(peer)
|
||||||
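
Both challenges above follow the same shape: fire RequestHeadersByNumber with a private response channel, then race the reply against syncChallengeTimeout and the peer's dead channel. A hedged sketch of that shape as a standalone helper, assuming the handler package's existing imports; challengeHeader and its drop/verify parameters are hypothetical names, not part of the diff:

func challengeHeader(peer *eth.Peer, number uint64, dead chan struct{}, drop func(), verify func([]*types.Header) error) error {
	resCh := make(chan *eth.Response)
	if _, err := peer.RequestHeadersByNumber(number, 1, 0, false, resCh); err != nil {
		return err
	}
	go func() {
		timeout := time.NewTimer(syncChallengeTimeout)
		defer timeout.Stop()

		select {
		case res := <-resCh:
			headers := ([]*types.Header)(*res.Res.(*eth.BlockHeadersPacket))
			res.Done <- verify(headers) // a non-nil error makes the protocol reader drop the peer
		case <-timeout.C:
			drop() // e.g. h.removePeer(peer.ID())
		case <-dead:
			// Peer handler terminated, nothing left to verify
		}
	}()
	return nil
}

Returning the verification error on res.Done is what lets the dispatcher-based flow drop peers that refuse to serve the advertised chain, which is the behaviour the per-peer syncDrop timer used to approximate.
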
|
@ -17,7 +17,6 @@
|
|||||||
package eth
|
package eth
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"math/big"
|
"math/big"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
@ -27,7 +26,6 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/core"
|
"github.com/ethereum/go-ethereum/core"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/eth/protocols/eth"
|
"github.com/ethereum/go-ethereum/eth/protocols/eth"
|
||||||
"github.com/ethereum/go-ethereum/log"
|
|
||||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||||
"github.com/ethereum/go-ethereum/trie"
|
"github.com/ethereum/go-ethereum/trie"
|
||||||
)
|
)
|
||||||
@ -64,25 +62,6 @@ func (h *ethHandler) AcceptTxs() bool {
|
|||||||
func (h *ethHandler) Handle(peer *eth.Peer, packet eth.Packet) error {
|
func (h *ethHandler) Handle(peer *eth.Peer, packet eth.Packet) error {
|
||||||
// Consume any broadcasts and announces, forwarding the rest to the downloader
|
// Consume any broadcasts and announces, forwarding the rest to the downloader
|
||||||
switch packet := packet.(type) {
|
switch packet := packet.(type) {
|
||||||
case *eth.BlockHeadersPacket:
|
|
||||||
return h.handleHeaders(peer, *packet)
|
|
||||||
|
|
||||||
case *eth.BlockBodiesPacket:
|
|
||||||
txset, uncleset := packet.Unpack()
|
|
||||||
return h.handleBodies(peer, txset, uncleset)
|
|
||||||
|
|
||||||
case *eth.NodeDataPacket:
|
|
||||||
if err := h.downloader.DeliverNodeData(peer.ID(), *packet); err != nil {
|
|
||||||
log.Debug("Failed to deliver node state data", "err", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
|
|
||||||
case *eth.ReceiptsPacket:
|
|
||||||
if err := h.downloader.DeliverReceipts(peer.ID(), *packet); err != nil {
|
|
||||||
log.Debug("Failed to deliver receipts", "err", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
|
|
||||||
case *eth.NewBlockHashesPacket:
|
case *eth.NewBlockHashesPacket:
|
||||||
hashes, numbers := packet.Unpack()
|
hashes, numbers := packet.Unpack()
|
||||||
return h.handleBlockAnnounces(peer, hashes, numbers)
|
return h.handleBlockAnnounces(peer, hashes, numbers)
|
||||||
@ -104,79 +83,6 @@ func (h *ethHandler) Handle(peer *eth.Peer, packet eth.Packet) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// handleHeaders is invoked from a peer's message handler when it transmits a batch
|
|
||||||
// of headers for the local node to process.
|
|
||||||
func (h *ethHandler) handleHeaders(peer *eth.Peer, headers []*types.Header) error {
|
|
||||||
p := h.peers.peer(peer.ID())
|
|
||||||
if p == nil {
|
|
||||||
return errors.New("unregistered during callback")
|
|
||||||
}
|
|
||||||
// If no headers were received, but we're expecting a checkpoint header, consider it that
|
|
||||||
if len(headers) == 0 && p.syncDrop != nil {
|
|
||||||
// Stop the timer either way, decide later to drop or not
|
|
||||||
p.syncDrop.Stop()
|
|
||||||
p.syncDrop = nil
|
|
||||||
|
|
||||||
// If we're doing a fast (or snap) sync, we must enforce the checkpoint block to avoid
|
|
||||||
// eclipse attacks. Unsynced nodes are welcome to connect after we're done
|
|
||||||
// joining the network
|
|
||||||
if atomic.LoadUint32(&h.fastSync) == 1 {
|
|
||||||
peer.Log().Warn("Dropping unsynced node during sync", "addr", peer.RemoteAddr(), "type", peer.Name())
|
|
||||||
return errors.New("unsynced node cannot serve sync")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Filter out any explicitly requested headers, deliver the rest to the downloader
|
|
||||||
filter := len(headers) == 1
|
|
||||||
if filter {
|
|
||||||
// If it's a potential sync progress check, validate the content and advertised chain weight
|
|
||||||
if p.syncDrop != nil && headers[0].Number.Uint64() == h.checkpointNumber {
|
|
||||||
// Disable the sync drop timer
|
|
||||||
p.syncDrop.Stop()
|
|
||||||
p.syncDrop = nil
|
|
||||||
|
|
||||||
// Validate the header and either drop the peer or continue
|
|
||||||
if headers[0].Hash() != h.checkpointHash {
|
|
||||||
return errors.New("checkpoint hash mismatch")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
// Otherwise if it's a whitelisted block, validate against the set
|
|
||||||
if want, ok := h.whitelist[headers[0].Number.Uint64()]; ok {
|
|
||||||
if hash := headers[0].Hash(); want != hash {
|
|
||||||
peer.Log().Info("Whitelist mismatch, dropping peer", "number", headers[0].Number.Uint64(), "hash", hash, "want", want)
|
|
||||||
return errors.New("whitelist block mismatch")
|
|
||||||
}
|
|
||||||
peer.Log().Debug("Whitelist block verified", "number", headers[0].Number.Uint64(), "hash", want)
|
|
||||||
}
|
|
||||||
// Irrelevant of the fork checks, send the header to the fetcher just in case
|
|
||||||
headers = h.blockFetcher.FilterHeaders(peer.ID(), headers, time.Now())
|
|
||||||
}
|
|
||||||
if len(headers) > 0 || !filter {
|
|
||||||
err := h.downloader.DeliverHeaders(peer.ID(), headers)
|
|
||||||
if err != nil {
|
|
||||||
log.Debug("Failed to deliver headers", "err", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// handleBodies is invoked from a peer's message handler when it transmits a batch
|
|
||||||
// of block bodies for the local node to process.
|
|
||||||
func (h *ethHandler) handleBodies(peer *eth.Peer, txs [][]*types.Transaction, uncles [][]*types.Header) error {
|
|
||||||
// Filter out any explicitly requested bodies, deliver the rest to the downloader
|
|
||||||
filter := len(txs) > 0 || len(uncles) > 0
|
|
||||||
if filter {
|
|
||||||
txs, uncles = h.blockFetcher.FilterBodies(peer.ID(), txs, uncles, time.Now())
|
|
||||||
}
|
|
||||||
if len(txs) > 0 || len(uncles) > 0 || !filter {
|
|
||||||
err := h.downloader.DeliverBodies(peer.ID(), txs, uncles)
|
|
||||||
if err != nil {
|
|
||||||
log.Debug("Failed to deliver bodies", "err", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// handleBlockAnnounces is invoked from a peer's message handler when it transmits a
|
// handleBlockAnnounces is invoked from a peer's message handler when it transmits a
|
||||||
// batch of block announcements for the local node to process.
|
// batch of block announcements for the local node to process.
|
||||||
func (h *ethHandler) handleBlockAnnounces(peer *eth.Peer, hashes []common.Hash, numbers []uint64) error {
|
func (h *ethHandler) handleBlockAnnounces(peer *eth.Peer, hashes []common.Hash, numbers []uint64) error {
|
||||||
|
@ -354,7 +354,7 @@ func testSendTransactions(t *testing.T, protocol uint) {
|
|||||||
seen := make(map[common.Hash]struct{})
|
seen := make(map[common.Hash]struct{})
|
||||||
for len(seen) < len(insert) {
|
for len(seen) < len(insert) {
|
||||||
switch protocol {
|
switch protocol {
|
||||||
case 65, 66:
|
case 66:
|
||||||
select {
|
select {
|
||||||
case hashes := <-anns:
|
case hashes := <-anns:
|
||||||
for _, hash := range hashes {
|
for _, hash := range hashes {
|
||||||
@ -364,7 +364,7 @@ func testSendTransactions(t *testing.T, protocol uint) {
|
|||||||
seen[hash] = struct{}{}
|
seen[hash] = struct{}{}
|
||||||
}
|
}
|
||||||
case <-bcasts:
|
case <-bcasts:
|
||||||
t.Errorf("initial tx broadcast received on post eth/65")
|
t.Errorf("initial tx broadcast received on post eth/66")
|
||||||
}
|
}
|
||||||
|
|
||||||
default:
|
default:
|
||||||
@ -389,6 +389,7 @@ func testTransactionPropagation(t *testing.T, protocol uint) {
|
|||||||
// to receive them. We need multiple sinks since a one-to-one peering would
|
// to receive them. We need multiple sinks since a one-to-one peering would
|
||||||
// broadcast all transactions without announcement.
|
// broadcast all transactions without announcement.
|
||||||
source := newTestHandler()
|
source := newTestHandler()
|
||||||
|
source.handler.snapSync = 0 // Avoid requiring snap, otherwise some will be dropped below
|
||||||
defer source.close()
|
defer source.close()
|
||||||
|
|
||||||
sinks := make([]*testHandler, 10)
|
sinks := make([]*testHandler, 10)
|
||||||
@ -406,7 +407,7 @@ func testTransactionPropagation(t *testing.T, protocol uint) {
|
|||||||
defer sourcePipe.Close()
|
defer sourcePipe.Close()
|
||||||
defer sinkPipe.Close()
|
defer sinkPipe.Close()
|
||||||
|
|
||||||
sourcePeer := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{byte(i)}, "", nil, sourcePipe), sourcePipe, source.txpool)
|
sourcePeer := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{byte(i + 1)}, "", nil, sourcePipe), sourcePipe, source.txpool)
|
||||||
sinkPeer := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, sink.txpool)
|
sinkPeer := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, sink.txpool)
|
||||||
defer sourcePeer.Close()
|
defer sourcePeer.Close()
|
||||||
defer sinkPeer.Close()
|
defer sinkPeer.Close()
|
||||||
@ -438,12 +439,13 @@ func testTransactionPropagation(t *testing.T, protocol uint) {
|
|||||||
|
|
||||||
// Iterate through all the sinks and ensure they all got the transactions
|
// Iterate through all the sinks and ensure they all got the transactions
|
||||||
for i := range sinks {
|
for i := range sinks {
|
||||||
for arrived := 0; arrived < len(txs); {
|
for arrived, timeout := 0, false; arrived < len(txs) && !timeout; {
|
||||||
select {
|
select {
|
||||||
case event := <-txChs[i]:
|
case event := <-txChs[i]:
|
||||||
arrived += len(event.Txs)
|
arrived += len(event.Txs)
|
||||||
case <-time.NewTimer(time.Second).C:
|
case <-time.After(time.Second):
|
||||||
t.Errorf("sink %d: transaction propagation timed out: have %d, want %d", i, arrived, len(txs))
|
t.Errorf("sink %d: transaction propagation timed out: have %d, want %d", i, arrived, len(txs))
|
||||||
|
timeout = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
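
The propagation-wait loop above gains a timeout flag so the first missed deadline ends the wait, instead of the loop re-arming the timer and timing out once per still-missing transaction. The same pattern in isolation, as a hypothetical helper (waitForTxs is not part of the diff; it assumes the core and time packages already imported by the test file):

func waitForTxs(t *testing.T, events chan core.NewTxsEvent, want int) {
	t.Helper()

	for arrived, timeout := 0, false; arrived < want && !timeout; {
		select {
		case event := <-events:
			arrived += len(event.Txs)
		case <-time.After(time.Second):
			t.Errorf("transaction propagation timed out: have %d, want %d", arrived, want)
			timeout = true // give up instead of timing out once per missing transaction
		}
	}
}
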
@ -463,23 +465,23 @@ func TestCheckpointChallenge(t *testing.T) {
|
|||||||
}{
|
}{
|
||||||
// If checkpointing is not enabled locally, don't challenge and don't drop
|
// If checkpointing is not enabled locally, don't challenge and don't drop
|
||||||
{downloader.FullSync, false, false, false, false, false},
|
{downloader.FullSync, false, false, false, false, false},
|
||||||
{downloader.FastSync, false, false, false, false, false},
|
{downloader.SnapSync, false, false, false, false, false},
|
||||||
|
|
||||||
// If checkpointing is enabled locally and remote response is empty, only drop during fast sync
|
// If checkpointing is enabled locally and remote response is empty, only drop during fast sync
|
||||||
{downloader.FullSync, true, false, true, false, false},
|
{downloader.FullSync, true, false, true, false, false},
|
||||||
{downloader.FastSync, true, false, true, false, true}, // Special case, fast sync, unsynced peer
|
{downloader.SnapSync, true, false, true, false, true}, // Special case, fast sync, unsynced peer
|
||||||
|
|
||||||
// If checkpointing is enabled locally and remote response mismatches, always drop
|
// If checkpointing is enabled locally and remote response mismatches, always drop
|
||||||
{downloader.FullSync, true, false, false, false, true},
|
{downloader.FullSync, true, false, false, false, true},
|
||||||
{downloader.FastSync, true, false, false, false, true},
|
{downloader.SnapSync, true, false, false, false, true},
|
||||||
|
|
||||||
// If checkpointing is enabled locally and remote response matches, never drop
|
// If checkpointing is enabled locally and remote response matches, never drop
|
||||||
{downloader.FullSync, true, false, false, true, false},
|
{downloader.FullSync, true, false, false, true, false},
|
||||||
{downloader.FastSync, true, false, false, true, false},
|
{downloader.SnapSync, true, false, false, true, false},
|
||||||
|
|
||||||
// If checkpointing is enabled locally and remote times out, always drop
|
// If checkpointing is enabled locally and remote times out, always drop
|
||||||
{downloader.FullSync, true, true, false, true, true},
|
{downloader.FullSync, true, true, false, true, true},
|
||||||
{downloader.FastSync, true, true, false, true, true},
|
{downloader.SnapSync, true, true, false, true, true},
|
||||||
}
|
}
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
t.Run(fmt.Sprintf("sync %v checkpoint %v timeout %v empty %v match %v", tt.syncmode, tt.checkpoint, tt.timeout, tt.empty, tt.match), func(t *testing.T) {
|
t.Run(fmt.Sprintf("sync %v checkpoint %v timeout %v empty %v match %v", tt.syncmode, tt.checkpoint, tt.timeout, tt.empty, tt.match), func(t *testing.T) {
|
||||||
@ -500,10 +502,10 @@ func testCheckpointChallenge(t *testing.T, syncmode downloader.SyncMode, checkpo
|
|||||||
handler := newTestHandler()
|
handler := newTestHandler()
|
||||||
defer handler.close()
|
defer handler.close()
|
||||||
|
|
||||||
if syncmode == downloader.FastSync {
|
if syncmode == downloader.SnapSync {
|
||||||
atomic.StoreUint32(&handler.handler.fastSync, 1)
|
atomic.StoreUint32(&handler.handler.snapSync, 1)
|
||||||
} else {
|
} else {
|
||||||
atomic.StoreUint32(&handler.handler.fastSync, 0)
|
atomic.StoreUint32(&handler.handler.snapSync, 0)
|
||||||
}
|
}
|
||||||
var response *types.Header
|
var response *types.Header
|
||||||
if checkpoint {
|
if checkpoint {
|
||||||
|
@ -152,7 +152,7 @@ func newTestHandlerWithBlocks(blocks int) *testHandler {
|
|||||||
TxPool: txpool,
|
TxPool: txpool,
|
||||||
Merger: consensus.NewMerger(rawdb.NewMemoryDatabase()),
|
Merger: consensus.NewMerger(rawdb.NewMemoryDatabase()),
|
||||||
Network: 1,
|
Network: 1,
|
||||||
Sync: downloader.FastSync,
|
Sync: downloader.SnapSync,
|
||||||
BloomCache: 1,
|
BloomCache: 1,
|
||||||
})
|
})
|
||||||
handler.Start(1000)
|
handler.Start(1000)
|
||||||
|
@ -18,8 +18,6 @@ package eth
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"math/big"
|
"math/big"
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/eth/protocols/eth"
|
"github.com/ethereum/go-ethereum/eth/protocols/eth"
|
||||||
"github.com/ethereum/go-ethereum/eth/protocols/snap"
|
"github.com/ethereum/go-ethereum/eth/protocols/snap"
|
||||||
@ -37,10 +35,7 @@ type ethPeerInfo struct {
|
|||||||
type ethPeer struct {
|
type ethPeer struct {
|
||||||
*eth.Peer
|
*eth.Peer
|
||||||
snapExt *snapPeer // Satellite `snap` connection
|
snapExt *snapPeer // Satellite `snap` connection
|
||||||
|
|
||||||
syncDrop *time.Timer // Connection dropper if `eth` sync progress isn't validated in time
|
|
||||||
snapWait chan struct{} // Notification channel for snap connections
|
snapWait chan struct{} // Notification channel for snap connections
|
||||||
lock sync.RWMutex // Mutex protecting the internal fields
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// info gathers and returns some `eth` protocol metadata known about a peer.
|
// info gathers and returns some `eth` protocol metadata known about a peer.
|
||||||
|
247	eth/protocols/eth/dispatcher.go Normal file
@ -0,0 +1,247 @@
|
|||||||
|
// Copyright 2021 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package eth
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/p2p"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// errDisconnected is returned if a request is attempted to be made to a peer
|
||||||
|
// that was already closed.
|
||||||
|
errDisconnected = errors.New("disconnected")
|
||||||
|
|
||||||
|
// errDanglingResponse is returned if a response arrives with a request id
|
||||||
|
// which does not match any existing pending request.
|
||||||
|
errDanglingResponse = errors.New("response to non-existent request")
|
||||||
|
|
||||||
|
// errMismatchingResponseType is returned if the remote peer sent a different
|
||||||
|
// packet type as a response to a request than what the local node expected.
|
||||||
|
errMismatchingResponseType = errors.New("mismatching response type")
|
||||||
|
)
|
||||||
|
|
||||||
|
// Request is a pending request to allow tracking it and delivering a response
|
||||||
|
// back to the requester on their chosen channel.
|
||||||
|
type Request struct {
|
||||||
|
peer *Peer // Peer to which this request belogs for untracking
|
||||||
|
id uint64 // Request ID to match up replies to
|
||||||
|
|
||||||
|
sink chan *Response // Channel to deliver the response on
|
||||||
|
cancel chan struct{} // Channel to cancel requests ahead of time
|
||||||
|
|
||||||
|
code uint64 // Message code of the request packet
|
||||||
|
want uint64 // Message code of the response packet
|
||||||
|
data interface{} // Data content of the request packet
|
||||||
|
|
||||||
|
Peer string // Demultiplexer if cross-peer requests are batched together
|
||||||
|
Sent time.Time // Timestamp when the request was sent
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close aborts an in-flight request. Although there's no way to notify the
|
||||||
|
// remote peer about the cancellation, this method notifies the dispatcher to
|
||||||
|
// discard any late responses.
|
||||||
|
func (r *Request) Close() error {
|
||||||
|
if r.peer == nil { // Tests mock out the dispatcher, skip internal cancellation
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
cancelOp := &cancel{
|
||||||
|
id: r.id,
|
||||||
|
fail: make(chan error),
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case r.peer.reqCancel <- cancelOp:
|
||||||
|
if err := <-cancelOp.fail; err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
close(r.cancel)
|
||||||
|
return nil
|
||||||
|
case <-r.peer.term:
|
||||||
|
return errDisconnected
|
||||||
|
}
|
||||||
|
}
|
||||||
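
The contract implied by Close is that a caller must do one of two things with every request: consume the reply from its sink and signal Done, or close the request so the dispatcher discards any late reply. A minimal caller-side sketch of both paths, assuming only the API introduced in this file and in peer.go below (fetchOneHeader and the quit channel are illustrative names, not part of the diff):

func fetchOneHeader(peer *eth.Peer, hash common.Hash, quit chan struct{}) (*types.Header, error) {
	sink := make(chan *eth.Response)
	req, err := peer.RequestOneHeader(hash, sink)
	if err != nil {
		return nil, err
	}
	select {
	case res := <-sink:
		headers := ([]*types.Header)(*res.Res.(*eth.BlockHeadersPacket))
		res.Done <- nil // release the protocol reader, we own the data now
		if len(headers) != 1 {
			return nil, fmt.Errorf("expected 1 header, got %d", len(headers))
		}
		return headers[0], nil
	case <-quit:
		return nil, req.Close() // stop tracking, any late reply is silently discarded
	}
}
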
|
|
||||||
|
// request is a wrapper around a client Request that has an error channel to
|
||||||
|
// signal on if sending the request already failed on a network level.
|
||||||
|
type request struct {
|
||||||
|
req *Request
|
||||||
|
fail chan error
|
||||||
|
}
|
||||||
|
|
||||||
|
// cancel is a maintenance type on the dispatcher to stop tracking a pending
|
||||||
|
// request.
|
||||||
|
type cancel struct {
|
||||||
|
id uint64 // Request ID to stop tracking
|
||||||
|
fail chan error
|
||||||
|
}
|
||||||
|
|
||||||
|
// Response is a reply packet to a previously created request. It is delivered
|
||||||
|
// on the channel assigned by the requester subsystem and contains the original
|
||||||
|
// request embedded to allow uniquely matching it caller side.
|
||||||
|
type Response struct {
|
||||||
|
id uint64 // Request ID to match up this reply to
|
||||||
|
recv time.Time // Timestamp when the response was received
|
||||||
|
code uint64 // Response packet type to cross validate with request
|
||||||
|
|
||||||
|
Req *Request // Original request to cross-reference with
|
||||||
|
Res interface{} // Remote response for the request query
|
||||||
|
Time time.Duration // Time it took for the request to be served
|
||||||
|
Done chan error // Channel to signal message handling to the reader
|
||||||
|
}
|
||||||
|
|
||||||
|
// response is a wrapper around a remote Response that has an error channel to
|
||||||
|
// signal on if processing the response failed.
|
||||||
|
type response struct {
|
||||||
|
res *Response
|
||||||
|
fail chan error
|
||||||
|
}
|
||||||
|
|
||||||
|
// dispatchRequest schedules the request to the dispatcher for tracking and
|
||||||
|
// network serialization, blocking until it's successfully sent.
|
||||||
|
//
|
||||||
|
// The returned Request must either be closed before discarding it, or the reply
|
||||||
|
// must be waited for and the Response's Done channel signalled.
|
||||||
|
func (p *Peer) dispatchRequest(req *Request) error {
|
||||||
|
reqOp := &request{
|
||||||
|
req: req,
|
||||||
|
fail: make(chan error),
|
||||||
|
}
|
||||||
|
req.cancel = make(chan struct{})
|
||||||
|
req.peer = p
|
||||||
|
req.Peer = p.id
|
||||||
|
|
||||||
|
select {
|
||||||
|
case p.reqDispatch <- reqOp:
|
||||||
|
return <-reqOp.fail
|
||||||
|
case <-p.term:
|
||||||
|
return errDisconnected
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// dispatchResponse fulfils a pending request and delivers it to the requested
|
||||||
|
// sink.
|
||||||
|
func (p *Peer) dispatchResponse(res *Response) error {
|
||||||
|
resOp := &response{
|
||||||
|
res: res,
|
||||||
|
fail: make(chan error),
|
||||||
|
}
|
||||||
|
res.recv = time.Now()
|
||||||
|
res.Done = make(chan error)
|
||||||
|
|
||||||
|
select {
|
||||||
|
case p.resDispatch <- resOp:
|
||||||
|
// Ensure the response is accepted by the dispatcher
|
||||||
|
if err := <-resOp.fail; err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// Deliver the filled out response and wait until it's handled. This
|
||||||
|
// path is a bit funky as Go's select has no order, so if a response
|
||||||
|
// arrives for an already cancelled request, there's a 50-50 chance
|
||||||
|
// of picking one channel or the other. To avoid such cases delivering
|
||||||
|
// the packet upstream, check for cancellation first and only after
|
||||||
|
// block on delivery.
|
||||||
|
select {
|
||||||
|
case <-res.Req.cancel:
|
||||||
|
return nil // Request cancelled, silently discard response
|
||||||
|
default:
|
||||||
|
// Request not yet cancelled, attempt to deliver it, but do watch
|
||||||
|
// for fresh cancellations too
|
||||||
|
select {
|
||||||
|
case res.Req.sink <- res:
|
||||||
|
return <-res.Done // Response delivered, return any errors
|
||||||
|
case <-res.Req.cancel:
|
||||||
|
return nil // Request cancelled, silently discard response
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
case <-p.term:
|
||||||
|
return errDisconnected
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// dispatcher is a loop that accepts requests from higher layer packages, pushes
|
||||||
|
// them to the network and tracks and dispatches the responses back to the original
|
||||||
|
// requester.
|
||||||
|
func (p *Peer) dispatcher() {
|
||||||
|
pending := make(map[uint64]*Request)
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case reqOp := <-p.reqDispatch:
|
||||||
|
req := reqOp.req
|
||||||
|
req.Sent = time.Now()
|
||||||
|
|
||||||
|
requestTracker.Track(p.id, p.version, req.code, req.want, req.id)
|
||||||
|
err := p2p.Send(p.rw, req.code, req.data)
|
||||||
|
reqOp.fail <- err
|
||||||
|
|
||||||
|
if err == nil {
|
||||||
|
pending[req.id] = req
|
||||||
|
}
|
||||||
|
|
||||||
|
case cancelOp := <-p.reqCancel:
|
||||||
|
// Retrieve the pending request to cancel and short circuit if it
|
||||||
|
// has already been serviced and is not available anymore
|
||||||
|
req := pending[cancelOp.id]
|
||||||
|
if req == nil {
|
||||||
|
cancelOp.fail <- nil
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// Stop tracking the request
|
||||||
|
delete(pending, cancelOp.id)
|
||||||
|
cancelOp.fail <- nil
|
||||||
|
|
||||||
|
case resOp := <-p.resDispatch:
|
||||||
|
res := resOp.res
|
||||||
|
res.Req = pending[res.id]
|
||||||
|
|
||||||
|
// Independent if the request exists or not, track this packet
|
||||||
|
requestTracker.Fulfil(p.id, p.version, res.code, res.id)
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case res.Req == nil:
|
||||||
|
// Response arrived with an untracked ID. Since even cancelled
|
||||||
|
// requests are tracked until fulfilment, a dangling response
|
||||||
|
// means the remote peer implements the protocol badly.
|
||||||
|
resOp.fail <- errDanglingResponse
|
||||||
|
|
||||||
|
case res.Req.want != res.code:
|
||||||
|
// Response arrived, but it's a different packet type than the
|
||||||
|
// one expected by the requester. Either the local code is bad,
|
||||||
|
// or the remote peer sent junk. In neither case can we handle
|
||||||
|
// the packet.
|
||||||
|
resOp.fail <- fmt.Errorf("%w: have %d, want %d", errMismatchingResponseType, res.code, res.Req.want)
|
||||||
|
|
||||||
|
default:
|
||||||
|
// All dispatcher checks passed and the response was initialized
|
||||||
|
// with the matching request. Signal to the delivery routine that
|
||||||
|
// it can wait for a handler response and dispatch the data.
|
||||||
|
res.Time = res.recv.Sub(res.Req.Sent)
|
||||||
|
resOp.fail <- nil
|
||||||
|
|
||||||
|
// Stop tracking the request, the response dispatcher will deliver
|
||||||
|
delete(pending, res.id)
|
||||||
|
}
|
||||||
|
|
||||||
|
case <-p.term:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
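
The Peer field on Request ("Demultiplexer if cross-peer requests are batched together") hints at how a consumer can share one sink across several peers and sort replies by origin. A hedged sketch of that usage, built only on the API defined in this file and on RequestOneHeader from peer.go below (collectHeaders and the aggregation step are illustrative, not part of the diff):

func collectHeaders(peers []*eth.Peer, hash common.Hash) error {
	sink := make(chan *eth.Response)
	pending := make(map[string]*eth.Request)
	for _, p := range peers {
		req, err := p.RequestOneHeader(hash, sink)
		if err != nil {
			return err
		}
		pending[p.ID()] = req
	}
	for len(pending) > 0 {
		res := <-sink
		delete(pending, res.Req.Peer) // Req.Peer names the peer that served this reply
		// ... merge res.Res into whatever aggregate is being built ...
		res.Done <- nil
	}
	return nil
}
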
@ -21,6 +21,7 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/core"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
@ -34,11 +35,13 @@ func handleGetBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error {
|
|||||||
if err := msg.Decode(&query); err != nil {
|
if err := msg.Decode(&query); err != nil {
|
||||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||||
}
|
}
|
||||||
response := answerGetBlockHeadersQuery(backend, query.GetBlockHeadersPacket, peer)
|
response := ServiceGetBlockHeadersQuery(backend.Chain(), query.GetBlockHeadersPacket, peer)
|
||||||
return peer.ReplyBlockHeaders(query.RequestId, response)
|
return peer.ReplyBlockHeaders(query.RequestId, response)
|
||||||
}
|
}
|
||||||
|
|
||||||
func answerGetBlockHeadersQuery(backend Backend, query *GetBlockHeadersPacket, peer *Peer) []*types.Header {
|
// ServiceGetBlockHeadersQuery assembles the response to a header query. It is
|
||||||
|
// exposed to allow external packages to test protocol behavior.
|
||||||
|
func ServiceGetBlockHeadersQuery(chain *core.BlockChain, query *GetBlockHeadersPacket, peer *Peer) []*types.Header {
|
||||||
hashMode := query.Origin.Hash != (common.Hash{})
|
hashMode := query.Origin.Hash != (common.Hash{})
|
||||||
first := true
|
first := true
|
||||||
maxNonCanonical := uint64(100)
|
maxNonCanonical := uint64(100)
|
||||||
@ -58,15 +61,15 @@ func answerGetBlockHeadersQuery(backend Backend, query *GetBlockHeadersPacket, p
|
|||||||
if hashMode {
|
if hashMode {
|
||||||
if first {
|
if first {
|
||||||
first = false
|
first = false
|
||||||
origin = backend.Chain().GetHeaderByHash(query.Origin.Hash)
|
origin = chain.GetHeaderByHash(query.Origin.Hash)
|
||||||
if origin != nil {
|
if origin != nil {
|
||||||
query.Origin.Number = origin.Number.Uint64()
|
query.Origin.Number = origin.Number.Uint64()
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
origin = backend.Chain().GetHeader(query.Origin.Hash, query.Origin.Number)
|
origin = chain.GetHeader(query.Origin.Hash, query.Origin.Number)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
origin = backend.Chain().GetHeaderByNumber(query.Origin.Number)
|
origin = chain.GetHeaderByNumber(query.Origin.Number)
|
||||||
}
|
}
|
||||||
if origin == nil {
|
if origin == nil {
|
||||||
break
|
break
|
||||||
@ -82,7 +85,7 @@ func answerGetBlockHeadersQuery(backend Backend, query *GetBlockHeadersPacket, p
|
|||||||
if ancestor == 0 {
|
if ancestor == 0 {
|
||||||
unknown = true
|
unknown = true
|
||||||
} else {
|
} else {
|
||||||
query.Origin.Hash, query.Origin.Number = backend.Chain().GetAncestor(query.Origin.Hash, query.Origin.Number, ancestor, &maxNonCanonical)
|
query.Origin.Hash, query.Origin.Number = chain.GetAncestor(query.Origin.Hash, query.Origin.Number, ancestor, &maxNonCanonical)
|
||||||
unknown = (query.Origin.Hash == common.Hash{})
|
unknown = (query.Origin.Hash == common.Hash{})
|
||||||
}
|
}
|
||||||
case hashMode && !query.Reverse:
|
case hashMode && !query.Reverse:
|
||||||
@ -96,9 +99,9 @@ func answerGetBlockHeadersQuery(backend Backend, query *GetBlockHeadersPacket, p
|
|||||||
peer.Log().Warn("GetBlockHeaders skip overflow attack", "current", current, "skip", query.Skip, "next", next, "attacker", infos)
|
peer.Log().Warn("GetBlockHeaders skip overflow attack", "current", current, "skip", query.Skip, "next", next, "attacker", infos)
|
||||||
unknown = true
|
unknown = true
|
||||||
} else {
|
} else {
|
||||||
if header := backend.Chain().GetHeaderByNumber(next); header != nil {
|
if header := chain.GetHeaderByNumber(next); header != nil {
|
||||||
nextHash := header.Hash()
|
nextHash := header.Hash()
|
||||||
expOldHash, _ := backend.Chain().GetAncestor(nextHash, next, query.Skip+1, &maxNonCanonical)
|
expOldHash, _ := chain.GetAncestor(nextHash, next, query.Skip+1, &maxNonCanonical)
|
||||||
if expOldHash == query.Origin.Hash {
|
if expOldHash == query.Origin.Hash {
|
||||||
query.Origin.Hash, query.Origin.Number = nextHash, next
|
query.Origin.Hash, query.Origin.Number = nextHash, next
|
||||||
} else {
|
} else {
|
||||||
@ -130,11 +133,13 @@ func handleGetBlockBodies66(backend Backend, msg Decoder, peer *Peer) error {
|
|||||||
if err := msg.Decode(&query); err != nil {
|
if err := msg.Decode(&query); err != nil {
|
||||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||||
}
|
}
|
||||||
response := answerGetBlockBodiesQuery(backend, query.GetBlockBodiesPacket, peer)
|
response := ServiceGetBlockBodiesQuery(backend.Chain(), query.GetBlockBodiesPacket)
|
||||||
return peer.ReplyBlockBodiesRLP(query.RequestId, response)
|
return peer.ReplyBlockBodiesRLP(query.RequestId, response)
|
||||||
}
|
}
|
||||||
|
|
||||||
func answerGetBlockBodiesQuery(backend Backend, query GetBlockBodiesPacket, peer *Peer) []rlp.RawValue {
|
// ServiceGetBlockBodiesQuery assembles the response to a body query. It is
|
||||||
|
// exposed to allow external packages to test protocol behavior.
|
||||||
|
func ServiceGetBlockBodiesQuery(chain *core.BlockChain, query GetBlockBodiesPacket) []rlp.RawValue {
|
||||||
// Gather blocks until the fetch or network limits is reached
|
// Gather blocks until the fetch or network limits is reached
|
||||||
var (
|
var (
|
||||||
bytes int
|
bytes int
|
||||||
@ -145,7 +150,7 @@ func answerGetBlockBodiesQuery(backend Backend, query GetBlockBodiesPacket, peer
|
|||||||
lookups >= 2*maxBodiesServe {
|
lookups >= 2*maxBodiesServe {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
if data := backend.Chain().GetBodyRLP(hash); len(data) != 0 {
|
if data := chain.GetBodyRLP(hash); len(data) != 0 {
|
||||||
bodies = append(bodies, data)
|
bodies = append(bodies, data)
|
||||||
bytes += len(data)
|
bytes += len(data)
|
||||||
}
|
}
|
||||||
@ -159,11 +164,13 @@ func handleGetNodeData66(backend Backend, msg Decoder, peer *Peer) error {
|
|||||||
if err := msg.Decode(&query); err != nil {
|
if err := msg.Decode(&query); err != nil {
|
||||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||||
}
|
}
|
||||||
response := answerGetNodeDataQuery(backend, query.GetNodeDataPacket, peer)
|
response := ServiceGetNodeDataQuery(backend.Chain(), backend.StateBloom(), query.GetNodeDataPacket)
|
||||||
return peer.ReplyNodeData(query.RequestId, response)
|
return peer.ReplyNodeData(query.RequestId, response)
|
||||||
}
|
}
|
||||||
|
|
||||||
func answerGetNodeDataQuery(backend Backend, query GetNodeDataPacket, peer *Peer) [][]byte {
|
// ServiceGetNodeDataQuery assembles the response to a node data query. It is
|
||||||
|
// exposed to allow external packages to test protocol behavior.
|
||||||
|
func ServiceGetNodeDataQuery(chain *core.BlockChain, bloom *trie.SyncBloom, query GetNodeDataPacket) [][]byte {
|
||||||
// Gather state data until the fetch or network limits is reached
|
// Gather state data until the fetch or network limits is reached
|
||||||
var (
|
var (
|
||||||
bytes int
|
bytes int
|
||||||
@ -175,14 +182,14 @@ func answerGetNodeDataQuery(backend Backend, query GetNodeDataPacket, peer *Peer
|
|||||||
break
|
break
|
||||||
}
|
}
|
||||||
// Retrieve the requested state entry
|
// Retrieve the requested state entry
|
||||||
if bloom := backend.StateBloom(); bloom != nil && !bloom.Contains(hash[:]) {
|
if bloom != nil && !bloom.Contains(hash[:]) {
|
||||||
// Only lookup the trie node if there's chance that we actually have it
|
// Only lookup the trie node if there's chance that we actually have it
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
entry, err := backend.Chain().TrieNode(hash)
|
entry, err := chain.TrieNode(hash)
|
||||||
if len(entry) == 0 || err != nil {
|
if len(entry) == 0 || err != nil {
|
||||||
// Read the contract code with prefix only to save unnecessary lookups.
|
// Read the contract code with prefix only to save unnecessary lookups.
|
||||||
entry, err = backend.Chain().ContractCodeWithPrefix(hash)
|
entry, err = chain.ContractCodeWithPrefix(hash)
|
||||||
}
|
}
|
||||||
if err == nil && len(entry) > 0 {
|
if err == nil && len(entry) > 0 {
|
||||||
nodes = append(nodes, entry)
|
nodes = append(nodes, entry)
|
||||||
@ -198,11 +205,13 @@ func handleGetReceipts66(backend Backend, msg Decoder, peer *Peer) error {
|
|||||||
if err := msg.Decode(&query); err != nil {
|
if err := msg.Decode(&query); err != nil {
|
||||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||||
}
|
}
|
||||||
response := answerGetReceiptsQuery(backend, query.GetReceiptsPacket, peer)
|
response := ServiceGetReceiptsQuery(backend.Chain(), query.GetReceiptsPacket)
|
||||||
return peer.ReplyReceiptsRLP(query.RequestId, response)
|
return peer.ReplyReceiptsRLP(query.RequestId, response)
|
||||||
}
|
}
|
||||||
|
|
||||||
func answerGetReceiptsQuery(backend Backend, query GetReceiptsPacket, peer *Peer) []rlp.RawValue {
|
// ServiceGetReceiptsQuery assembles the response to a receipt query. It is
|
||||||
|
// exposed to allow external packages to test protocol behavior.
|
||||||
|
func ServiceGetReceiptsQuery(chain *core.BlockChain, query GetReceiptsPacket) []rlp.RawValue {
|
||||||
// Gather state data until the fetch or network limits is reached
|
// Gather state data until the fetch or network limits is reached
|
||||||
var (
|
var (
|
||||||
bytes int
|
bytes int
|
||||||
@ -214,9 +223,9 @@ func answerGetReceiptsQuery(backend Backend, query GetReceiptsPacket, peer *Peer
|
|||||||
break
|
break
|
||||||
}
|
}
|
||||||
// Retrieve the requested block's receipts
|
// Retrieve the requested block's receipts
|
||||||
results := backend.Chain().GetReceiptsByHash(hash)
|
results := chain.GetReceiptsByHash(hash)
|
||||||
if results == nil {
|
if results == nil {
|
||||||
if header := backend.Chain().GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash {
|
if header := chain.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
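
The answer* helpers are renamed to exported Service* functions that take a *core.BlockChain directly, so other packages can exercise the query logic without a live peer connection. A hedged example of what that enables in a test (headersByNumber is a hypothetical wrapper, not part of the diff):

func headersByNumber(chain *core.BlockChain, peer *eth.Peer, from, count uint64) []*types.Header {
	return eth.ServiceGetBlockHeadersQuery(chain, &eth.GetBlockHeadersPacket{
		Origin:  eth.HashOrNumber{Number: from},
		Amount:  count,
		Reverse: false,
	}, peer) // the peer argument is only consulted for logging malformed queries
}
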
@ -277,9 +286,11 @@ func handleBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error {
|
|||||||
if err := msg.Decode(res); err != nil {
|
if err := msg.Decode(res); err != nil {
|
||||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||||
}
|
}
|
||||||
requestTracker.Fulfil(peer.id, peer.version, BlockHeadersMsg, res.RequestId)
|
return peer.dispatchResponse(&Response{
|
||||||
|
id: res.RequestId,
|
||||||
return backend.Handle(peer, &res.BlockHeadersPacket)
|
code: BlockHeadersMsg,
|
||||||
|
Res: &res.BlockHeadersPacket,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func handleBlockBodies66(backend Backend, msg Decoder, peer *Peer) error {
|
func handleBlockBodies66(backend Backend, msg Decoder, peer *Peer) error {
|
||||||
@ -288,9 +299,11 @@ func handleBlockBodies66(backend Backend, msg Decoder, peer *Peer) error {
|
|||||||
if err := msg.Decode(res); err != nil {
|
if err := msg.Decode(res); err != nil {
|
||||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||||
}
|
}
|
||||||
requestTracker.Fulfil(peer.id, peer.version, BlockBodiesMsg, res.RequestId)
|
return peer.dispatchResponse(&Response{
|
||||||
|
id: res.RequestId,
|
||||||
return backend.Handle(peer, &res.BlockBodiesPacket)
|
code: BlockBodiesMsg,
|
||||||
|
Res: &res.BlockBodiesPacket,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func handleNodeData66(backend Backend, msg Decoder, peer *Peer) error {
|
func handleNodeData66(backend Backend, msg Decoder, peer *Peer) error {
|
||||||
@ -299,9 +312,11 @@ func handleNodeData66(backend Backend, msg Decoder, peer *Peer) error {
|
|||||||
if err := msg.Decode(res); err != nil {
|
if err := msg.Decode(res); err != nil {
|
||||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||||
}
|
}
|
||||||
requestTracker.Fulfil(peer.id, peer.version, NodeDataMsg, res.RequestId)
|
return peer.dispatchResponse(&Response{
|
||||||
|
id: res.RequestId,
|
||||||
return backend.Handle(peer, &res.NodeDataPacket)
|
code: NodeDataMsg,
|
||||||
|
Res: &res.NodeDataPacket,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func handleReceipts66(backend Backend, msg Decoder, peer *Peer) error {
|
func handleReceipts66(backend Backend, msg Decoder, peer *Peer) error {
|
||||||
@ -310,9 +325,11 @@ func handleReceipts66(backend Backend, msg Decoder, peer *Peer) error {
    if err := msg.Decode(res); err != nil {
        return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
    }
-   requestTracker.Fulfil(peer.id, peer.version, ReceiptsMsg, res.RequestId)
-   return backend.Handle(peer, &res.ReceiptsPacket)
+   return peer.dispatchResponse(&Response{
+       id:   res.RequestId,
+       code: ReceiptsMsg,
+       Res:  &res.ReceiptsPacket,
+   })
}

func handleNewPooledTransactionHashes(backend Backend, msg Decoder, peer *Peer) error {
@ -84,6 +84,10 @@ type Peer struct {
    txBroadcast chan []common.Hash // Channel used to queue transaction propagation requests
    txAnnounce  chan []common.Hash // Channel used to queue transaction announcement requests

+   reqDispatch chan *request  // Dispatch channel to send requests and track them until fulfilment
+   reqCancel   chan *cancel   // Dispatch channel to cancel pending requests and untrack them
+   resDispatch chan *response // Dispatch channel to fulfil pending requests and untrack them
+
    term chan struct{} // Termination channel to stop the broadcasters
    lock sync.RWMutex  // Mutex protecting the internal fields
}
@ -102,6 +106,9 @@ func NewPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter, txpool TxPool) *Pe
        queuedBlockAnns: make(chan *types.Block, maxQueuedBlockAnns),
        txBroadcast:     make(chan []common.Hash),
        txAnnounce:      make(chan []common.Hash),
+       reqDispatch:     make(chan *request),
+       reqCancel:       make(chan *cancel),
+       resDispatch:     make(chan *response),
        txpool:          txpool,
        term:            make(chan struct{}),
    }
@ -109,6 +116,7 @@ func NewPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter, txpool TxPool) *Pe
    go peer.broadcastBlocks()
    go peer.broadcastTransactions()
    go peer.announceTransactions()
+   go peer.dispatcher()

    return peer
}
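The three channels added above feed a single dispatcher goroutine that owns the table of in-flight requests and routes each reply to the caller that issued the matching request id. The sketch below is a minimal, self-contained illustration of that pattern, not the go-ethereum implementation; the request, response and cancel types and their field names here are stand-ins.

package main

import "fmt"

type request struct {
    id   uint64
    want uint64         // response code the request expects
    sink chan *response // where the matched response is delivered
}

type response struct {
    id   uint64
    code uint64
    data interface{}
}

type cancel struct{ id uint64 }

// dispatcher tracks pending requests and routes responses to their sinks by id.
func dispatcher(reqCh chan *request, resCh chan *response, cancelCh chan *cancel, term chan struct{}) {
    pending := make(map[uint64]*request)
    for {
        select {
        case req := <-reqCh:
            pending[req.id] = req
        case c := <-cancelCh:
            delete(pending, c.id)
        case res := <-resCh:
            if req, ok := pending[res.id]; ok && req.want == res.code {
                delete(pending, res.id)
                req.sink <- res
            } // unknown or mismatched replies would simply be dropped (or penalized)
        case <-term:
            return
        }
    }
}

func main() {
    reqCh, resCh := make(chan *request), make(chan *response)
    cancelCh, term := make(chan *cancel), make(chan struct{})
    go dispatcher(reqCh, resCh, cancelCh, term)

    sink := make(chan *response, 1)
    reqCh <- &request{id: 42, want: 0x04, sink: sink}
    resCh <- &response{id: 42, code: 0x04, data: "headers"}
    fmt.Println((<-sink).data) // "headers"
    close(term)
}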
@ -323,12 +331,16 @@ func (p *Peer) ReplyReceiptsRLP(id uint64, receipts []rlp.RawValue) error {

// RequestOneHeader is a wrapper around the header query functions to fetch a
// single header. It is used solely by the fetcher.
-func (p *Peer) RequestOneHeader(hash common.Hash) error {
+func (p *Peer) RequestOneHeader(hash common.Hash, sink chan *Response) (*Request, error) {
    p.Log().Debug("Fetching single header", "hash", hash)
    id := rand.Uint64()

-   requestTracker.Track(p.id, p.version, GetBlockHeadersMsg, BlockHeadersMsg, id)
-   return p2p.Send(p.rw, GetBlockHeadersMsg, &GetBlockHeadersPacket66{
+   req := &Request{
+       id:   id,
+       sink: sink,
+       code: GetBlockHeadersMsg,
+       want: BlockHeadersMsg,
+       data: &GetBlockHeadersPacket66{
        RequestId: id,
        GetBlockHeadersPacket: &GetBlockHeadersPacket{
            Origin: HashOrNumber{Hash: hash},
@ -336,17 +348,26 @@ func (p *Peer) RequestOneHeader(hash common.Hash) error {
            Skip:    uint64(0),
            Reverse: false,
        },
-   })
+       },
+   }
+   if err := p.dispatchRequest(req); err != nil {
+       return nil, err
+   }
+   return req, nil
}
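With the new signature the caller supplies its own delivery channel and gets back a handle for the in-flight request instead of blocking on p2p.Send alone. The snippet below sketches that calling convention in isolation; the Header-less Response and Request types, the requestOneHeader helper and the Close method are stand-ins for the real eth package API, not part of this diff.

package main

import (
    "errors"
    "fmt"
    "time"
)

type Response struct{ Res interface{} }

type Request struct{ cancel chan struct{} }

// Close cancels the pending request (mirrors the reqCancel path above).
func (r *Request) Close() { close(r.cancel) }

// requestOneHeader stands in for Peer.RequestOneHeader: it fires the query and
// returns immediately; the reply shows up later on the caller's sink channel.
func requestOneHeader(hash string, sink chan *Response) (*Request, error) {
    req := &Request{cancel: make(chan struct{})}
    go func() {
        select {
        case <-time.After(10 * time.Millisecond): // pretend network round trip
            sink <- &Response{Res: "header for " + hash}
        case <-req.cancel:
        }
    }()
    return req, nil
}

func fetchHeader(hash string) (interface{}, error) {
    sink := make(chan *Response, 1)
    req, err := requestOneHeader(hash, sink)
    if err != nil {
        return nil, err
    }
    defer req.Close() // always untrack the request when done

    select {
    case res := <-sink:
        return res.Res, nil
    case <-time.After(time.Second):
        return nil, errors.New("timeout")
    }
}

func main() {
    h, err := fetchHeader("0xabc")
    fmt.Println(h, err)
}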

// RequestHeadersByHash fetches a batch of blocks' headers corresponding to the
// specified header query, based on the hash of an origin block.
-func (p *Peer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
+func (p *Peer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool, sink chan *Response) (*Request, error) {
    p.Log().Debug("Fetching batch of headers", "count", amount, "fromhash", origin, "skip", skip, "reverse", reverse)
    id := rand.Uint64()

-   requestTracker.Track(p.id, p.version, GetBlockHeadersMsg, BlockHeadersMsg, id)
-   return p2p.Send(p.rw, GetBlockHeadersMsg, &GetBlockHeadersPacket66{
+   req := &Request{
+       id:   id,
+       sink: sink,
+       code: GetBlockHeadersMsg,
+       want: BlockHeadersMsg,
+       data: &GetBlockHeadersPacket66{
        RequestId: id,
        GetBlockHeadersPacket: &GetBlockHeadersPacket{
            Origin: HashOrNumber{Hash: origin},
@ -354,17 +375,26 @@ func (p *Peer) RequestHeadersByHash(origin common.Hash, amount int, skip int, re
            Skip:    uint64(skip),
            Reverse: reverse,
        },
-   })
+       },
+   }
+   if err := p.dispatchRequest(req); err != nil {
+       return nil, err
+   }
+   return req, nil
}

// RequestHeadersByNumber fetches a batch of blocks' headers corresponding to the
// specified header query, based on the number of an origin block.
-func (p *Peer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
+func (p *Peer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *Response) (*Request, error) {
    p.Log().Debug("Fetching batch of headers", "count", amount, "fromnum", origin, "skip", skip, "reverse", reverse)
    id := rand.Uint64()

-   requestTracker.Track(p.id, p.version, GetBlockHeadersMsg, BlockHeadersMsg, id)
-   return p2p.Send(p.rw, GetBlockHeadersMsg, &GetBlockHeadersPacket66{
+   req := &Request{
+       id:   id,
+       sink: sink,
+       code: GetBlockHeadersMsg,
+       want: BlockHeadersMsg,
+       data: &GetBlockHeadersPacket66{
        RequestId: id,
        GetBlockHeadersPacket: &GetBlockHeadersPacket{
            Origin: HashOrNumber{Number: origin},
@ -372,45 +402,77 @@ func (p *Peer) RequestHeadersByNumber(origin uint64, amount int, skip int, rever
            Skip:    uint64(skip),
            Reverse: reverse,
        },
-   })
+       },
+   }
+   if err := p.dispatchRequest(req); err != nil {
+       return nil, err
+   }
+   return req, nil
}

// RequestBodies fetches a batch of blocks' bodies corresponding to the hashes
// specified.
-func (p *Peer) RequestBodies(hashes []common.Hash) error {
+func (p *Peer) RequestBodies(hashes []common.Hash, sink chan *Response) (*Request, error) {
    p.Log().Debug("Fetching batch of block bodies", "count", len(hashes))
    id := rand.Uint64()

-   requestTracker.Track(p.id, p.version, GetBlockBodiesMsg, BlockBodiesMsg, id)
-   return p2p.Send(p.rw, GetBlockBodiesMsg, &GetBlockBodiesPacket66{
+   req := &Request{
+       id:   id,
+       sink: sink,
+       code: GetBlockBodiesMsg,
+       want: BlockBodiesMsg,
+       data: &GetBlockBodiesPacket66{
        RequestId:            id,
        GetBlockBodiesPacket: hashes,
-   })
+       },
+   }
+   if err := p.dispatchRequest(req); err != nil {
+       return nil, err
+   }
+   return req, nil
}
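Because every eth/66 packet now carries an explicit request id, a caller can keep several requests to the same peer in flight and funnel the replies through a single sink channel, matching each reply back to its request by id. A self-contained toy of that demultiplexing step; the Response type and the ids used here are stand-ins, not the real eth package types.

package main

import "fmt"

type Response struct {
    ID  uint64      // request id echoed back by the remote peer
    Res interface{} // decoded payload
}

func main() {
    sink := make(chan *Response, 4)

    // Pretend two requests (ids 1 and 2) were dispatched and the replies
    // arrive out of order on the shared sink.
    sink <- &Response{ID: 2, Res: "receipts for batch #2"}
    sink <- &Response{ID: 1, Res: "bodies for batch #1"}

    pending := map[uint64]string{1: "GetBlockBodies", 2: "GetReceipts"}
    for len(pending) > 0 {
        res := <-sink
        fmt.Printf("%s (id %d) fulfilled with %v\n", pending[res.ID], res.ID, res.Res)
        delete(pending, res.ID)
    }
}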

// RequestNodeData fetches a batch of arbitrary data from a node's known state
// data, corresponding to the specified hashes.
-func (p *Peer) RequestNodeData(hashes []common.Hash) error {
+func (p *Peer) RequestNodeData(hashes []common.Hash, sink chan *Response) (*Request, error) {
    p.Log().Debug("Fetching batch of state data", "count", len(hashes))
    id := rand.Uint64()

-   requestTracker.Track(p.id, p.version, GetNodeDataMsg, NodeDataMsg, id)
-   return p2p.Send(p.rw, GetNodeDataMsg, &GetNodeDataPacket66{
+   req := &Request{
+       id:   id,
+       sink: sink,
+       code: GetNodeDataMsg,
+       want: NodeDataMsg,
+       data: &GetNodeDataPacket66{
        RequestId:         id,
        GetNodeDataPacket: hashes,
-   })
+       },
+   }
+   if err := p.dispatchRequest(req); err != nil {
+       return nil, err
+   }
+   return req, nil
}

// RequestReceipts fetches a batch of transaction receipts from a remote node.
-func (p *Peer) RequestReceipts(hashes []common.Hash) error {
+func (p *Peer) RequestReceipts(hashes []common.Hash, sink chan *Response) (*Request, error) {
    p.Log().Debug("Fetching batch of receipts", "count", len(hashes))
    id := rand.Uint64()

-   requestTracker.Track(p.id, p.version, GetReceiptsMsg, ReceiptsMsg, id)
-   return p2p.Send(p.rw, GetReceiptsMsg, &GetReceiptsPacket66{
+   req := &Request{
+       id:   id,
+       sink: sink,
+       code: GetReceiptsMsg,
+       want: ReceiptsMsg,
+       data: &GetReceiptsPacket66{
        RequestId:         id,
        GetReceiptsPacket: hashes,
-   })
+       },
+   }
+   if err := p.dispatchRequest(req); err != nil {
+       return nil, err
+   }
+   return req, nil
}

// RequestTxs fetches a batch of transactions from a remote node.
@ -99,8 +99,8 @@ func MakeProtocols(backend Backend, dnsdisc enode.Iterator) []p2p.Protocol {
            Version: version,
            Length:  protocolLengths[version],
            Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
-               return backend.RunPeer(newPeer(version, p, rw), func(peer *Peer) error {
-                   return handle(backend, peer)
+               return backend.RunPeer(NewPeer(version, p, rw), func(peer *Peer) error {
+                   return Handle(backend, peer)
                })
            },
            NodeInfo: func() interface{} {
@ -116,9 +116,9 @@ func MakeProtocols(backend Backend, dnsdisc enode.Iterator) []p2p.Protocol {
    return protocols
}

-// handle is the callback invoked to manage the life cycle of a `snap` peer.
+// Handle is the callback invoked to manage the life cycle of a `snap` peer.
// When this function terminates, the peer is disconnected.
-func handle(backend Backend, peer *Peer) error {
+func Handle(backend Backend, peer *Peer) error {
    for {
        if err := handleMessage(backend, peer); err != nil {
            peer.Log().Debug("Message handling failed in `snap`", "err", err)
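Exporting NewPeer and Handle (together with the SNAP1 constant further down) lets other packages run the `snap` message loop directly, which is what the handler tests later in this diff do over an in-memory pipe. A rough sketch of that wiring; the backend value is assumed to be a real snap.Backend implementation and is deliberately left undefined here.

package main

import (
    "github.com/ethereum/go-ethereum/eth/protocols/snap"
    "github.com/ethereum/go-ethereum/p2p"
    "github.com/ethereum/go-ethereum/p2p/enode"
)

func main() {
    // Two ends of an in-memory message pipe, one per simulated node.
    localPipe, remotePipe := p2p.MsgPipe()
    defer localPipe.Close()
    defer remotePipe.Close()

    caps := []p2p.Cap{{Name: "snap", Version: snap.SNAP1}}
    local := snap.NewPeer(snap.SNAP1, p2p.NewPeer(enode.ID{1}, "", caps), localPipe)
    remote := snap.NewPeer(snap.SNAP1, p2p.NewPeer(enode.ID{2}, "", caps), remotePipe)

    var backend snap.Backend // assumption: a real implementation goes here

    // Each side now services the other until one of them disconnects.
    go snap.Handle(backend, local)
    go snap.Handle(backend, remote)
}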
@ -161,17 +161,137 @@ func handleMessage(backend Backend, peer *Peer) error {
|
|||||||
if err := msg.Decode(&req); err != nil {
|
if err := msg.Decode(&req); err != nil {
|
||||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||||
}
|
}
|
||||||
|
// Service the request, potentially returning nothing in case of errors
|
||||||
|
accounts, proofs := ServiceGetAccountRangeQuery(backend.Chain(), &req)
|
||||||
|
|
||||||
|
// Send back anything accumulated (or empty in case of errors)
|
||||||
|
return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{
|
||||||
|
ID: req.ID,
|
||||||
|
Accounts: accounts,
|
||||||
|
Proof: proofs,
|
||||||
|
})
|
||||||
|
|
||||||
|
case msg.Code == AccountRangeMsg:
|
||||||
|
// A range of accounts arrived to one of our previous requests
|
||||||
|
res := new(AccountRangePacket)
|
||||||
|
if err := msg.Decode(res); err != nil {
|
||||||
|
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||||
|
}
|
||||||
|
// Ensure the range is monotonically increasing
|
||||||
|
for i := 1; i < len(res.Accounts); i++ {
|
||||||
|
if bytes.Compare(res.Accounts[i-1].Hash[:], res.Accounts[i].Hash[:]) >= 0 {
|
||||||
|
return fmt.Errorf("accounts not monotonically increasing: #%d [%x] vs #%d [%x]", i-1, res.Accounts[i-1].Hash[:], i, res.Accounts[i].Hash[:])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
requestTracker.Fulfil(peer.id, peer.version, AccountRangeMsg, res.ID)
|
||||||
|
|
||||||
|
return backend.Handle(peer, res)
|
||||||
|
|
||||||
|
case msg.Code == GetStorageRangesMsg:
|
||||||
|
// Decode the storage retrieval request
|
||||||
|
var req GetStorageRangesPacket
|
||||||
|
if err := msg.Decode(&req); err != nil {
|
||||||
|
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||||
|
}
|
||||||
|
// Service the request, potentially returning nothing in case of errors
|
||||||
|
slots, proofs := ServiceGetStorageRangesQuery(backend.Chain(), &req)
|
||||||
|
|
||||||
|
// Send back anything accumulated (or empty in case of errors)
|
||||||
|
return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{
|
||||||
|
ID: req.ID,
|
||||||
|
Slots: slots,
|
||||||
|
Proof: proofs,
|
||||||
|
})
|
||||||
|
|
||||||
|
case msg.Code == StorageRangesMsg:
|
||||||
|
// A range of storage slots arrived to one of our previous requests
|
||||||
|
res := new(StorageRangesPacket)
|
||||||
|
if err := msg.Decode(res); err != nil {
|
||||||
|
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||||
|
}
|
||||||
|
// Ensure the ranges are monotonically increasing
|
||||||
|
for i, slots := range res.Slots {
|
||||||
|
for j := 1; j < len(slots); j++ {
|
||||||
|
if bytes.Compare(slots[j-1].Hash[:], slots[j].Hash[:]) >= 0 {
|
||||||
|
return fmt.Errorf("storage slots not monotonically increasing for account #%d: #%d [%x] vs #%d [%x]", i, j-1, slots[j-1].Hash[:], j, slots[j].Hash[:])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
requestTracker.Fulfil(peer.id, peer.version, StorageRangesMsg, res.ID)
|
||||||
|
|
||||||
|
return backend.Handle(peer, res)
|
||||||
|
|
||||||
|
case msg.Code == GetByteCodesMsg:
|
||||||
|
// Decode bytecode retrieval request
|
||||||
|
var req GetByteCodesPacket
|
||||||
|
if err := msg.Decode(&req); err != nil {
|
||||||
|
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||||
|
}
|
||||||
|
// Service the request, potentially returning nothing in case of errors
|
||||||
|
codes := ServiceGetByteCodesQuery(backend.Chain(), &req)
|
||||||
|
|
||||||
|
// Send back anything accumulated (or empty in case of errors)
|
||||||
|
return p2p.Send(peer.rw, ByteCodesMsg, &ByteCodesPacket{
|
||||||
|
ID: req.ID,
|
||||||
|
Codes: codes,
|
||||||
|
})
|
||||||
|
|
||||||
|
case msg.Code == ByteCodesMsg:
|
||||||
|
// A batch of byte codes arrived to one of our previous requests
|
||||||
|
res := new(ByteCodesPacket)
|
||||||
|
if err := msg.Decode(res); err != nil {
|
||||||
|
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||||
|
}
|
||||||
|
requestTracker.Fulfil(peer.id, peer.version, ByteCodesMsg, res.ID)
|
||||||
|
|
||||||
|
return backend.Handle(peer, res)
|
||||||
|
|
||||||
|
case msg.Code == GetTrieNodesMsg:
|
||||||
|
// Decode trie node retrieval request
|
||||||
|
var req GetTrieNodesPacket
|
||||||
|
if err := msg.Decode(&req); err != nil {
|
||||||
|
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||||
|
}
|
||||||
|
// Service the request, potentially returning nothing in case of errors
|
||||||
|
nodes, err := ServiceGetTrieNodesQuery(backend.Chain(), &req, start)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// Send back anything accumulated (or empty in case of errors)
|
||||||
|
return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{
|
||||||
|
ID: req.ID,
|
||||||
|
Nodes: nodes,
|
||||||
|
})
|
||||||
|
|
||||||
|
case msg.Code == TrieNodesMsg:
|
||||||
|
// A batch of trie nodes arrived to one of our previous requests
|
||||||
|
res := new(TrieNodesPacket)
|
||||||
|
if err := msg.Decode(res); err != nil {
|
||||||
|
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
||||||
|
}
|
||||||
|
requestTracker.Fulfil(peer.id, peer.version, TrieNodesMsg, res.ID)
|
||||||
|
|
||||||
|
return backend.Handle(peer, res)
|
||||||
|
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("%w: %v", errInvalidMsgCode, msg.Code)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServiceGetAccountRangeQuery assembles the response to an account range query.
|
||||||
|
// It is exposed to allow external packages to test protocol behavior.
|
||||||
|
func ServiceGetAccountRangeQuery(chain *core.BlockChain, req *GetAccountRangePacket) ([]*AccountData, [][]byte) {
|
||||||
if req.Bytes > softResponseLimit {
|
if req.Bytes > softResponseLimit {
|
||||||
req.Bytes = softResponseLimit
|
req.Bytes = softResponseLimit
|
||||||
}
|
}
|
||||||
// Retrieve the requested state and bail out if non existent
|
// Retrieve the requested state and bail out if non existent
|
||||||
tr, err := trie.New(req.Root, backend.Chain().StateCache().TrieDB())
|
tr, err := trie.New(req.Root, chain.StateCache().TrieDB())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
|
return nil, nil
|
||||||
}
|
}
|
||||||
it, err := backend.Chain().Snapshots().AccountIterator(req.Root, req.Origin)
|
it, err := chain.Snapshots().AccountIterator(req.Root, req.Origin)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
|
return nil, nil
|
||||||
}
|
}
|
||||||
// Iterate over the requested range and pile accounts up
|
// Iterate over the requested range and pile accounts up
|
||||||
var (
|
var (
|
||||||
@ -202,47 +322,22 @@ func handleMessage(backend Backend, peer *Peer) error {
|
|||||||
proof := light.NewNodeSet()
|
proof := light.NewNodeSet()
|
||||||
if err := tr.Prove(req.Origin[:], 0, proof); err != nil {
|
if err := tr.Prove(req.Origin[:], 0, proof); err != nil {
|
||||||
log.Warn("Failed to prove account range", "origin", req.Origin, "err", err)
|
log.Warn("Failed to prove account range", "origin", req.Origin, "err", err)
|
||||||
return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
|
return nil, nil
|
||||||
}
|
}
|
||||||
if last != (common.Hash{}) {
|
if last != (common.Hash{}) {
|
||||||
if err := tr.Prove(last[:], 0, proof); err != nil {
|
if err := tr.Prove(last[:], 0, proof); err != nil {
|
||||||
log.Warn("Failed to prove account range", "last", last, "err", err)
|
log.Warn("Failed to prove account range", "last", last, "err", err)
|
||||||
return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
|
return nil, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
var proofs [][]byte
|
var proofs [][]byte
|
||||||
for _, blob := range proof.NodeList() {
|
for _, blob := range proof.NodeList() {
|
||||||
proofs = append(proofs, blob)
|
proofs = append(proofs, blob)
|
||||||
}
|
}
|
||||||
// Send back anything accumulated
|
return accounts, proofs
|
||||||
return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{
|
}
|
||||||
ID: req.ID,
|
|
||||||
Accounts: accounts,
|
|
||||||
Proof: proofs,
|
|
||||||
})
|
|
||||||
|
|
||||||
case msg.Code == AccountRangeMsg:
|
func ServiceGetStorageRangesQuery(chain *core.BlockChain, req *GetStorageRangesPacket) ([][]*StorageData, [][]byte) {
|
||||||
// A range of accounts arrived to one of our previous requests
|
|
||||||
res := new(AccountRangePacket)
|
|
||||||
if err := msg.Decode(res); err != nil {
|
|
||||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
|
||||||
}
|
|
||||||
// Ensure the range is monotonically increasing
|
|
||||||
for i := 1; i < len(res.Accounts); i++ {
|
|
||||||
if bytes.Compare(res.Accounts[i-1].Hash[:], res.Accounts[i].Hash[:]) >= 0 {
|
|
||||||
return fmt.Errorf("accounts not monotonically increasing: #%d [%x] vs #%d [%x]", i-1, res.Accounts[i-1].Hash[:], i, res.Accounts[i].Hash[:])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
requestTracker.Fulfil(peer.id, peer.version, AccountRangeMsg, res.ID)
|
|
||||||
|
|
||||||
return backend.Handle(peer, res)
|
|
||||||
|
|
||||||
case msg.Code == GetStorageRangesMsg:
|
|
||||||
// Decode the storage retrieval request
|
|
||||||
var req GetStorageRangesPacket
|
|
||||||
if err := msg.Decode(&req); err != nil {
|
|
||||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
|
||||||
}
|
|
||||||
if req.Bytes > softResponseLimit {
|
if req.Bytes > softResponseLimit {
|
||||||
req.Bytes = softResponseLimit
|
req.Bytes = softResponseLimit
|
||||||
}
|
}
|
||||||
@ -275,9 +370,9 @@ func handleMessage(backend Backend, peer *Peer) error {
|
|||||||
limit, req.Limit = common.BytesToHash(req.Limit), nil
|
limit, req.Limit = common.BytesToHash(req.Limit), nil
|
||||||
}
|
}
|
||||||
// Retrieve the requested state and bail out if non existent
|
// Retrieve the requested state and bail out if non existent
|
||||||
it, err := backend.Chain().Snapshots().StorageIterator(req.Root, account, origin)
|
it, err := chain.Snapshots().StorageIterator(req.Root, account, origin)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
|
return nil, nil
|
||||||
}
|
}
|
||||||
// Iterate over the requested range and pile slots up
|
// Iterate over the requested range and pile slots up
|
||||||
var (
|
var (
|
||||||
@ -315,27 +410,27 @@ func handleMessage(backend Backend, peer *Peer) error {
|
|||||||
if origin != (common.Hash{}) || abort {
|
if origin != (common.Hash{}) || abort {
|
||||||
// Request started at a non-zero hash or was capped prematurely, add
|
// Request started at a non-zero hash or was capped prematurely, add
|
||||||
// the endpoint Merkle proofs
|
// the endpoint Merkle proofs
|
||||||
accTrie, err := trie.New(req.Root, backend.Chain().StateCache().TrieDB())
|
accTrie, err := trie.New(req.Root, chain.StateCache().TrieDB())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
|
return nil, nil
|
||||||
}
|
}
|
||||||
var acc types.StateAccount
|
var acc types.StateAccount
|
||||||
if err := rlp.DecodeBytes(accTrie.Get(account[:]), &acc); err != nil {
|
if err := rlp.DecodeBytes(accTrie.Get(account[:]), &acc); err != nil {
|
||||||
return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
|
return nil, nil
|
||||||
}
|
}
|
||||||
stTrie, err := trie.New(acc.Root, backend.Chain().StateCache().TrieDB())
|
stTrie, err := trie.New(acc.Root, chain.StateCache().TrieDB())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
|
return nil, nil
|
||||||
}
|
}
|
||||||
proof := light.NewNodeSet()
|
proof := light.NewNodeSet()
|
||||||
if err := stTrie.Prove(origin[:], 0, proof); err != nil {
|
if err := stTrie.Prove(origin[:], 0, proof); err != nil {
|
||||||
log.Warn("Failed to prove storage range", "origin", req.Origin, "err", err)
|
log.Warn("Failed to prove storage range", "origin", req.Origin, "err", err)
|
||||||
return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
|
return nil, nil
|
||||||
}
|
}
|
||||||
if last != (common.Hash{}) {
|
if last != (common.Hash{}) {
|
||||||
if err := stTrie.Prove(last[:], 0, proof); err != nil {
|
if err := stTrie.Prove(last[:], 0, proof); err != nil {
|
||||||
log.Warn("Failed to prove storage range", "last", last, "err", err)
|
log.Warn("Failed to prove storage range", "last", last, "err", err)
|
||||||
return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
|
return nil, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for _, blob := range proof.NodeList() {
|
for _, blob := range proof.NodeList() {
|
||||||
@ -347,37 +442,12 @@ func handleMessage(backend Backend, peer *Peer) error {
|
|||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Send back anything accumulated
|
return slots, proofs
|
||||||
return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{
|
}
|
||||||
ID: req.ID,
|
|
||||||
Slots: slots,
|
|
||||||
Proof: proofs,
|
|
||||||
})
|
|
||||||
|
|
||||||
case msg.Code == StorageRangesMsg:
|
// ServiceGetByteCodesQuery assembles the response to a byte codes query.
|
||||||
// A range of storage slots arrived to one of our previous requests
|
// It is exposed to allow external packages to test protocol behavior.
|
||||||
res := new(StorageRangesPacket)
|
func ServiceGetByteCodesQuery(chain *core.BlockChain, req *GetByteCodesPacket) [][]byte {
|
||||||
if err := msg.Decode(res); err != nil {
|
|
||||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
|
||||||
}
|
|
||||||
// Ensure the ranges are monotonically increasing
|
|
||||||
for i, slots := range res.Slots {
|
|
||||||
for j := 1; j < len(slots); j++ {
|
|
||||||
if bytes.Compare(slots[j-1].Hash[:], slots[j].Hash[:]) >= 0 {
|
|
||||||
return fmt.Errorf("storage slots not monotonically increasing for account #%d: #%d [%x] vs #%d [%x]", i, j-1, slots[j-1].Hash[:], j, slots[j].Hash[:])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
requestTracker.Fulfil(peer.id, peer.version, StorageRangesMsg, res.ID)
|
|
||||||
|
|
||||||
return backend.Handle(peer, res)
|
|
||||||
|
|
||||||
case msg.Code == GetByteCodesMsg:
|
|
||||||
// Decode bytecode retrieval request
|
|
||||||
var req GetByteCodesPacket
|
|
||||||
if err := msg.Decode(&req); err != nil {
|
|
||||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
|
||||||
}
|
|
||||||
if req.Bytes > softResponseLimit {
|
if req.Bytes > softResponseLimit {
|
||||||
req.Bytes = softResponseLimit
|
req.Bytes = softResponseLimit
|
||||||
}
|
}
|
||||||
@ -394,7 +464,7 @@ func handleMessage(backend Backend, peer *Peer) error {
|
|||||||
// Peers should not request the empty code, but if they do, at
|
// Peers should not request the empty code, but if they do, at
|
||||||
// least sent them back a correct response without db lookups
|
// least sent them back a correct response without db lookups
|
||||||
codes = append(codes, []byte{})
|
codes = append(codes, []byte{})
|
||||||
} else if blob, err := backend.Chain().ContractCode(hash); err == nil {
|
} else if blob, err := chain.ContractCode(hash); err == nil {
|
||||||
codes = append(codes, blob)
|
codes = append(codes, blob)
|
||||||
bytes += uint64(len(blob))
|
bytes += uint64(len(blob))
|
||||||
}
|
}
|
||||||
@ -402,46 +472,30 @@ func handleMessage(backend Backend, peer *Peer) error {
|
|||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Send back anything accumulated
|
return codes
|
||||||
return p2p.Send(peer.rw, ByteCodesMsg, &ByteCodesPacket{
|
|
||||||
ID: req.ID,
|
|
||||||
Codes: codes,
|
|
||||||
})
|
|
||||||
|
|
||||||
case msg.Code == ByteCodesMsg:
|
|
||||||
// A batch of byte codes arrived to one of our previous requests
|
|
||||||
res := new(ByteCodesPacket)
|
|
||||||
if err := msg.Decode(res); err != nil {
|
|
||||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
|
||||||
}
|
}
|
||||||
requestTracker.Fulfil(peer.id, peer.version, ByteCodesMsg, res.ID)
|
|
||||||
|
|
||||||
return backend.Handle(peer, res)
|
// ServiceGetTrieNodesQuery assembles the response to a trie nodes query.
|
||||||
|
// It is exposed to allow external packages to test protocol behavior.
|
||||||
case msg.Code == GetTrieNodesMsg:
|
func ServiceGetTrieNodesQuery(chain *core.BlockChain, req *GetTrieNodesPacket, start time.Time) ([][]byte, error) {
|
||||||
// Decode trie node retrieval request
|
|
||||||
var req GetTrieNodesPacket
|
|
||||||
if err := msg.Decode(&req); err != nil {
|
|
||||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
|
||||||
}
|
|
||||||
if req.Bytes > softResponseLimit {
|
if req.Bytes > softResponseLimit {
|
||||||
req.Bytes = softResponseLimit
|
req.Bytes = softResponseLimit
|
||||||
}
|
}
|
||||||
// Make sure we have the state associated with the request
|
// Make sure we have the state associated with the request
|
||||||
triedb := backend.Chain().StateCache().TrieDB()
|
triedb := chain.StateCache().TrieDB()
|
||||||
|
|
||||||
accTrie, err := trie.NewSecure(req.Root, triedb)
|
accTrie, err := trie.NewSecure(req.Root, triedb)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// We don't have the requested state available, bail out
|
// We don't have the requested state available, bail out
|
||||||
return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{ID: req.ID})
|
return nil, nil
|
||||||
}
|
}
|
||||||
snap := backend.Chain().Snapshots().Snapshot(req.Root)
|
snap := chain.Snapshots().Snapshot(req.Root)
|
||||||
if snap == nil {
|
if snap == nil {
|
||||||
// We don't have the requested state snapshotted yet, bail out.
|
// We don't have the requested state snapshotted yet, bail out.
|
||||||
// In reality we could still serve using the account and storage
|
// In reality we could still serve using the account and storage
|
||||||
// tries only, but let's protect the node a bit while it's doing
|
// tries only, but let's protect the node a bit while it's doing
|
||||||
// snapshot generation.
|
// snapshot generation.
|
||||||
return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{ID: req.ID})
|
return nil, nil
|
||||||
}
|
}
|
||||||
// Retrieve trie nodes until the packet size limit is reached
|
// Retrieve trie nodes until the packet size limit is reached
|
||||||
var (
|
var (
|
||||||
@ -453,7 +507,7 @@ func handleMessage(backend Backend, peer *Peer) error {
|
|||||||
switch len(pathset) {
|
switch len(pathset) {
|
||||||
case 0:
|
case 0:
|
||||||
// Ensure we penalize invalid requests
|
// Ensure we penalize invalid requests
|
||||||
return fmt.Errorf("%w: zero-item pathset requested", errBadRequest)
|
return nil, fmt.Errorf("%w: zero-item pathset requested", errBadRequest)
|
||||||
|
|
||||||
case 1:
|
case 1:
|
||||||
// If we're only retrieving an account trie node, fetch it directly
|
// If we're only retrieving an account trie node, fetch it directly
|
||||||
@ -497,25 +551,7 @@ func handleMessage(backend Backend, peer *Peer) error {
|
|||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Send back anything accumulated
|
return nodes, nil
|
||||||
return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{
|
|
||||||
ID: req.ID,
|
|
||||||
Nodes: nodes,
|
|
||||||
})
|
|
||||||
|
|
||||||
case msg.Code == TrieNodesMsg:
|
|
||||||
// A batch of trie nodes arrived to one of our previous requests
|
|
||||||
res := new(TrieNodesPacket)
|
|
||||||
if err := msg.Decode(res); err != nil {
|
|
||||||
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
|
|
||||||
}
|
|
||||||
requestTracker.Fulfil(peer.id, peer.version, TrieNodesMsg, res.ID)
|
|
||||||
|
|
||||||
return backend.Handle(peer, res)
|
|
||||||
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("%w: %v", errInvalidMsgCode, msg.Code)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NodeInfo represents a short summary of the `snap` sub-protocol metadata
@ -33,9 +33,9 @@ type Peer struct {
    logger log.Logger // Contextual logger with the peer id injected
}

-// newPeer creates a wrapper for a network connection and negotiated protocol
+// NewPeer creates a wrapper for a network connection and negotiated protocol
// version.
-func newPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) *Peer {
+func NewPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) *Peer {
    id := p.ID().String()
    return &Peer{
        id: id,
@ -27,7 +27,7 @@ import (

// Constants to match up protocol versions and messages
const (
-   snap1 = 1
+   SNAP1 = 1
)

// ProtocolName is the official short name of the `snap` protocol used during
@ -36,11 +36,11 @@ const ProtocolName = "snap"

// ProtocolVersions are the supported versions of the `snap` protocol (first
// is primary).
-var ProtocolVersions = []uint{snap1}
+var ProtocolVersions = []uint{SNAP1}

// protocolLengths are the number of implemented message corresponding to
// different protocol versions.
-var protocolLengths = map[uint]uint64{snap1: 8}
+var protocolLengths = map[uint]uint64{SNAP1: 8}

// maxMessageSize is the maximum cap on the size of a protocol message.
const maxMessageSize = 10 * 1024 * 1024
@ -325,10 +325,10 @@ type healTask struct {
    codeTasks map[common.Hash]struct{} // Set of byte code tasks currently queued for retrieval
}

-// syncProgress is a database entry to allow suspending and resuming a snapshot state
+// SyncProgress is a database entry to allow suspending and resuming a snapshot state
// sync. Opposed to full and fast sync, there is no way to restart a suspended
// snap sync without prior knowledge of the suspension point.
-type syncProgress struct {
+type SyncProgress struct {
    Tasks []*accountTask // The suspended account tasks (contract tasks within)

    // Status report during syncing phase
@ -342,12 +342,15 @@ type syncProgress struct {
    // Status report during healing phase
    TrienodeHealSynced uint64             // Number of state trie nodes downloaded
    TrienodeHealBytes  common.StorageSize // Number of state trie bytes persisted to disk
-   TrienodeHealDups   uint64             // Number of state trie nodes already processed
-   TrienodeHealNops   uint64             // Number of state trie nodes not requested
    BytecodeHealSynced uint64             // Number of bytecodes downloaded
    BytecodeHealBytes  common.StorageSize // Number of bytecodes persisted to disk
-   BytecodeHealDups   uint64             // Number of bytecodes already processed
-   BytecodeHealNops   uint64             // Number of bytecodes not requested
+}
+
+// SyncPending is analogous to SyncProgress, but it's used to report on pending
+// ephemeral sync progress that doesn't get persisted into the database.
+type SyncPending struct {
+   TrienodeHeal uint64 // Number of state trie nodes pending
+   BytecodeHeal uint64 // Number of bytecodes pending
}

// SyncPeer abstracts out the methods required for a peer to be synced against
@ -671,7 +674,7 @@ func (s *Syncer) Sync(root common.Hash, cancel chan struct{}) error {
// loadSyncStatus retrieves a previously aborted sync status from the database,
// or generates a fresh one if none is available.
func (s *Syncer) loadSyncStatus() {
-   var progress syncProgress
+   var progress SyncProgress

    if status := rawdb.ReadSnapshotSyncStatus(s.db); status != nil {
        if err := json.Unmarshal(status, &progress); err != nil {
@ -775,7 +778,7 @@ func (s *Syncer) saveSyncStatus() {
        }
    }
    // Store the actual progress markers
-   progress := &syncProgress{
+   progress := &SyncProgress{
        Tasks:         s.tasks,
        AccountSynced: s.accountSynced,
        AccountBytes:  s.accountBytes,
@ -795,6 +798,31 @@ func (s *Syncer) saveSyncStatus() {
|
|||||||
rawdb.WriteSnapshotSyncStatus(s.db, status)
|
rawdb.WriteSnapshotSyncStatus(s.db, status)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Progress returns the snap sync status statistics.
|
||||||
|
func (s *Syncer) Progress() (*SyncProgress, *SyncPending) {
|
||||||
|
s.lock.Lock()
|
||||||
|
defer s.lock.Unlock()
|
||||||
|
|
||||||
|
progress := &SyncProgress{
|
||||||
|
AccountSynced: s.accountSynced,
|
||||||
|
AccountBytes: s.accountBytes,
|
||||||
|
BytecodeSynced: s.bytecodeSynced,
|
||||||
|
BytecodeBytes: s.bytecodeBytes,
|
||||||
|
StorageSynced: s.storageSynced,
|
||||||
|
StorageBytes: s.storageBytes,
|
||||||
|
TrienodeHealSynced: s.trienodeHealSynced,
|
||||||
|
TrienodeHealBytes: s.trienodeHealBytes,
|
||||||
|
BytecodeHealSynced: s.bytecodeHealSynced,
|
||||||
|
BytecodeHealBytes: s.bytecodeHealBytes,
|
||||||
|
}
|
||||||
|
pending := new(SyncPending)
|
||||||
|
if s.healer != nil {
|
||||||
|
pending.TrienodeHeal = uint64(len(s.healer.trieTasks))
|
||||||
|
pending.BytecodeHeal = uint64(len(s.healer.codeTasks))
|
||||||
|
}
|
||||||
|
return progress, pending
|
||||||
|
}
|
||||||
|
|
||||||
// cleanAccountTasks removes account range retrieval tasks that have already been
|
// cleanAccountTasks removes account range retrieval tasks that have already been
|
||||||
// completed.
|
// completed.
|
||||||
func (s *Syncer) cleanAccountTasks() {
|
func (s *Syncer) cleanAccountTasks() {
|
||||||
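Progress gives callers a point-in-time snapshot of the syncer's counters without reaching into its internals, which is what the RPC/GraphQL reporting further down builds on. A toy poller illustrating how such a snapshot might be consumed; the progress function and its numbers stand in for a real *snap.Syncer and are not part of this diff.

package main

import (
    "fmt"
    "time"
)

type SyncProgress struct{ AccountSynced, StorageSynced uint64 }
type SyncPending struct{ TrienodeHeal, BytecodeHeal uint64 }

// progress stands in for (*snap.Syncer).Progress.
func progress(tick uint64) (*SyncProgress, *SyncPending) {
    return &SyncProgress{AccountSynced: 100 * tick, StorageSynced: 250 * tick},
        &SyncPending{TrienodeHeal: 10 - tick}
}

func main() {
    for tick := uint64(1); tick <= 3; tick++ {
        done, pending := progress(tick)
        fmt.Printf("accounts=%d storage=%d trienodes-pending=%d\n",
            done.AccountSynced, done.StorageSynced, pending.TrienodeHeal)
        time.Sleep(10 * time.Millisecond)
    }
}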
eth/sync.go
@ -165,10 +165,7 @@ func (cs *chainSyncer) nextSyncOp() *chainSyncOp {
        return nil
    }
    mode, ourTD := cs.modeAndLocalHead()
-   if mode == downloader.FastSync && atomic.LoadUint32(&cs.handler.snapSync) == 1 {
-       // Fast sync via the snap protocol
-       mode = downloader.SnapSync
-   }
    op := peerToSyncOp(mode, peer)
    if op.td.Cmp(ourTD) <= 0 {
        return nil // We're in sync.
@ -182,19 +179,19 @@ func peerToSyncOp(mode downloader.SyncMode, p *eth.Peer) *chainSyncOp {
}

func (cs *chainSyncer) modeAndLocalHead() (downloader.SyncMode, *big.Int) {
-   // If we're in fast sync mode, return that directly
-   if atomic.LoadUint32(&cs.handler.fastSync) == 1 {
+   // If we're in snap sync mode, return that directly
+   if atomic.LoadUint32(&cs.handler.snapSync) == 1 {
        block := cs.handler.chain.CurrentFastBlock()
        td := cs.handler.chain.GetTd(block.Hash(), block.NumberU64())
-       return downloader.FastSync, td
+       return downloader.SnapSync, td
    }
    // We are probably in full sync, but we might have rewound to before the
-   // fast sync pivot, check if we should reenable
+   // snap sync pivot, check if we should reenable
    if pivot := rawdb.ReadLastPivotNumber(cs.handler.database); pivot != nil {
        if head := cs.handler.chain.CurrentBlock(); head.NumberU64() < *pivot {
            block := cs.handler.chain.CurrentFastBlock()
            td := cs.handler.chain.GetTd(block.Hash(), block.NumberU64())
-           return downloader.FastSync, td
+           return downloader.SnapSync, td
        }
    }
    // Nope, we're really full syncing
@ -211,15 +208,15 @@ func (cs *chainSyncer) startSync(op *chainSyncOp) {

// doSync synchronizes the local blockchain with a remote peer.
func (h *handler) doSync(op *chainSyncOp) error {
-   if op.mode == downloader.FastSync || op.mode == downloader.SnapSync {
-       // Before launch the fast sync, we have to ensure user uses the same
+   if op.mode == downloader.SnapSync {
+       // Before launch the snap sync, we have to ensure user uses the same
        // txlookup limit.
-       // The main concern here is: during the fast sync Geth won't index the
+       // The main concern here is: during the snap sync Geth won't index the
        // block(generate tx indices) before the HEAD-limit. But if user changes
-       // the limit in the next fast sync(e.g. user kill Geth manually and
+       // the limit in the next snap sync(e.g. user kill Geth manually and
        // restart) then it will be hard for Geth to figure out the oldest block
        // has been indexed. So here for the user-experience wise, it's non-optimal
-       // that user can't change limit during the fast sync. If changed, Geth
+       // that user can't change limit during the snap sync. If changed, Geth
        // will just blindly use the original one.
        limit := h.chain.TxLookupLimit()
        if stored := rawdb.ReadFastTxLookupLimit(h.database); stored == nil {
@ -229,15 +226,11 @@ func (h *handler) doSync(op *chainSyncOp) error {
            log.Warn("Update txLookup limit", "provided", limit, "updated", *stored)
        }
    }
-   // Run the sync cycle, and disable fast sync if we're past the pivot block
+   // Run the sync cycle, and disable snap sync if we're past the pivot block
    err := h.downloader.Synchronise(op.peer.ID(), op.head, op.td, op.mode)
    if err != nil {
        return err
    }
-   if atomic.LoadUint32(&h.fastSync) == 1 {
-       log.Info("Fast sync complete, auto disabling")
-       atomic.StoreUint32(&h.fastSync, 0)
-   }
    if atomic.LoadUint32(&h.snapSync) == 1 {
        log.Info("Snap sync complete, auto disabling")
        atomic.StoreUint32(&h.snapSync, 0)
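With the separate fastSync flag gone, the handler keeps snap sync behind a single atomic flag: it stays set while the chain is empty and is cleared exactly once after a successful sync cycle, so later cycles fall back to full sync. A stripped-down illustration of that flag dance; the handler type and doSync body here are hypothetical, not the real handler.

package main

import (
    "fmt"
    "sync/atomic"
)

type handler struct {
    snapSync uint32 // 1 while snap sync is allowed, 0 once it has completed
}

func (h *handler) doSync() {
    mode := "full"
    if atomic.LoadUint32(&h.snapSync) == 1 {
        mode = "snap"
    }
    fmt.Println("syncing in", mode, "mode")

    // ... run the downloader; on success, permanently drop back to full sync.
    if atomic.LoadUint32(&h.snapSync) == 1 {
        fmt.Println("snap sync complete, auto disabling")
        atomic.StoreUint32(&h.snapSync, 0)
    }
}

func main() {
    h := &handler{snapSync: 1} // pristine node: start in snap sync
    h.doSync()                 // first cycle runs in snap mode and disables it
    h.doSync()                 // subsequent cycles are full sync
}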
|
@ -23,57 +23,74 @@ import (
|
|||||||
|
|
||||||
"github.com/ethereum/go-ethereum/eth/downloader"
|
"github.com/ethereum/go-ethereum/eth/downloader"
|
||||||
"github.com/ethereum/go-ethereum/eth/protocols/eth"
|
"github.com/ethereum/go-ethereum/eth/protocols/eth"
|
||||||
|
"github.com/ethereum/go-ethereum/eth/protocols/snap"
|
||||||
"github.com/ethereum/go-ethereum/p2p"
|
"github.com/ethereum/go-ethereum/p2p"
|
||||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Tests that fast sync is disabled after a successful sync cycle.
|
// Tests that snap sync is disabled after a successful sync cycle.
|
||||||
func TestFastSyncDisabling66(t *testing.T) { testFastSyncDisabling(t, eth.ETH66) }
|
func TestSnapSyncDisabling66(t *testing.T) { testSnapSyncDisabling(t, eth.ETH66, snap.SNAP1) }
|
||||||
|
|
||||||
// Tests that fast sync gets disabled as soon as a real block is successfully
|
// Tests that snap sync gets disabled as soon as a real block is successfully
|
||||||
// imported into the blockchain.
|
// imported into the blockchain.
|
||||||
func testFastSyncDisabling(t *testing.T, protocol uint) {
|
func testSnapSyncDisabling(t *testing.T, ethVer uint, snapVer uint) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
// Create an empty handler and ensure it's in fast sync mode
|
// Create an empty handler and ensure it's in snap sync mode
|
||||||
empty := newTestHandler()
|
empty := newTestHandler()
|
||||||
if atomic.LoadUint32(&empty.handler.fastSync) == 0 {
|
if atomic.LoadUint32(&empty.handler.snapSync) == 0 {
|
||||||
t.Fatalf("fast sync disabled on pristine blockchain")
|
t.Fatalf("snap sync disabled on pristine blockchain")
|
||||||
}
|
}
|
||||||
defer empty.close()
|
defer empty.close()
|
||||||
|
|
||||||
// Create a full handler and ensure fast sync ends up disabled
|
// Create a full handler and ensure snap sync ends up disabled
|
||||||
full := newTestHandlerWithBlocks(1024)
|
full := newTestHandlerWithBlocks(1024)
|
||||||
if atomic.LoadUint32(&full.handler.fastSync) == 1 {
|
if atomic.LoadUint32(&full.handler.snapSync) == 1 {
|
||||||
t.Fatalf("fast sync not disabled on non-empty blockchain")
|
t.Fatalf("snap sync not disabled on non-empty blockchain")
|
||||||
}
|
}
|
||||||
defer full.close()
|
defer full.close()
|
||||||
|
|
||||||
// Sync up the two handlers
|
// Sync up the two handlers via both `eth` and `snap`
|
||||||
emptyPipe, fullPipe := p2p.MsgPipe()
|
caps := []p2p.Cap{{Name: "eth", Version: ethVer}, {Name: "snap", Version: snapVer}}
|
||||||
defer emptyPipe.Close()
|
|
||||||
defer fullPipe.Close()
|
|
||||||
|
|
||||||
emptyPeer := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{1}, "", nil), emptyPipe, empty.txpool)
|
emptyPipeEth, fullPipeEth := p2p.MsgPipe()
|
||||||
fullPeer := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{2}, "", nil), fullPipe, full.txpool)
|
defer emptyPipeEth.Close()
|
||||||
defer emptyPeer.Close()
|
defer fullPipeEth.Close()
|
||||||
defer fullPeer.Close()
|
|
||||||
|
|
||||||
go empty.handler.runEthPeer(emptyPeer, func(peer *eth.Peer) error {
|
emptyPeerEth := eth.NewPeer(ethVer, p2p.NewPeer(enode.ID{1}, "", caps), emptyPipeEth, empty.txpool)
|
||||||
|
fullPeerEth := eth.NewPeer(ethVer, p2p.NewPeer(enode.ID{2}, "", caps), fullPipeEth, full.txpool)
|
||||||
|
defer emptyPeerEth.Close()
|
||||||
|
defer fullPeerEth.Close()
|
||||||
|
|
||||||
|
go empty.handler.runEthPeer(emptyPeerEth, func(peer *eth.Peer) error {
|
||||||
return eth.Handle((*ethHandler)(empty.handler), peer)
|
return eth.Handle((*ethHandler)(empty.handler), peer)
|
||||||
})
|
})
|
||||||
go full.handler.runEthPeer(fullPeer, func(peer *eth.Peer) error {
|
go full.handler.runEthPeer(fullPeerEth, func(peer *eth.Peer) error {
|
||||||
return eth.Handle((*ethHandler)(full.handler), peer)
|
return eth.Handle((*ethHandler)(full.handler), peer)
|
||||||
})
|
})
|
||||||
|
|
||||||
|
emptyPipeSnap, fullPipeSnap := p2p.MsgPipe()
|
||||||
|
defer emptyPipeSnap.Close()
|
||||||
|
defer fullPipeSnap.Close()
|
||||||
|
|
||||||
|
emptyPeerSnap := snap.NewPeer(snapVer, p2p.NewPeer(enode.ID{1}, "", caps), emptyPipeSnap)
|
||||||
|
fullPeerSnap := snap.NewPeer(snapVer, p2p.NewPeer(enode.ID{2}, "", caps), fullPipeSnap)
|
||||||
|
|
||||||
|
go empty.handler.runSnapExtension(emptyPeerSnap, func(peer *snap.Peer) error {
|
||||||
|
return snap.Handle((*snapHandler)(empty.handler), peer)
|
||||||
|
})
|
||||||
|
go full.handler.runSnapExtension(fullPeerSnap, func(peer *snap.Peer) error {
|
||||||
|
return snap.Handle((*snapHandler)(full.handler), peer)
|
||||||
|
})
|
||||||
// Wait a bit for the above handlers to start
|
// Wait a bit for the above handlers to start
|
||||||
time.Sleep(250 * time.Millisecond)
|
time.Sleep(250 * time.Millisecond)
|
||||||
|
|
||||||
// Check that fast sync was disabled
|
// Check that snap sync was disabled
|
||||||
op := peerToSyncOp(downloader.FastSync, empty.handler.peers.peerWithHighestTD())
|
op := peerToSyncOp(downloader.SnapSync, empty.handler.peers.peerWithHighestTD())
|
||||||
if err := empty.handler.doSync(op); err != nil {
|
if err := empty.handler.doSync(op); err != nil {
|
||||||
t.Fatal("sync failed:", err)
|
t.Fatal("sync failed:", err)
|
||||||
}
|
}
|
||||||
if atomic.LoadUint32(&empty.handler.fastSync) == 1 {
|
if atomic.LoadUint32(&empty.handler.snapSync) == 1 {
|
||||||
t.Fatalf("fast sync not disabled after successful synchronisation")
|
t.Fatalf("snap sync not disabled after successful synchronisation")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -286,14 +286,6 @@ func (ec *Client) TransactionReceipt(ctx context.Context, txHash common.Hash) (*
    return r, err
}

-type rpcProgress struct {
-   StartingBlock hexutil.Uint64
-   CurrentBlock  hexutil.Uint64
-   HighestBlock  hexutil.Uint64
-   PulledStates  hexutil.Uint64
-   KnownStates   hexutil.Uint64
-}
-
// SyncProgress retrieves the current progress of the sync algorithm. If there's
// no sync currently running, it returns nil.
func (ec *Client) SyncProgress(ctx context.Context) (*ethereum.SyncProgress, error) {
@ -306,17 +298,11 @@ func (ec *Client) SyncProgress(ctx context.Context) (*ethereum.SyncProgress, err
    if err := json.Unmarshal(raw, &syncing); err == nil {
        return nil, nil // Not syncing (always false)
    }
-   var progress *rpcProgress
+   var progress *ethereum.SyncProgress
    if err := json.Unmarshal(raw, &progress); err != nil {
        return nil, err
    }
-   return &ethereum.SyncProgress{
-       StartingBlock: uint64(progress.StartingBlock),
-       CurrentBlock:  uint64(progress.CurrentBlock),
-       HighestBlock:  uint64(progress.HighestBlock),
-       PulledStates:  uint64(progress.PulledStates),
-       KnownStates:   uint64(progress.KnownStates),
-   }, nil
+   return progress, nil
}

// SubscribeNewHead subscribes to notifications about the current blockchain head
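With the RPC result decoded straight into ethereum.SyncProgress, client code sees the new snap sync counters directly instead of the retired pulledStates/knownStates pair. A small client-side sketch; the endpoint URL is a placeholder and the printed field selection is just an example.

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/ethereum/go-ethereum/ethclient"
)

func main() {
    client, err := ethclient.Dial("http://localhost:8545") // placeholder endpoint
    if err != nil {
        log.Fatal(err)
    }
    progress, err := client.SyncProgress(context.Background())
    if err != nil {
        log.Fatal(err)
    }
    if progress == nil {
        fmt.Println("not syncing")
        return
    }
    // Block-oriented progress, plus the snap sync specific counters added below.
    fmt.Printf("block %d/%d, accounts %d, storage slots %d, healed trie nodes %d\n",
        progress.CurrentBlock, progress.HighestBlock,
        progress.SyncedAccounts, progress.SyncedStorage, progress.HealedTrienodes)
}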
|
@@ -1220,23 +1220,47 @@ type SyncState struct {
 func (s *SyncState) StartingBlock() hexutil.Uint64 {
 	return hexutil.Uint64(s.progress.StartingBlock)
 }

 func (s *SyncState) CurrentBlock() hexutil.Uint64 {
 	return hexutil.Uint64(s.progress.CurrentBlock)
 }

 func (s *SyncState) HighestBlock() hexutil.Uint64 {
 	return hexutil.Uint64(s.progress.HighestBlock)
 }

-func (s *SyncState) PulledStates() *hexutil.Uint64 {
-	ret := hexutil.Uint64(s.progress.PulledStates)
-	return &ret
-}
-
-func (s *SyncState) KnownStates() *hexutil.Uint64 {
-	ret := hexutil.Uint64(s.progress.KnownStates)
-	return &ret
+func (s *SyncState) SyncedAccounts() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.SyncedAccounts)
+}
+
+func (s *SyncState) SyncedAccountBytes() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.SyncedAccountBytes)
+}
+
+func (s *SyncState) SyncedBytecodes() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.SyncedBytecodes)
+}
+
+func (s *SyncState) SyncedBytecodeBytes() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.SyncedBytecodeBytes)
+}
+
+func (s *SyncState) SyncedStorage() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.SyncedStorage)
+}
+
+func (s *SyncState) SyncedStorageBytes() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.SyncedStorageBytes)
+}
+
+func (s *SyncState) HealedTrienodes() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.HealedTrienodes)
+}
+
+func (s *SyncState) HealedTrienodeBytes() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.HealedTrienodeBytes)
+}
+
+func (s *SyncState) HealedBytecodes() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.HealedBytecodes)
+}
+
+func (s *SyncState) HealedBytecodeBytes() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.HealedBytecodeBytes)
+}
+
+func (s *SyncState) HealingTrienodes() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.HealingTrienodes)
+}
+
+func (s *SyncState) HealingBytecode() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.HealingBytecode)
 }

 // Syncing returns false in case the node is currently not syncing with the network. It can be up to date or has not

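The graphql-go bindings expose these resolver methods as lower-camel-case fields on the syncing object. A hedged sketch of querying them over the node's GraphQL endpoint (assumes geth was started with --graphql; the field names are inferred from the methods above, and the matching schema additions are not visible in this excerpt):

```go
package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "io"
    "log"
    "net/http"
)

func main() {
    // Assumed endpoint: geth started with --graphql serves it under /graphql.
    const url = "http://127.0.0.1:8545/graphql"
    query := map[string]string{
        "query": `{ syncing { startingBlock currentBlock highestBlock
                    syncedAccounts syncedStorage healingTrienodes } }`,
    }
    body, _ := json.Marshal(query)

    resp, err := http.Post(url, "application/json", bytes.NewReader(body))
    if err != nil {
        log.Fatal(err)
    }
    defer resp.Body.Close()

    raw, _ := io.ReadAll(resp.Body)
    // Prints e.g. {"data":{"syncing":{...}}} while syncing, or {"data":{"syncing":null}} when idle.
    fmt.Println(string(raw))
}
```
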
@@ -1244,8 +1268,18 @@ func (s *SyncState) KnownStates() *hexutil.Uint64 {
 // - startingBlock: block number this node started to synchronise from
 // - currentBlock: block number this node is currently importing
 // - highestBlock: block number of the highest block header this node has received from peers
-// - pulledStates: number of state entries processed until now
-// - knownStates: number of known state entries that still need to be pulled
+// - syncedAccounts: number of accounts downloaded
+// - syncedAccountBytes: number of account trie bytes persisted to disk
+// - syncedBytecodes: number of bytecodes downloaded
+// - syncedBytecodeBytes: number of bytecode bytes downloaded
+// - syncedStorage: number of storage slots downloaded
+// - syncedStorageBytes: number of storage trie bytes persisted to disk
+// - healedTrienodes: number of state trie nodes downloaded
+// - healedTrienodeBytes: number of state trie bytes persisted to disk
+// - healedBytecodes: number of bytecodes downloaded
+// - healedBytecodeBytes: number of bytecodes persisted to disk
+// - healingTrienodes: number of state trie nodes pending
+// - healingBytecode: number of bytecodes pending
 func (r *Resolver) Syncing() (*SyncState, error) {
 	progress := r.backend.SyncProgress()

@@ -297,12 +297,6 @@ const schema string = `
 	currentBlock: Long!
 	# HighestBlock is the latest known block number.
 	highestBlock: Long!
-	# PulledStates is the number of state entries fetched so far, or null
-	# if this is not known or not relevant.
-	pulledStates: Long
-	# KnownStates is the number of states the node knows of so far, or null
-	# if this is not known or not relevant.
-	knownStates: Long
 }

 # Pending represents the current pending state.

@@ -101,8 +101,22 @@ type SyncProgress struct {
 	StartingBlock uint64 // Block number where sync began
 	CurrentBlock uint64 // Current block number where sync is at
 	HighestBlock uint64 // Highest alleged block number in the chain
-	PulledStates uint64 // Number of state trie entries already downloaded
-	KnownStates uint64 // Total number of state trie entries known about
+
+	// Fields belonging to snap sync
+	SyncedAccounts uint64 // Number of accounts downloaded
+	SyncedAccountBytes uint64 // Number of account trie bytes persisted to disk
+	SyncedBytecodes uint64 // Number of bytecodes downloaded
+	SyncedBytecodeBytes uint64 // Number of bytecode bytes downloaded
+	SyncedStorage uint64 // Number of storage slots downloaded
+	SyncedStorageBytes uint64 // Number of storage trie bytes persisted to disk
+
+	HealedTrienodes uint64 // Number of state trie nodes downloaded
+	HealedTrienodeBytes uint64 // Number of state trie bytes persisted to disk
+	HealedBytecodes uint64 // Number of bytecodes downloaded
+	HealedBytecodeBytes uint64 // Number of bytecodes persisted to disk
+
+	HealingTrienodes uint64 // Number of state trie nodes pending
+	HealingBytecode uint64 // Number of bytecodes pending
 }

 // ChainSyncReader wraps access to the node's current sync status. If there's no

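The struct now carries item- and byte-level counters for every snap-sync phase. A small helper sketch that condenses them into one status line; the aggregation and the "healing pending" framing are my own, not an API introduced by this change:

```go
package main

import (
    "fmt"

    ethereum "github.com/ethereum/go-ethereum"
)

// summarize renders a rough, human-readable view of snap-sync progress.
func summarize(p ethereum.SyncProgress) string {
    // Total state bytes persisted so far across the download and heal phases.
    stateBytes := p.SyncedAccountBytes + p.SyncedBytecodeBytes + p.SyncedStorageBytes +
        p.HealedTrienodeBytes + p.HealedBytecodeBytes
    return fmt.Sprintf(
        "blocks %d/%d, accounts %d, storage slots %d, bytecodes %d, state written %d bytes, healing %d trienodes / %d bytecodes pending",
        p.CurrentBlock, p.HighestBlock,
        p.SyncedAccounts, p.SyncedStorage, p.SyncedBytecodes+p.HealedBytecodes,
        stateBytes,
        p.HealingTrienodes, p.HealingBytecode,
    )
}

func main() {
    // Example values only; in practice the struct comes from a node's sync status.
    fmt.Println(summarize(ethereum.SyncProgress{CurrentBlock: 100, HighestBlock: 200}))
}
```
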
@@ -134,8 +134,18 @@ func (s *PublicEthereumAPI) Syncing() (interface{}, error) {
 		"startingBlock": hexutil.Uint64(progress.StartingBlock),
 		"currentBlock": hexutil.Uint64(progress.CurrentBlock),
 		"highestBlock": hexutil.Uint64(progress.HighestBlock),
-		"pulledStates": hexutil.Uint64(progress.PulledStates),
-		"knownStates": hexutil.Uint64(progress.KnownStates),
+		"syncedAccounts": hexutil.Uint64(progress.SyncedAccounts),
+		"syncedAccountBytes": hexutil.Uint64(progress.SyncedAccountBytes),
+		"syncedBytecodes": hexutil.Uint64(progress.SyncedBytecodes),
+		"syncedBytecodeBytes": hexutil.Uint64(progress.SyncedBytecodeBytes),
+		"syncedStorage": hexutil.Uint64(progress.SyncedStorage),
+		"syncedStorageBytes": hexutil.Uint64(progress.SyncedStorageBytes),
+		"healedTrienodes": hexutil.Uint64(progress.HealedTrienodes),
+		"healedTrienodeBytes": hexutil.Uint64(progress.HealedTrienodeBytes),
+		"healedBytecodes": hexutil.Uint64(progress.HealedBytecodes),
+		"healedBytecodeBytes": hexutil.Uint64(progress.HealedBytecodeBytes),
+		"healingTrienodes": hexutil.Uint64(progress.HealingTrienodes),
+		"healingBytecode": hexutil.Uint64(progress.HealingBytecode),
 	}, nil
 }

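With the handler above, eth_syncing reports each counter as a hex-encoded quantity. A minimal sketch that dials the RPC endpoint and dumps the raw response (the endpoint is an assumption; the field set is taken from the hunk above):

```go
package main

import (
    "context"
    "encoding/json"
    "fmt"
    "log"

    "github.com/ethereum/go-ethereum/rpc"
)

func main() {
    client, err := rpc.DialContext(context.Background(), "http://127.0.0.1:8545")
    if err != nil {
        log.Fatal(err)
    }
    // eth_syncing returns false when idle, otherwise an object with the fields above.
    var result json.RawMessage
    if err := client.CallContext(context.Background(), &result, "eth_syncing"); err != nil {
        log.Fatal(err)
    }
    // Prints e.g. {"currentBlock":"0x2d5","syncedAccounts":"0x1a4",...} or false.
    fmt.Println(string(result))
}
```
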
File diff suppressed because one or more lines are too long
@@ -3949,10 +3949,18 @@ var outputSyncingFormatter = function(result) {
     result.startingBlock = utils.toDecimal(result.startingBlock);
     result.currentBlock = utils.toDecimal(result.currentBlock);
     result.highestBlock = utils.toDecimal(result.highestBlock);
-    if (result.knownStates) {
-        result.knownStates = utils.toDecimal(result.knownStates);
-        result.pulledStates = utils.toDecimal(result.pulledStates);
-    }
+    result.syncedAccounts = utils.toDecimal(result.syncedAccounts);
+    result.syncedAccountBytes = utils.toDecimal(result.syncedAccountBytes);
+    result.syncedBytecodes = utils.toDecimal(result.syncedBytecodes);
+    result.syncedBytecodeBytes = utils.toDecimal(result.syncedBytecodeBytes);
+    result.syncedStorage = utils.toDecimal(result.syncedStorage);
+    result.syncedStorageBytes = utils.toDecimal(result.syncedStorageBytes);
+    result.healedTrienodes = utils.toDecimal(result.healedTrienodes);
+    result.healedTrienodeBytes = utils.toDecimal(result.healedTrienodeBytes);
+    result.healedBytecodes = utils.toDecimal(result.healedBytecodes);
+    result.healedBytecodeBytes = utils.toDecimal(result.healedBytecodeBytes);
+    result.healingTrienodes = utils.toDecimal(result.healingTrienodes);
+    result.healingBytecode = utils.toDecimal(result.healingBytecode);

     return result;
 };

@@ -231,9 +231,9 @@ func New(checkpoint uint64, stateDb ethdb.Database, stateBloom *trie.SyncBloom,
 		stateCh: make(chan dataPack),
 		SnapSyncer: snap.NewSyncer(stateDb),
 		stateSyncStart: make(chan *stateSync),
-		syncStatsState: stateSyncStats{
-			processed: rawdb.ReadFastTrieProgress(stateDb),
-		},
+		//syncStatsState: stateSyncStats{
+		//	processed: rawdb.ReadFastTrieProgress(stateDb),
+		//},
 		trackStateReq: make(chan *stateReq),
 	}
 	go dl.stateFetcher()

@@ -268,8 +268,8 @@ func (d *Downloader) Progress() ethereum.SyncProgress {
 		StartingBlock: d.syncStatsChainOrigin,
 		CurrentBlock: current,
 		HighestBlock: d.syncStatsChainHeight,
-		PulledStates: d.syncStatsState.processed,
-		KnownStates: d.syncStatsState.processed + d.syncStatsState.pending,
+		//PulledStates: d.syncStatsState.processed,
+		//KnownStates: d.syncStatsState.processed + d.syncStatsState.pending,
 	}
 }

@@ -1207,8 +1207,8 @@ func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.Sync
 	t.Helper()

 	p := d.Progress()
-	p.KnownStates, p.PulledStates = 0, 0
-	want.KnownStates, want.PulledStates = 0, 0
+	//p.KnownStates, p.PulledStates = 0, 0
+	//want.KnownStates, want.PulledStates = 0, 0
 	if p != want {
 		t.Fatalf("%s progress mismatch:\nhave %+v\nwant %+v", stage, p, want)
 	}

@@ -22,7 +22,6 @@ import (
 	"time"

 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/ethdb"

@@ -610,6 +609,6 @@ func (s *stateSync) updateStats(written, duplicate, unexpected int, duration tim
 		log.Info("Imported new state entries", "count", written, "elapsed", common.PrettyDuration(duration), "processed", s.d.syncStatsState.processed, "pending", s.d.syncStatsState.pending, "trieretry", len(s.trieTasks), "coderetry", len(s.codeTasks), "duplicate", s.d.syncStatsState.duplicate, "unexpected", s.d.syncStatsState.unexpected)
 	}
 	if written > 0 {
-		rawdb.WriteFastTrieProgress(s.d.stateDB, s.d.syncStatsState.processed)
+		//rawdb.WriteFastTrieProgress(s.d.stateDB, s.d.syncStatsState.processed)
 	}
 }

@@ -60,8 +60,8 @@ func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common
 			block.AddTx(tx)
 		}
 		// If the block number is a multiple of 5, add a bonus uncle to the block
-		if i%5 == 0 {
-			block.AddUncle(&types.Header{ParentHash: block.PrevBlock(i - 1).Hash(), Number: big.NewInt(int64(i - 1))})
+		if i > 0 && i%5 == 0 {
+			block.AddUncle(&types.Header{ParentHash: block.PrevBlock(i - 2).Hash(), Number: big.NewInt(int64(i - 1))})
 		}
 	})
 	hashes := make([]common.Hash, n+1)

@@ -81,8 +81,18 @@ type SyncProgress struct {
 func (p *SyncProgress) GetStartingBlock() int64 { return int64(p.progress.StartingBlock) }
 func (p *SyncProgress) GetCurrentBlock() int64 { return int64(p.progress.CurrentBlock) }
 func (p *SyncProgress) GetHighestBlock() int64 { return int64(p.progress.HighestBlock) }
-func (p *SyncProgress) GetPulledStates() int64 { return int64(p.progress.PulledStates) }
-func (p *SyncProgress) GetKnownStates() int64 { return int64(p.progress.KnownStates) }
+func (p *SyncProgress) GetSyncedAccounts() int64 { return int64(p.progress.SyncedAccounts) }
+func (p *SyncProgress) GetSyncedAccountBytes() int64 { return int64(p.progress.SyncedAccountBytes) }
+func (p *SyncProgress) GetSyncedBytecodes() int64 { return int64(p.progress.SyncedBytecodes) }
+func (p *SyncProgress) GetSyncedBytecodeBytes() int64 { return int64(p.progress.SyncedBytecodeBytes) }
+func (p *SyncProgress) GetSyncedStorage() int64 { return int64(p.progress.SyncedStorage) }
+func (p *SyncProgress) GetSyncedStorageBytes() int64 { return int64(p.progress.SyncedStorageBytes) }
+func (p *SyncProgress) GetHealedTrienodes() int64 { return int64(p.progress.HealedTrienodes) }
+func (p *SyncProgress) GetHealedTrienodeBytes() int64 { return int64(p.progress.HealedTrienodeBytes) }
+func (p *SyncProgress) GetHealedBytecodes() int64 { return int64(p.progress.HealedBytecodes) }
+func (p *SyncProgress) GetHealedBytecodeBytes() int64 { return int64(p.progress.HealedBytecodeBytes) }
+func (p *SyncProgress) GetHealingTrienodes() int64 { return int64(p.progress.HealingTrienodes) }
+func (p *SyncProgress) GetHealingBytecode() int64 { return int64(p.progress.HealingBytecode) }

 // Topics is a set of topic lists to filter events with.
 type Topics struct{ topics [][]common.Hash }

p2p/peer.go
@@ -121,10 +121,18 @@ type Peer struct {

 // NewPeer returns a peer for testing purposes.
 func NewPeer(id enode.ID, name string, caps []Cap) *Peer {
+	// Generate a fake set of local protocols to match as running caps. Almost
+	// no fields needs to be meaningful here as we're only using it to cross-
+	// check with the "remote" caps array.
+	protos := make([]Protocol, len(caps))
+	for i, cap := range caps {
+		protos[i].Name = cap.Name
+		protos[i].Version = cap.Version
+	}
 	pipe, _ := net.Pipe()
 	node := enode.SignNull(new(enr.Record), id)
 	conn := &conn{fd: pipe, transport: nil, node: node, caps: caps, name: name}
-	peer := newPeer(log.Root(), conn, nil)
+	peer := newPeer(log.Root(), conn, protos)
 	close(peer.closed) // ensures Disconnect doesn't block
 	return peer
 }

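Because NewPeer now installs the advertised caps as running local protocols, capability checks against a test peer behave as they would on a fully negotiated connection, which the reworked fetcher idleness tracking depends on. A small usage sketch (the RunningCap calls are illustrative, not part of this diff):

```go
package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/p2p"
    "github.com/ethereum/go-ethereum/p2p/enode"
)

func main() {
    // Build a fake peer advertising eth/66 and snap/1, as a protocol test might.
    peer := p2p.NewPeer(enode.ID{1}, "test-peer", []p2p.Cap{
        {Name: "eth", Version: 66},
        {Name: "snap", Version: 1},
    })
    // With the change above, the advertised caps are also registered as running
    // protocols, so capability checks on the fake peer succeed.
    fmt.Println(peer.RunningCap("snap", []uint{1})) // true
    fmt.Println(peer.RunningCap("les", []uint{4}))  // false
}
```
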