forked from cerc-io/plugeth
eth/downloader: remove eth62 (#21378)
* init notes: removed some mentions of eth62, bumped the protocol "too old" error to >= 63
* remove sanity checks and bump supported protocol version up to 63
* remove 62 tests, still need to add 65
* remove 65 tests
parent 350a0490ab
commit 1976bb3df0
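
For orientation before the hunks: a minimal, self-contained sketch of the version gate this commit standardizes on. It is not the actual go-ethereum code shown below; the peer struct and the checkPeer helper are illustrative stand-ins. The point it demonstrates is that the downloader now refuses anything older than eth/63 up front and returns errTooOld.

package main

import (
	"errors"
	"fmt"
)

// errTooOld mirrors the message updated in this commit: the minimum accepted
// protocol version moves from eth/62 to eth/63.
var errTooOld = errors.New("peer doesn't speak recent enough protocol version (need version >= 63)")

// peer is an illustrative stand-in for the downloader's peerConnection; only
// the advertised protocol version matters for this sketch.
type peer struct {
	id      string
	version int
}

// checkPeer (hypothetical helper) applies the same guard that syncWithPeer
// now uses: anything below eth/63 is rejected before any sync work starts.
func checkPeer(p *peer) error {
	if p.version < 63 {
		return errTooOld
	}
	return nil
}

func main() {
	for _, p := range []*peer{{"peer 62", 62}, {"peer 63", 63}, {"peer 64", 64}} {
		fmt.Printf("%s: %v\n", p.id, checkPeer(p))
	}
}

With eth/62 support gone, the handshake-level check makes the per-request "sanity check the protocol version" panics removed below in peer.go unreachable, which is why they are deleted rather than bumped.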
eth/downloader/downloader.go
@@ -89,7 +89,7 @@ var (
 	errCancelContentProcessing = errors.New("content processing canceled (requested)")
 	errCanceled = errors.New("syncing canceled (requested)")
 	errNoSyncActive = errors.New("no sync active")
-	errTooOld = errors.New("peer doesn't speak recent enough protocol version (need version >= 62)")
+	errTooOld = errors.New("peer doesn't speak recent enough protocol version (need version >= 63)")
 )

 type Downloader struct {
@@ -431,7 +431,7 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I
 			d.mux.Post(DoneEvent{latest})
 		}
 	}()
-	if p.version < 62 {
+	if p.version < 63 {
 		return errTooOld
 	}
 	mode := d.getMode()
eth/downloader/downloader_test.go
@@ -500,7 +500,6 @@ func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, leng
 // Tests that simple synchronization against a canonical chain works correctly.
 // In this test common ancestor lookup should be short circuited and not require
 // binary searching.
-func TestCanonicalSynchronisation62(t *testing.T) { testCanonicalSynchronisation(t, 62, FullSync) }
 func TestCanonicalSynchronisation63Full(t *testing.T) { testCanonicalSynchronisation(t, 63, FullSync) }
 func TestCanonicalSynchronisation63Fast(t *testing.T) { testCanonicalSynchronisation(t, 63, FastSync) }
 func TestCanonicalSynchronisation64Full(t *testing.T) { testCanonicalSynchronisation(t, 64, FullSync) }
@@ -528,7 +527,6 @@ func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {

 // Tests that if a large batch of blocks are being downloaded, it is throttled
 // until the cached blocks are retrieved.
-func TestThrottling62(t *testing.T) { testThrottling(t, 62, FullSync) }
 func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) }
 func TestThrottling63Fast(t *testing.T) { testThrottling(t, 63, FastSync) }
 func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
@@ -612,7 +610,6 @@ func testThrottling(t *testing.T, protocol int, mode SyncMode) {
 // Tests that simple synchronization against a forked chain works correctly. In
 // this test common ancestor lookup should *not* be short circuited, and a full
 // binary search should be executed.
-func TestForkedSync62(t *testing.T) { testForkedSync(t, 62, FullSync) }
 func TestForkedSync63Full(t *testing.T) { testForkedSync(t, 63, FullSync) }
 func TestForkedSync63Fast(t *testing.T) { testForkedSync(t, 63, FastSync) }
 func TestForkedSync64Full(t *testing.T) { testForkedSync(t, 64, FullSync) }
@@ -644,7 +641,6 @@ func testForkedSync(t *testing.T, protocol int, mode SyncMode) {

 // Tests that synchronising against a much shorter but much heavyer fork works
 // corrently and is not dropped.
-func TestHeavyForkedSync62(t *testing.T) { testHeavyForkedSync(t, 62, FullSync) }
 func TestHeavyForkedSync63Full(t *testing.T) { testHeavyForkedSync(t, 63, FullSync) }
 func TestHeavyForkedSync63Fast(t *testing.T) { testHeavyForkedSync(t, 63, FastSync) }
 func TestHeavyForkedSync64Full(t *testing.T) { testHeavyForkedSync(t, 64, FullSync) }
@@ -678,7 +674,6 @@ func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
 // Tests that chain forks are contained within a certain interval of the current
 // chain head, ensuring that malicious peers cannot waste resources by feeding
 // long dead chains.
-func TestBoundedForkedSync62(t *testing.T) { testBoundedForkedSync(t, 62, FullSync) }
 func TestBoundedForkedSync63Full(t *testing.T) { testBoundedForkedSync(t, 63, FullSync) }
 func TestBoundedForkedSync63Fast(t *testing.T) { testBoundedForkedSync(t, 63, FastSync) }
 func TestBoundedForkedSync64Full(t *testing.T) { testBoundedForkedSync(t, 64, FullSync) }
@@ -711,7 +706,6 @@ func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
 // Tests that chain forks are contained within a certain interval of the current
 // chain head for short but heavy forks too. These are a bit special because they
 // take different ancestor lookup paths.
-func TestBoundedHeavyForkedSync62(t *testing.T) { testBoundedHeavyForkedSync(t, 62, FullSync) }
 func TestBoundedHeavyForkedSync63Full(t *testing.T) { testBoundedHeavyForkedSync(t, 63, FullSync) }
 func TestBoundedHeavyForkedSync63Fast(t *testing.T) { testBoundedHeavyForkedSync(t, 63, FastSync) }
 func TestBoundedHeavyForkedSync64Full(t *testing.T) { testBoundedHeavyForkedSync(t, 64, FullSync) }
@@ -741,23 +735,6 @@ func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
 	tester.terminate()
 }

-// Tests that an inactive downloader will not accept incoming block headers and
-// bodies.
-func TestInactiveDownloader62(t *testing.T) {
-	t.Parallel()
-
-	tester := newTester()
-	defer tester.terminate()
-
-	// Check that neither block headers nor bodies are accepted
-	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
-		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
-	}
-	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
-		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
-	}
-}
-
 // Tests that an inactive downloader will not accept incoming block headers,
 // bodies and receipts.
 func TestInactiveDownloader63(t *testing.T) {
@@ -779,7 +756,6 @@ func TestInactiveDownloader63(t *testing.T) {
 }

 // Tests that a canceled download wipes all previously accumulated state.
-func TestCancel62(t *testing.T) { testCancel(t, 62, FullSync) }
 func TestCancel63Full(t *testing.T) { testCancel(t, 63, FullSync) }
 func TestCancel63Fast(t *testing.T) { testCancel(t, 63, FastSync) }
 func TestCancel64Full(t *testing.T) { testCancel(t, 64, FullSync) }
@@ -811,7 +787,6 @@ func testCancel(t *testing.T, protocol int, mode SyncMode) {
 }

 // Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
-func TestMultiSynchronisation62(t *testing.T) { testMultiSynchronisation(t, 62, FullSync) }
 func TestMultiSynchronisation63Full(t *testing.T) { testMultiSynchronisation(t, 63, FullSync) }
 func TestMultiSynchronisation63Fast(t *testing.T) { testMultiSynchronisation(t, 63, FastSync) }
 func TestMultiSynchronisation64Full(t *testing.T) { testMultiSynchronisation(t, 64, FullSync) }
@@ -840,7 +815,6 @@ func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {

 // Tests that synchronisations behave well in multi-version protocol environments
 // and not wreak havoc on other nodes in the network.
-func TestMultiProtoSynchronisation62(t *testing.T) { testMultiProtoSync(t, 62, FullSync) }
 func TestMultiProtoSynchronisation63Full(t *testing.T) { testMultiProtoSync(t, 63, FullSync) }
 func TestMultiProtoSynchronisation63Fast(t *testing.T) { testMultiProtoSync(t, 63, FastSync) }
 func TestMultiProtoSynchronisation64Full(t *testing.T) { testMultiProtoSync(t, 64, FullSync) }
@@ -857,7 +831,6 @@ func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
 	chain := testChainBase.shorten(blockCacheItems - 15)

 	// Create peers of every type
-	tester.newPeer("peer 62", 62, chain)
 	tester.newPeer("peer 63", 63, chain)
 	tester.newPeer("peer 64", 64, chain)

@@ -868,7 +841,7 @@ func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
 	assertOwnChain(t, tester, chain.len())

 	// Check that no peers have been dropped off
-	for _, version := range []int{62, 63, 64} {
+	for _, version := range []int{63, 64} {
 		peer := fmt.Sprintf("peer %d", version)
 		if _, ok := tester.peers[peer]; !ok {
 			t.Errorf("%s dropped", peer)
@@ -878,7 +851,6 @@ func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {

 // Tests that if a block is empty (e.g. header only), no body request should be
 // made, and instead the header should be assembled into a whole block in itself.
-func TestEmptyShortCircuit62(t *testing.T) { testEmptyShortCircuit(t, 62, FullSync) }
 func TestEmptyShortCircuit63Full(t *testing.T) { testEmptyShortCircuit(t, 63, FullSync) }
 func TestEmptyShortCircuit63Fast(t *testing.T) { testEmptyShortCircuit(t, 63, FastSync) }
 func TestEmptyShortCircuit64Full(t *testing.T) { testEmptyShortCircuit(t, 64, FullSync) }
@@ -931,7 +903,6 @@ func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {

 // Tests that headers are enqueued continuously, preventing malicious nodes from
 // stalling the downloader by feeding gapped header chains.
-func TestMissingHeaderAttack62(t *testing.T) { testMissingHeaderAttack(t, 62, FullSync) }
 func TestMissingHeaderAttack63Full(t *testing.T) { testMissingHeaderAttack(t, 63, FullSync) }
 func TestMissingHeaderAttack63Fast(t *testing.T) { testMissingHeaderAttack(t, 63, FastSync) }
 func TestMissingHeaderAttack64Full(t *testing.T) { testMissingHeaderAttack(t, 64, FullSync) }
@@ -962,7 +933,6 @@ func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {

 // Tests that if requested headers are shifted (i.e. first is missing), the queue
 // detects the invalid numbering.
-func TestShiftedHeaderAttack62(t *testing.T) { testShiftedHeaderAttack(t, 62, FullSync) }
 func TestShiftedHeaderAttack63Full(t *testing.T) { testShiftedHeaderAttack(t, 63, FullSync) }
 func TestShiftedHeaderAttack63Fast(t *testing.T) { testShiftedHeaderAttack(t, 63, FastSync) }
 func TestShiftedHeaderAttack64Full(t *testing.T) { testShiftedHeaderAttack(t, 64, FullSync) }
@@ -1090,7 +1060,6 @@ func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {

 // Tests that a peer advertising a high TD doesn't get to stall the downloader
 // afterwards by not sending any useful hashes.
-func TestHighTDStarvationAttack62(t *testing.T) { testHighTDStarvationAttack(t, 62, FullSync) }
 func TestHighTDStarvationAttack63Full(t *testing.T) { testHighTDStarvationAttack(t, 63, FullSync) }
 func TestHighTDStarvationAttack63Fast(t *testing.T) { testHighTDStarvationAttack(t, 63, FastSync) }
 func TestHighTDStarvationAttack64Full(t *testing.T) { testHighTDStarvationAttack(t, 64, FullSync) }
@@ -1111,7 +1080,6 @@ func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
 }

 // Tests that misbehaving peers are disconnected, whilst behaving ones are not.
-func TestBlockHeaderAttackerDropping62(t *testing.T) { testBlockHeaderAttackerDropping(t, 62) }
 func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) }
 func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }

@@ -1165,7 +1133,6 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {

 // Tests that synchronisation progress (origin block number, current block number
 // and highest block number) is tracked and updated correctly.
-func TestSyncProgress62(t *testing.T) { testSyncProgress(t, 62, FullSync) }
 func TestSyncProgress63Full(t *testing.T) { testSyncProgress(t, 63, FullSync) }
 func TestSyncProgress63Fast(t *testing.T) { testSyncProgress(t, 63, FastSync) }
 func TestSyncProgress64Full(t *testing.T) { testSyncProgress(t, 64, FullSync) }
@@ -1248,7 +1215,6 @@ func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.Sync
 // Tests that synchronisation progress (origin block number and highest block
 // number) is tracked and updated correctly in case of a fork (or manual head
 // revertal).
-func TestForkedSyncProgress62(t *testing.T) { testForkedSyncProgress(t, 62, FullSync) }
 func TestForkedSyncProgress63Full(t *testing.T) { testForkedSyncProgress(t, 63, FullSync) }
 func TestForkedSyncProgress63Fast(t *testing.T) { testForkedSyncProgress(t, 63, FastSync) }
 func TestForkedSyncProgress64Full(t *testing.T) { testForkedSyncProgress(t, 64, FullSync) }
@@ -1323,7 +1289,6 @@ func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
 // Tests that if synchronisation is aborted due to some failure, then the progress
 // origin is not updated in the next sync cycle, as it should be considered the
 // continuation of the previous sync and not a new instance.
-func TestFailedSyncProgress62(t *testing.T) { testFailedSyncProgress(t, 62, FullSync) }
 func TestFailedSyncProgress63Full(t *testing.T) { testFailedSyncProgress(t, 63, FullSync) }
 func TestFailedSyncProgress63Fast(t *testing.T) { testFailedSyncProgress(t, 63, FastSync) }
 func TestFailedSyncProgress64Full(t *testing.T) { testFailedSyncProgress(t, 64, FullSync) }
@@ -1395,7 +1360,6 @@ func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {

 // Tests that if an attacker fakes a chain height, after the attack is detected,
 // the progress height is successfully reduced at the next sync invocation.
-func TestFakedSyncProgress62(t *testing.T) { testFakedSyncProgress(t, 62, FullSync) }
 func TestFakedSyncProgress63Full(t *testing.T) { testFakedSyncProgress(t, 63, FullSync) }
 func TestFakedSyncProgress63Fast(t *testing.T) { testFakedSyncProgress(t, 63, FastSync) }
 func TestFakedSyncProgress64Full(t *testing.T) { testFakedSyncProgress(t, 64, FullSync) }
@@ -1478,7 +1442,6 @@ func TestDeliverHeadersHang(t *testing.T) {
 		protocol int
 		syncMode SyncMode
 	}{
-		{62, FullSync},
 		{63, FullSync},
 		{63, FastSync},
 		{64, FullSync},
@@ -1644,7 +1607,6 @@ func TestRemoteHeaderRequestSpan(t *testing.T) {

 // Tests that peers below a pre-configured checkpoint block are prevented from
 // being fast-synced from, avoiding potential cheap eclipse attacks.
-func TestCheckpointEnforcement62(t *testing.T) { testCheckpointEnforcement(t, 62, FullSync) }
 func TestCheckpointEnforcement63Full(t *testing.T) { testCheckpointEnforcement(t, 63, FullSync) }
 func TestCheckpointEnforcement63Fast(t *testing.T) { testCheckpointEnforcement(t, 63, FastSync) }
 func TestCheckpointEnforcement64Full(t *testing.T) { testCheckpointEnforcement(t, 64, FullSync) }
eth/downloader/peer.go
@@ -21,7 +21,6 @@ package downloader

 import (
 	"errors"
-	"fmt"
 	"math"
 	"math/big"
 	"sort"
@@ -143,10 +142,6 @@ func (p *peerConnection) Reset() {

 // FetchHeaders sends a header retrieval request to the remote peer.
 func (p *peerConnection) FetchHeaders(from uint64, count int) error {
-	// Sanity check the protocol version
-	if p.version < 62 {
-		panic(fmt.Sprintf("header fetch [eth/62+] requested on eth/%d", p.version))
-	}
 	// Short circuit if the peer is already fetching
 	if !atomic.CompareAndSwapInt32(&p.headerIdle, 0, 1) {
 		return errAlreadyFetching
@@ -161,10 +156,6 @@ func (p *peerConnection) FetchHeaders(from uint64, count int) error {

 // FetchBodies sends a block body retrieval request to the remote peer.
 func (p *peerConnection) FetchBodies(request *fetchRequest) error {
-	// Sanity check the protocol version
-	if p.version < 62 {
-		panic(fmt.Sprintf("body fetch [eth/62+] requested on eth/%d", p.version))
-	}
 	// Short circuit if the peer is already fetching
 	if !atomic.CompareAndSwapInt32(&p.blockIdle, 0, 1) {
 		return errAlreadyFetching
@@ -185,10 +176,6 @@ func (p *peerConnection) FetchBodies(request *fetchRequest) error {

 // FetchReceipts sends a receipt retrieval request to the remote peer.
 func (p *peerConnection) FetchReceipts(request *fetchRequest) error {
-	// Sanity check the protocol version
-	if p.version < 63 {
-		panic(fmt.Sprintf("body fetch [eth/63+] requested on eth/%d", p.version))
-	}
 	// Short circuit if the peer is already fetching
 	if !atomic.CompareAndSwapInt32(&p.receiptIdle, 0, 1) {
 		return errAlreadyFetching
@@ -209,10 +196,6 @@ func (p *peerConnection) FetchReceipts(request *fetchRequest) error {

 // FetchNodeData sends a node state data retrieval request to the remote peer.
 func (p *peerConnection) FetchNodeData(hashes []common.Hash) error {
-	// Sanity check the protocol version
-	if p.version < 63 {
-		panic(fmt.Sprintf("node data fetch [eth/63+] requested on eth/%d", p.version))
-	}
 	// Short circuit if the peer is already fetching
 	if !atomic.CompareAndSwapInt32(&p.stateIdle, 0, 1) {
 		return errAlreadyFetching
@@ -474,7 +457,7 @@ func (ps *peerSet) HeaderIdlePeers() ([]*peerConnection, int) {
 		defer p.lock.RUnlock()
 		return p.headerThroughput
 	}
-	return ps.idlePeers(62, 65, idle, throughput)
+	return ps.idlePeers(63, 65, idle, throughput)
 }

 // BodyIdlePeers retrieves a flat list of all the currently body-idle peers within
@@ -488,7 +471,7 @@ func (ps *peerSet) BodyIdlePeers() ([]*peerConnection, int) {
 		defer p.lock.RUnlock()
 		return p.blockThroughput
 	}
-	return ps.idlePeers(62, 65, idle, throughput)
+	return ps.idlePeers(63, 65, idle, throughput)
 }

 // ReceiptIdlePeers retrieves a flat list of all the currently receipt-idle peers