eth/downloader: update tests for reliability (#15337)
Updated the use of t.Parallel and added data-driven subtests to help isolate the test groups from one another. Increased the timeout in RequestHeadersByNumber so it doesn't time out and cause other tests to break.
commit d927c67f9d (parent 20fe928914)
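For context, the commit applies the standard Go table-driven subtest idiom: a single top-level test fans out into named subtests via t.Run, and the shared helper marks itself parallel. The sketch below is a minimal, self-contained illustration of that pattern; the table entries and the checkSync helper are illustrative stand-ins, not the downloader's real fixtures.

package example

import (
	"fmt"
	"testing"
)

// TestSyncVariants mirrors the structure the commit introduces: one top-level
// test drives a table of cases through named subtests.
func TestSyncVariants(t *testing.T) {
	testCases := []struct {
		protocol int
		mode     string
	}{
		{63, "fast"},
		{64, "full"},
	}
	for _, tc := range testCases {
		tc := tc // capture the loop variable for the subtest closure
		t.Run(fmt.Sprintf("protocol %d mode %v", tc.protocol, tc.mode), func(t *testing.T) {
			checkSync(t, tc.protocol, tc.mode)
		})
	}
}

// checkSync stands in for helpers like testDeliverHeadersHang: it marks itself
// parallel, so the subtests above run concurrently with one another while the
// parent test still completes as a single unit, isolated from other top-level tests.
func checkSync(t *testing.T, protocol int, mode string) {
	t.Parallel()
	if protocol < 62 {
		t.Fatalf("unsupported protocol %d in %s sync", protocol, mode)
	}
}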
@@ -704,6 +704,7 @@ func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
 func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }
 
 func testThrottling(t *testing.T, protocol int, mode SyncMode) {
+	t.Parallel()
 	tester := newTester()
 	defer tester.terminate()
 
@@ -1166,6 +1167,8 @@ func TestShiftedHeaderAttack64Fast(t *testing.T) { testShiftedHeaderAttack(t, 6
 func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) }
 
 func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
+	t.Parallel()
+
 	tester := newTester()
 	defer tester.terminate()
 
@@ -1198,6 +1201,8 @@ func TestInvalidHeaderRollback64Fast(t *testing.T) { testInvalidHeaderRollback(
 func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) }
 
 func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
+	t.Parallel()
+
 	tester := newTester()
 	defer tester.terminate()
 
@@ -1310,6 +1315,8 @@ func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDr
 func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }
 
 func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
+	t.Parallel()
+
 	// Define the disconnection requirement for individual hash fetch errors
 	tests := []struct {
 		result error
@@ -1665,12 +1672,26 @@ func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
 
 // This test reproduces an issue where unexpected deliveries would
 // block indefinitely if they arrived at the right time.
-func TestDeliverHeadersHang62(t *testing.T) { testDeliverHeadersHang(t, 62, FullSync) }
-func TestDeliverHeadersHang63Full(t *testing.T) { testDeliverHeadersHang(t, 63, FullSync) }
-func TestDeliverHeadersHang63Fast(t *testing.T) { testDeliverHeadersHang(t, 63, FastSync) }
-func TestDeliverHeadersHang64Full(t *testing.T) { testDeliverHeadersHang(t, 64, FullSync) }
-func TestDeliverHeadersHang64Fast(t *testing.T) { testDeliverHeadersHang(t, 64, FastSync) }
-func TestDeliverHeadersHang64Light(t *testing.T) { testDeliverHeadersHang(t, 64, LightSync) }
+// We use data driven subtests to manage this so that it will be parallel on its own
+// and not with the other tests, avoiding intermittent failures.
+func TestDeliverHeadersHang(t *testing.T) {
+	testCases := []struct {
+		protocol int
+		syncMode SyncMode
+	}{
+		{62, FullSync},
+		{63, FullSync},
+		{63, FastSync},
+		{64, FullSync},
+		{64, FastSync},
+		{64, LightSync},
+	}
+	for _, tc := range testCases {
+		t.Run(fmt.Sprintf("protocol %d mode %v", tc.protocol, tc.syncMode), func(t *testing.T) {
+			testDeliverHeadersHang(t, tc.protocol, tc.syncMode)
+		})
+	}
+}
 
 type floodingTestPeer struct {
 	peer Peer
@@ -1703,7 +1724,7 @@ func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int
 	// Deliver the actual requested headers.
 	go ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
 	// None of the extra deliveries should block.
-	timeout := time.After(15 * time.Second)
+	timeout := time.After(60 * time.Second)
 	for i := 0; i < cap(deliveriesDone); i++ {
 		select {
 		case <-deliveriesDone:
@@ -1732,7 +1753,6 @@ func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
 		tester.downloader.peers.peers["peer"].peer,
 		tester,
 	}
-
 	if err := tester.sync("peer", nil, mode); err != nil {
 		t.Errorf("sync failed: %v", err)
 	}
@@ -1742,12 +1762,28 @@ func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
 
 // Tests that if fast sync aborts in the critical section, it can restart a few
 // times before giving up.
-func TestFastCriticalRestartsFail63(t *testing.T) { testFastCriticalRestarts(t, 63, false) }
-func TestFastCriticalRestartsFail64(t *testing.T) { testFastCriticalRestarts(t, 64, false) }
-func TestFastCriticalRestartsCont63(t *testing.T) { testFastCriticalRestarts(t, 63, true) }
-func TestFastCriticalRestartsCont64(t *testing.T) { testFastCriticalRestarts(t, 64, true) }
+// We use data driven subtests to manage this so that it will be parallel on its own
+// and not with the other tests, avoiding intermittent failures.
+func TestFastCriticalRestarts(t *testing.T) {
+	testCases := []struct {
+		protocol int
+		progress bool
+	}{
+		{63, false},
+		{64, false},
+		{63, true},
+		{64, true},
+	}
+	for _, tc := range testCases {
+		t.Run(fmt.Sprintf("protocol %d progress %v", tc.protocol, tc.progress), func(t *testing.T) {
+			testFastCriticalRestarts(t, tc.protocol, tc.progress)
+		})
+	}
+}
 
 func testFastCriticalRestarts(t *testing.T, protocol int, progress bool) {
+	t.Parallel()
+
 	tester := newTester()
 	defer tester.terminate()
 
@@ -1776,6 +1812,7 @@ func testFastCriticalRestarts(t *testing.T, protocol int, progress bool) {
 
 		// If it's the first failure, pivot should be locked => reenable all others to detect pivot changes
 		if i == 0 {
+			time.Sleep(150 * time.Millisecond) // Make sure no in-flight requests remain
 			if tester.downloader.fsPivotLock == nil {
 				time.Sleep(400 * time.Millisecond) // Make sure the first huge timeout expires too
 				t.Fatalf("pivot block not locked in after critical section failure")
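A practical side effect of folding the variants into subtests is that a single case can now be exercised in isolation with go test's -run filter, for example `go test -run 'TestDeliverHeadersHang/protocol_64' ./eth/downloader` (the exact subtest suffix depends on how SyncMode formats with %v; spaces in subtest names become underscores).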