eth/fetcher: remove test sleeps (15s -> 2.8s)

Péter Szilágyi 2015-06-22 18:08:28 +03:00
parent 1989d1491a
commit b53f701c27
2 changed files with 167 additions and 59 deletions
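
The change replaces fixed time.Sleep pauses in the fetcher tests with two test-only hooks (fetchingHook and importedHook) that feed channels, which the tests then drain via select with a bounded timeout: fast when imports are fast, and failing loudly when an expected event never arrives. The sketch below is illustrative only and not part of the commit; the worker type and doneHook name are hypothetical stand-ins for the fetcher and its hooks.

    package example

    import (
        "testing"
        "time"
    )

    // worker is a stand-in for the fetcher: it runs work asynchronously and
    // invokes an optional hook once an item has been processed.
    type worker struct {
        doneHook func(int) // test-only callback, nil in production
    }

    func (w *worker) process(item int) {
        go func() {
            // ... real work would happen here ...
            if w.doneHook != nil {
                w.doneHook(item)
            }
        }()
    }

    func TestWorker(t *testing.T) {
        w := new(worker)

        done := make(chan int)
        w.doneHook = func(item int) { done <- item }

        w.process(42)

        // Wait for the completion event instead of sleeping a fixed amount.
        select {
        case <-done:
        case <-time.After(time.Second):
            t.Fatalf("processing timeout")
        }
        // Ensure nothing unexpected trails behind.
        select {
        case <-done:
            t.Fatalf("extra item processed")
        case <-time.After(50 * time.Millisecond):
        }
    }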

eth/fetcher/fetcher.go

@@ -92,6 +92,10 @@ type Fetcher struct {
     chainHeight chainHeightFn // Retrieves the current chain's height
     insertChain chainInsertFn // Injects a batch of blocks into the chain
     dropPeer    peerDropFn    // Drops a peer for misbehaving
+
+    // Testing hooks
+    fetchingHook func([]common.Hash) // Method to call upon starting a block fetch
+    importedHook func(*types.Block)  // Method to call upon successful block import
 }
 
 // New creates a block fetcher to retrieve blocks based on hash announcements.
@@ -277,7 +281,13 @@ func (f *Fetcher) loop() {
                 glog.V(logger.Detail).Infof("Peer %s: fetching %s", peer, list)
             }
-            go f.fetching[hashes[0]].fetch(hashes)
+            hashes := hashes // closure!
+            go func() {
+                if f.fetchingHook != nil {
+                    f.fetchingHook(hashes)
+                }
+                f.fetching[hashes[0]].fetch(hashes)
+            }()
         }
         // Schedule the next fetch if blocks are still pending
         f.reschedule(fetch)
@@ -402,6 +412,11 @@ func (f *Fetcher) insert(peer string, block *types.Block) {
         }
         // If import succeeded, broadcast the block
         go f.broadcastBlock(block, false)
+
+        // Invoke the testing hook if needed
+        if f.importedHook != nil {
+            f.importedHook(block)
+        }
     }()
 }
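
The hashes := hashes line above rebinds the loop variable before the goroutine captures it; in Go versions before 1.22 all iterations of a loop share a single variable, so without the rebind every goroutine spawned from the loop could observe the hashes value of a later iteration. A standalone illustration of the pitfall, not taken from the commit:

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        var wg sync.WaitGroup
        for _, v := range []string{"a", "b", "c"} {
            v := v // rebind: each goroutine captures its own copy (pre-Go 1.22 semantics)
            wg.Add(1)
            go func() {
                defer wg.Done()
                fmt.Println(v) // with the rebind: a, b, c in some order; without it, possibly "c" three times
            }()
        }
        wg.Wait()
    }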

eth/fetcher/fetcher_test.go

@@ -163,7 +163,7 @@ func (f *fetcherTester) makeFetcher(blocks map[common.Hash]*types.Block) blockRe
 // them, successfully importing into the local chain.
 func TestSequentialAnnouncements(t *testing.T) {
     // Create a chain of blocks to import
-    targetBlocks := 24
+    targetBlocks := 4 * hashLimit
     hashes := createHashes(targetBlocks, knownHash)
     blocks := createBlocksFromHashes(hashes)
@@ -171,12 +171,22 @@ func TestSequentialAnnouncements(t *testing.T) {
     fetcher := tester.makeFetcher(blocks)
 
     // Iteratively announce blocks until all are imported
-    for i := len(hashes) - 1; i >= 0; i-- {
+    imported := make(chan *types.Block)
+    tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
+
+    for i := len(hashes) - 2; i >= 0; i-- {
         tester.fetcher.Notify("valid", hashes[i], time.Now().Add(-arriveTimeout), fetcher)
-        time.Sleep(50 * time.Millisecond)
+
+        select {
+        case <-imported:
+        case <-time.After(time.Second):
+            t.Fatalf("block %d: import timeout", len(hashes)-i)
+        }
     }
-    if imported := len(tester.blocks); imported != targetBlocks+1 {
-        t.Fatalf("synchronised block mismatch: have %v, want %v", imported, targetBlocks+1)
+    select {
+    case <-imported:
+        t.Fatalf("extra block imported")
+    case <-time.After(50 * time.Millisecond):
     }
 }
@@ -184,7 +194,7 @@ func TestSequentialAnnouncements(t *testing.T) {
 // peer), they will only get downloaded at most once.
 func TestConcurrentAnnouncements(t *testing.T) {
     // Create a chain of blocks to import
-    targetBlocks := 24
+    targetBlocks := 4 * hashLimit
     hashes := createHashes(targetBlocks, knownHash)
     blocks := createBlocksFromHashes(hashes)
@@ -198,15 +208,24 @@ func TestConcurrentAnnouncements(t *testing.T) {
         return fetcher(hashes)
     }
     // Iteratively announce blocks until all are imported
-    for i := len(hashes) - 1; i >= 0; i-- {
+    imported := make(chan *types.Block)
+    tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
+
+    for i := len(hashes) - 2; i >= 0; i-- {
         tester.fetcher.Notify("first", hashes[i], time.Now().Add(-arriveTimeout), wrapper)
         tester.fetcher.Notify("second", hashes[i], time.Now().Add(-arriveTimeout+time.Millisecond), wrapper)
         tester.fetcher.Notify("second", hashes[i], time.Now().Add(-arriveTimeout-time.Millisecond), wrapper)
-        time.Sleep(50 * time.Millisecond)
+
+        select {
+        case <-imported:
+        case <-time.After(time.Second):
+            t.Fatalf("block %d: import timeout", len(hashes)-i)
+        }
     }
-    if imported := len(tester.blocks); imported != targetBlocks+1 {
-        t.Fatalf("synchronised block mismatch: have %v, want %v", imported, targetBlocks+1)
+    select {
+    case <-imported:
+        t.Fatalf("extra block imported")
+    case <-time.After(50 * time.Millisecond):
     }
     // Make sure no blocks were retrieved twice
     if int(counter) != targetBlocks {
@@ -218,7 +237,7 @@ func TestConcurrentAnnouncements(t *testing.T) {
 // results in a valid import.
 func TestOverlappingAnnouncements(t *testing.T) {
     // Create a chain of blocks to import
-    targetBlocks := 24
+    targetBlocks := 4 * hashLimit
     hashes := createHashes(targetBlocks, knownHash)
     blocks := createBlocksFromHashes(hashes)
@@ -226,15 +245,31 @@ func TestOverlappingAnnouncements(t *testing.T) {
     fetcher := tester.makeFetcher(blocks)
 
     // Iteratively announce blocks, but overlap them continuously
-    delay, overlap := 50*time.Millisecond, time.Duration(5)
-    for i := len(hashes) - 1; i >= 0; i-- {
-        tester.fetcher.Notify("valid", hashes[i], time.Now().Add(-arriveTimeout+overlap*delay), fetcher)
-        time.Sleep(delay)
-    }
-    time.Sleep(overlap * delay)
-    if imported := len(tester.blocks); imported != targetBlocks+1 {
-        t.Fatalf("synchronised block mismatch: have %v, want %v", imported, targetBlocks+1)
+    fetching := make(chan []common.Hash)
+    imported := make(chan *types.Block, len(hashes)-1)
+    tester.fetcher.fetchingHook = func(hashes []common.Hash) { fetching <- hashes }
+    tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
+
+    for i := len(hashes) - 2; i >= 0; i-- {
+        tester.fetcher.Notify("valid", hashes[i], time.Now().Add(-arriveTimeout), fetcher)
+        select {
+        case <-fetching:
+        case <-time.After(time.Second):
+            t.Fatalf("hash %d: announce timeout", len(hashes)-i)
+        }
+    }
+    // Wait for all the imports to complete and check count
+    for i := 0; i < len(hashes)-1; i++ {
+        select {
+        case <-imported:
+        case <-time.After(time.Second):
+            t.Fatalf("block %d: import timeout", i)
+        }
+    }
+    select {
+    case <-imported:
+        t.Fatalf("extra block imported")
+    case <-time.After(50 * time.Millisecond):
     }
 }
@@ -280,27 +315,37 @@ func TestPendingDeduplication(t *testing.T) {
 // imported when all the gaps are filled in.
 func TestRandomArrivalImport(t *testing.T) {
     // Create a chain of blocks to import, and choose one to delay
-    targetBlocks := 24
-    hashes := createHashes(targetBlocks, knownHash)
+    hashes := createHashes(maxQueueDist, knownHash)
     blocks := createBlocksFromHashes(hashes)
 
-    skip := targetBlocks / 2
+    skip := maxQueueDist / 2
     tester := newTester()
     fetcher := tester.makeFetcher(blocks)
 
     // Iteratively announce blocks, skipping one entry
+    imported := make(chan *types.Block, len(hashes)-1)
+    tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
+
     for i := len(hashes) - 1; i >= 0; i-- {
         if i != skip {
             tester.fetcher.Notify("valid", hashes[i], time.Now().Add(-arriveTimeout), fetcher)
-            time.Sleep(50 * time.Millisecond)
+            time.Sleep(time.Millisecond)
         }
     }
     // Finally announce the skipped entry and check full import
     tester.fetcher.Notify("valid", hashes[skip], time.Now().Add(-arriveTimeout), fetcher)
-    time.Sleep(50 * time.Millisecond)
 
-    if imported := len(tester.blocks); imported != targetBlocks+1 {
-        t.Fatalf("synchronised block mismatch: have %v, want %v", imported, targetBlocks+1)
+    for i := 0; i < len(hashes)-1; i++ {
+        select {
+        case <-imported:
+        case <-time.After(time.Second):
+            t.Fatalf("block %d: import timeout", i)
+        }
+    }
+    select {
+    case <-imported:
+        t.Fatalf("extra block imported")
+    case <-time.After(50 * time.Millisecond):
     }
 }
@@ -308,27 +353,37 @@ func TestRandomArrivalImport(t *testing.T) {
 // are correctly schedule, filling and import queue gaps.
 func TestQueueGapFill(t *testing.T) {
     // Create a chain of blocks to import, and choose one to not announce at all
-    targetBlocks := 24
-    hashes := createHashes(targetBlocks, knownHash)
+    hashes := createHashes(maxQueueDist, knownHash)
     blocks := createBlocksFromHashes(hashes)
 
-    skip := targetBlocks / 2
+    skip := maxQueueDist / 2
     tester := newTester()
     fetcher := tester.makeFetcher(blocks)
 
     // Iteratively announce blocks, skipping one entry
+    imported := make(chan *types.Block, len(hashes)-1)
+    tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
+
     for i := len(hashes) - 1; i >= 0; i-- {
         if i != skip {
             tester.fetcher.Notify("valid", hashes[i], time.Now().Add(-arriveTimeout), fetcher)
-            time.Sleep(50 * time.Millisecond)
+            time.Sleep(time.Millisecond)
         }
     }
     // Fill the missing block directly as if propagated
     tester.fetcher.Enqueue("valid", blocks[hashes[skip]])
-    time.Sleep(50 * time.Millisecond)
 
-    if imported := len(tester.blocks); imported != targetBlocks+1 {
-        t.Fatalf("synchronised block mismatch: have %v, want %v", imported, targetBlocks+1)
+    for i := 0; i < len(hashes)-1; i++ {
+        select {
+        case <-imported:
+        case <-time.After(time.Second):
+            t.Fatalf("block %d: import timeout", i)
+        }
+    }
+    select {
+    case <-imported:
+        t.Fatalf("extra block imported")
+    case <-time.After(50 * time.Millisecond):
     }
 }
@@ -348,9 +403,15 @@ func TestImportDeduplication(t *testing.T) {
         atomic.AddUint32(&counter, uint32(len(blocks)))
         return tester.insertChain(blocks)
     }
+    // Instrument the fetching and imported events
+    fetching := make(chan []common.Hash)
+    imported := make(chan *types.Block, len(hashes)-1)
+    tester.fetcher.fetchingHook = func(hashes []common.Hash) { fetching <- hashes }
+    tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
+
     // Announce the duplicating block, wait for retrieval, and also propagate directly
     tester.fetcher.Notify("valid", hashes[0], time.Now().Add(-arriveTimeout), fetcher)
-    time.Sleep(50 * time.Millisecond)
+    <-fetching
 
     tester.fetcher.Enqueue("valid", blocks[hashes[0]])
     tester.fetcher.Enqueue("valid", blocks[hashes[0]])
@@ -358,8 +419,13 @@ func TestImportDeduplication(t *testing.T) {
     // Fill the missing block directly as if propagated, and check import uniqueness
     tester.fetcher.Enqueue("valid", blocks[hashes[1]])
-    time.Sleep(50 * time.Millisecond)
 
+    for done := false; !done; {
+        select {
+        case <-imported:
+        case <-time.After(50 * time.Millisecond):
+            done = true
+        }
+    }
     if imported := len(tester.blocks); imported != 3 {
         t.Fatalf("synchronised block mismatch: have %v, want %v", imported, 3)
     }
@@ -400,8 +466,12 @@ func TestDistantDiscarding(t *testing.T) {
 // block announcements to a node, but that even in the face of such an attack,
 // the fetcher remains operational.
 func TestHashMemoryExhaustionAttack(t *testing.T) {
+    // Create a tester with instrumented import hooks
     tester := newTester()
 
+    imported := make(chan *types.Block)
+    tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
+
     // Create a valid chain and an infinite junk chain
     hashes := createHashes(hashLimit+2*maxQueueDist, knownHash)
     blocks := createBlocksFromHashes(hashes)
@@ -413,29 +483,39 @@ func TestHashMemoryExhaustionAttack(t *testing.T) {
     // Feed the tester a huge hashset from the attacker, and a limited from the valid peer
     for i := 0; i < len(attack); i++ {
         if i < maxQueueDist {
-            tester.fetcher.Notify("valid", hashes[len(hashes)-1-i], time.Now().Add(arriveTimeout/2), valid)
+            tester.fetcher.Notify("valid", hashes[len(hashes)-2-i], time.Now(), valid)
         }
-        tester.fetcher.Notify("attacker", attack[i], time.Now().Add(arriveTimeout/2), attacker)
+        tester.fetcher.Notify("attacker", attack[i], time.Now(), attacker)
     }
     if len(tester.fetcher.announced) != hashLimit+maxQueueDist {
         t.Fatalf("queued announce count mismatch: have %d, want %d", len(tester.fetcher.announced), hashLimit+maxQueueDist)
     }
-    // Wait for synchronisation to complete and check success for the valid peer
-    time.Sleep(2 * arriveTimeout)
-    if imported := len(tester.blocks); imported != maxQueueDist {
-        t.Fatalf("partial synchronised block mismatch: have %v, want %v", imported, maxQueueDist)
+    // Wait for fetches to complete
+    for i := 0; i < maxQueueDist; i++ {
+        select {
+        case <-imported:
+        case <-time.After(time.Second):
+            t.Fatalf("block %d: import timeout", i)
+        }
+    }
+    select {
+    case <-imported:
+        t.Fatalf("extra block imported")
+    case <-time.After(50 * time.Millisecond):
     }
     // Feed the remaining valid hashes to ensure DOS protection state remains clean
-    for i := len(hashes) - maxQueueDist; i >= 0; {
-        for j := 0; j < maxQueueDist && i >= 0; j++ {
-            tester.fetcher.Notify("valid", hashes[i], time.Now().Add(time.Millisecond), valid)
-            i--
+    for i := len(hashes) - maxQueueDist - 2; i >= 0; i-- {
+        tester.fetcher.Notify("valid", hashes[i], time.Now().Add(-arriveTimeout), valid)
+
+        select {
+        case <-imported:
+        case <-time.After(time.Second):
+            t.Fatalf("block %d: import timeout", len(hashes)-i)
         }
-        time.Sleep(500 * time.Millisecond)
     }
-    time.Sleep(500 * time.Millisecond)
-    if imported := len(tester.blocks); imported != len(hashes) {
-        t.Fatalf("fully synchronised block mismatch: have %v, want %v", imported, len(hashes))
+    select {
+    case <-imported:
+        t.Fatalf("extra block imported")
+    case <-time.After(50 * time.Millisecond):
     }
 }
@@ -443,14 +523,18 @@ func TestHashMemoryExhaustionAttack(t *testing.T) {
 // announces and retrievals) don't pile up indefinitely, exhausting available
 // system memory.
 func TestBlockMemoryExhaustionAttack(t *testing.T) {
+    // Create a tester with instrumented import hooks
     tester := newTester()
 
+    imported := make(chan *types.Block)
+    tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
+
     // Create a valid chain and a batch of dangling (but in range) blocks
-    hashes := createHashes(blockLimit, knownHash)
+    hashes := createHashes(blockLimit+2*maxQueueDist, knownHash)
     blocks := createBlocksFromHashes(hashes)
 
     attack := make(map[common.Hash]*types.Block)
-    for i := 0; i < 16; i++ {
+    for len(attack) < blockLimit+2*maxQueueDist {
         hashes := createHashes(maxQueueDist-1, unknownHash)
         blocks := createBlocksFromHashes(hashes)
         for _, hash := range hashes[:maxQueueDist-2] {
@@ -475,18 +559,27 @@ func TestBlockMemoryExhaustionAttack(t *testing.T) {
     }
     // Insert the missing piece (and sanity check the import)
     tester.fetcher.Enqueue("valid", blocks[hashes[len(hashes)-2]])
-    time.Sleep(500 * time.Millisecond)
-    if imported := len(tester.blocks); imported != maxQueueDist+1 {
-        t.Fatalf("synchronised block mismatch: have %v, want %v", imported, maxQueueDist+1)
+    for i := 0; i < maxQueueDist; i++ {
+        select {
+        case <-imported:
+        case <-time.After(time.Second):
+            t.Fatalf("block %d: import timeout", i)
+        }
+    }
+    select {
+    case <-imported:
+        t.Fatalf("extra block imported")
+    case <-time.After(50 * time.Millisecond):
     }
     // Insert the remaining blocks in chunks to ensure clean DOS protection
     for i := maxQueueDist; i < len(hashes)-1; i++ {
         tester.fetcher.Enqueue("valid", blocks[hashes[len(hashes)-2-i]])
-        if i%maxQueueDist == 0 {
-            time.Sleep(500 * time.Millisecond)
+        select {
+        case <-imported:
+        case <-time.After(time.Second):
+            t.Fatalf("block %d: import timeout", len(hashes)-i)
         }
     }
-    time.Sleep(500 * time.Millisecond)
     if imported := len(tester.blocks); imported != len(hashes) {
         t.Fatalf("synchronised block mismatch: have %v, want %v", imported, len(hashes))
     }
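
Rather than waiting for an exact number of import events, TestImportDeduplication drains the imported channel until it has stayed quiet for 50ms and only then compares len(tester.blocks). A hedged sketch of that drain idiom as a reusable helper; the function name is hypothetical and not from the commit.

    package example

    import "time"

    // drainUntilQuiet keeps consuming events until none arrive within the
    // quiet window, then reports how many were seen. It mirrors the loop at
    // the end of TestImportDeduplication.
    func drainUntilQuiet(events <-chan struct{}, quiet time.Duration) int {
        seen := 0
        for {
            select {
            case <-events:
                seen++
            case <-time.After(quiet):
                return seen
            }
        }
    }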