forked from cerc-io/plugeth
eth/downloader: support individual peers in the test suite
parent 2937903299
commit 2dd6a62f67
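The diff below reworks the downloader test harness so that every simulated peer owns its hash chain and block map, instead of the whole tester sharing one implicit chain. A minimal sketch of the resulting test flow, using only the helpers visible in the diff (createHashes, createBlocksFromHashes, newTester, newPeer, synchronise); the test name, peer ids and block count are illustrative, not part of the commit:

// Illustrative sketch only: it assumes it sits in the same test file and
// package as the helpers changed below, and reuses them unchanged.
func TestTwoPeerSketch(t *testing.T) {
	targetBlocks := 128

	hashes := createHashes(0, targetBlocks)  // forge a hash chain for the test
	blocks := createBlocksFromHashes(hashes) // and the blocks backing it

	tester := newTester()                    // the tester now starts with only the genesis block
	tester.newPeer("peer A", hashes, blocks) // each peer registers its own chain...
	tester.newPeer("peer B", hashes, blocks) // ...so a second peer is just another registration

	// Synchronisation is driven straight through the downloader now that the
	// old sync/activePeerId wrapper is gone.
	if err := tester.downloader.synchronise("peer A", hashes[0]); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
}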
@@ -16,6 +16,8 @@ var (
 	knownHash   = common.Hash{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
 	unknownHash = common.Hash{9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9}
 	bannedHash  = common.Hash{5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}
+
+	genesis = createBlock(1, common.Hash{}, knownHash)
 )
 
 func createHashes(start, amount int) (hashes []common.Hash) {
@@ -51,26 +53,20 @@ func createBlocksFromHashes(hashes []common.Hash) map[common.Hash]*types.Block {
 type downloadTester struct {
 	downloader *Downloader
 
-	hashes []common.Hash                // Chain of hashes simulating
-	blocks map[common.Hash]*types.Block // Blocks associated with the hashes
-	chain  []common.Hash                // Block-chain being constructed
+	ownHashes  []common.Hash                           // Hash chain belonging to the tester
+	ownBlocks  map[common.Hash]*types.Block            // Blocks belonging to the tester
+	peerHashes map[string][]common.Hash                // Hash chain belonging to different test peers
+	peerBlocks map[string]map[common.Hash]*types.Block // Blocks belonging to different test peers
 
 	maxHashFetch int // Overrides the maximum number of retrieved hashes
-
-	t            *testing.T
-	done         chan bool
-	activePeerId string
 }
 
-func newTester(t *testing.T, hashes []common.Hash, blocks map[common.Hash]*types.Block) *downloadTester {
+func newTester() *downloadTester {
 	tester := &downloadTester{
-		t:      t,
-		hashes: hashes,
-		blocks: blocks,
-		chain:  []common.Hash{knownHash},
-		done:   make(chan bool),
+		ownHashes:  []common.Hash{knownHash},
+		ownBlocks:  map[common.Hash]*types.Block{knownHash: genesis},
+		peerHashes: make(map[string][]common.Hash),
+		peerBlocks: make(map[string]map[common.Hash]*types.Block),
 	}
 	var mux event.TypeMux
 	downloader := New(&mux, tester.hasBlock, tester.getBlock, nil)
@@ -79,13 +75,6 @@ func newTester(t *testing.T, hashes []common.Hash, blocks map[common.Hash]*types
 	return tester
 }
 
-// sync is a simple wrapper around the downloader to start synchronisation and
-// block until it returns
-func (dl *downloadTester) sync(peerId string, head common.Hash) error {
-	dl.activePeerId = peerId
-	return dl.downloader.synchronise(peerId, head)
-}
-
 // syncTake is starts synchronising with a remote peer, but concurrently it also
 // starts fetching blocks that the downloader retrieved. IT blocks until both go
 // routines terminate.
@@ -102,12 +91,17 @@ func (dl *downloadTester) syncTake(peerId string, head common.Hash) ([]*Block, e
 				time.Sleep(time.Millisecond)
 			}
 			// Take a batch of blocks and accumulate
-			took = append(took, dl.downloader.TakeBlocks()...)
+			blocks := dl.downloader.TakeBlocks()
+			for _, block := range blocks {
+				dl.ownHashes = append(dl.ownHashes, block.RawBlock.Hash())
+				dl.ownBlocks[block.RawBlock.Hash()] = block.RawBlock
+			}
+			took = append(took, blocks...)
 		}
 		done <- struct{}{}
 	}()
 	// Start the downloading, sync the taker and return
-	err := dl.sync(peerId, head)
+	err := dl.downloader.synchronise(peerId, head)
 
 	done <- struct{}{}
 	<-done
@@ -115,63 +109,74 @@ func (dl *downloadTester) syncTake(peerId string, head common.Hash) ([]*Block, e
 	return took, err
 }
 
+// hasBlock checks if a block is present in the testers canonical chain.
 func (dl *downloadTester) hasBlock(hash common.Hash) bool {
-	for _, h := range dl.chain {
-		if h == hash {
-			return true
-		}
-	}
-	return false
+	return dl.getBlock(hash) != nil
 }
 
+// getBlock retrieves a block from the testers canonical chain.
 func (dl *downloadTester) getBlock(hash common.Hash) *types.Block {
-	return dl.blocks[knownHash]
+	return dl.ownBlocks[hash]
 }
 
-// getHashes retrieves a batch of hashes for reconstructing the chain.
-func (dl *downloadTester) getHashes(head common.Hash) error {
-	limit := MaxHashFetch
-	if dl.maxHashFetch > 0 {
-		limit = dl.maxHashFetch
+// newPeer registers a new block download source into the downloader.
+func (dl *downloadTester) newPeer(id string, hashes []common.Hash, blocks map[common.Hash]*types.Block) error {
+	err := dl.downloader.RegisterPeer(id, hashes[0], dl.peerGetHashesFn(id), dl.peerGetBlocksFn(id))
+	if err == nil {
+		// Assign the owned hashes and blocks to the peer
+		dl.peerHashes[id] = hashes
+		dl.peerBlocks[id] = blocks
 	}
-	// Gather the next batch of hashes
-	hashes := make([]common.Hash, 0, limit)
-	for i, hash := range dl.hashes {
-		if hash == head {
-			i++
-			for len(hashes) < cap(hashes) && i < len(dl.hashes) {
-				hashes = append(hashes, dl.hashes[i])
-				i++
-			}
-			break
-		}
-	}
-	// Delay delivery a bit to allow attacks to unfold
-	id := dl.activePeerId
-	go func() {
-		time.Sleep(time.Millisecond)
-		dl.downloader.DeliverHashes(id, hashes)
-	}()
-	return nil
-}
-
-func (dl *downloadTester) getBlocks(id string) func([]common.Hash) error {
-	return func(hashes []common.Hash) error {
-		blocks := make([]*types.Block, 0, len(hashes))
-		for _, hash := range hashes {
-			if block, ok := dl.blocks[hash]; ok {
-				blocks = append(blocks, block)
-			}
-		}
-		go dl.downloader.DeliverBlocks(id, blocks)
-		return nil
-	}
-}
-
-// newPeer registers a new block download source into the syncer.
-func (dl *downloadTester) newPeer(id string, td *big.Int, hash common.Hash) error {
-	return dl.downloader.RegisterPeer(id, hash, dl.getHashes, dl.getBlocks(id))
+	return err
+}
+
+// peerGetBlocksFn constructs a getHashes function associated with a particular
+// peer in the download tester. The returned function can be used to retrieve
+// batches of hashes from the particularly requested peer.
+func (dl *downloadTester) peerGetHashesFn(id string) func(head common.Hash) error {
+	return func(head common.Hash) error {
+		limit := MaxHashFetch
+		if dl.maxHashFetch > 0 {
+			limit = dl.maxHashFetch
+		}
+		// Gather the next batch of hashes
+		hashes := dl.peerHashes[id]
+		result := make([]common.Hash, 0, limit)
+		for i, hash := range hashes {
+			if hash == head {
+				i++
+				for len(result) < cap(result) && i < len(hashes) {
+					result = append(result, hashes[i])
+					i++
+				}
+				break
+			}
+		}
+		// Delay delivery a bit to allow attacks to unfold
+		go func() {
+			time.Sleep(time.Millisecond)
+			dl.downloader.DeliverHashes(id, result)
+		}()
+		return nil
+	}
+}
+
+// peerGetBlocksFn constructs a getBlocks function associated with a particular
+// peer in the download tester. The returned function can be used to retrieve
+// batches of blocks from the particularly requested peer.
+func (dl *downloadTester) peerGetBlocksFn(id string) func([]common.Hash) error {
+	return func(hashes []common.Hash) error {
+		blocks := dl.peerBlocks[id]
+		result := make([]*types.Block, 0, len(hashes))
+		for _, hash := range hashes {
+			if block, ok := blocks[hash]; ok {
+				result = append(result, block)
+			}
+		}
+		go dl.downloader.DeliverBlocks(id, result)
+
+		return nil
+	}
 }
 
 // Tests that simple synchronization, without throttling from a good peer works.
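The new peerGetHashesFn/peerGetBlocksFn helpers lean on a plain Go idiom: the returned callback closes over the peer id, so the downloader keeps its fixed callback signatures while the tester routes each call to that peer's private chain. A tiny self-contained illustration of the idiom, with generic names that are not part of the downloader API:

// Generic illustration (not downloader code): a registry hands out a
// fixed-signature fetcher bound to one peer id, mirroring peerGetHashesFn above.
package main

import "fmt"

type registry struct {
	peerHashes map[string][]string
}

// getHashesFn captures the peer id in a closure; the data is looked up at
// call time, so it reflects whatever was registered for that id.
func (r *registry) getHashesFn(id string) func(head string) []string {
	return func(head string) []string {
		hashes := r.peerHashes[id]
		for i, hash := range hashes {
			if hash == head {
				return hashes[i:] // everything from the requested head onwards
			}
		}
		return nil
	}
}

func main() {
	r := &registry{peerHashes: map[string][]string{
		"peer A": {"a3", "a2", "a1"},
		"peer B": {"b2", "b1"},
	}}
	fetchA := r.getHashesFn("peer A")
	fetchB := r.getHashesFn("peer B")

	fmt.Println(fetchA("a2")) // [a2 a1]
	fmt.Println(fetchB("b2")) // [b2 b1]
}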
@@ -181,11 +186,11 @@ func TestSynchronisation(t *testing.T) {
 	hashes := createHashes(0, targetBlocks)
 	blocks := createBlocksFromHashes(hashes)
 
-	tester := newTester(t, hashes, blocks)
-	tester.newPeer("peer", big.NewInt(10000), hashes[0])
+	tester := newTester()
+	tester.newPeer("peer", hashes, blocks)
 
 	// Synchronise with the peer and make sure all blocks were retrieved
-	if err := tester.sync("peer", hashes[0]); err != nil {
+	if err := tester.downloader.synchronise("peer", hashes[0]); err != nil {
 		t.Fatalf("failed to synchronise blocks: %v", err)
 	}
 	if queued := len(tester.downloader.queue.blockPool); queued != targetBlocks {
@@ -200,11 +205,11 @@ func TestBlockTaking(t *testing.T) {
 	hashes := createHashes(0, targetBlocks)
 	blocks := createBlocksFromHashes(hashes)
 
-	tester := newTester(t, hashes, blocks)
-	tester.newPeer("peer", big.NewInt(10000), hashes[0])
+	tester := newTester()
+	tester.newPeer("peer", hashes, blocks)
 
 	// Synchronise with the peer and test block retrieval
-	if err := tester.sync("peer", hashes[0]); err != nil {
+	if err := tester.downloader.synchronise("peer", hashes[0]); err != nil {
 		t.Fatalf("failed to synchronise blocks: %v", err)
 	}
 	if took := tester.downloader.TakeBlocks(); len(took) != targetBlocks {
@@ -214,7 +219,7 @@ func TestBlockTaking(t *testing.T) {
 
 // Tests that an inactive downloader will not accept incoming hashes and blocks.
 func TestInactiveDownloader(t *testing.T) {
-	tester := newTester(t, nil, nil)
+	tester := newTester()
 
 	// Check that neither hashes nor blocks are accepted
 	if err := tester.downloader.DeliverHashes("bad peer", []common.Hash{}); err != errNoSyncActive {
@@ -232,11 +237,11 @@ func TestCancel(t *testing.T) {
 	hashes := createHashes(0, targetBlocks)
 	blocks := createBlocksFromHashes(hashes)
 
-	tester := newTester(t, hashes, blocks)
-	tester.newPeer("peer", big.NewInt(10000), hashes[0])
+	tester := newTester()
+	tester.newPeer("peer", hashes, blocks)
 
 	// Synchronise with the peer, but cancel afterwards
-	if err := tester.sync("peer", hashes[0]); err != nil {
+	if err := tester.downloader.synchronise("peer", hashes[0]); err != nil {
 		t.Fatalf("failed to synchronise blocks: %v", err)
 	}
 	if !tester.downloader.Cancel() {
@@ -260,13 +265,13 @@ func TestThrottling(t *testing.T) {
 	hashes := createHashes(0, targetBlocks)
 	blocks := createBlocksFromHashes(hashes)
 
-	tester := newTester(t, hashes, blocks)
-	tester.newPeer("peer", big.NewInt(10000), hashes[0])
+	tester := newTester()
+	tester.newPeer("peer", hashes, blocks)
 
 	// Start a synchronisation concurrently
 	errc := make(chan error)
 	go func() {
-		errc <- tester.sync("peer", hashes[0])
+		errc <- tester.downloader.synchronise("peer", hashes[0])
 	}()
 	// Iteratively take some blocks, always checking the retrieval count
 	for total := 0; total < targetBlocks; {
@@ -303,9 +308,9 @@ func TestNonExistingParentAttack(t *testing.T) {
 	forged.ParentHeaderHash = unknownHash
 
 	// Try and sync with the malicious node and check that it fails
-	tester := newTester(t, hashes, blocks)
-	tester.newPeer("attack", big.NewInt(10000), hashes[0])
-	if err := tester.sync("attack", hashes[0]); err != nil {
+	tester := newTester()
+	tester.newPeer("attack", hashes, blocks)
+	if err := tester.downloader.synchronise("attack", hashes[0]); err != nil {
 		t.Fatalf("failed to synchronise blocks: %v", err)
 	}
 	bs := tester.downloader.TakeBlocks()
@@ -319,8 +324,8 @@ func TestNonExistingParentAttack(t *testing.T) {
 
 	// Reconstruct a valid chain, and try to synchronize with it
 	forged.ParentHeaderHash = knownHash
-	tester.newPeer("valid", big.NewInt(20000), hashes[0])
-	if err := tester.sync("valid", hashes[0]); err != nil {
+	tester.newPeer("valid", hashes, blocks)
+	if err := tester.downloader.synchronise("valid", hashes[0]); err != nil {
 		t.Fatalf("failed to synchronise blocks: %v", err)
 	}
 	bs = tester.downloader.TakeBlocks()
@@ -341,12 +346,12 @@ func TestRepeatingHashAttack(t *testing.T) {
 	forged := hashes[:len(hashes)-1]
 
 	// Try and sync with the malicious node
-	tester := newTester(t, forged, blocks)
-	tester.newPeer("attack", big.NewInt(10000), forged[0])
+	tester := newTester()
+	tester.newPeer("attack", forged, blocks)
 
 	errc := make(chan error)
 	go func() {
-		errc <- tester.sync("attack", hashes[0])
+		errc <- tester.downloader.synchronise("attack", hashes[0])
 	}()
 
 	// Make sure that syncing returns and does so with a failure
@@ -359,9 +364,8 @@ func TestRepeatingHashAttack(t *testing.T) {
 		}
 	}
 	// Ensure that a valid chain can still pass sync
-	tester.hashes = hashes
-	tester.newPeer("valid", big.NewInt(20000), hashes[0])
-	if err := tester.sync("valid", hashes[0]); err != nil {
+	tester.newPeer("valid", hashes, blocks)
+	if err := tester.downloader.synchronise("valid", hashes[0]); err != nil {
 		t.Fatalf("failed to synchronise blocks: %v", err)
 	}
 }
@@ -377,15 +381,15 @@ func TestNonExistingBlockAttack(t *testing.T) {
 	hashes[len(hashes)/2] = unknownHash
 
 	// Try and sync with the malicious node and check that it fails
-	tester := newTester(t, hashes, blocks)
-	tester.newPeer("attack", big.NewInt(10000), hashes[0])
-	if err := tester.sync("attack", hashes[0]); err != errPeersUnavailable {
+	tester := newTester()
+	tester.newPeer("attack", hashes, blocks)
+	if err := tester.downloader.synchronise("attack", hashes[0]); err != errPeersUnavailable {
 		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errPeersUnavailable)
 	}
 	// Ensure that a valid chain can still pass sync
 	hashes[len(hashes)/2] = origin
-	tester.newPeer("valid", big.NewInt(20000), hashes[0])
-	if err := tester.sync("valid", hashes[0]); err != nil {
+	tester.newPeer("valid", hashes, blocks)
+	if err := tester.downloader.synchronise("valid", hashes[0]); err != nil {
 		t.Fatalf("failed to synchronise blocks: %v", err)
 	}
 }
@@ -408,14 +412,13 @@ func TestInvalidHashOrderAttack(t *testing.T) {
 	copy(reverse[blockCacheLimit:], chunk2)
 
 	// Try and sync with the malicious node and check that it fails
-	tester := newTester(t, reverse, blocks)
-	tester.newPeer("attack", big.NewInt(10000), reverse[0])
+	tester := newTester()
+	tester.newPeer("attack", reverse, blocks)
 	if _, err := tester.syncTake("attack", reverse[0]); err != errInvalidChain {
 		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errInvalidChain)
 	}
 	// Ensure that a valid chain can still pass sync
-	tester.hashes = hashes
-	tester.newPeer("valid", big.NewInt(20000), hashes[0])
+	tester.newPeer("valid", hashes, blocks)
 	if _, err := tester.syncTake("valid", hashes[0]); err != nil {
 		t.Fatalf("failed to synchronise blocks: %v", err)
 	}
@@ -431,8 +434,8 @@ func TestMadeupHashChainAttack(t *testing.T) {
 	hashes := createHashes(0, 1024*blockCacheLimit)
 
 	// Try and sync with the malicious node and check that it fails
-	tester := newTester(t, hashes, nil)
-	tester.newPeer("attack", big.NewInt(10000), hashes[0])
+	tester := newTester()
+	tester.newPeer("attack", hashes, nil)
 	if _, err := tester.syncTake("attack", hashes[0]); err != errCrossCheckFailed {
 		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errCrossCheckFailed)
 	}
@@ -445,11 +448,11 @@ func TestMadeupHashChainDrippingAttack(t *testing.T) {
 func TestMadeupHashChainDrippingAttack(t *testing.T) {
 	// Create a random chain of hashes to drip
 	hashes := createHashes(0, 16*blockCacheLimit)
-	tester := newTester(t, hashes, nil)
+	tester := newTester()
 
 	// Try and sync with the attacker, one hash at a time
 	tester.maxHashFetch = 1
-	tester.newPeer("attack", big.NewInt(10000), hashes[0])
+	tester.newPeer("attack", hashes, nil)
 	if _, err := tester.syncTake("attack", hashes[0]); err != errStallingPeer {
 		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
 	}
@@ -473,8 +476,8 @@ func TestMadeupBlockChainAttack(t *testing.T) {
 		gapped[i] = hashes[2*i]
 	}
 	// Try and sync with the malicious node and check that it fails
-	tester := newTester(t, gapped, blocks)
-	tester.newPeer("attack", big.NewInt(10000), gapped[0])
+	tester := newTester()
+	tester.newPeer("attack", gapped, blocks)
 	if _, err := tester.syncTake("attack", gapped[0]); err != errCrossCheckFailed {
 		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errCrossCheckFailed)
 	}
@@ -482,8 +485,7 @@ func TestMadeupBlockChainAttack(t *testing.T) {
 	blockSoftTTL = defaultBlockTTL
 	crossCheckCycle = defaultCrossCheckCycle
 
-	tester.hashes = hashes
-	tester.newPeer("valid", big.NewInt(20000), hashes[0])
+	tester.newPeer("valid", hashes, blocks)
 	if _, err := tester.syncTake("valid", hashes[0]); err != nil {
 		t.Fatalf("failed to synchronise blocks: %v", err)
 	}
@@ -507,8 +509,8 @@ func TestMadeupParentBlockChainAttack(t *testing.T) {
 		block.ParentHeaderHash = hash // Simulate pointing to already known hash
 	}
 	// Try and sync with the malicious node and check that it fails
-	tester := newTester(t, hashes, forges)
-	tester.newPeer("attack", big.NewInt(10000), hashes[0])
+	tester := newTester()
+	tester.newPeer("attack", hashes, forges)
 	if _, err := tester.syncTake("attack", hashes[0]); err != errCrossCheckFailed {
 		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errCrossCheckFailed)
 	}
||||||
@ -516,8 +518,7 @@ func TestMadeupParentBlockChainAttack(t *testing.T) {
|
|||||||
blockSoftTTL = defaultBlockTTL
|
blockSoftTTL = defaultBlockTTL
|
||||||
crossCheckCycle = defaultCrossCheckCycle
|
crossCheckCycle = defaultCrossCheckCycle
|
||||||
|
|
||||||
tester.blocks = blocks
|
tester.newPeer("valid", hashes, blocks)
|
||||||
tester.newPeer("valid", big.NewInt(20000), hashes[0])
|
|
||||||
if _, err := tester.syncTake("valid", hashes[0]); err != nil {
|
if _, err := tester.syncTake("valid", hashes[0]); err != nil {
|
||||||
t.Fatalf("failed to synchronise blocks: %v", err)
|
t.Fatalf("failed to synchronise blocks: %v", err)
|
||||||
}
|
}
|
||||||
@ -534,12 +535,12 @@ func TestBannedChainStarvationAttack(t *testing.T) {
|
|||||||
blocks := createBlocksFromHashes(hashes)
|
blocks := createBlocksFromHashes(hashes)
|
||||||
|
|
||||||
// Create the tester and ban the selected hash
|
// Create the tester and ban the selected hash
|
||||||
tester := newTester(t, hashes, blocks)
|
tester := newTester()
|
||||||
tester.downloader.banned.Add(bannedHash)
|
tester.downloader.banned.Add(bannedHash)
|
||||||
|
|
||||||
// Iteratively try to sync, and verify that the banned hash list grows until
|
// Iteratively try to sync, and verify that the banned hash list grows until
|
||||||
// the head of the invalid chain is blocked too.
|
// the head of the invalid chain is blocked too.
|
||||||
tester.newPeer("attack", big.NewInt(10000), hashes[0])
|
tester.newPeer("attack", hashes, blocks)
|
||||||
for banned := tester.downloader.banned.Size(); ; {
|
for banned := tester.downloader.banned.Size(); ; {
|
||||||
// Try to sync with the attacker, check hash chain failure
|
// Try to sync with the attacker, check hash chain failure
|
||||||
if _, err := tester.syncTake("attack", hashes[0]); err != errInvalidChain {
|
if _, err := tester.syncTake("attack", hashes[0]); err != errInvalidChain {
|
||||||
@@ -556,7 +557,7 @@ func TestBannedChainStarvationAttack(t *testing.T) {
 		banned = bans
 	}
 	// Check that after banning an entire chain, bad peers get dropped
-	if err := tester.newPeer("new attacker", big.NewInt(10000), hashes[0]); err != errBannedHead {
+	if err := tester.newPeer("new attacker", hashes, blocks); err != errBannedHead {
 		t.Fatalf("peer registration mismatch: have %v, want %v", err, errBannedHead)
 	}
 	if peer := tester.downloader.peers.Peer("net attacker"); peer != nil {
@@ -579,12 +580,12 @@ func TestBannedChainMemoryExhaustionAttack(t *testing.T) {
 	blocks := createBlocksFromHashes(hashes)
 
 	// Create the tester and ban the selected hash
-	tester := newTester(t, hashes, blocks)
+	tester := newTester()
 	tester.downloader.banned.Add(bannedHash)
 
 	// Iteratively try to sync, and verify that the banned hash list grows until
 	// the head of the invalid chain is blocked too.
-	tester.newPeer("attack", big.NewInt(10000), hashes[0])
+	tester.newPeer("attack", hashes, blocks)
 	for {
 		// Try to sync with the attacker, check hash chain failure
 		if _, err := tester.syncTake("attack", hashes[0]); err != errInvalidChain {