eth, eth/downloader: fix #1231, DOS vulnerability in hash queueing

Péter Szilágyi 2015-07-01 15:19:11 +03:00
parent 1ae80aaf64
commit d6f2c0a76f
2 changed files with 15 additions and 5 deletions

eth/downloader/downloader.go

@@ -34,8 +34,9 @@ var (
 	blockHardTTL    = 3 * blockSoftTTL // Maximum time allowance before a block request is considered expired
 	crossCheckCycle = time.Second      // Period after which to check for expired cross checks
 
+	maxQueuedHashes = 256 * 1024 // Maximum number of hashes to queue for import (DOS protection)
 	maxBannedHashes = 4096       // Number of bannable hashes before phasing old ones out
 	maxBlockProcess = 256        // Number of blocks to import at once into the chain
 )
 
 var (
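
For a sense of scale, an illustrative back-of-the-envelope bound (not code from the commit, and assuming the 32-byte Ethereum block hash size): the new cap limits the raw hash data a peer can force into the queue to about 8 MiB, ignoring the queue's own bookkeeping overhead.

package main

import "fmt"

func main() {
	// Assumed figures: the cap introduced by this commit and a 32-byte
	// block hash; per-entry queue bookkeeping is deliberately ignored.
	const (
		maxQueuedHashes = 256 * 1024
		hashSize        = 32
	)
	fmt.Printf("worst-case raw hash data in the queue: %d bytes (~%d MiB)\n",
		maxQueuedHashes*hashSize, maxQueuedHashes*hashSize/(1024*1024))
}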
@@ -780,6 +781,8 @@ func (d *Downloader) fetchHashes(p *peer, from uint64) error {
 	defer timeout.Stop()
 
 	getHashes := func(from uint64) {
+		glog.V(logger.Detail).Infof("%v: fetching %d hashes from #%d", p, MaxHashFetch, from)
+
 		go p.getAbsHashes(from, MaxHashFetch)
 		timeout.Reset(hashTTL)
 	}
@@ -809,16 +812,23 @@ func (d *Downloader) fetchHashes(p *peer, from uint64) error {
 				return nil
 			}
 			// Otherwise insert all the new hashes, aborting in case of junk
+			glog.V(logger.Detail).Infof("%v: inserting %d hashes from #%d", p, len(hashPack.hashes), from)
+
 			inserts := d.queue.Insert(hashPack.hashes, true)
 			if len(inserts) != len(hashPack.hashes) {
 				glog.V(logger.Debug).Infof("%v: stale hashes", p)
 				return errBadPeer
 			}
-			// Notify the block fetcher of new hashes, and continue fetching
+			// Notify the block fetcher of new hashes, but stop if queue is full
+			cont := d.queue.Pending() < maxQueuedHashes
 			select {
-			case d.processCh <- true:
+			case d.processCh <- cont:
 			default:
 			}
+			if !cont {
+				return nil
+			}
+			// Queue not yet full, fetch the next batch
 			from += uint64(len(hashPack.hashes))
 			getHashes(from)
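
The core of the fix is a bounded producer/consumer handshake: fetchHashes keeps requesting further batches only while the import queue holds fewer than maxQueuedHashes entries, and it signals the block fetcher with a non-blocking send so a slow importer can never stall the hash loop. Below is a minimal, self-contained sketch of that pattern under illustrative names (boundedFetcher, deliver and processCh are stand-ins, not the go-ethereum types).

package main

import "fmt"

const maxQueued = 256 * 1024 // cap on items buffered for import (mirrors maxQueuedHashes)

type boundedFetcher struct {
	pending   int       // items queued but not yet imported
	processCh chan bool // true = more batches may follow, false = producer paused
}

// deliver queues one batch and reports whether the caller should fetch more.
func (f *boundedFetcher) deliver(batch []string) bool {
	f.pending += len(batch)

	// Decide whether fetching should continue before notifying the importer.
	cont := f.pending < maxQueued

	// Non-blocking notification: if the importer has not consumed the previous
	// signal yet, it will still see the queued items on its next iteration.
	select {
	case f.processCh <- cont:
	default:
	}
	return cont
}

func main() {
	f := &boundedFetcher{processCh: make(chan bool, 1)}
	for i := 0; ; i++ {
		// Pretend every peer reply carries 2048 hashes.
		if !f.deliver(make([]string, 2048)) {
			fmt.Printf("queue full after %d batches (%d items), pausing fetch\n", i+1, f.pending)
			return
		}
	}
}

The non-blocking send is the key detail of the pattern: dropping a notification is harmless as long as the consumer re-checks the queue on every pass, whereas blocking here would let a slow consumer stall the very fetcher the cap is meant to protect.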

eth/handler.go

@@ -164,7 +164,7 @@ func (pm *ProtocolManager) newPeer(pv, nv int, p *p2p.Peer, rw p2p.MsgReadWriter
 // handle is the callback invoked to manage the life cycle of an eth peer. When
 // this function terminates, the peer is disconnected.
 func (pm *ProtocolManager) handle(p *peer) error {
-	glog.V(logger.Debug).Infof("%v: peer connected", p)
+	glog.V(logger.Debug).Infof("%v: peer connected [%s]", p, p.Name())
 
 	// Execute the Ethereum handshake
 	td, head, genesis := pm.chainman.Status()