forked from cerc-io/plugeth
eth/downloader: raise pending state limit that prevented concurrency
parent 856b9e9c50
commit f1ec226d80

@@ -59,7 +59,6 @@ var (
 	maxQueuedHashes   = 256 * 1024 // [eth/61] Maximum number of hashes to queue for import (DOS protection)
 	maxQueuedHeaders  = 256 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection)
-	maxQueuedStates   = 256 * 1024 // [eth/63] Maximum number of state requests to queue (DOS protection)
 	maxResultsProcess = 256        // Number of download results to import at once into the chain
 
 	fsHeaderCheckFrequency = 100 // Verification frequency of the downloaded headers during fast sync
@@ -39,7 +39,8 @@ import (
 )
 
 var (
-	blockCacheLimit = 1024 // Maximum number of blocks to cache before throttling the download
+	blockCacheLimit   = 1024 // Maximum number of blocks to cache before throttling the download
+	maxInFlightStates = 4096 // Maximum number of state downloads to allow concurrently
 )
 
 var (
@@ -464,7 +465,7 @@ func (q *queue) ReserveNodeData(p *peer, count int) *fetchRequest {
 	q.lock.Lock()
 	defer q.lock.Unlock()
 
-	return q.reserveHashes(p, count, q.stateTaskQueue, generator, q.statePendPool, count)
+	return q.reserveHashes(p, count, q.stateTaskQueue, generator, q.statePendPool, maxInFlightStates)
 }
 
 // reserveHashes reserves a set of hashes for the given peer, skipping previously
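Reading the diff, the final argument to reserveHashes bounds how many state hashes may be pending at once: the old call passed the per-request count, which left room for only about one request's worth of in-flight state entries and so serialized state retrieval, while the new maxInFlightStates (4096) leaves headroom for several peers to hold outstanding requests concurrently. The sketch below illustrates that kind of allowance check under those assumptions; fetchRequest, pendPool and reserveAllowance are illustrative names, not the actual plugeth/go-ethereum code, and the request size of 384 is just an example value.

// Minimal sketch of a pending-cap allowance check, assuming reserveHashes
// limits the total hashes outstanding across all peers. Names are hypothetical.
package main

import "fmt"

// fetchRequest stands in for one peer's outstanding request.
type fetchRequest struct {
	Hashes []string
}

// reserveAllowance returns how many new hashes a peer may reserve, given the
// per-request size, the requests already pending for every peer, and the
// global pending cap (the argument this commit changes).
func reserveAllowance(count int, pendPool map[string]*fetchRequest, maxPending int) int {
	allowance := maxPending
	for _, req := range pendPool {
		allowance -= len(req.Hashes) // subtract work already in flight
	}
	if allowance > count {
		allowance = count // a single reservation never exceeds the request size
	}
	if allowance < 0 {
		allowance = 0
	}
	return allowance
}

func main() {
	// One peer already has a full request's worth of hashes in flight.
	pending := map[string]*fetchRequest{
		"peer1": {Hashes: make([]string, 384)},
	}

	// Old behaviour: cap == count, so a second peer is given nothing to do.
	fmt.Println(reserveAllowance(384, pending, 384)) // 0

	// New behaviour: cap == maxInFlightStates (4096), so other peers still get work.
	fmt.Println(reserveAllowance(384, pending, 4096)) // 384
}

With the per-request count as the cap, any outstanding request exhausts the allowance, which matches the "prevented concurrency" wording of the commit title; with the 4096 cap, around ten requests of this example size can be pending before further reservations are throttled.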