Merge pull request #22596 from holiman/snap_timelimits
eth/protocols/snap: try to prevent requests timing out
commit 43a3768066
eth/protocols/snap/handler.go

@@ -50,6 +50,11 @@ const (
 	// maxTrieNodeLookups is the maximum number of state trie nodes to serve. This
 	// number is there to limit the number of disk lookups.
 	maxTrieNodeLookups = 1024
+
+	// maxTrieNodeTimeSpent is the maximum time we should spend on looking up trie nodes.
+	// If we spend too much time, then it's a fairly high chance of timing out
+	// at the remote side, which means all the work is in vain.
+	maxTrieNodeTimeSpent = 5 * time.Second
 )
 
 // Handler is a callback to invoke from an outside runner after the boilerplate
@@ -129,7 +134,7 @@ func handleMessage(backend Backend, peer *Peer) error {
 		return fmt.Errorf("%w: %v > %v", errMsgTooLarge, msg.Size, maxMessageSize)
 	}
 	defer msg.Discard()
-
+	start := time.Now()
 	// Track the amount of time it takes to serve the request and run the handler
 	if metrics.Enabled {
 		h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code)
@@ -140,7 +145,7 @@ func handleMessage(backend Backend, peer *Peer) error {
 				)
 			}
 			metrics.GetOrRegisterHistogramLazy(h, nil, sampler).Update(time.Since(start).Microseconds())
-		}(time.Now())
+		}(start)
 	}
 	// Handle the message depending on its contents
 	switch {
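The two hunks above boil down to one change: the handler's start time is now captured once, up front, and the deferred metrics closure receives that shared start value instead of taking a fresh time.Now(), so the same timestamp can also drive the serving-time limit added further down. Below is a minimal, runnable sketch of that pattern; handle and serve are hypothetical names, and a plain print stands in for the metrics histogram update used in the real handler.

    package main

    import (
        "fmt"
        "time"
    )

    // handle mimics the shape of handleMessage: one start timestamp, shared by
    // the deferred latency measurement and any later time-budget checks.
    func handle(serve func()) {
        start := time.Now()

        defer func(start time.Time) {
            // Stand-in for the metrics histogram update in the real handler.
            fmt.Printf("request served in %v\n", time.Since(start))
        }(start) // pass the shared start rather than a fresh time.Now()

        serve()
    }

    func main() {
        handle(func() { time.Sleep(10 * time.Millisecond) })
    }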
@@ -470,13 +475,13 @@ func handleMessage(backend Backend, peer *Peer) error {
 					bytes += uint64(len(blob))
 
 					// Sanity check limits to avoid DoS on the store trie loads
-					if bytes > req.Bytes || loads > maxTrieNodeLookups {
+					if bytes > req.Bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent {
 						break
 					}
 				}
 			}
 			// Abort request processing if we've exceeded our limits
-			if bytes > req.Bytes || loads > maxTrieNodeLookups {
+			if bytes > req.Bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent {
 				break
 			}
 		}
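The byte and lookup caps already bounded the size of a trie-node response; the new time.Since(start) > maxTrieNodeTimeSpent clause additionally bounds wall-clock time, so a slow disk can no longer keep the server working past the point where the requester has given up. The following self-contained sketch shows that loop shape; serveNodes and fetch are hypothetical helpers standing in for the real trie reads, not the actual GetTrieNodes handler.

    package main

    import (
        "fmt"
        "time"
    )

    const (
        maxLookups   = 1024            // mirrors maxTrieNodeLookups
        maxTimeSpent = 5 * time.Second // mirrors maxTrieNodeTimeSpent
    )

    // serveNodes is a hypothetical stand-in for the serving loop: fetch
    // represents a disk/trie read, reqBytes the requester's soft size cap.
    func serveNodes(reqBytes uint64, hashes [][]byte, fetch func([]byte) []byte) [][]byte {
        var (
            start = time.Now()
            size  uint64
            loads int
            nodes [][]byte
        )
        for _, h := range hashes {
            blob := fetch(h)
            loads++
            size += uint64(len(blob))
            nodes = append(nodes, blob)

            // Stop on whichever budget trips first - bytes, lookups or
            // elapsed time - so the response still reaches the peer before
            // it times out on its side.
            if size > reqBytes || loads > maxLookups || time.Since(start) > maxTimeSpent {
                break
            }
        }
        return nodes
    }

    func main() {
        fetch := func(hash []byte) []byte { return make([]byte, 256) }
        nodes := serveNodes(1024, [][]byte{{1}, {2}, {3}, {4}, {5}, {6}}, fetch)
        fmt.Println("served", len(nodes), "nodes")
    }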
eth/protocols/snap/sync.go

@@ -85,7 +85,7 @@ const (
 var (
 	// requestTimeout is the maximum time a peer is allowed to spend on serving
 	// a single network request.
-	requestTimeout = 10 * time.Second // TODO(karalabe): Make it dynamic ala fast-sync?
+	requestTimeout = 15 * time.Second // TODO(karalabe): Make it dynamic ala fast-sync?
 )
 
 // ErrCancelled is returned from snap syncing if the operation was prematurely
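On the requesting side, requestTimeout goes from 10 to 15 seconds, keeping it well above the 5 second serving budget so that a reply produced within budget still has time to cross the network before the requester gives up. The snippet below is only an illustration of how such a per-request deadline is typically enforced in Go, not the actual sync.go request tracking.

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    var requestTimeout = 15 * time.Second

    // request is a hypothetical helper: it waits for a reply or gives up after
    // requestTimeout, at which point any work the server is still doing for
    // this request is wasted.
    func request(reply <-chan []byte) ([]byte, error) {
        timeout := time.NewTimer(requestTimeout)
        defer timeout.Stop()

        select {
        case data := <-reply:
            return data, nil
        case <-timeout.C:
            return nil, errors.New("request timed out")
        }
    }

    func main() {
        reply := make(chan []byte, 1)
        go func() {
            time.Sleep(50 * time.Millisecond) // pretend the peer served it within budget
            reply <- []byte("trie node data")
        }()
        data, err := request(reply)
        fmt.Println(string(data), err)
    }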