eth/protocols/snap: add peer id and req id to the timeout logs

Péter Szilágyi 2021-03-26 22:29:22 +02:00
parent 063f78c6ba
commit 099be04100
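
The same three-part change is applied to each of the five assignment loops below (account ranges, bytecodes, storage ranges, trienode heals and bytecode heals): the peer is resolved once into a local variable while the syncer lock is held, the request-timeout callback logs through that peer's logger and includes the request id, and the request goroutine captures the peer via its closure instead of taking it as an explicit SyncPeer parameter. A minimal, self-contained sketch of the pattern follows; the names and types (fakePeer, syncer, assign, the 50ms timeout) are illustrative stand-ins, not the real snap sync code.

package main

import (
	"fmt"
	"sync"
	"time"
)

// fakePeer is an illustrative stand-in for a sync peer.
type fakePeer struct{ id string }

// Log mimics a contextual, per-peer logger: every message carries the peer id.
func (p *fakePeer) Log(msg string, reqid uint64) {
	fmt.Printf("peer=%s reqid=%d %s\n", p.id, reqid, msg)
}

// syncer is a stripped-down stand-in for the snap Syncer.
type syncer struct {
	lock  sync.Mutex
	peers map[string]*fakePeer
	pend  sync.WaitGroup
}

func (s *syncer) assign(idle string, reqid uint64) {
	s.lock.Lock()
	defer s.lock.Unlock()

	// Resolve the peer once under the lock; the closures below capture it.
	peer := s.peers[idle]

	// The timeout log now carries the peer id and the request id.
	timeout := time.AfterFunc(50*time.Millisecond, func() {
		peer.Log("request timed out", reqid)
	})
	s.pend.Add(1)
	go func() { // peer is captured by the closure, no SyncPeer parameter needed
		defer s.pend.Done()
		time.Sleep(100 * time.Millisecond) // simulate a slow remote request
		timeout.Stop()                     // too late here, the timer already fired
	}()
}

func main() {
	s := &syncer{peers: map[string]*fakePeer{"enode-1": {id: "enode-1"}}}
	s.assign("enode-1", 42)
	s.pend.Wait()
}

Run as-is, the simulated request outlives the 50ms timer, so the timeout callback fires and the log line identifies both the peer and the request.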

eth/protocols/snap/sync.go

@@ -811,6 +811,8 @@ func (s *Syncer) assignAccountTasks(cancel chan struct{}) {
 		if idle == "" {
 			return
 		}
+		peer := s.peers[idle]
+
 		// Matched a pending task to an idle peer, allocate a unique request id
 		var reqid uint64
 		for {
@@ -834,14 +836,14 @@ func (s *Syncer) assignAccountTasks(cancel chan struct{}) {
 			task: task,
 		}
 		req.timeout = time.AfterFunc(requestTimeout, func() {
-			log.Debug("Account range request timed out")
+			peer.Log().Debug("Account range request timed out", "reqid", reqid)
 			s.scheduleRevertAccountRequest(req)
 		})
 		s.accountReqs[reqid] = req
 		delete(s.accountIdlers, idle)
 
 		s.pend.Add(1)
-		go func(peer SyncPeer, root common.Hash) {
+		go func(root common.Hash) {
 			defer s.pend.Done()
 
 			// Attempt to send the remote request and revert if it fails
@@ -849,7 +851,7 @@ func (s *Syncer) assignAccountTasks(cancel chan struct{}) {
 				peer.Log().Debug("Failed to request account range", "err", err)
 				s.scheduleRevertAccountRequest(req)
 			}
-		}(s.peers[idle], s.root) // We're in the lock, peers[id] surely exists
+		}(s.root)
 
 		// Inject the request into the task to block further assignments
 		task.req = req
@@ -891,6 +893,8 @@ func (s *Syncer) assignBytecodeTasks(cancel chan struct{}) {
 		if idle == "" {
 			return
 		}
+		peer := s.peers[idle]
+
 		// Matched a pending task to an idle peer, allocate a unique request id
 		var reqid uint64
 		for {
@@ -921,14 +925,14 @@ func (s *Syncer) assignBytecodeTasks(cancel chan struct{}) {
 			task: task,
 		}
 		req.timeout = time.AfterFunc(requestTimeout, func() {
-			log.Debug("Bytecode request timed out")
+			peer.Log().Debug("Bytecode request timed out", "reqid", reqid)
 			s.scheduleRevertBytecodeRequest(req)
 		})
 		s.bytecodeReqs[reqid] = req
 		delete(s.bytecodeIdlers, idle)
 
 		s.pend.Add(1)
-		go func(peer SyncPeer) {
+		go func() {
 			defer s.pend.Done()
 
 			// Attempt to send the remote request and revert if it fails
@@ -936,7 +940,7 @@ func (s *Syncer) assignBytecodeTasks(cancel chan struct{}) {
 				log.Debug("Failed to request bytecodes", "err", err)
 				s.scheduleRevertBytecodeRequest(req)
 			}
-		}(s.peers[idle]) // We're in the lock, peers[id] surely exists
+		}()
 	}
 }
 
@@ -976,6 +980,8 @@ func (s *Syncer) assignStorageTasks(cancel chan struct{}) {
 		if idle == "" {
 			return
 		}
+		peer := s.peers[idle]
+
 		// Matched a pending task to an idle peer, allocate a unique request id
 		var reqid uint64
 		for {
@@ -1045,14 +1051,14 @@ func (s *Syncer) assignStorageTasks(cancel chan struct{}) {
 			req.limit = subtask.Last
 		}
 		req.timeout = time.AfterFunc(requestTimeout, func() {
-			log.Debug("Storage request timed out")
+			peer.Log().Debug("Storage request timed out", "reqid", reqid)
 			s.scheduleRevertStorageRequest(req)
 		})
 		s.storageReqs[reqid] = req
 		delete(s.storageIdlers, idle)
 
 		s.pend.Add(1)
-		go func(peer SyncPeer, root common.Hash) {
+		go func(root common.Hash) {
 			defer s.pend.Done()
 
 			// Attempt to send the remote request and revert if it fails
@@ -1064,7 +1070,7 @@ func (s *Syncer) assignStorageTasks(cancel chan struct{}) {
 				log.Debug("Failed to request storage", "err", err)
 				s.scheduleRevertStorageRequest(req)
 			}
-		}(s.peers[idle], s.root) // We're in the lock, peers[id] surely exists
+		}(s.root)
 
 		// Inject the request into the subtask to block further assignments
 		if subtask != nil {
@@ -1121,6 +1127,8 @@ func (s *Syncer) assignTrienodeHealTasks(cancel chan struct{}) {
 		if idle == "" {
 			return
 		}
+		peer := s.peers[idle]
+
 		// Matched a pending task to an idle peer, allocate a unique request id
 		var reqid uint64
 		for {
@@ -1160,14 +1168,14 @@ func (s *Syncer) assignTrienodeHealTasks(cancel chan struct{}) {
 			task: s.healer,
 		}
 		req.timeout = time.AfterFunc(requestTimeout, func() {
-			log.Debug("Trienode heal request timed out")
+			peer.Log().Debug("Trienode heal request timed out", "reqid", reqid)
 			s.scheduleRevertTrienodeHealRequest(req)
 		})
 		s.trienodeHealReqs[reqid] = req
 		delete(s.trienodeHealIdlers, idle)
 
 		s.pend.Add(1)
-		go func(peer SyncPeer, root common.Hash) {
+		go func(root common.Hash) {
 			defer s.pend.Done()
 
 			// Attempt to send the remote request and revert if it fails
@@ -1175,7 +1183,7 @@ func (s *Syncer) assignTrienodeHealTasks(cancel chan struct{}) {
 				log.Debug("Failed to request trienode healers", "err", err)
 				s.scheduleRevertTrienodeHealRequest(req)
 			}
-		}(s.peers[idle], s.root) // We're in the lock, peers[id] surely exists
+		}(s.root)
 	}
 }
 
@@ -1227,6 +1235,8 @@ func (s *Syncer) assignBytecodeHealTasks(cancel chan struct{}) {
 		if idle == "" {
 			return
 		}
+		peer := s.peers[idle]
+
 		// Matched a pending task to an idle peer, allocate a unique request id
 		var reqid uint64
 		for {
@@ -1258,14 +1268,14 @@ func (s *Syncer) assignBytecodeHealTasks(cancel chan struct{}) {
 			task: s.healer,
 		}
 		req.timeout = time.AfterFunc(requestTimeout, func() {
-			log.Debug("Bytecode heal request timed out")
+			peer.Log().Debug("Bytecode heal request timed out", "reqid", reqid)
 			s.scheduleRevertBytecodeHealRequest(req)
 		})
 		s.bytecodeHealReqs[reqid] = req
 		delete(s.bytecodeHealIdlers, idle)
 
 		s.pend.Add(1)
-		go func(peer SyncPeer) {
+		go func() {
 			defer s.pend.Done()
 
 			// Attempt to send the remote request and revert if it fails
@@ -1273,7 +1283,7 @@ func (s *Syncer) assignBytecodeHealTasks(cancel chan struct{}) {
 				log.Debug("Failed to request bytecode healers", "err", err)
 				s.scheduleRevertBytecodeHealRequest(req)
 			}
-		}(s.peers[idle]) // We're in the lock, peers[id] surely exists
+		}()
 	}
 }
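
Why the timeout logs needed this at all: the old bare log.Debug calls carried no context, so a timed-out request could not be attributed to a peer or matched to an in-flight request id. Routing the message through peer.Log() and tagging it with "reqid" adds exactly that, which is what the commit title describes. The sketch below is a simplified, self-contained stand-in for the contextual-logger idea, not go-ethereum's log package; ctxLogger and its methods are made up for illustration.

package main

import "fmt"

// ctxLogger is a made-up stand-in for a contextual logger: it carries fixed
// key/value pairs that are appended to every message it emits.
type ctxLogger struct {
	ctx []interface{}
}

// New returns a copy of the logger with extra context attached.
func (l ctxLogger) New(kv ...interface{}) ctxLogger {
	merged := append(append([]interface{}{}, l.ctx...), kv...)
	return ctxLogger{ctx: merged}
}

// Debug prints the message followed by the fixed context and the per-call
// key/value pairs.
func (l ctxLogger) Debug(msg string, kv ...interface{}) {
	all := append(append([]interface{}{}, l.ctx...), kv...)
	fmt.Print("DEBUG ", msg)
	for i := 0; i+1 < len(all); i += 2 {
		fmt.Printf(" %v=%v", all[i], all[i+1])
	}
	fmt.Println()
}

func main() {
	var root ctxLogger
	peerLog := root.New("peer", "9f2c81ab") // bound once when the peer connects
	// Roughly what peer.Log().Debug("Account range request timed out", "reqid", reqid) achieves:
	peerLog.Debug("Account range request timed out", "reqid", uint64(14))
}

Dropping the explicit peer SyncPeer goroutine parameter is safe for the same reason the removed "We're in the lock, peers[id] surely exists" comment gave: the map lookup now happens once, under the syncer lock, before the timer or the goroutine is created, and both closures simply capture the resulting local variable.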