forked from cerc-io/plugeth
eth, les, swarm: fix go vet issues surfaced by log15
This commit is contained in:
parent d4fd06c3dc
commit 61e6bb1247
@@ -938,7 +938,7 @@ func (d *Downloader) fetchNodeData() error {
 }
 if err != nil {
 // If the node data processing failed, the root hash is very wrong, abort
-log.Error(fmt.Sprintf("peer %d: state processing failed: %v", packet.PeerId(), err))
+log.Error(fmt.Sprintf("peer %s: state processing failed: %v", packet.PeerId(), err))
 d.cancel()
 return
 }
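The hunk above swaps %d for %s: packet.PeerId() evidently yields a string, not an integer, and go vet's printf check flags format verbs whose type does not match the argument. A minimal, self-contained sketch (hypothetical peer id value) of what the mismatch looks like at runtime:

package main

import "fmt"

func main() {
	peerID := "enode-abc123" // hypothetical string peer id

	// Wrong verb: %d expects an integer, so fmt emits an error marker.
	fmt.Println(fmt.Sprintf("peer %d: state processing failed", peerID))
	// Output: peer %!d(string=enode-abc123): state processing failed

	// Correct verb for a string argument.
	fmt.Println(fmt.Sprintf("peer %s: state processing failed", peerID))
	// Output: peer enode-abc123: state processing failed
}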
@@ -441,7 +441,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {

 // Block header query, collect the requested headers and reply
 case AnnounceMsg:
-log.Debug(fmt.Sprint("<=== AnnounceMsg from peer %v:", p.id))
+log.Debug(fmt.Sprintf("<=== AnnounceMsg from peer %v:", p.id))

 var req announceData
 if err := msg.Decode(&req); err != nil {
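This hunk switches fmt.Sprint to fmt.Sprintf: Sprint does not interpret format verbs, so the %v would have been emitted literally and p.id simply appended, which is exactly the kind of call go vet's printf analysis reports. A small sketch (hypothetical peer id) of the difference:

package main

import "fmt"

func main() {
	id := "peer-1" // hypothetical peer id

	// Sprint concatenates its operands; the %v stays literal in the output.
	fmt.Println(fmt.Sprint("<=== AnnounceMsg from peer %v:", id))
	// Output: <=== AnnounceMsg from peer %v:peer-1

	// Sprintf substitutes the verb as intended.
	fmt.Println(fmt.Sprintf("<=== AnnounceMsg from peer %v:", id))
	// Output: <=== AnnounceMsg from peer peer-1:
}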
@@ -84,7 +84,7 @@ func (self *Api) Resolve(hostPort string, nameresolver bool) (storage.Key, error
 err = ErrResolve(err)
 log.Warn(fmt.Sprintf("DNS error : %v", err))
 }
-log.Trace(fmt.Sprintf("host lookup: %v -> %v", err))
+log.Trace(fmt.Sprintf("host lookup: %v -> %v", hostPort, contentHash))
 return contentHash[:], err
 }
 func Parse(uri string) (hostPort, path string) {
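Here the format string has two %v verbs but the old call passed only err, so the second verb had nothing to consume and the first printed the wrong value; the fix passes the hostPort and contentHash the message was meant to describe. A sketch (made-up values) of how fmt reports a missing argument:

package main

import "fmt"

func main() {
	hostPort := "example.swarm:8500" // hypothetical lookup input
	contentHash := "0xabcdef"        // hypothetical lookup result

	// Too few arguments: the second %v is reported as MISSING.
	fmt.Println(fmt.Sprintf("host lookup: %v -> %v", hostPort))
	// Output: host lookup: example.swarm:8500 -> %!v(MISSING)

	// Matching verbs and arguments.
	fmt.Println(fmt.Sprintf("host lookup: %v -> %v", hostPort, contentHash))
	// Output: host lookup: example.swarm:8500 -> 0xabcdef
}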
@@ -211,7 +211,7 @@ only add if less than requesterCount peers forwarded the same request id so far
 note this is done irrespective of status (searching or found)
 */
 func (self *Depo) addRequester(rs *storage.RequestStatus, req *retrieveRequestMsgData) {
-log.Trace(fmt.Sprintf("Depo.addRequester: key %v - add peer to req.Id %v", req.Key.Log(), req.from, req.Id))
+log.Trace(fmt.Sprintf("Depo.addRequester: key %v - add peer to req.Id %v", req.Key.Log(), req.Id))
 list := rs.Requesters[req.Id]
 rs.Requesters[req.Id] = append(list, req)
 }
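The opposite problem: the old call passed three arguments (req.Key.Log(), req.from, req.Id) for a format string with only two verbs, so the unused one would have been tacked on as an EXTRA marker; dropping req.from makes verbs and arguments line up. A sketch with made-up values:

package main

import "fmt"

func main() {
	key, from, id := "key-1", "peer-7", 42 // hypothetical request fields

	// Too many arguments: fmt appends an EXTRA marker for the leftover one.
	fmt.Println(fmt.Sprintf("addRequester: key %v - add peer to req.Id %v", key, from, id))
	// Output: addRequester: key key-1 - add peer to req.Id peer-7%!(EXTRA int=42)

	// One argument per verb.
	fmt.Println(fmt.Sprintf("addRequester: key %v - add peer to req.Id %v", key, id))
	// Output: addRequester: key key-1 - add peer to req.Id 42
}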
@@ -120,7 +120,7 @@ func (self *KadDb) add(nrs []*NodeRecord, proximityBin func(Address) int) {
 copy(newnodes[:], nodes[:dbcursor])
 newnodes[dbcursor] = node
 copy(newnodes[dbcursor+1:], nodes[dbcursor:])
-log.Trace(fmt.Sprintf("new nodes: %v (keys: %v)\nnodes: %v", newnodes, nodes))
+log.Trace(fmt.Sprintf("new nodes: %v, nodes: %v", newnodes, nodes))
 self.Nodes[index] = newnodes
 n++
 }
@@ -294,7 +294,7 @@ func (self *KadDb) save(path string, cb func(*NodeRecord, Node)) error {
 }
 err = ioutil.WriteFile(path, data, os.ModePerm)
 if err != nil {
-log.Warn(fmt.Sprintf("unable to save kaddb with %v nodes to %v: err", n, path, err))
+log.Warn(fmt.Sprintf("unable to save kaddb with %v nodes to %v: %v", n, path, err))
 } else {
 log.Info(fmt.Sprintf("saved kaddb with %v nodes to %v", n, path))
 }
@@ -217,7 +217,7 @@ LOOP:
 // if just switched to db mode and not quitting, then launch dbRead
 // in a parallel go routine to send deliveries from db
 if inDb == 0 && quit != nil {
-log.Trace(fmt.Sprintf("syncDb[%v/%v] start dbRead"))
+log.Trace(fmt.Sprintf("syncDb[%v/%v] start dbRead", self.key.Log(), self.priority))
 go self.dbRead(true, counter, deliver)
 }
 inDb++
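This one had two %v verbs and no arguments at all, so both would have rendered as MISSING; the fix supplies the syncer's key and priority. go vet's printf check counts verbs against arguments and would flag the old call. A sketch with placeholder values:

package main

import "fmt"

func main() {
	key, priority := "key-1", 2 // hypothetical syncer identity and priority

	// No arguments for either verb; go vet flags this line too.
	fmt.Println(fmt.Sprintf("syncDb[%v/%v] start dbRead"))
	// Output: syncDb[%!v(MISSING)/%!v(MISSING)] start dbRead

	// Both verbs satisfied.
	fmt.Println(fmt.Sprintf("syncDb[%v/%v] start dbRead", key, priority))
	// Output: syncDb[key-1/2] start dbRead
}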
@@ -480,7 +480,7 @@ LOOP:
 stateCopy := *state
 err := self.unsyncedKeys(unsynced, &stateCopy)
 if err != nil {
-log.Warn(fmt.Sprintf("syncer[%v]: unable to send unsynced keys: %v", err))
+log.Warn(fmt.Sprintf("syncer[%v]: unable to send unsynced keys: %v", self.key.Log(), err))
 }
 self.state = state
 log.Debug(fmt.Sprintf("syncer[%v]: --> %v keys sent: (total: %v (%v), history: %v), sent sync state: %v", self.key.Log(), len(unsynced), keyCounts, keyCount, historyCnt, stateCopy))
@@ -553,7 +553,7 @@ LOOP:
 log.Trace(fmt.Sprintf("syncer[%v]: (priority %v): request %v (synced = %v)", self.key.Log(), priority, req, state.Synced))
 unsynced = append(unsynced, sreq)
 } else {
-log.Warn(fmt.Sprintf("syncer[%v]: (priority %v): error creating request for %v: %v)", self.key.Log(), priority, req, state.Synced, err))
+log.Warn(fmt.Sprintf("syncer[%v]: (priority %v): error creating request for %v: %v)", self.key.Log(), priority, req, err))
 }

 }
@@ -197,7 +197,7 @@ func (self *Swarm) Start(net *p2p.Server) error {
 log.Debug(fmt.Sprintf("Swarm http proxy started on port: %v", self.config.Port))

 if self.corsString != "" {
-log.Debug(fmt.Sprintf("Swarm http proxy started with corsdomain:", self.corsString))
+log.Debug(fmt.Sprintf("Swarm http proxy started with corsdomain: %v", self.corsString))
 }

 return nil
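Last case: an argument with no verb to consume it. The old format string ends at "corsdomain:" but still passes self.corsString, so fmt would append it as an EXTRA operand; adding %v formats it into the message. A sketch with a made-up CORS domain:

package main

import "fmt"

func main() {
	cors := "http://localhost:8080" // hypothetical corsdomain setting

	// Argument without a matching verb: appended as EXTRA.
	fmt.Println(fmt.Sprintf("Swarm http proxy started with corsdomain:", cors))
	// Output: Swarm http proxy started with corsdomain:%!(EXTRA string=http://localhost:8080)

	// With the %v verb the value lands inside the message.
	fmt.Println(fmt.Sprintf("Swarm http proxy started with corsdomain: %v", cors))
	// Output: Swarm http proxy started with corsdomain: http://localhost:8080
}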