build: enable unconvert linter (#15456)
* build: enable unconvert linter
  - fixes #15453
  - update code base for failing cases
* cmd/puppeth: replace syscall.Stdin with os.Stdin.Fd() for unconvert linter
This commit is contained in:
parent 3ee86a57f3
commit 86f6568f66
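For context, a minimal sketch (illustrative only, not part of the commit) of the kind of redundant conversion the unconvert linter reports: converting a value to a type it already has, as in the readInteger change in the first hunk below.

package main

import "fmt"

func main() {
    b := []byte{0x01, 0x02, 0x03}
    // Indexing a []byte already yields a byte (uint8), so this conversion is
    // a no-op and unconvert flags it; last := b[len(b)-1] is the clean form.
    last := uint8(b[len(b)-1])
    fmt.Println(last)
}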
@@ -38,7 +38,7 @@ type unpacker interface {
 func readInteger(kind reflect.Kind, b []byte) interface{} {
     switch kind {
     case reflect.Uint8:
-        return uint8(b[len(b)-1])
+        return b[len(b)-1]
     case reflect.Uint16:
         return binary.BigEndian.Uint16(b[len(b)-2:])
     case reflect.Uint32:
@@ -322,9 +322,15 @@ func doLint(cmdline []string) {
     build.MustRun(goTool("get", "gopkg.in/alecthomas/gometalinter.v1"))
     build.MustRunCommand(filepath.Join(GOBIN, "gometalinter.v1"), "--install")

     // Run fast linters batched together
     configs := []string{"--vendor", "--disable-all", "--enable=vet", "--enable=gofmt", "--enable=misspell"}
     build.MustRunCommand(filepath.Join(GOBIN, "gometalinter.v1"), append(configs, packages...)...)
+
+    // Run slow linters one by one
+    for _, linter := range []string{"unconvert"} {
+        configs = []string{"--vendor", "--deadline=10m", "--disable-all", "--enable=" + linter}
+        build.MustRunCommand(filepath.Join(GOBIN, "gometalinter.v1"), append(configs, packages...)...)
+    }
 }

 // Release Packaging
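A rough standalone equivalent of the slow-linter loop above, for running an expensive linter outside the build script; it assumes gometalinter.v1 is on PATH, whereas ci.go resolves it from GOBIN and is typically invoked as go run build/ci.go lint.

package main

import (
    "os"
    "os/exec"
)

func main() {
    // Mirror the ci.go loop: each slow linter gets its own invocation with a
    // long deadline so it cannot stall the fast, batched linter run.
    for _, linter := range []string{"unconvert"} {
        args := []string{"--vendor", "--deadline=10m", "--disable-all", "--enable=" + linter, "./..."}
        cmd := exec.Command("gometalinter.v1", args...)
        cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
        if err := cmd.Run(); err != nil {
            os.Exit(1)
        }
    }
}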
@@ -133,7 +133,7 @@ func deployFaucet(client *sshClient, network string, bootnodes []string, config
     })
     files[filepath.Join(workdir, "docker-compose.yaml")] = composefile.Bytes()

-    files[filepath.Join(workdir, "genesis.json")] = []byte(config.node.genesis)
+    files[filepath.Join(workdir, "genesis.json")] = config.node.genesis
     files[filepath.Join(workdir, "account.json")] = []byte(config.node.keyJSON)
     files[filepath.Join(workdir, "account.pass")] = []byte(config.node.keyPass)

@@ -128,7 +128,7 @@ func deployNode(client *sshClient, network string, bootv4, bootv5 []string, conf
     files[filepath.Join(workdir, "docker-compose.yaml")] = composefile.Bytes()

     //genesisfile, _ := json.MarshalIndent(config.genesis, "", " ")
-    files[filepath.Join(workdir, "genesis.json")] = []byte(config.genesis)
+    files[filepath.Join(workdir, "genesis.json")] = config.genesis

     if config.keyJSON != "" {
         files[filepath.Join(workdir, "signer.json")] = []byte(config.keyJSON)
@@ -27,7 +27,6 @@ import (
     "os/user"
     "path/filepath"
     "strings"
-    "syscall"

     "github.com/ethereum/go-ethereum/log"
     "golang.org/x/crypto/ssh"
@@ -85,7 +84,7 @@ func dial(server string, pubkey []byte) (*sshClient, error) {
     }
     auths = append(auths, ssh.PasswordCallback(func() (string, error) {
         fmt.Printf("What's the login password for %s at %s? (won't be echoed)\n> ", login, server)
-        blob, err := terminal.ReadPassword(int(syscall.Stdin))
+        blob, err := terminal.ReadPassword(int(os.Stdin.Fd()))

         fmt.Println()
         return string(blob), err
@@ -28,7 +28,6 @@ import (
     "sort"
     "strconv"
     "strings"
-    "syscall"

     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/core"
@@ -231,7 +230,7 @@ func (w *wizard) readDefaultFloat(def float64) float64 {
 // line and returns it. The input will not be echoed.
 func (w *wizard) readPassword() string {
     fmt.Printf("> ")
-    text, err := terminal.ReadPassword(int(syscall.Stdin))
+    text, err := terminal.ReadPassword(int(os.Stdin.Fd()))
     if err != nil {
         log.Crit("Failed to read password", "err", err)
     }
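The two puppeth hunks above swap syscall.Stdin for os.Stdin.Fd(). On Unix syscall.Stdin is already a plain int, so int(syscall.Stdin) is precisely the no-op conversion unconvert rejects; os.Stdin.Fd() returns a uintptr, so the conversion to int stays meaningful, and the syscall import can be dropped. A small sketch of the resulting pattern (illustrative, not lifted verbatim from the commit):

package main

import (
    "fmt"
    "os"

    "golang.org/x/crypto/ssh/terminal"
)

func main() {
    fmt.Print("> ")
    // os.Stdin.Fd() returns a uintptr, so converting to int for
    // terminal.ReadPassword is a genuine conversion, not a redundant one.
    text, err := terminal.ReadPassword(int(os.Stdin.Fd()))
    fmt.Println()
    if err != nil {
        panic(err)
    }
    fmt.Printf("read %d bytes\n", len(text))
}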
@@ -53,9 +53,7 @@ var (

 type decError struct{ msg string }

-func (err decError) Error() string {
-    return string(err.msg)
-}
+func (err decError) Error() string { return err.msg }

 // Decode decodes a hex string with 0x prefix.
 func Decode(input string) ([]byte, error) {
@@ -223,7 +223,7 @@ func (b *Uint64) UnmarshalText(input []byte) error {
             return ErrSyntax
         }
         dec *= 16
-        dec += uint64(nib)
+        dec += nib
     }
     *b = Uint64(dec)
     return nil
@@ -31,7 +31,7 @@ func cacheSize(block uint64) uint64 {
         return cacheSizes[epoch]
     }
     // No known cache size, calculate manually (sanity branch only)
-    size := uint64(cacheInitBytes + cacheGrowthBytes*uint64(epoch) - hashBytes)
+    size := cacheInitBytes + cacheGrowthBytes*uint64(epoch) - hashBytes
     for !new(big.Int).SetUint64(size / hashBytes).ProbablyPrime(1) { // Always accurate for n < 2^64
         size -= 2 * hashBytes
     }
@@ -49,7 +49,7 @@ func datasetSize(block uint64) uint64 {
         return datasetSizes[epoch]
     }
     // No known dataset size, calculate manually (sanity branch only)
-    size := uint64(datasetInitBytes + datasetGrowthBytes*uint64(epoch) - mixBytes)
+    size := datasetInitBytes + datasetGrowthBytes*uint64(epoch) - mixBytes
     for !new(big.Int).SetUint64(size / mixBytes).ProbablyPrime(1) { // Always accurate for n < 2^64
         size -= 2 * mixBytes
     }
@@ -161,8 +161,8 @@ func (in *Interpreter) Run(snapshot int, contract *Contract, input []byte) (ret

         if in.cfg.Debug {
             logged = false
-            pcCopy = uint64(pc)
-            gasCopy = uint64(contract.Gas)
+            pcCopy = pc
+            gasCopy = contract.Gas
             stackCopy = newstack()
             for _, val := range stack.data {
                 stackCopy.push(val)
@@ -708,7 +708,7 @@ func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, err
         ttl := d.requestTTL()
         timeout := time.After(ttl)

-        go p.peer.RequestHeadersByNumber(uint64(check), 1, 0, false)
+        go p.peer.RequestHeadersByNumber(check, 1, 0, false)

         // Wait until a reply arrives to this request
         for arrived := false; !arrived; {
@@ -1518,7 +1518,7 @@ func (d *Downloader) deliver(id string, destCh chan dataPack, packet dataPack, i
 func (d *Downloader) qosTuner() {
     for {
         // Retrieve the current median RTT and integrate into the previoust target RTT
-        rtt := time.Duration(float64(1-qosTuningImpact)*float64(atomic.LoadUint64(&d.rttEstimate)) + qosTuningImpact*float64(d.peers.medianRTT()))
+        rtt := time.Duration((1-qosTuningImpact)*float64(atomic.LoadUint64(&d.rttEstimate)) + qosTuningImpact*float64(d.peers.medianRTT()))
         atomic.StoreUint64(&d.rttEstimate, uint64(rtt))

         // A new RTT cycle passed, increase our confidence in the estimated RTT
@@ -62,7 +62,7 @@ func (p *FakePeer) RequestHeadersByHash(hash common.Hash, amount int, skip int,
     number := origin.Number.Uint64()
     headers = append(headers, origin)
     if reverse {
-        for i := 0; i < int(skip)+1; i++ {
+        for i := 0; i <= skip; i++ {
             if header := p.hc.GetHeader(hash, number); header != nil {
                 hash = header.ParentHash
                 number--
@@ -206,7 +206,7 @@ func (f *Filter) checkMatches(ctx context.Context, header *types.Header) (logs [
     }
     var unfiltered []*types.Log
     for _, receipt := range receipts {
-        unfiltered = append(unfiltered, ([]*types.Log)(receipt.Logs)...)
+        unfiltered = append(unfiltered, receipt.Logs...)
     }
     logs = filterLogs(unfiltered, nil, nil, f.addresses, f.topics)
     if len(logs) > 0 {
@@ -400,7 +400,7 @@ func (p *peer) Handshake(td *big.Int, head common.Hash, headNum uint64, genesis

     var send keyValueList
     send = send.add("protocolVersion", uint64(p.version))
-    send = send.add("networkId", uint64(p.network))
+    send = send.add("networkId", p.network)
     send = send.add("headTd", td)
     send = send.add("headHash", head)
     send = send.add("headNum", headNum)
@@ -112,10 +112,10 @@ func CollectProcessMetrics(refresh time.Duration) {
         memPauses.Mark(int64(memstats[i%2].PauseTotalNs - memstats[(i-1)%2].PauseTotalNs))

         if ReadDiskStats(diskstats[i%2]) == nil {
-            diskReads.Mark(int64(diskstats[i%2].ReadCount - diskstats[(i-1)%2].ReadCount))
-            diskReadBytes.Mark(int64(diskstats[i%2].ReadBytes - diskstats[(i-1)%2].ReadBytes))
-            diskWrites.Mark(int64(diskstats[i%2].WriteCount - diskstats[(i-1)%2].WriteCount))
-            diskWriteBytes.Mark(int64(diskstats[i%2].WriteBytes - diskstats[(i-1)%2].WriteBytes))
+            diskReads.Mark(diskstats[i%2].ReadCount - diskstats[(i-1)%2].ReadCount)
+            diskReadBytes.Mark(diskstats[i%2].ReadBytes - diskstats[(i-1)%2].ReadBytes)
+            diskWrites.Mark(diskstats[i%2].WriteCount - diskstats[(i-1)%2].WriteCount)
+            diskWriteBytes.Mark(diskstats[i%2].WriteBytes - diskstats[(i-1)%2].WriteBytes)
         }
         time.Sleep(refresh)
     }
@@ -59,7 +59,7 @@ func (i *Interface) SetInt64(n int64) { i.object = &n }
 func (i *Interface) SetUint8(bigint *BigInt) { n := uint8(bigint.bigint.Uint64()); i.object = &n }
 func (i *Interface) SetUint16(bigint *BigInt) { n := uint16(bigint.bigint.Uint64()); i.object = &n }
 func (i *Interface) SetUint32(bigint *BigInt) { n := uint32(bigint.bigint.Uint64()); i.object = &n }
-func (i *Interface) SetUint64(bigint *BigInt) { n := uint64(bigint.bigint.Uint64()); i.object = &n }
+func (i *Interface) SetUint64(bigint *BigInt) { n := bigint.bigint.Uint64(); i.object = &n }
 func (i *Interface) SetBigInt(bigint *BigInt) { i.object = &bigint.bigint }
 func (i *Interface) SetBigInts(bigints *BigInts) { i.object = &bigints.bigints }

@@ -427,7 +427,7 @@ func (tab *Table) bondall(nodes []*Node) (result []*Node) {
     rc := make(chan *Node, len(nodes))
     for i := range nodes {
         go func(n *Node) {
-            nn, _ := tab.bond(false, n.ID, n.addr(), uint16(n.TCP))
+            nn, _ := tab.bond(false, n.ID, n.addr(), n.TCP)
             rc <- nn
         }(nodes[i])
     }
@@ -238,7 +238,7 @@ func (self *manifestTrie) addEntry(entry *manifestTrieEntry, quitC chan bool) {
         return
     }

-    b := byte(entry.Path[0])
+    b := entry.Path[0]
     oldentry := self.entries[b]
     if (oldentry == nil) || (oldentry.Path == entry.Path && oldentry.ContentType != ManifestType) {
         self.entries[b] = entry
@@ -294,7 +294,7 @@ func (self *manifestTrie) deleteEntry(path string, quitC chan bool) {
         return
     }

-    b := byte(path[0])
+    b := path[0]
     entry := self.entries[b]
     if entry == nil {
         return
@@ -425,7 +425,7 @@ func (self *manifestTrie) findPrefixOf(path string, quitC chan bool) (entry *man
     }

     //see if first char is in manifest entries
-    b := byte(path[0])
+    b := path[0]
     entry = self.entries[b]
     if entry == nil {
         return self.entries[256], 0
@@ -19,15 +19,16 @@
 package fuse

 import (
-    "bazil.org/fuse"
-    "bazil.org/fuse/fs"
     "errors"
-    "github.com/ethereum/go-ethereum/log"
-    "github.com/ethereum/go-ethereum/swarm/storage"
-    "golang.org/x/net/context"
     "io"
     "os"
     "sync"
+
+    "bazil.org/fuse"
+    "bazil.org/fuse/fs"
+    "github.com/ethereum/go-ethereum/log"
+    "github.com/ethereum/go-ethereum/swarm/storage"
+    "golang.org/x/net/context"
 )

 const (
@@ -87,7 +88,7 @@ func (file *SwarmFile) Attr(ctx context.Context, a *fuse.Attr) error {
         if err != nil {
             log.Warn("Couldnt get size of file %s : %v", file.path, err)
         }
-        file.fileSize = int64(size)
+        file.fileSize = size
     }
     a.Size = uint64(file.fileSize)
     return nil
@@ -55,7 +55,7 @@ func (self *Depo) HandleUnsyncedKeysMsg(req *unsyncedKeysMsgData, p *peer) error
     var err error
     for _, req := range unsynced {
         // skip keys that are found,
-        chunk, err = self.localStore.Get(storage.Key(req.Key[:]))
+        chunk, err = self.localStore.Get(req.Key[:])
         if err != nil || chunk.SData == nil {
             missing = append(missing, req)
         }
@@ -67,7 +67,7 @@ func proximity(one, other Address) (ret int) {
     for i := 0; i < len(one); i++ {
         oxo := one[i] ^ other[i]
         for j := 0; j < 8; j++ {
-            if (uint8(oxo)>>uint8(7-j))&0x01 != 0 {
+            if (oxo>>uint8(7-j))&0x01 != 0 {
                 return i*8 + j
             }
         }
@@ -211,12 +211,12 @@ func (self *KadDb) findBest(maxBinSize int, binSize func(int) int) (node *NodeRe
             }

             // if node is scheduled to connect
-            if time.Time(node.After).After(time.Now()) {
+            if node.After.After(time.Now()) {
                 log.Debug(fmt.Sprintf("kaddb record %v (PO%03d:%d) skipped. seen at %v (%v ago), scheduled at %v", node.Addr, po, cursor, node.Seen, delta, node.After))
                 continue ROW
             }

-            delta = time.Since(time.Time(node.Seen))
+            delta = time.Since(node.Seen)
             if delta < self.initialRetryInterval {
                 delta = self.initialRetryInterval
             }
@@ -230,7 +230,7 @@ func (self *KadDb) findBest(maxBinSize int, binSize func(int) int) (node *NodeRe
             log.Debug(fmt.Sprintf("kaddb record %v (PO%03d:%d) ready to be tried. seen at %v (%v ago), scheduled at %v", node.Addr, po, cursor, node.Seen, delta, node.After))

             // scheduling next check
-            interval = time.Duration(delta * time.Duration(self.connRetryExp))
+            interval = delta * time.Duration(self.connRetryExp)
             after = time.Now().Add(interval)

             log.Debug(fmt.Sprintf("kaddb record %v (PO%03d:%d) selected as candidate connection %v. seen at %v (%v ago), selectable since %v, retry after %v (in %v)", node.Addr, po, cursor, rounds, node.Seen, delta, node.After, after, interval))
@@ -309,7 +309,7 @@ func (self *bzz) handleStatus() (err error) {
         Version:   uint64(Version),
         ID:        "honey",
         Addr:      self.selfAddr(),
-        NetworkId: uint64(self.NetworkId),
+        NetworkId: self.NetworkId,
         Swap: &bzzswap.SwapProfile{
             Profile:    self.swapParams.Profile,
             PayProfile: self.swapParams.PayProfile,
@@ -378,7 +378,7 @@ func (self *syncer) syncHistory(state *syncState) chan interface{} {
             }
             select {
             // blocking until history channel is read from
-            case history <- storage.Key(key):
+            case history <- key:
                 n++
                 log.Trace(fmt.Sprintf("syncer[%v]: history: %v (%v keys)", self.key.Log(), key.Log(), n))
                 state.Latest = key
@@ -205,9 +205,9 @@ func (self *TreeChunker) split(depth int, treeSize int64, key Key, data io.Reade
     }
     // dept > 0
     // intermediate chunk containing child nodes hashes
-    branchCnt := int64((size + treeSize - 1) / treeSize)
+    branchCnt := (size + treeSize - 1) / treeSize

-    var chunk []byte = make([]byte, branchCnt*self.hashSize+8)
+    var chunk = make([]byte, branchCnt*self.hashSize+8)
     var pos, i int64

     binary.LittleEndian.PutUint64(chunk[0:8], uint64(size))
@@ -78,7 +78,7 @@ type memTree struct {
 func newMemTree(b uint, parent *memTree, pidx uint) (node *memTree) {
     node = new(memTree)
     node.bits = b
-    node.width = 1 << uint(b)
+    node.width = 1 << b
     node.subtree = make([]*memTree, node.width)
     node.access = make([]uint64, node.width-1)
     node.parent = parent
@@ -327,7 +327,7 @@ func (self *PyramidChunker) loadTree(chunkLevel [][]*TreeEntry, key Key, chunkC
     // Add the root chunk entry
     branchCount := int64(len(chunk.SData)-8) / self.hashSize
     newEntry := &TreeEntry{
-        level:       int(depth - 1),
+        level:       depth - 1,
         branchCount: branchCount,
         subtreeSize: uint64(chunk.Size),
         chunk:       chunk.SData,
@@ -352,7 +352,7 @@ func (self *PyramidChunker) loadTree(chunkLevel [][]*TreeEntry, key Key, chunkC
         }
         bewBranchCount := int64(len(newChunk.SData)-8) / self.hashSize
         newEntry := &TreeEntry{
-            level:       int(lvl - 1),
+            level:       lvl - 1,
             branchCount: bewBranchCount,
             subtreeSize: uint64(newChunk.Size),
             chunk:       newChunk.SData,
@@ -40,8 +40,8 @@ func BytesToTopic(b []byte) (t TopicType) {
 }

 // String converts a topic byte array to a string representation.
-func (topic *TopicType) String() string {
-    return string(common.ToHex(topic[:]))
+func (t *TopicType) String() string {
+    return common.ToHex(t[:])
 }

 // MarshalText returns the hex representation of t.
@@ -171,7 +171,7 @@ func (w *Whisper) SetMaxMessageSize(size uint32) error {
     if size > MaxMessageSize {
         return fmt.Errorf("message size too large [%d>%d]", size, MaxMessageSize)
     }
-    w.settings.Store(maxMsgSizeIdx, uint32(size))
+    w.settings.Store(maxMsgSizeIdx, size)
     return nil
 }

@@ -40,8 +40,8 @@ func BytesToTopic(b []byte) (t TopicType) {
 }

 // String converts a topic byte array to a string representation.
-func (topic *TopicType) String() string {
-    return string(common.ToHex(topic[:]))
+func (t *TopicType) String() string {
+    return common.ToHex(t[:])
 }

 // MarshalText returns the hex representation of t.
@@ -171,7 +171,7 @@ func (w *Whisper) SetMaxMessageSize(size uint32) error {
     if size > MaxMessageSize {
         return fmt.Errorf("message size too large [%d>%d]", size, MaxMessageSize)
     }
-    w.settings.Store(maxMsgSizeIdx, uint32(size))
+    w.settings.Store(maxMsgSizeIdx, size)
     return nil
 }
