forked from cerc-io/plugeth
various: remove redundant parentheses (#15793)
parent 9d48dbf5c2
commit b8caba9709
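The change is mechanical throughout: parentheses that merely restate Go's operator precedence are dropped, since +=, -=, return and comparison expressions already bind the whole right-hand side, so evaluation order is unchanged. A minimal, self-contained sketch of the pattern (hypothetical names, not taken from the diff below):

package main

import "fmt"

func main() {
	size, skip := 4, 8

	offset := 0
	// before: offset += (32 * size) -- the grouping is redundant,
	// += already applies to the entire right-hand expression
	offset += 32 * size

	number := uint64(100)
	// before: number -= (skip + 1)
	number -= uint64(skip) + 1

	// before: alive := (offset%2 == 0) -- the comparison needs no extra parentheses
	alive := offset%2 == 0

	fmt.Println(offset, number, alive)
}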
@@ -202,7 +202,7 @@ func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
 	inputOffset := 0
 	for _, abiArg := range abiArgs {
 		if abiArg.Type.T == ArrayTy {
-			inputOffset += (32 * abiArg.Type.Size)
+			inputOffset += 32 * abiArg.Type.Size
 		} else {
 			inputOffset += 32
 		}
@@ -315,7 +315,7 @@ func checkDeprecated(ctx *cli.Context) {
 func printConfig(config *bzzapi.Config) string {
 	out, err := tomlSettings.Marshal(&config)
 	if err != nil {
-		return (fmt.Sprintf("Something is not right with the configuration: %v", err))
+		return fmt.Sprintf("Something is not right with the configuration: %v", err)
 	}
 	return string(out)
 }
@@ -40,7 +40,7 @@ func fastXORBytes(dst, a, b []byte) int {
 			dw[i] = aw[i] ^ bw[i]
 		}
 	}
-	for i := (n - n%wordSize); i < n; i++ {
+	for i := n - n%wordSize; i < n; i++ {
 		dst[i] = a[i] ^ b[i]
 	}
 	return n
@@ -84,7 +84,7 @@ func fastANDBytes(dst, a, b []byte) int {
 			dw[i] = aw[i] & bw[i]
 		}
 	}
-	for i := (n - n%wordSize); i < n; i++ {
+	for i := n - n%wordSize; i < n; i++ {
 		dst[i] = a[i] & b[i]
 	}
 	return n
@@ -128,7 +128,7 @@ func fastORBytes(dst, a, b []byte) int {
 			dw[i] = aw[i] | bw[i]
 		}
 	}
-	for i := (n - n%wordSize); i < n; i++ {
+	for i := n - n%wordSize; i < n; i++ {
 		dst[i] = a[i] | b[i]
 	}
 	return n
@@ -168,7 +168,7 @@ func fastTestBytes(p []byte) bool {
 			}
 		}
 	}
-	for i := (n - n%wordSize); i < n; i++ {
+	for i := n - n%wordSize; i < n; i++ {
 		if p[i] != 0 {
 			return true
 		}
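In all four bitutil helpers above, the simplified loop bound is the index of the first byte not covered by the word-at-a-time pass: whole words cover the first n - n%wordSize bytes and the tail loop finishes the rest byte by byte. A small sketch of that boundary arithmetic (wordSize assumed to be 8, i.e. 64-bit words):

package main

import "fmt"

// Boundary used by the fast*Bytes helpers: the word pass covers the first
// n - n%wordSize bytes, and the byte loop starting at that index handles
// the remainder. wordSize is assumed to be 8 here (64-bit words).
const wordSize = 8

func main() {
	for _, n := range []int{5, 8, 13, 32} {
		tailStart := n - n%wordSize // first index left for the byte-wise tail loop
		fmt.Printf("n=%2d  word pass covers [0,%2d)  tail loop runs [%2d,%2d)\n",
			n, tailStart, tailStart, n)
	}
}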
@@ -314,7 +314,7 @@ func (prv *PrivateKey) Decrypt(rand io.Reader, c, s1, s2 []byte) (m []byte, err
 
 	switch c[0] {
 	case 2, 3, 4:
-		rLen = ((prv.PublicKey.Curve.Params().BitSize + 7) / 4)
+		rLen = (prv.PublicKey.Curve.Params().BitSize + 7) / 4
 		if len(c) < (rLen + hLen + 1) {
 			err = ErrInvalidMessage
 			return
@@ -394,14 +394,14 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 			case query.Reverse:
 				// Number based traversal towards the genesis block
 				if query.Origin.Number >= query.Skip+1 {
-					query.Origin.Number -= (query.Skip + 1)
+					query.Origin.Number -= query.Skip + 1
 				} else {
 					unknown = true
 				}
 
 			case !query.Reverse:
 				// Number based traversal towards the leaf block
-				query.Origin.Number += (query.Skip + 1)
+				query.Origin.Number += query.Skip + 1
 			}
 		}
 		return p.SendBlockHeaders(headers)
@@ -454,14 +454,14 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 			case query.Reverse:
 				// Number based traversal towards the genesis block
 				if query.Origin.Number >= query.Skip+1 {
-					query.Origin.Number -= (query.Skip + 1)
+					query.Origin.Number -= query.Skip + 1
 				} else {
 					unknown = true
 				}
 
 			case !query.Reverse:
 				// Number based traversal towards the leaf block
-				query.Origin.Number += (query.Skip + 1)
+				query.Origin.Number += query.Skip + 1
 			}
 		}
 
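Both handler hunks apply the same skip-based stepping: each answered header moves the query origin by query.Skip+1 block numbers, toward genesis when the query is reversed and toward the head otherwise; the compound assignment already covers the whole sum, so the grouping was redundant. A simplified, self-contained sketch of that traversal (stand-in struct, not the actual protocol types):

package main

import "fmt"

// headerQuery is a simplified stand-in for the header request: origin block
// number, skip distance and traversal direction.
type headerQuery struct {
	origin  uint64
	skip    uint64
	reverse bool
}

// step advances the query by one response slot, mirroring the arithmetic in the hunks above.
func step(q *headerQuery) bool {
	if q.reverse {
		// Number based traversal towards the genesis block
		if q.origin >= q.skip+1 {
			q.origin -= q.skip + 1
			return true
		}
		return false // would underflow: remaining headers are unknown
	}
	// Number based traversal towards the leaf block
	q.origin += q.skip + 1
	return true
}

func main() {
	q := headerQuery{origin: 100, skip: 9, reverse: true}
	for i := 0; i < 3 && step(&q); i++ {
		fmt.Println("next origin:", q.origin) // 90, 80, 70
	}
}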
@@ -618,7 +618,7 @@ func (e *knownEntry) Weight() int64 {
 	if e.state != psNotConnected || !e.known || e.delayedRetry {
 		return 0
 	}
-	return int64(1000000000 * e.connectStats.recentAvg() * math.Exp(-float64(e.lastConnected.fails)*failDropLn-e.responseStats.recentAvg()/float64(responseScoreTC)-e.delayStats.recentAvg()/float64(delayScoreTC)) * math.Pow((1-e.timeoutStats.recentAvg()), timeoutPow))
+	return int64(1000000000 * e.connectStats.recentAvg() * math.Exp(-float64(e.lastConnected.fails)*failDropLn-e.responseStats.recentAvg()/float64(responseScoreTC)-e.delayStats.recentAvg()/float64(delayScoreTC)) * math.Pow(1-e.timeoutStats.recentAvg(), timeoutPow))
 }
 
 // poolEntryAddress is a separate object because currently it is necessary to remember
@@ -60,7 +60,7 @@ func (self *chunkerTester) Split(chunker Splitter, data io.Reader, size int64, c
 	for {
 		select {
 		case <-timeout:
-			return errors.New(("Split timeout error"))
+			return errors.New("Split timeout error")
 		case <-quitC:
 			return nil
 		case chunk := <-chunkC:
@@ -97,7 +97,7 @@ func (self *chunkerTester) Append(chunker Splitter, rootKey Key, data io.Reader,
 	for {
 		select {
 		case <-timeout:
-			return errors.New(("Append timeout error"))
+			return errors.New("Append timeout error")
 		case <-quitC:
 			return nil
 		case chunk := <-chunkC:
@@ -146,7 +146,7 @@ func (self *chunkerTester) Join(chunker Chunker, key Key, c int, chunkC chan *Ch
 	for {
 		select {
 		case <-timeout:
-			return errors.New(("Join timeout error"))
+			return errors.New("Join timeout error")
 		case chunk, ok := <-chunkC:
 			if !ok {
 				close(quitC)
@@ -155,7 +155,7 @@ func (self *chunkerTester) Join(chunker Chunker, key Key, c int, chunkC chan *Ch
 			// this just mocks the behaviour of a chunk store retrieval
 			stored, success := self.chunks[chunk.Key.String()]
 			if !success {
-				return errors.New(("Not found"))
+				return errors.New("Not found")
 			}
 			chunk.SData = stored.SData
 			chunk.Size = int64(binary.LittleEndian.Uint64(chunk.SData[0:8]))
@@ -338,7 +338,7 @@ func (self *PyramidChunker) loadTree(chunkLevel [][]*TreeEntry, key Key, chunkC
 	chunkLevel[depth-1] = append(chunkLevel[depth-1], newEntry)
 
 	// Add the rest of the tree
-	for lvl := (depth - 1); lvl >= 1; lvl-- {
+	for lvl := depth - 1; lvl >= 1; lvl-- {
 
 		//TODO(jmozah): instead of loading finished branches and then trim in the end,
 		//avoid loading them in the first place
@@ -489,7 +489,7 @@ func (t *Trie) Commit() (root common.Hash, err error) {
 func (t *Trie) CommitTo(db DatabaseWriter) (root common.Hash, err error) {
 	hash, cached, err := t.hashRoot(db)
 	if err != nil {
-		return (common.Hash{}), err
+		return common.Hash{}, err
 	}
 	t.root = cached
 	t.cachegen++
@@ -88,7 +88,7 @@ func generateTestCases(t *testing.T, SizeTestFilters int) []FilterTestCase {
 	for i := 0; i < SizeTestFilters; i++ {
 		f, _ := generateFilter(t, true)
 		cases[i].f = f
-		cases[i].alive = (mrand.Int()&int(1) == 0)
+		cases[i].alive = mrand.Int()&int(1) == 0
 	}
 	return cases
 }
@@ -122,7 +122,7 @@ func TestInstallFilters(t *testing.T) {
 
 	for i, testCase := range tst {
 		fil := filters.Get(testCase.id)
-		exist := (fil != nil)
+		exist := fil != nil
 		if exist != testCase.alive {
 			t.Fatalf("seed %d: failed alive: %d, %v, %v", seed, i, exist, testCase.alive)
 		}
@@ -88,7 +88,7 @@ func generateTestCases(t *testing.T, SizeTestFilters int) []FilterTestCase {
 	for i := 0; i < SizeTestFilters; i++ {
 		f, _ := generateFilter(t, true)
 		cases[i].f = f
-		cases[i].alive = (mrand.Int()&int(1) == 0)
+		cases[i].alive = mrand.Int()&int(1) == 0
 	}
 	return cases
 }
@@ -122,7 +122,7 @@ func TestInstallFilters(t *testing.T) {
 
 	for i, testCase := range tst {
 		fil := filters.Get(testCase.id)
-		exist := (fil != nil)
+		exist := fil != nil
 		if exist != testCase.alive {
 			t.Fatalf("seed %d: failed alive: %d, %v, %v", seed, i, exist, testCase.alive)
 		}