style: gofumpt linting (#15605)
parent 24344fb382
commit 1f2875d445
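The hunks below are mechanical rewrites from this linting pass. The two patterns that account for most of the diff are collapsing repeated parameter types into a single type declaration and flattening single-entry var blocks. A small before/after illustration on a hypothetical helper (clamp and defaultName are illustrative only, not part of the commit):

// Before:
var (
	defaultName = "example"
)

func clamp(v int64, lo int64, hi int64) int64 {
	if v < lo {
		return lo
	}
	if v > hi {
		return hi
	}
	return v
}

// After: single-entry var block flattened, repeated parameter types merged.
var defaultName = "example"

func clamp(v, lo, hi int64) int64 {
	if v < lo {
		return lo
	}
	if v > hi {
		return hi
	}
	return v
}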
@@ -49,7 +49,8 @@ func testExecCommon(t *testing.T, buildModuleCommand func(string, *Builder) (*co
Builder: flag.Builder{
GetClientConn: func() (grpc.ClientConnInterface, error) {
return conn, nil
}},
},
},
GetClientConn: func(*cobra.Command) (grpc.ClientConnInterface, error) {
return conn, nil
},

@@ -159,7 +159,6 @@ func TestJSONParsing(t *testing.T) {
"-u", "27", // shorthand
)
assert.DeepEqual(t, conn.lastRequest, conn.lastResponse.(*testpb.EchoResponse).Request, protocmp.Transform())

}

func TestOptions(t *testing.T) {

@@ -319,7 +318,7 @@ type testClientConn struct {
errorOut *bytes.Buffer
}

func (t *testClientConn) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...grpc.CallOption) error {
func (t *testClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...grpc.CallOption) error {
err := t.ClientConn.Invoke(ctx, method, args, reply, opts...)
t.lastRequest = args
t.lastResponse = reply

@@ -24,7 +24,7 @@ type pairKeyCodec[K1, K2 any] interface {
// NewMultiPair instantiates a new MultiPair index.
// NOTE: when using this function you will need to type hint: doing NewMultiPair[Value]()
// Example: if the value of the indexed map is string, you need to do NewMultiPair[string](...)
func NewMultiPair[Value any, K1, K2 any](
func NewMultiPair[Value, K1, K2 any](
sb *collections.SchemaBuilder,
prefix collections.Prefix,
name string,

@@ -75,7 +75,7 @@ type Ranger[K any] interface {
// iteration will yield keys from the smallest to the biggest, if order
// is OrderDescending then the iteration will yield keys from the biggest to the smallest.
// Ordering is defined by the keys bytes representation, which is dependent on the KeyCodec used.
RangeValues() (start *RangeKey[K], end *RangeKey[K], order Order, err error)
RangeValues() (start, end *RangeKey[K], order Order, err error)
}

// Range is a Ranger implementer.
@@ -126,7 +126,7 @@ var (
errOrder = errors.New("collections: invalid order")
)

func (r *Range[K]) RangeValues() (start *RangeKey[K], end *RangeKey[K], order Order, err error) {
func (r *Range[K]) RangeValues() (start, end *RangeKey[K], order Order, err error) {
return r.start, r.end, r.order, nil
}

@@ -175,7 +175,7 @@ func TestWalk(t *testing.T) {
}

u := uint64(0)
err = m.Walk(ctx, nil, func(key uint64, value uint64) bool {
err = m.Walk(ctx, nil, func(key, value uint64) bool {
if key == 5 {
return true
}

@@ -261,7 +261,7 @@ func (p *PairRange[K1, K2]) Descending() *PairRange[K1, K2] {
return p
}

func (p *PairRange[K1, K2]) RangeValues() (start *RangeKey[Pair[K1, K2]], end *RangeKey[Pair[K1, K2]], order Order, err error) {
func (p *PairRange[K1, K2]) RangeValues() (start, end *RangeKey[Pair[K1, K2]], order Order, err error) {
if p.err != nil {
return nil, nil, 0, err
}

@@ -22,6 +22,6 @@ func (privKey *PrivKey) Sign(msg []byte) ([]byte, error) {

// VerifySignature validates the signature.
// The msg will be hashed prior to signature verification.
func (pubKey *PubKey) VerifySignature(msg []byte, sigStr []byte) bool {
func (pubKey *PubKey) VerifySignature(msg, sigStr []byte) bool {
return secp256k1.VerifySignature(pubKey.Bytes(), crypto.Sha256(msg), sigStr)
}

@@ -131,11 +131,11 @@ func fullTypeName(typeName string) string {
return fmt.Sprintf("cosmossdk.io/depinject_test/depinject_test.%s", typeName)
}

func (s *bindingSuite) ThereIsAGlobalBindingForA(preferredType string, interfaceType string) {
func (s *bindingSuite) ThereIsAGlobalBindingForA(preferredType, interfaceType string) {
s.addConfig(depinject.BindInterface(fullTypeName(interfaceType), fullTypeName(preferredType)))
}

func (s *bindingSuite) ThereIsABindingForAInModule(preferredType string, interfaceType string, moduleName string) {
func (s *bindingSuite) ThereIsABindingForAInModule(preferredType, interfaceType, moduleName string) {
s.addConfig(depinject.BindInterfaceInModule(moduleName, fullTypeName(interfaceType), fullTypeName(preferredType)))
}

@@ -147,7 +147,7 @@ func (s *bindingSuite) ModuleWantsADuck(module string) {
s.addConfig(depinject.ProvideInModule(module, ProvideModuleDuck))
}

func (s *bindingSuite) ModuleResolvesA(module string, duckType string) {
func (s *bindingSuite) ModuleResolvesA(module, duckType string) {
pond := s.resolvePond()
moduleFound := false
for _, dw := range pond.Ducks {

@@ -109,7 +109,7 @@ func invoke(ctr *container, key *moduleKey, invokers []interface{}) error {
//
// "cosmossdk.io/depinject_test/depinject_test.Duck",
// "cosmossdk.io/depinject_test/depinject_test.Canvasback")
func BindInterface(inTypeName string, outTypeName string) Config {
func BindInterface(inTypeName, outTypeName string) Config {
return containerConfig(func(ctr *container) error {
return bindInterface(ctr, inTypeName, outTypeName, "")
})
@@ -125,13 +125,13 @@ func BindInterface(inTypeName string, outTypeName string) Config {
// "moduleFoo",
// "cosmossdk.io/depinject_test/depinject_test.Duck",
// "cosmossdk.io/depinject_test/depinject_test.Canvasback")
func BindInterfaceInModule(moduleName string, inTypeName string, outTypeName string) Config {
func BindInterfaceInModule(moduleName, inTypeName, outTypeName string) Config {
return containerConfig(func(ctr *container) error {
return bindInterface(ctr, inTypeName, outTypeName, moduleName)
})
}

func bindInterface(ctr *container, inTypeName string, outTypeName string, moduleName string) error {
func bindInterface(ctr *container, inTypeName, outTypeName, moduleName string) error {
var mk *moduleKey
if moduleName != "" {
mk = &moduleKey{name: moduleName}
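The doc comments above already show the intended call shape for these bindings; a hedged usage sketch follows, combining BindInterface with Provide and Inject. Only the BindInterface/BindInterfaceInModule signatures come from this diff; Duck, Canvasback, and the provider functions stand in for the depinject test fixtures, and the fully qualified type-name strings mirror the comment rather than a verified runnable setup.

// Hedged sketch, not part of the commit: resolve an interface with two
// candidate implementations by naming the preferred concrete type.
cfg := depinject.Configs(
	depinject.Provide(ProvideCanvasback, ProvideMallard), // assumed providers for two Duck implementations
	depinject.BindInterface(
		"cosmossdk.io/depinject_test/depinject_test.Duck",
		"cosmossdk.io/depinject_test/depinject_test.Canvasback"),
)

var duck Duck
err := depinject.Inject(cfg, &duck) // without the binding, resolving Duck would be ambiguous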
@@ -119,7 +119,7 @@ func (g *FileGen) TypeExpr(typ reflect.Type) (ast.Expr, error) {

var genericTypeNameRegex = regexp.MustCompile(`(\w+)\[(.*)]`)

func (g *FileGen) importGenericTypeParams(typeName string, pkgPath string) (newTypeName string) {
func (g *FileGen) importGenericTypeParams(typeName, pkgPath string) (newTypeName string) {
// a generic type parameter from the same package the generic type is defined won't have the
// full package name so we need to compare it with the final package part (the default import prefix)
// ex: for a/b.C in package a/b, we'll just see the type param b.C.

@@ -88,7 +88,7 @@ func (f *location) Format(w fmt.State, c rune) {

const _vendor = "/vendor/"

func splitFuncName(function string) (pname string, fname string) {
func splitFuncName(function string) (pname, fname string) {
if len(function) == 0 {
return
}

@@ -896,7 +896,7 @@ func LegacyDecEq(t *testing.T, exp, got LegacyDec) (*testing.T, bool, string, st
return t, exp.Equal(got), "expected:\t%v\ngot:\t\t%v", exp.String(), got.String()
}

func LegacyDecApproxEq(t *testing.T, d1 LegacyDec, d2 LegacyDec, tol LegacyDec) (*testing.T, bool, string, string, string) {
func LegacyDecApproxEq(t *testing.T, d1, d2, tol LegacyDec) (*testing.T, bool, string, string, string) {
diff := d1.Sub(d2).Abs()
return t, diff.LTE(tol), "expected |d1 - d2| <:\t%v\ngot |d1 - d2| = \t\t%v", tol.String(), diff.String()
}

math/int.go
@@ -17,31 +17,31 @@ func newIntegerFromString(s string) (*big.Int, bool) {
return new(big.Int).SetString(s, 0)
}

func equal(i *big.Int, i2 *big.Int) bool { return i.Cmp(i2) == 0 }
func equal(i, i2 *big.Int) bool { return i.Cmp(i2) == 0 }

func gt(i *big.Int, i2 *big.Int) bool { return i.Cmp(i2) == 1 }
func gt(i, i2 *big.Int) bool { return i.Cmp(i2) == 1 }

func gte(i *big.Int, i2 *big.Int) bool { return i.Cmp(i2) >= 0 }
func gte(i, i2 *big.Int) bool { return i.Cmp(i2) >= 0 }

func lt(i *big.Int, i2 *big.Int) bool { return i.Cmp(i2) == -1 }
func lt(i, i2 *big.Int) bool { return i.Cmp(i2) == -1 }

func lte(i *big.Int, i2 *big.Int) bool { return i.Cmp(i2) <= 0 }
func lte(i, i2 *big.Int) bool { return i.Cmp(i2) <= 0 }

func add(i *big.Int, i2 *big.Int) *big.Int { return new(big.Int).Add(i, i2) }
func add(i, i2 *big.Int) *big.Int { return new(big.Int).Add(i, i2) }

func sub(i *big.Int, i2 *big.Int) *big.Int { return new(big.Int).Sub(i, i2) }
func sub(i, i2 *big.Int) *big.Int { return new(big.Int).Sub(i, i2) }

func mul(i *big.Int, i2 *big.Int) *big.Int { return new(big.Int).Mul(i, i2) }
func mul(i, i2 *big.Int) *big.Int { return new(big.Int).Mul(i, i2) }

func div(i *big.Int, i2 *big.Int) *big.Int { return new(big.Int).Quo(i, i2) }
func div(i, i2 *big.Int) *big.Int { return new(big.Int).Quo(i, i2) }

func mod(i *big.Int, i2 *big.Int) *big.Int { return new(big.Int).Mod(i, i2) }
func mod(i, i2 *big.Int) *big.Int { return new(big.Int).Mod(i, i2) }

func neg(i *big.Int) *big.Int { return new(big.Int).Neg(i) }

func abs(i *big.Int) *big.Int { return new(big.Int).Abs(i) }

func min(i *big.Int, i2 *big.Int) *big.Int {
func min(i, i2 *big.Int) *big.Int {
if i.Cmp(i2) == 1 {
return new(big.Int).Set(i2)
}
@@ -49,7 +49,7 @@ func min(i *big.Int, i2 *big.Int) *big.Int {
return new(big.Int).Set(i)
}

func max(i *big.Int, i2 *big.Int) *big.Int {
func max(i, i2 *big.Int) *big.Int {
if i.Cmp(i2) == -1 {
return new(big.Int).Set(i2)
}

@@ -240,7 +240,7 @@ func checkNewUint(i *big.Int) (Uint, error) {

// RelativePow raises x to the power of n, where x (and the result, z) are scaled by factor b
// for example, RelativePow(210, 2, 100) = 441 (2.1^2 = 4.41)
func RelativePow(x Uint, n Uint, b Uint) (z Uint) {
func RelativePow(x, n, b Uint) (z Uint) {
if x.IsZero() {
if n.IsZero() {
z = b // 0^0 = 1
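The comment's worked example can be reproduced with a naive scaled-power loop. This sketch uses plain uint64 rather than math.Uint and ignores the overflow and rounding concerns the real RelativePow has to handle; it only illustrates the scaling convention.

// relativePowSketch: x and the result are scaled by b,
// so relativePowSketch(210, 2, 100) == 441, i.e. 2.1^2 = 4.41.
func relativePowSketch(x, n, b uint64) uint64 {
	if n == 0 {
		return b // anything^0 is 1 at scale b, including 0^0
	}
	if x == 0 {
		return 0
	}
	z := x
	for i := uint64(1); i < n; i++ {
		z = z * x / b // divide by b after each multiply to stay at scale b
	}
	return z
}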
@@ -252,7 +252,7 @@ func (g queryProtoGen) startResponseType(format string, args ...any) {
g.startRequestResponseType("response", format, args...)
}

func (g queryProtoGen) startRequestResponseType(typ string, format string, args ...any) {
func (g queryProtoGen) startRequestResponseType(typ, format string, args ...any) {
msgTypeName := fmt.Sprintf(format, args...)
g.msgs.F("// %s is the %s/%s %s type.", msgTypeName, g.queryServiceName(), msgTypeName, typ)
g.msgs.F("message %s {", msgTypeName)

@@ -145,7 +145,7 @@ type debugIterator struct {
debugger Debugger
}

func (d debugIterator) Domain() (start []byte, end []byte) {
func (d debugIterator) Domain() (start, end []byte) {
start, end = d.iterator.Domain()
d.debugger.Log(fmt.Sprintf(" DOMAIN %x -> %x", start, end))
return start, end

@@ -198,7 +198,7 @@ var (
// isNonTrivialUniqueKey checks if unique key fields are non-trivial, meaning that they
// don't contain the full primary key. If they contain the full primary key, then
// we can just use a regular index because there is no new unique constraint.
func isNonTrivialUniqueKey(fields []protoreflect.Name, primaryKeyFields []protoreflect.Name) bool {
func isNonTrivialUniqueKey(fields, primaryKeyFields []protoreflect.Name) bool {
have := map[protoreflect.Name]bool{}
for _, field := range fields {
have[field] = true
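The comment above describes a plain subset check; a self-contained sketch of the same idea on string field names (the real function operates on protoreflect.Name values and is only partially shown in this hunk):

// uniqueKeyIsNonTrivialSketch reports whether the candidate unique-key fields
// add a real constraint, i.e. they do not already cover every primary-key field.
func uniqueKeyIsNonTrivialSketch(fields, primaryKeyFields []string) bool {
	have := map[string]bool{}
	for _, f := range fields {
		have[f] = true
	}
	for _, pk := range primaryKeyFields {
		if !have[pk] {
			return true // a primary-key field is missing, so the unique index is meaningful
		}
	}
	return false // the unique key contains the full primary key; a regular index would do
}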
@@ -17,7 +17,7 @@ import (

// ExportAppStateAndValidators exports the state of the application for a genesis
// file.
func (app *SimApp) ExportAppStateAndValidators(forZeroHeight bool, jailAllowedAddrs []string, modulesToExport []string) (servertypes.ExportedApp, error) {
func (app *SimApp) ExportAppStateAndValidators(forZeroHeight bool, jailAllowedAddrs, modulesToExport []string) (servertypes.ExportedApp, error) {
// as if they could withdraw from the start of the next block
ctx := app.NewContext(true, cmtproto.Header{Height: app.LastBlockHeight()})

@@ -468,7 +468,7 @@ func calculateIP(ip string, i int) (string, error) {
return ipv4.String(), nil
}

func writeFile(name string, dir string, contents []byte) error {
func writeFile(name, dir string, contents []byte) error {
file := filepath.Join(dir, name)

if err := os.MkdirAll(dir, 0o755); err != nil {

@@ -34,7 +34,7 @@ func generateSequentialKeys(startKey []byte, numKeys int) [][]byte {
}

// Generate many random, unsorted keys
func generateRandomKeys(keySize int, numKeys int) [][]byte {
func generateRandomKeys(keySize, numKeys int) [][]byte {
toReturn := make([][]byte, 0, numKeys)
for i := 0; i < numKeys; i++ {
newKey := randSlice(keySize)

@@ -60,7 +60,7 @@ func newMemIterator(start, end []byte, items BTree, ascending bool) *memIterator
return mi
}

func (mi *memIterator) Domain() (start []byte, end []byte) {
func (mi *memIterator) Domain() (start, end []byte) {
return mi.start, mi.end
}

@@ -68,7 +68,7 @@ func (store *Store) Get(key []byte) (value []byte) {
}

// Set implements types.KVStore.
func (store *Store) Set(key []byte, value []byte) {
func (store *Store) Set(key, value []byte) {
types.AssertValidKey(key)
types.AssertValidValue(value)

@@ -44,7 +44,7 @@ func (gs *Store) Get(key []byte) (value []byte) {
}

// Implements KVStore.
func (gs *Store) Set(key []byte, value []byte) {
func (gs *Store) Set(key, value []byte) {
types.AssertValidKey(key)
types.AssertValidValue(value)
gs.gasMeter.ConsumeGas(gs.gasConfig.WriteCostFlat, types.GasWriteCostFlatDesc)
@@ -121,7 +121,7 @@ func newGasIterator(gasMeter types.GasMeter, gasConfig types.GasConfig, parent t
}

// Implements Iterator.
func (gi *gasIterator) Domain() (start []byte, end []byte) {
func (gi *gasIterator) Domain() (start, end []byte) {
return gi.parent.Domain()
}

@@ -293,7 +293,7 @@ func TestIAVLReverseIterator(t *testing.T) {
iavlStore.Set([]byte{0x00, 0x02}, []byte("0 2"))
iavlStore.Set([]byte{0x01}, []byte("1"))

testReverseIterator := func(t *testing.T, start []byte, end []byte, expected []string) {
testReverseIterator := func(t *testing.T, start, end []byte, expected []string) {
iter := iavlStore.ReverseIterator(start, end)
var i int
for i = 0; iter.Valid(); iter.Next() {

@@ -32,7 +32,7 @@ func (s *Store) Get(key []byte) []byte {

// Set implements the KVStore interface. It traces a write operation and
// delegates the Set call to the parent KVStore.
func (s *Store) Set(key []byte, value []byte) {
func (s *Store) Set(key, value []byte) {
types.AssertValidKey(key)
s.parent.Set(key, value)
s.listener.OnWrite(s.parentStoreKey, key, value, false)
@@ -87,7 +87,7 @@ func newTraceIterator(parent types.Iterator, listener *types.MemoryListener) typ
}

// Domain implements the Iterator interface.
func (li *listenIterator) Domain() (start []byte, end []byte) {
func (li *listenIterator) Domain() (start, end []byte) {
return li.parent.Domain()
}

@@ -27,7 +27,7 @@ func NewStore(parent types.KVStore, prefix []byte) Store {
}
}

func cloneAppend(bz []byte, tail []byte) (res []byte) {
func cloneAppend(bz, tail []byte) (res []byte) {
res = make([]byte, len(bz)+len(tail))
copy(res, bz)
copy(res[len(bz):], tail)
@@ -193,7 +193,7 @@ func (pi *prefixIterator) Error() error {
}

// copied from github.com/cometbft/cometbft/libs/db/prefix_db.go
func stripPrefix(key []byte, prefix []byte) []byte {
func stripPrefix(key, prefix []byte) []byte {
if len(key) < len(prefix) || !bytes.Equal(key[:len(prefix)], prefix) {
panic("should not happen")
}

@@ -253,7 +253,7 @@ func mockStoreWithStuff() types.KVStore {
return store
}

func checkValue(t *testing.T, store types.KVStore, key []byte, expected []byte) {
func checkValue(t *testing.T, store types.KVStore, key, expected []byte) {
bz := store.Get(key)
require.Equal(t, expected, bz)
}

@@ -330,7 +330,7 @@ func deleteKVStore(kv types.KVStore) error {
}

// we simulate move by a copy and delete
func moveKVStoreData(oldDB types.KVStore, newDB types.KVStore) error {
func moveKVStoreData(oldDB, newDB types.KVStore) error {
// we read from one and write to another
itr := oldDB.Iterator(nil, nil)
for itr.Valid() {
@@ -743,7 +743,7 @@ func (rs *Store) SetInitialVersion(version int64) error {
// parsePath expects a format like /<storeName>[/<subpath>]
// Must start with /, subpath may be empty
// Returns error if it doesn't start with /
func parsePath(path string) (storeName string, subpath string, err error) {
func parsePath(path string) (storeName, subpath string, err error) {
if !strings.HasPrefix(path, "/") {
return storeName, subpath, errorsmod.Wrapf(types.ErrUnknownRequest, "invalid path: %s", path)
}
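A stand-alone sketch of the documented path shape, using strings.Cut; this is an illustration rather than the rootmulti implementation, and it skips the SDK error wrapping (assume the standard strings and fmt imports):

// parsePathSketch splits "/<storeName>[/<subpath>]" after checking the leading slash.
func parsePathSketch(path string) (storeName, subpath string, err error) {
	if !strings.HasPrefix(path, "/") {
		return "", "", fmt.Errorf("invalid path: %s", path)
	}
	storeName, subpath, _ = strings.Cut(path[1:], "/")
	return storeName, subpath, nil
}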
@@ -860,7 +860,7 @@ type MockListener struct {
stateCache []types.StoreKVPair
}

func (tl *MockListener) OnWrite(storeKey types.StoreKey, key []byte, value []byte, delete bool) error {
func (tl *MockListener) OnWrite(storeKey types.StoreKey, key, value []byte, delete bool) error {
tl.stateCache = append(tl.stateCache, types.StoreKVPair{
StoreKey: storeKey.Name(),
Key: key,

@@ -236,7 +236,7 @@ func (m *Manager) List() ([]*types.Snapshot, error) {

// LoadChunk loads a chunk into a byte slice, mirroring ABCI LoadChunk. It can be called
// concurrently with other operations. If the chunk does not exist, nil is returned.
func (m *Manager) LoadChunk(height uint64, format uint32, chunk uint32) ([]byte, error) {
func (m *Manager) LoadChunk(height uint64, format, chunk uint32) ([]byte, error) {
reader, err := m.store.LoadChunk(height, format, chunk)
if err != nil {
return nil, err

@@ -166,7 +166,7 @@ func (s *Store) Load(height uint64, format uint32) (*types.Snapshot, <-chan io.R

// LoadChunk loads a chunk from disk, or returns nil if it does not exist. The caller must call
// Close() on it when done.
func (s *Store) LoadChunk(height uint64, format uint32, chunk uint32) (io.ReadCloser, error) {
func (s *Store) LoadChunk(height uint64, format, chunk uint32) (io.ReadCloser, error) {
path := s.pathChunk(height, format, chunk)
file, err := os.Open(path)
if os.IsNotExist(err) {
@@ -176,7 +176,7 @@ func (s *Store) LoadChunk(height uint64, format uint32, chunk uint32) (io.ReadCl
}

// loadChunkFile loads a chunk from disk, and errors if it does not exist.
func (s *Store) loadChunkFile(height uint64, format uint32, chunk uint32) (io.ReadCloser, error) {
func (s *Store) loadChunkFile(height uint64, format, chunk uint32) (io.ReadCloser, error) {
path := s.pathChunk(height, format, chunk)
return os.Open(path)
}
@@ -336,7 +336,7 @@ func (s *Store) pathSnapshot(height uint64, format uint32) string {
}

// pathChunk generates a snapshot chunk path.
func (s *Store) pathChunk(height uint64, format uint32, chunk uint32) string {
func (s *Store) pathChunk(height uint64, format, chunk uint32) string {
return filepath.Join(s.pathSnapshot(height, format), strconv.FormatUint(uint64(chunk), 10))
}

@@ -28,7 +28,7 @@ func GetPluginEnvKey(name string) string {
return fmt.Sprintf("%s_%s", pluginEnvKeyPrefix, strings.ToUpper(name))
}

func NewStreamingPlugin(name string, logLevel string) (interface{}, error) {
func NewStreamingPlugin(name, logLevel string) (interface{}, error) {
logger := hclog.New(&hclog.LoggerOptions{
Output: hclog.DefaultOutput,
Level: toHclogLevel(logLevel),

@@ -60,7 +60,7 @@ func (tkv *Store) Get(key []byte) []byte {

// Set implements the KVStore interface. It traces a write operation and
// delegates the Set call to the parent KVStore.
func (tkv *Store) Set(key []byte, value []byte) {
func (tkv *Store) Set(key, value []byte) {
types.AssertValidKey(key)
writeOperation(tkv.writer, writeOp, tkv.context, key, value)
tkv.parent.Set(key, value)
@@ -116,7 +116,7 @@ func newTraceIterator(w io.Writer, parent types.Iterator, tc types.TraceContext)
}

// Domain implements the Iterator interface.
func (ti *traceIterator) Domain() (start []byte, end []byte) {
func (ti *traceIterator) Domain() (start, end []byte) {
return ti.parent.Domain()
}

@@ -11,7 +11,7 @@ func NewMemoryListener() *MemoryListener {
}

// OnWrite implements MemoryListener interface
func (fl *MemoryListener) OnWrite(storeKey StoreKey, key []byte, value []byte, delete bool) {
func (fl *MemoryListener) OnWrite(storeKey StoreKey, key, value []byte, delete bool) {
fl.stateCache = append(fl.stateCache, &StoreKVPair{
StoreKey: storeKey.Name(),
Delete: delete,

@@ -33,8 +33,9 @@ func MsgRedelegateExec(clientCtx client.Context, from, src, dst, amount fmt.Stri
}

// MsgUnbondExec creates a unbond message.
func MsgUnbondExec(clientCtx client.Context, from fmt.Stringer, valAddress,
amount fmt.Stringer, extraArgs ...string,
func MsgUnbondExec(clientCtx client.Context, from, valAddress,
amount fmt.Stringer,
extraArgs ...string,
) (testutil.BufferWriter, error) {
args := []string{
valAddress.String(),

@@ -91,7 +91,7 @@ func doUnbondingDelegation(
addrDels []sdk.AccAddress,
addrVals []sdk.ValAddress,
hookCalled *bool,
) (completionTime time.Time, bondedAmt math.Int, notBondedAmt math.Int) {
) (completionTime time.Time, bondedAmt, notBondedAmt math.Int) {
// UNDELEGATE
// Save original bonded and unbonded amounts
bondedAmt1 := bankKeeper.GetBalance(ctx, stakingKeeper.GetBondedPool(ctx).GetAddress(), bondDenom).Amount

testutil/testdata/grpc_query.go (vendored)
@@ -66,7 +66,7 @@ func (m *TestAnyResponse) UnpackInterfaces(unpacker types.AnyUnpacker) error {
// 2. That the gas consumption of the query is the same. When
// `gasOverwrite` is set to true, we also check that this consumed
// gas value is equal to the hardcoded `gasConsumed`.
func DeterministicIterations[request proto.Message, response proto.Message](
func DeterministicIterations[request, response proto.Message](
ctx sdk.Context,
t *testing.T,
req request,

@@ -371,7 +371,7 @@ func (s *argsTestSuite) TestGetConfigFromEnv() {
absPath, perr := filepath.Abs(relPath)
s.Require().NoError(perr)

newConfig := func(home, name string, downloadBin, restartUpgrade bool, restartDelay int, skipBackup bool, dataBackupPath string, interval int, preupgradeMaxRetries int, disableLogs bool) *Config {
newConfig := func(home, name string, downloadBin, restartUpgrade bool, restartDelay int, skipBackup bool, dataBackupPath string, interval, preupgradeMaxRetries int, disableLogs bool) *Config {
return &Config{
Home: home,
Name: name,

@@ -33,7 +33,7 @@ type ChainInfo struct {
Config *ChainConfig
}

func NewChainInfo(configDir string, chain string, config *ChainConfig) *ChainInfo {
func NewChainInfo(configDir, chain string, config *ChainConfig) *ChainInfo {
return &ChainInfo{
ConfigDir: configDir,
Chain: chain,

@@ -17,9 +17,9 @@ import (
)

var (
flagInsecure string = "insecure"
flagUpdate string = "update"
flagConfig string = "config"
flagInsecure = "insecure"
flagUpdate = "update"
flagConfig = "config"
)

func RootCommand() (*cobra.Command, error) {
@@ -135,7 +135,7 @@ func RemoteCommand(config *Config, configDir string) ([]*cobra.Command, error) {
return commands, nil
}

func RemoteErrorCommand(config *Config, configDir string, chain string, chainConfig *ChainConfig, err error) *cobra.Command {
func RemoteErrorCommand(config *Config, configDir, chain string, chainConfig *ChainConfig, err error) *cobra.Command {
cmd := &cobra.Command{
Use: chain,
Short: "Unable to load data",

@@ -433,7 +433,7 @@ func (c converter) Amounts(ownedCoins []sdk.Coin, availableCoins sdk.Coins) []*r

// AddOperationIndexes adds the indexes to operations adhering to specific rules:
// operations related to messages will be always before than the balance ones
func AddOperationIndexes(msgOps []*rosettatypes.Operation, balanceOps []*rosettatypes.Operation) (finalOps []*rosettatypes.Operation) {
func AddOperationIndexes(msgOps, balanceOps []*rosettatypes.Operation) (finalOps []*rosettatypes.Operation) {
lenMsgOps := len(msgOps)
lenBalanceOps := len(balanceOps)
finalOps = make([]*rosettatypes.Operation, 0, lenMsgOps+lenBalanceOps)

@@ -20,7 +20,7 @@ func (s *authorizeSuite) Before(t *testing.T) {
s.baseFixture = initFixture(t)
}

func (s *authorizeSuite) HasPermission(a string, b string) {
func (s *authorizeSuite) HasPermission(a, b string) {
panic("PENDING")
}

@@ -28,7 +28,7 @@ func (s *authorizeSuite) HasNoPermissions(a string) {
panic("PENDING")
}

func (s *authorizeSuite) AttemptsToGrantThePermissions(a string, b string, c gocuke.DocString) {
func (s *authorizeSuite) AttemptsToGrantThePermissions(a, b string, c gocuke.DocString) {
panic("PENDING")
}

@@ -20,7 +20,7 @@ func (s *resetSuite) Before(t *testing.T) {
s.baseFixture = initFixture(t)
}

func (s *resetSuite) HasPermission(a string, b string) {
func (s *resetSuite) HasPermission(a, b string) {
panic("PENDING")
}

@@ -28,7 +28,7 @@ func (s *resetSuite) HasNoPermissions(a string) {
panic("PENDING")
}

func (s *resetSuite) AttemptsToResetCircuit(a string, b string, c gocuke.DocString) {
func (s *resetSuite) AttemptsToResetCircuit(a, b string, c gocuke.DocString) {
panic("PENDING")
}

@@ -20,7 +20,7 @@ func (s *tripSuite) Before(t *testing.T) {
s.baseFixture = initFixture(t)
}

func (s *tripSuite) HasPermission(a string, b string) {
func (s *tripSuite) HasPermission(a, b string) {
panic("PENDING")
}

@@ -28,7 +28,7 @@ func (s *tripSuite) HasNoPermissions(a string) {
panic("PENDING")
}

func (s *tripSuite) AttemptsToTripCircuit(a string, b string, c gocuke.DocString) {
func (s *tripSuite) AttemptsToTripCircuit(a, b string, c gocuke.DocString) {
panic("PENDING")
}

@@ -66,7 +66,7 @@ func (k Keeper) AllEvidence(c context.Context, req *types.QueryAllEvidenceReques
store := ctx.KVStore(k.storeKey)
evidenceStore := prefix.NewStore(store, types.KeyPrefixEvidence)

pageRes, err := query.Paginate(evidenceStore, req.Pagination, func(key []byte, value []byte) error {
pageRes, err := query.Paginate(evidenceStore, req.Pagination, func(key, value []byte) error {
result, err := k.UnmarshalEvidence(value)
if err != nil {
return err

@@ -78,7 +78,7 @@ func (q Keeper) Allowances(c context.Context, req *feegrant.QueryAllowancesReque
store := ctx.KVStore(q.storeKey)
grantsStore := prefix.NewStore(store, feegrant.FeeAllowancePrefixByGrantee(granteeAddr))

pageRes, err := query.Paginate(grantsStore, req.Pagination, func(key []byte, value []byte) error {
pageRes, err := query.Paginate(grantsStore, req.Pagination, func(key, value []byte) error {
var grant feegrant.Grant

if err := q.cdc.Unmarshal(value, &grant); err != nil {

@@ -162,7 +162,7 @@ func (k Keeper) GetAllowance(ctx sdk.Context, granter, grantee sdk.AccAddress) (
}

// getGrant returns entire grant between both accounts
func (k Keeper) getGrant(ctx sdk.Context, granter sdk.AccAddress, grantee sdk.AccAddress) (*feegrant.Grant, error) {
func (k Keeper) getGrant(ctx sdk.Context, granter, grantee sdk.AccAddress) (*feegrant.Grant, error) {
store := ctx.KVStore(k.storeKey)
key := feegrant.FeeAllowanceKey(granter, grantee)
bz := store.Get(key)

@@ -36,7 +36,7 @@ var (
//
// Key format:
// - <0x00><len(grantee_address_bytes)><grantee_address_bytes><len(granter_address_bytes)><granter_address_bytes>
func FeeAllowanceKey(granter sdk.AccAddress, grantee sdk.AccAddress) []byte {
func FeeAllowanceKey(granter, grantee sdk.AccAddress) []byte {
return append(FeeAllowancePrefixByGrantee(grantee), address.MustLengthPrefix(granter.Bytes())...)
}
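The key format in the comment is a length-prefixed concatenation. A rough sketch of that layout on raw byte slices; the 0x00 stands in for the grantee prefix byte, and the single-byte lengths mirror what address.MustLengthPrefix produces for addresses shorter than 256 bytes:

// feeAllowanceKeySketch shows the documented layout:
// <0x00><len(grantee)><grantee><len(granter)><granter>
func feeAllowanceKeySketch(granter, grantee []byte) []byte {
	key := []byte{0x00}
	key = append(key, byte(len(grantee)))
	key = append(key, grantee...)
	key = append(key, byte(len(granter)))
	key = append(key, granter...)
	return key
}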
@@ -36,7 +36,7 @@ func FeeAllowancePrefixQueue(exp *time.Time, granterAddrBz []byte) []byte {
//
// Key format:
// - <0x00><len(grantee_address_bytes)><grantee_address_bytes><len(granter_address_bytes)><granter_address_bytes>
func FeeAllowanceKey(granter sdk.AccAddress, grantee sdk.AccAddress) []byte {
func FeeAllowanceKey(granter, grantee sdk.AccAddress) []byte {
return append(FeeAllowancePrefixByGrantee(grantee), address.MustLengthPrefix(granter.Bytes())...)
}

@@ -75,13 +75,12 @@ func (msg MsgGrantAllowance) UnpackInterfaces(unpacker types.AnyUnpacker) error
// granter and grantee
//
//nolint:interfacer
func NewMsgRevokeAllowance(granter sdk.AccAddress, grantee sdk.AccAddress) MsgRevokeAllowance {
func NewMsgRevokeAllowance(granter, grantee sdk.AccAddress) MsgRevokeAllowance {
return MsgRevokeAllowance{Granter: granter.String(), Grantee: grantee.String()}
}

// ValidateBasic implements the sdk.Msg interface.
func (msg MsgRevokeAllowance) ValidateBasic() error {

return nil
}

@@ -89,7 +89,7 @@ func (k Keeper) NFTs(goCtx context.Context, r *nft.QueryNFTsRequest) (*nft.Query

switch {
case len(r.ClassId) > 0 && len(r.Owner) > 0:
if pageRes, err = query.Paginate(k.getClassStoreByOwner(ctx, owner, r.ClassId), r.Pagination, func(key []byte, _ []byte) error {
if pageRes, err = query.Paginate(k.getClassStoreByOwner(ctx, owner, r.ClassId), r.Pagination, func(key, _ []byte) error {
nft, has := k.GetNFT(ctx, r.ClassId, string(key))
if has {
nfts = append(nfts, &nft)
@@ -100,7 +100,7 @@ func (k Keeper) NFTs(goCtx context.Context, r *nft.QueryNFTsRequest) (*nft.Query
}
case len(r.ClassId) > 0 && len(r.Owner) == 0:
nftStore := k.getNFTStore(ctx, r.ClassId)
if pageRes, err = query.Paginate(nftStore, r.Pagination, func(_ []byte, value []byte) error {
if pageRes, err = query.Paginate(nftStore, r.Pagination, func(_, value []byte) error {
var nft nft.NFT
if err := k.cdc.Unmarshal(value, &nft); err != nil {
return err
@@ -111,7 +111,7 @@ func (k Keeper) NFTs(goCtx context.Context, r *nft.QueryNFTsRequest) (*nft.Query
return nil, err
}
case len(r.ClassId) == 0 && len(r.Owner) > 0:
if pageRes, err = query.Paginate(k.prefixStoreNftOfClassByOwner(ctx, owner), r.Pagination, func(key []byte, value []byte) error {
if pageRes, err = query.Paginate(k.prefixStoreNftOfClassByOwner(ctx, owner), r.Pagination, func(key, value []byte) error {
classID, nftID := parseNftOfClassByOwnerStoreKey(key)
if n, has := k.GetNFT(ctx, classID, nftID); has {
nfts = append(nfts, &n)
@@ -179,7 +179,7 @@ func (k Keeper) Classes(goCtx context.Context, r *nft.QueryClassesRequest) (*nft
classStore := prefix.NewStore(runtime.KVStoreAdapter(store), ClassKey)

var classes []*nft.Class
pageRes, err := query.Paginate(classStore, r.Pagination, func(_ []byte, value []byte) error {
pageRes, err := query.Paginate(classStore, r.Pagination, func(_, value []byte) error {
var class nft.Class
if err := k.cdc.Unmarshal(value, &class); err != nil {
return err

@@ -42,7 +42,7 @@ func (k Keeper) mintWithNoCheck(ctx context.Context, token nft.NFT, receiver sdk

// Burn defines a method for burning a nft from a specific account.
// Note: When the upper module uses this method, it needs to authenticate nft
func (k Keeper) Burn(ctx context.Context, classID string, nftID string) error {
func (k Keeper) Burn(ctx context.Context, classID, nftID string) error {
if !k.HasClass(ctx, classID) {
return errors.Wrap(nft.ErrClassNotExists, classID)
}
@@ -58,7 +58,7 @@ func (k Keeper) Burn(ctx context.Context, classID string, nftID string) error {
// burnWithNoCheck defines a method for burning a nft from a specific account.
// Note: this method does not check whether the class already exists in nft.
// The upper-layer application needs to check it when it needs to use it
func (k Keeper) burnWithNoCheck(ctx context.Context, classID string, nftID string) error {
func (k Keeper) burnWithNoCheck(ctx context.Context, classID, nftID string) error {
owner := k.GetOwner(ctx, classID, nftID)
nftStore := k.getNFTStore(ctx, classID)
nftStore.Delete([]byte(nftID))

@@ -6,6 +6,4 @@ const (
txCodespace = "tx"
)

var (
ErrUnknownField = errors.Register(txCodespace, 2, "unknown protobuf field")
)
var ErrUnknownField = errors.Register(txCodespace, 2, "unknown protobuf field")

@@ -14,13 +14,15 @@ import (

const bit11NonCritical = 1 << 10

var anyDesc = (&anypb.Any{}).ProtoReflect().Descriptor()
var anyFullName = anyDesc.FullName()
var (
anyDesc = (&anypb.Any{}).ProtoReflect().Descriptor()
anyFullName = anyDesc.FullName()
)

// RejectUnknownFieldsStrict operates by the same rules as RejectUnknownFields, but returns an error if any unknown
// non-critical fields are encountered.
func RejectUnknownFieldsStrict(bz []byte, msg protoreflect.MessageDescriptor, resolver protodesc.Resolver) error {
var _, err = RejectUnknownFields(bz, msg, false, resolver)
_, err := RejectUnknownFields(bz, msg, false, resolver)
return err
}

@@ -640,8 +640,8 @@ func TestRejectUnknownFieldsFlat(t *testing.T) {

c1 := new(testpb.Customer1)
c1Desc := c1.ProtoReflect().Descriptor()
//err = proto.Unmarshal(blob, c1)
//require.NoError(t, err)
// err = proto.Unmarshal(blob, c1)
// require.NoError(t, err)
gotErr := decode.RejectUnknownFieldsStrict(blob, c1Desc, ProtoResolver)
if tt.wantErr != nil {
require.EqualError(t, gotErr, tt.wantErr.Error())

@@ -22,7 +22,7 @@ const (
majorSimple byte = 7
)

func encodeFirstByte(major byte, extra byte) byte {
func encodeFirstByte(major, extra byte) byte {
return (major << 5) | extra&0x1F
}
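The shift-and-mask packs the 3-bit major type and the 5-bit additional info into a single byte. A quick worked example, assuming the standard CBOR rule that major type 7 with additional info 22 encodes null:

// (7 << 5) | (22 & 0x1F) = 0xE0 | 0x16 = 0xF6, the one-byte CBOR encoding of null.
b := encodeFirstByte(majorSimple, 22)
fmt.Printf("%#x\n", b) // 0xf6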