chore: run modernize (#24356)

This commit is contained in:
Alex | Interchain Labs 2025-04-03 10:42:20 -04:00 committed by GitHub
parent df07789843
commit 3c6deab626
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
185 changed files with 667 additions and 590 deletions

View File

@ -635,10 +635,10 @@ func TestABCI_FinalizeBlock_DeliverTx(t *testing.T) {
nBlocks := 3
txPerHeight := 5
for blockN := 0; blockN < nBlocks; blockN++ {
for blockN := range nBlocks {
txs := [][]byte{}
for i := 0; i < txPerHeight; i++ {
for i := range txPerHeight {
counter := int64(blockN*txPerHeight + i)
tx := newTxCounter(t, suite.txConfig, counter, counter)
@ -654,7 +654,7 @@ func TestABCI_FinalizeBlock_DeliverTx(t *testing.T) {
})
require.NoError(t, err)
for i := 0; i < txPerHeight; i++ {
for i := range txPerHeight {
counter := int64(blockN*txPerHeight + i)
require.True(t, res.TxResults[i].IsOK(), fmt.Sprintf("%v", res))
@ -762,7 +762,7 @@ func TestABCI_Query_SimulateTx(t *testing.T) {
baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), CounterServerImplGasMeterOnly{gasConsumed})
nBlocks := 3
for blockN := 0; blockN < nBlocks; blockN++ {
for blockN := range nBlocks {
count := int64(blockN + 1)
tx := newTxCounter(t, suite.txConfig, count, count)
@ -1093,7 +1093,7 @@ func TestABCI_MaxBlockGasLimits(t *testing.T) {
require.NoError(t, err)
// execute the transaction multiple times
for j := 0; j < tc.numDelivers; j++ {
for j := range tc.numDelivers {
_, result, err := suite.baseApp.SimDeliver(suite.txConfig.TxEncoder(), tx)
@ -1594,7 +1594,7 @@ func TestABCI_PrepareProposal_ReachedMaxBytes(t *testing.T) {
})
require.NoError(t, err)
for i := 0; i < 100; i++ {
for i := range 100 {
tx2 := newTxCounter(t, suite.txConfig, int64(i), int64(i))
err := pool.Insert(sdk.Context{}, tx2)
require.NoError(t, err)
@ -1970,7 +1970,7 @@ func TestABCI_Proposal_Reset_State_Between_Calls(t *testing.T) {
// Let's pretend something happened and PrepareProposal gets called many
// times, this must be safe to do.
for i := 0; i < 5; i++ {
for range 5 {
resPrepareProposal, err := suite.baseApp.PrepareProposal(&reqPrepareProposal)
require.NoError(t, err)
require.Equal(t, 0, len(resPrepareProposal.Txs))
@ -1984,7 +1984,7 @@ func TestABCI_Proposal_Reset_State_Between_Calls(t *testing.T) {
// Let's pretend something happened and ProcessProposal gets called many
// times, this must be safe to do.
for i := 0; i < 5; i++ {
for range 5 {
resProcessProposal, err := suite.baseApp.ProcessProposal(&reqProcessProposal)
require.NoError(t, err)
require.Equal(t, abci.ResponseProcessProposal_ACCEPT, resProcessProposal.Status)
@ -2080,7 +2080,7 @@ func TestBaseApp_VoteExtensions(t *testing.T) {
numVals := 12
privKeys := make([]secp256k1.PrivKey, numVals)
vals := make([]sdk.ConsAddress, numVals)
for i := 0; i < numVals; i++ {
for i := range numVals {
privKey := secp256k1.GenPrivKey()
privKeys[i] = privKey
@ -2190,7 +2190,7 @@ func TestBaseApp_VoteExtensions(t *testing.T) {
allVEs := [][]byte{}
// simulate getting 10 vote extensions from 10 validators
for i := 0; i < 10; i++ {
for range 10 {
ve, err := suite.baseApp.ExtendVote(context.TODO(), &abci.RequestExtendVote{Height: 1})
require.NoError(t, err)
allVEs = append(allVEs, ve.VoteExtension)
@ -2375,7 +2375,7 @@ func TestOptimisticExecution(t *testing.T) {
require.NoError(t, err)
// run 50 blocks
for i := 0; i < 50; i++ {
for range 50 {
tx := newTxCounter(t, suite.txConfig, 0, 1)
txBytes, err := suite.txConfig.TxEncoder()(tx)
require.NoError(t, err)

View File

@ -554,7 +554,7 @@ func (s *ABCIUtilsTestSuite) TestDefaultProposalHandler_NoOpMempoolTxSelection()
for name, tc := range testCases {
s.Run(name, func() {
// iterate multiple times to ensure the tx selector is cleared each time
for i := 0; i < 6; i++ {
for range 6 {
resp, err := handler(tc.ctx, tc.req)
s.Require().NoError(err)
s.Require().Len(resp.Txs, tc.expectedTxs)

View File

@ -698,7 +698,7 @@ func (app *BaseApp) cacheTxContext(ctx sdk.Context, txBytes []byte) (sdk.Context
if msCache.TracingEnabled() {
msCache = msCache.SetTracingContext(
storetypes.TraceContext(
map[string]interface{}{
map[string]any{
"txHash": fmt.Sprintf("%X", tmhash.Sum(txBytes)),
},
),

View File

@ -143,10 +143,10 @@ func NewBaseAppSuiteWithSnapshots(t *testing.T, cfg SnapshotsConfig, opts ...fun
_, _, addr := testdata.KeyTestPubAddr()
txs := [][]byte{}
for txNum := 0; txNum < cfg.blockTxs; txNum++ {
for range cfg.blockTxs {
var msgs []sdk.Msg
for msgNum := 0; msgNum < 100; msgNum++ {
key := []byte(fmt.Sprintf("%v", keyCounter))
for range 100 {
key := fmt.Appendf(nil, "%v", keyCounter)
value := make([]byte, 10000)
_, err := r.Read(value)
@ -537,7 +537,7 @@ func TestCustomRunTxPanicHandler(t *testing.T) {
})
require.NoError(t, err)
suite.baseApp.AddRunTxRecoveryHandler(func(recoveryObj interface{}) error {
suite.baseApp.AddRunTxRecoveryHandler(func(recoveryObj any) error {
err, ok := recoveryObj.(error)
if !ok {
return nil

View File

@ -35,7 +35,7 @@ type GRPCQueryRouter struct {
// serviceData represents a gRPC service, along with its handler.
type serviceData struct {
serviceDesc *grpc.ServiceDesc
handler interface{}
handler any
}
var _ gogogrpc.Server = &GRPCQueryRouter{}
@ -67,7 +67,7 @@ func (qrt *GRPCQueryRouter) Route(path string) GRPCQueryHandler {
//
// This functions PANICS:
// - if a protobuf service is registered twice.
func (qrt *GRPCQueryRouter) RegisterService(sd *grpc.ServiceDesc, handler interface{}) {
func (qrt *GRPCQueryRouter) RegisterService(sd *grpc.ServiceDesc, handler any) {
// adds a top-level query handler based on the gRPC service name
for _, method := range sd.Methods {
err := qrt.registerABCIQueryHandler(sd, method, handler)
@ -86,7 +86,7 @@ func (qrt *GRPCQueryRouter) RegisterService(sd *grpc.ServiceDesc, handler interf
})
}
func (qrt *GRPCQueryRouter) registerABCIQueryHandler(sd *grpc.ServiceDesc, method grpc.MethodDesc, handler interface{}) error {
func (qrt *GRPCQueryRouter) registerABCIQueryHandler(sd *grpc.ServiceDesc, method grpc.MethodDesc, handler any) error {
fqName := fmt.Sprintf("/%s/%s", sd.ServiceName, method.MethodName)
methodHandler := method.Handler
@ -106,7 +106,7 @@ func (qrt *GRPCQueryRouter) registerABCIQueryHandler(sd *grpc.ServiceDesc, metho
qrt.routes[fqName] = func(ctx sdk.Context, req *abci.RequestQuery) (*abci.ResponseQuery, error) {
// call the method handler from the service description with the handler object,
// a wrapped sdk.Context with proto-unmarshaled data from the ABCI request data
res, err := methodHandler(handler, ctx, func(i interface{}) error {
res, err := methodHandler(handler, ctx, func(i any) error {
return qrt.cdc.Unmarshal(req.Data, i)
}, nil)
if err != nil {
@ -133,7 +133,7 @@ func (qrt *GRPCQueryRouter) HybridHandlerByRequestName(name string) []func(ctx c
return qrt.hybridHandlers[name]
}
func (qrt *GRPCQueryRouter) registerHybridHandler(sd *grpc.ServiceDesc, method grpc.MethodDesc, handler interface{}) error {
func (qrt *GRPCQueryRouter) registerHybridHandler(sd *grpc.ServiceDesc, method grpc.MethodDesc, handler any) error {
// extract message name from method descriptor
inputName, err := protocompat.RequestFullNameFromMethodDesc(sd, method)
if err != nil {

View File

@ -35,7 +35,7 @@ func NewQueryServerTestHelper(ctx sdk.Context, interfaceRegistry types.Interface
}
// Invoke implements the grpc ClientConn.Invoke method
func (q *QueryServiceTestHelper) Invoke(_ gocontext.Context, method string, args, reply interface{}, _ ...grpc.CallOption) error {
func (q *QueryServiceTestHelper) Invoke(_ gocontext.Context, method string, args, reply any, _ ...grpc.CallOption) error {
querier := q.Route(method)
if querier == nil {
return fmt.Errorf("handler not found for %s", method)

View File

@ -182,13 +182,13 @@ func testQueryDataRacesSameHandler(t *testing.T, makeClientConn func(*baseapp.GR
n := 1000
ready := make(chan bool, n)
go func() {
for i := 0; i < n; i++ {
for range n {
<-ready
}
close(greenlight)
}()
for i := 0; i < n; i++ {
for range n {
wg.Add(1)
go func() {
defer wg.Done()

View File

@ -38,7 +38,7 @@ func (app *BaseApp) RegisterGRPCServer(server gogogrpc.Server) {
func (app *BaseApp) RegisterGRPCServerWithSkipCheckHeader(server gogogrpc.Server, skipCheckHeader bool) {
// Define an interceptor for all gRPC queries: this interceptor will create
// a new sdk.Context, and pass it into the query handler.
interceptor := func(grpcCtx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
interceptor := func(grpcCtx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
// If there's some metadata in the context, retrieve it.
md, ok := metadata.FromIncomingContext(grpcCtx)
if !ok {
@ -106,7 +106,7 @@ func (app *BaseApp) RegisterGRPCServerWithSkipCheckHeader(server gogogrpc.Server
methodHandler := method.Handler
newMethods[i] = grpc.MethodDesc{
MethodName: method.MethodName,
Handler: func(srv interface{}, ctx context.Context, dec func(interface{}) error, _ grpc.UnaryServerInterceptor) (interface{}, error) {
Handler: func(srv any, ctx context.Context, dec func(any) error, _ grpc.UnaryServerInterceptor) (any, error) {
return methodHandler(srv, ctx, dec, grpcmiddleware.ChainUnaryServer(
grpcrecovery.UnaryServerInterceptor(),
interceptor,

View File

@ -24,7 +24,7 @@ var (
type Handler = func(ctx context.Context, request, response protoiface.MessageV1) error
func MakeHybridHandler(cdc codec.BinaryCodec, sd *grpc.ServiceDesc, method grpc.MethodDesc, handler interface{}) (Handler, error) {
func MakeHybridHandler(cdc codec.BinaryCodec, sd *grpc.ServiceDesc, method grpc.MethodDesc, handler any) (Handler, error) {
methodFullName := protoreflect.FullName(fmt.Sprintf("%s.%s", sd.ServiceName, method.MethodName))
desc, err := gogoproto.HybridResolver.FindDescriptorByName(methodFullName)
if err != nil {
@ -193,7 +193,7 @@ func makeGogoHybridHandler(prefMethod protoreflect.MethodDescriptor, cdc codec.B
// the type. Since the decoder function is passed in by the concrete implementer the expected
// message where bytes are unmarshaled to, we can use that to determine the type.
func isProtov2(md grpc.MethodDesc) (isV2Type bool, err error) {
pullRequestType := func(msg interface{}) error {
pullRequestType := func(msg any) error {
typ := reflect.TypeOf(msg)
switch {
case typ.Implements(protov2Type):

View File

@ -68,7 +68,7 @@ func (msr *MsgServiceRouter) HandlerByTypeURL(typeURL string) MsgServiceHandler
// - if it is called before the service `Msg`s have been registered using
// RegisterInterfaces,
// - or if a service is being registered twice.
func (msr *MsgServiceRouter) RegisterService(sd *grpc.ServiceDesc, handler interface{}) {
func (msr *MsgServiceRouter) RegisterService(sd *grpc.ServiceDesc, handler any) {
// Adds a top-level query handler based on the gRPC service name.
for _, method := range sd.Methods {
err := msr.registerMsgServiceHandler(sd, method, handler)
@ -86,7 +86,7 @@ func (msr *MsgServiceRouter) HybridHandlerByMsgName(msgName string) func(ctx con
return msr.hybridHandlers[msgName]
}
func (msr *MsgServiceRouter) registerHybridHandler(sd *grpc.ServiceDesc, method grpc.MethodDesc, handler interface{}) error {
func (msr *MsgServiceRouter) registerHybridHandler(sd *grpc.ServiceDesc, method grpc.MethodDesc, handler any) error {
inputName, err := protocompat.RequestFullNameFromMethodDesc(sd, method)
if err != nil {
return err
@ -117,7 +117,7 @@ func (msr *MsgServiceRouter) registerHybridHandler(sd *grpc.ServiceDesc, method
return nil
}
func (msr *MsgServiceRouter) registerMsgServiceHandler(sd *grpc.ServiceDesc, method grpc.MethodDesc, handler interface{}) error {
func (msr *MsgServiceRouter) registerMsgServiceHandler(sd *grpc.ServiceDesc, method grpc.MethodDesc, handler any) error {
fqMethod := fmt.Sprintf("/%s/%s", sd.ServiceName, method.MethodName)
methodHandler := method.Handler
@ -126,7 +126,7 @@ func (msr *MsgServiceRouter) registerMsgServiceHandler(sd *grpc.ServiceDesc, met
// NOTE: This is how we pull the concrete request type for each handler for registering in the InterfaceRegistry.
// This approach is maybe a bit hacky, but less hacky than reflecting on the handler object itself.
// We use a no-op interceptor to avoid actually calling into the handler itself.
_, _ = methodHandler(nil, context.Background(), func(i interface{}) error {
_, _ = methodHandler(nil, context.Background(), func(i any) error {
msg, ok := i.(sdk.Msg)
if !ok {
// We panic here because there is no other alternative and the app cannot be initialized correctly
@ -170,7 +170,7 @@ func (msr *MsgServiceRouter) registerMsgServiceHandler(sd *grpc.ServiceDesc, met
msr.routes[requestTypeName] = func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) {
ctx = ctx.WithEventManager(sdk.NewEventManager())
interceptor := func(goCtx context.Context, _ interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
interceptor := func(goCtx context.Context, _ any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
goCtx = context.WithValue(goCtx, sdk.SdkContextKey, ctx)
return handler(goCtx, msg)
}
@ -215,7 +215,7 @@ func (msr *MsgServiceRouter) SetInterfaceRegistry(interfaceRegistry codectypes.I
msr.interfaceRegistry = interfaceRegistry
}
func noopDecoder(_ interface{}) error { return nil }
func noopInterceptor(_ context.Context, _ interface{}, _ *grpc.UnaryServerInfo, _ grpc.UnaryHandler) (interface{}, error) {
func noopDecoder(_ any) error { return nil }
func noopInterceptor(_ context.Context, _ any, _ *grpc.UnaryServerInfo, _ grpc.UnaryHandler) (any, error) {
return nil, nil
}

View File

@ -52,11 +52,11 @@ var (
)
type LegacyParamStore interface {
Get(ctx sdk.Context, key []byte, ptr interface{})
Get(ctx sdk.Context, key []byte, ptr any)
Has(ctx sdk.Context, key []byte) bool
}
func ValidateBlockParams(i interface{}) error {
func ValidateBlockParams(i any) error {
v, ok := i.(cmtproto.BlockParams)
if !ok {
return fmt.Errorf("invalid parameter type: %T", i)
@ -73,7 +73,7 @@ func ValidateBlockParams(i interface{}) error {
return nil
}
func ValidateEvidenceParams(i interface{}) error {
func ValidateEvidenceParams(i any) error {
v, ok := i.(cmtproto.EvidenceParams)
if !ok {
return fmt.Errorf("invalid parameter type: %T", i)
@ -94,7 +94,7 @@ func ValidateEvidenceParams(i interface{}) error {
return nil
}
func ValidateValidatorParams(i interface{}) error {
func ValidateValidatorParams(i any) error {
v, ok := i.(cmtproto.ValidatorParams)
if !ok {
return fmt.Errorf("invalid parameter type: %T", i)

View File

@ -14,16 +14,16 @@ import (
// RecoveryHandler handles recovery() object.
// Return a non-nil error if recoveryObj was processed.
// Return nil if recoveryObj was not processed.
type RecoveryHandler func(recoveryObj interface{}) error
type RecoveryHandler func(recoveryObj any) error
// recoveryMiddleware is wrapper for RecoveryHandler to create chained recovery handling.
// returns (recoveryMiddleware, nil) if recoveryObj was not processed and should be passed to the next middleware in chain.
// returns (nil, error) if recoveryObj was processed and middleware chain processing should be stopped.
type recoveryMiddleware func(recoveryObj interface{}) (recoveryMiddleware, error)
type recoveryMiddleware func(recoveryObj any) (recoveryMiddleware, error)
// processRecovery processes recoveryMiddleware chain for recovery() object.
// Chain processing stops on non-nil error or when chain is processed.
func processRecovery(recoveryObj interface{}, middleware recoveryMiddleware) error {
func processRecovery(recoveryObj any, middleware recoveryMiddleware) error {
if middleware == nil {
return nil
}
@ -38,7 +38,7 @@ func processRecovery(recoveryObj interface{}, middleware recoveryMiddleware) err
// newRecoveryMiddleware creates a RecoveryHandler middleware.
func newRecoveryMiddleware(handler RecoveryHandler, next recoveryMiddleware) recoveryMiddleware {
return func(recoveryObj interface{}) (recoveryMiddleware, error) {
return func(recoveryObj any) (recoveryMiddleware, error) {
if err := handler(recoveryObj); err != nil {
return nil, err
}
@ -49,7 +49,7 @@ func newRecoveryMiddleware(handler RecoveryHandler, next recoveryMiddleware) rec
// newOutOfGasRecoveryMiddleware creates a standard OutOfGas recovery middleware for app.runTx method.
func newOutOfGasRecoveryMiddleware(gasWanted uint64, ctx sdk.Context, next recoveryMiddleware) recoveryMiddleware {
handler := func(recoveryObj interface{}) error {
handler := func(recoveryObj any) error {
err, ok := recoveryObj.(storetypes.ErrorOutOfGas)
if !ok {
return nil
@ -68,7 +68,7 @@ func newOutOfGasRecoveryMiddleware(gasWanted uint64, ctx sdk.Context, next recov
// newDefaultRecoveryMiddleware creates a default (last in chain) recovery middleware for app.runTx method.
func newDefaultRecoveryMiddleware() recoveryMiddleware {
handler := func(recoveryObj interface{}) error {
handler := func(recoveryObj any) error {
return errorsmod.Wrap(
sdkerrors.ErrPanic, fmt.Sprintf(
"recovered: %v\nstack:\n%v", recoveryObj, string(debug.Stack()),

View File

@ -14,7 +14,7 @@ func TestRecoveryChain(t *testing.T) {
}
createHandler := func(id int, handle bool) RecoveryHandler {
return func(_ interface{}) error {
return func(_ any) error {
if handle {
return createError(id)
}

View File

@ -18,7 +18,7 @@ func TestNilCmsCheckBeforeSeal(t *testing.T) {
// 1. Invoking app.Init with a nil cms MUST not seal the app
// and should return an error firstly, which can later be reversed.
for i := 0; i < 10; i++ { // N times, the app shouldn't be sealed.
for range 10 { // N times, the app shouldn't be sealed.
err := app.Init()
require.Error(t, err)
require.Contains(t, err.Error(), "commit multi-store must not be nil")

View File

@ -49,7 +49,7 @@ func (app *BaseApp) RegisterStreamingServices(appOpts servertypes.AppOptions, ke
func (app *BaseApp) registerStreamingPlugin(
appOpts servertypes.AppOptions,
keys map[string]*storetypes.KVStoreKey,
streamingPlugin interface{},
streamingPlugin any,
) error {
v, ok := streamingPlugin.(storetypes.ABCIListener)
if !ok {

View File

@ -64,7 +64,7 @@ func TestABCI_MultiListener_StateChanges(t *testing.T) {
nBlocks := 3
txPerHeight := 5
for blockN := 0; blockN < nBlocks; blockN++ {
for blockN := range nBlocks {
txs := [][]byte{}
var expectedChangeSet []*storetypes.StoreKVPair
@ -73,15 +73,15 @@ func TestABCI_MultiListener_StateChanges(t *testing.T) {
_, err := suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{Height: int64(blockN) + 1, Txs: txs})
require.NoError(t, err)
for i := 0; i < txPerHeight; i++ {
for i := range txPerHeight {
counter := int64(blockN*txPerHeight + i)
tx := newTxCounter(t, suite.txConfig, counter, counter)
txBytes, err := suite.txConfig.TxEncoder()(tx)
require.NoError(t, err)
sKey := []byte(fmt.Sprintf("distKey%d", i))
sVal := []byte(fmt.Sprintf("distVal%d", i))
sKey := fmt.Appendf(nil, "distKey%d", i)
sVal := fmt.Appendf(nil, "distVal%d", i)
store := getFinalizeBlockStateCtx(suite.baseApp).KVStore(distKey1)
store.Set(sKey, sVal)
@ -134,7 +134,7 @@ func Test_Ctx_with_StreamingManager(t *testing.T) {
nBlocks := 2
for blockN := 0; blockN < nBlocks; blockN++ {
for blockN := range nBlocks {
_, err := suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{Height: int64(blockN) + 1})
require.NoError(t, err)

View File

@ -340,7 +340,7 @@ func (ctx Context) PrintProto(toPrint proto.Message) error {
// PrintObjectLegacy is a variant of PrintProto that doesn't require a proto.Message type
// and uses amino JSON encoding.
// Deprecated: It will be removed in the near future!
func (ctx Context) PrintObjectLegacy(toPrint interface{}) error {
func (ctx Context) PrintObjectLegacy(toPrint any) error {
out, err := ctx.LegacyAmino.MarshalJSON(toPrint)
if err != nil {
return err

View File

@ -29,7 +29,7 @@ var _ gogogrpc.ClientConn = Context{}
var fallBackCodec = codec.NewProtoCodec(types.NewInterfaceRegistry())
// Invoke implements the grpc ClientConn.Invoke method
func (ctx Context) Invoke(grpcCtx gocontext.Context, method string, req, reply interface{}, opts ...grpc.CallOption) (err error) {
func (ctx Context) Invoke(grpcCtx gocontext.Context, method string, req, reply any, opts ...grpc.CallOption) (err error) {
// Two things can happen here:
// 1. either we're broadcasting a Tx, in which call we call CometBFT's broadcast endpoint directly,
// 2-1. or we are querying for state, in which case we call grpc if grpc client set.

View File

@ -18,7 +18,7 @@ import (
func generatePubKeys(n int) []types.PubKey {
pks := make([]types.PubKey, n)
for i := 0; i < n; i++ {
for i := range n {
pks[i] = secp256k1.GenPrivKey().PubKey()
}
return pks

View File

@ -38,7 +38,7 @@ type mockContext struct {
wantErr bool
}
func (m mockContext) Invoke(_ context.Context, _ string, _, reply interface{}, _ ...grpc.CallOption) (err error) {
func (m mockContext) Invoke(_ context.Context, _ string, _, reply any, _ ...grpc.CallOption) (err error) {
if m.wantErr {
return fmt.Errorf("mock err")
}

View File

@ -37,7 +37,7 @@ func RegisterEvidences(cdc *LegacyAmino) {
// MarshalJSONIndent provides a utility for indented JSON encoding of an object
// via an Amino codec. It returns an error if it cannot serialize or indent as
// JSON.
func MarshalJSONIndent(cdc *LegacyAmino, obj interface{}) ([]byte, error) {
func MarshalJSONIndent(cdc *LegacyAmino, obj any) ([]byte, error) {
bz, err := cdc.MarshalJSON(obj)
if err != nil {
return nil, err
@ -51,7 +51,7 @@ func MarshalJSONIndent(cdc *LegacyAmino, obj interface{}) ([]byte, error) {
}
// MustMarshalJSONIndent executes MarshalJSONIndent except it panics upon failure.
func MustMarshalJSONIndent(cdc *LegacyAmino, obj interface{}) []byte {
func MustMarshalJSONIndent(cdc *LegacyAmino, obj any) []byte {
bz, err := MarshalJSONIndent(cdc, obj)
if err != nil {
panic(fmt.Sprintf("failed to marshal JSON: %s", err))
@ -60,23 +60,23 @@ func MustMarshalJSONIndent(cdc *LegacyAmino, obj interface{}) []byte {
return bz
}
func (cdc *LegacyAmino) marshalAnys(o interface{}) error {
func (cdc *LegacyAmino) marshalAnys(o any) error {
return types.UnpackInterfaces(o, types.AminoPacker{Cdc: cdc.Amino})
}
func (cdc *LegacyAmino) unmarshalAnys(o interface{}) error {
func (cdc *LegacyAmino) unmarshalAnys(o any) error {
return types.UnpackInterfaces(o, types.AminoUnpacker{Cdc: cdc.Amino})
}
func (cdc *LegacyAmino) jsonMarshalAnys(o interface{}) error {
func (cdc *LegacyAmino) jsonMarshalAnys(o any) error {
return types.UnpackInterfaces(o, types.AminoJSONPacker{Cdc: cdc.Amino})
}
func (cdc *LegacyAmino) jsonUnmarshalAnys(o interface{}) error {
func (cdc *LegacyAmino) jsonUnmarshalAnys(o any) error {
return types.UnpackInterfaces(o, types.AminoJSONUnpacker{Cdc: cdc.Amino})
}
func (cdc *LegacyAmino) Marshal(o interface{}) ([]byte, error) {
func (cdc *LegacyAmino) Marshal(o any) ([]byte, error) {
err := cdc.marshalAnys(o)
if err != nil {
return nil, err
@ -84,7 +84,7 @@ func (cdc *LegacyAmino) Marshal(o interface{}) ([]byte, error) {
return cdc.Amino.MarshalBinaryBare(o)
}
func (cdc *LegacyAmino) MustMarshal(o interface{}) []byte {
func (cdc *LegacyAmino) MustMarshal(o any) []byte {
bz, err := cdc.Marshal(o)
if err != nil {
panic(err)
@ -92,7 +92,7 @@ func (cdc *LegacyAmino) MustMarshal(o interface{}) []byte {
return bz
}
func (cdc *LegacyAmino) MarshalLengthPrefixed(o interface{}) ([]byte, error) {
func (cdc *LegacyAmino) MarshalLengthPrefixed(o any) ([]byte, error) {
err := cdc.marshalAnys(o)
if err != nil {
return nil, err
@ -100,7 +100,7 @@ func (cdc *LegacyAmino) MarshalLengthPrefixed(o interface{}) ([]byte, error) {
return cdc.Amino.MarshalBinaryLengthPrefixed(o)
}
func (cdc *LegacyAmino) MustMarshalLengthPrefixed(o interface{}) []byte {
func (cdc *LegacyAmino) MustMarshalLengthPrefixed(o any) []byte {
bz, err := cdc.MarshalLengthPrefixed(o)
if err != nil {
panic(err)
@ -108,7 +108,7 @@ func (cdc *LegacyAmino) MustMarshalLengthPrefixed(o interface{}) []byte {
return bz
}
func (cdc *LegacyAmino) Unmarshal(bz []byte, ptr interface{}) error {
func (cdc *LegacyAmino) Unmarshal(bz []byte, ptr any) error {
err := cdc.Amino.UnmarshalBinaryBare(bz, ptr)
if err != nil {
return err
@ -116,14 +116,14 @@ func (cdc *LegacyAmino) Unmarshal(bz []byte, ptr interface{}) error {
return cdc.unmarshalAnys(ptr)
}
func (cdc *LegacyAmino) MustUnmarshal(bz []byte, ptr interface{}) {
func (cdc *LegacyAmino) MustUnmarshal(bz []byte, ptr any) {
err := cdc.Unmarshal(bz, ptr)
if err != nil {
panic(err)
}
}
func (cdc *LegacyAmino) UnmarshalLengthPrefixed(bz []byte, ptr interface{}) error {
func (cdc *LegacyAmino) UnmarshalLengthPrefixed(bz []byte, ptr any) error {
err := cdc.Amino.UnmarshalBinaryLengthPrefixed(bz, ptr)
if err != nil {
return err
@ -131,7 +131,7 @@ func (cdc *LegacyAmino) UnmarshalLengthPrefixed(bz []byte, ptr interface{}) erro
return cdc.unmarshalAnys(ptr)
}
func (cdc *LegacyAmino) MustUnmarshalLengthPrefixed(bz []byte, ptr interface{}) {
func (cdc *LegacyAmino) MustUnmarshalLengthPrefixed(bz []byte, ptr any) {
err := cdc.UnmarshalLengthPrefixed(bz, ptr)
if err != nil {
panic(err)
@ -139,7 +139,7 @@ func (cdc *LegacyAmino) MustUnmarshalLengthPrefixed(bz []byte, ptr interface{})
}
// MarshalJSON implements codec.Codec interface
func (cdc *LegacyAmino) MarshalJSON(o interface{}) ([]byte, error) {
func (cdc *LegacyAmino) MarshalJSON(o any) ([]byte, error) {
err := cdc.jsonMarshalAnys(o)
if err != nil {
return nil, err
@ -147,7 +147,7 @@ func (cdc *LegacyAmino) MarshalJSON(o interface{}) ([]byte, error) {
return cdc.Amino.MarshalJSON(o)
}
func (cdc *LegacyAmino) MustMarshalJSON(o interface{}) []byte {
func (cdc *LegacyAmino) MustMarshalJSON(o any) []byte {
bz, err := cdc.MarshalJSON(o)
if err != nil {
panic(err)
@ -156,7 +156,7 @@ func (cdc *LegacyAmino) MustMarshalJSON(o interface{}) []byte {
}
// UnmarshalJSON implements codec.Codec interface
func (cdc *LegacyAmino) UnmarshalJSON(bz []byte, ptr interface{}) error {
func (cdc *LegacyAmino) UnmarshalJSON(bz []byte, ptr any) error {
err := cdc.Amino.UnmarshalJSON(bz, ptr)
if err != nil {
return err
@ -164,26 +164,26 @@ func (cdc *LegacyAmino) UnmarshalJSON(bz []byte, ptr interface{}) error {
return cdc.jsonUnmarshalAnys(ptr)
}
func (cdc *LegacyAmino) MustUnmarshalJSON(bz []byte, ptr interface{}) {
func (cdc *LegacyAmino) MustUnmarshalJSON(bz []byte, ptr any) {
err := cdc.UnmarshalJSON(bz, ptr)
if err != nil {
panic(err)
}
}
func (*LegacyAmino) UnpackAny(*types.Any, interface{}) error {
func (*LegacyAmino) UnpackAny(*types.Any, any) error {
return errors.New("AminoCodec can't handle unpack protobuf Any's")
}
func (cdc *LegacyAmino) RegisterInterface(ptr interface{}, iopts *amino.InterfaceOptions) {
func (cdc *LegacyAmino) RegisterInterface(ptr any, iopts *amino.InterfaceOptions) {
cdc.Amino.RegisterInterface(ptr, iopts)
}
func (cdc *LegacyAmino) RegisterConcrete(o interface{}, name string, copts *amino.ConcreteOptions) {
func (cdc *LegacyAmino) RegisterConcrete(o any, name string, copts *amino.ConcreteOptions) {
cdc.Amino.RegisterConcrete(o, name, copts)
}
func (cdc *LegacyAmino) MarshalJSONIndent(o interface{}, prefix, indent string) ([]byte, error) {
func (cdc *LegacyAmino) MarshalJSONIndent(o any, prefix, indent string) ([]byte, error) {
err := cdc.jsonMarshalAnys(o)
if err != nil {
panic(err)

View File

@ -104,7 +104,7 @@ func (ac *AminoCodec) MarshalInterface(i proto.Message) ([]byte, error) {
//
// var x MyInterface
// err := cdc.UnmarshalInterface(bz, &x)
func (ac *AminoCodec) UnmarshalInterface(bz []byte, ptr interface{}) error {
func (ac *AminoCodec) UnmarshalInterface(bz []byte, ptr any) error {
return ac.LegacyAmino.Unmarshal(bz, ptr)
}
@ -126,6 +126,6 @@ func (ac *AminoCodec) MarshalInterfaceJSON(i proto.Message) ([]byte, error) {
//
// var x MyInterface
// err := cdc.UnmarshalInterfaceJSON(bz, &x)
func (ac *AminoCodec) UnmarshalInterfaceJSON(bz []byte, ptr interface{}) error {
func (ac *AminoCodec) UnmarshalInterfaceJSON(bz []byte, ptr any) error {
return ac.LegacyAmino.UnmarshalJSON(bz, ptr)
}

View File

@ -72,7 +72,7 @@ type (
// UnmarshalInterface is a helper method which will parse binary encoded data
// into `Any` and unpack any into the `ptr`. It fails if the target interface type
// is not registered in codec, or is not compatible with the serialized data
UnmarshalInterface(bz []byte, ptr interface{}) error
UnmarshalInterface(bz []byte, ptr any) error
types.AnyUnpacker
}
@ -88,7 +88,7 @@ type (
// UnmarshalInterfaceJSON is a helper method which will parse JSON encoded data
// into `Any` and unpack any into the `ptr`. It fails if the target interface type
// is not registered in codec, or is not compatible with the serialized data
UnmarshalInterfaceJSON(bz []byte, ptr interface{}) error
UnmarshalInterfaceJSON(bz []byte, ptr any) error
// UnmarshalJSON parses the data encoded with MarshalJSON method and stores the result
// in the value pointed to by v.

View File

@ -13,7 +13,7 @@ import (
type interfaceMarshaler struct {
marshal func(i proto.Message) ([]byte, error)
unmarshal func(bz []byte, ptr interface{}) error
unmarshal func(bz []byte, ptr any) error
}
func testInterfaceMarshaling(require *require.Assertions, cdc interfaceMarshaler, isAminoBin bool) {

View File

@ -248,7 +248,7 @@ func (pc *ProtoCodec) MarshalInterface(i gogoproto.Message) ([]byte, error) {
//
// var x MyInterface
// err := cdc.UnmarshalInterface(bz, &x)
func (pc *ProtoCodec) UnmarshalInterface(bz []byte, ptr interface{}) error {
func (pc *ProtoCodec) UnmarshalInterface(bz []byte, ptr any) error {
any := &types.Any{}
err := pc.Unmarshal(bz, any)
if err != nil {
@ -278,7 +278,7 @@ func (pc *ProtoCodec) MarshalInterfaceJSON(x gogoproto.Message) ([]byte, error)
//
// var x MyInterface // must implement proto.Message
// err := cdc.UnmarshalInterfaceJSON(&x, bz)
func (pc *ProtoCodec) UnmarshalInterfaceJSON(bz []byte, iface interface{}) error {
func (pc *ProtoCodec) UnmarshalInterfaceJSON(bz []byte, iface any) error {
any := &types.Any{}
err := pc.UnmarshalJSON(bz, any)
if err != nil {
@ -290,7 +290,7 @@ func (pc *ProtoCodec) UnmarshalInterfaceJSON(bz []byte, iface interface{}) error
// UnpackAny implements AnyUnpacker.UnpackAny method,
// it unpacks the value in any to the interface pointer passed in as
// iface.
func (pc *ProtoCodec) UnpackAny(any *types.Any, iface interface{}) error {
func (pc *ProtoCodec) UnpackAny(any *types.Any, iface any) error {
return pc.interfaceRegistry.UnpackAny(any, iface)
}
@ -342,7 +342,7 @@ type grpcProtoCodec struct {
cdc *ProtoCodec
}
func (g grpcProtoCodec) Marshal(v interface{}) ([]byte, error) {
func (g grpcProtoCodec) Marshal(v any) ([]byte, error) {
switch m := v.(type) {
case proto.Message:
protov2MarshalOpts := proto.MarshalOptions{Deterministic: true}
@ -354,7 +354,7 @@ func (g grpcProtoCodec) Marshal(v interface{}) ([]byte, error) {
}
}
func (g grpcProtoCodec) Unmarshal(data []byte, v interface{}) error {
func (g grpcProtoCodec) Unmarshal(data []byte, v any) error {
switch m := v.(type) {
case proto.Message:
return proto.Unmarshal(data, m)
@ -369,7 +369,7 @@ func (g grpcProtoCodec) Name() string {
return "cosmos-sdk-grpc-codec"
}
func assertNotNil(i interface{}) error {
func assertNotNil(i any) error {
if i == nil {
return errors.New("can't marshal <nil> value")
}

View File

@ -121,7 +121,7 @@ func TestProtoCodecMarshal(t *testing.T) {
// Emulate grpc server implementation
// https://github.com/grpc/grpc-go/blob/b1d7f56b81b7902d871111b82dec6ba45f854ede/rpc_util.go#L590
func grpcServerEncode(c encoding.Codec, msg interface{}) ([]byte, error) {
func grpcServerEncode(c encoding.Codec, msg any) ([]byte, error) {
if msg == nil { // NOTE: typed nils will not be caught by this check
return nil, nil
}

75
codec/types/any_test.go Normal file
View File

@ -0,0 +1,75 @@
package types_test
import (
"fmt"
"runtime"
"testing"
"github.com/cosmos/gogoproto/proto"
"github.com/cosmos/cosmos-sdk/codec/types"
"github.com/cosmos/cosmos-sdk/testutil/testdata"
)
// errOnMarshal embeds testdata.Dog but overrides the gogoproto marshal
// hook (see XXX_Marshal below) so that marshaling always fails. It lets
// tests exercise the error path of types.NewAnyWithValue.
type errOnMarshal struct {
	testdata.Dog
}
// Compile-time assertion that *errOnMarshal satisfies proto.Message.
var _ proto.Message = (*errOnMarshal)(nil)

// errAlways is the sentinel error produced by errOnMarshal.XXX_Marshal.
// NOTE(review): the format string has no verbs, so errors.New would be
// the idiomatic constructor here (staticcheck S1039) — left unchanged.
var errAlways = fmt.Errorf("always erroring")
// XXX_Marshal implements the gogoproto marshaling hook. It never
// succeeds: every call yields a nil buffer and errAlways, simulating a
// message that cannot be marshaled.
func (eom *errOnMarshal) XXX_Marshal(dAtA []byte, deterministic bool) ([]byte, error) {
	return nil, errAlways
}
var eom = &errOnMarshal{}
// Ensure that returning an error doesn't suddenly allocate and waste bytes.
// See https://github.com/cosmos/cosmos-sdk/issues/8537
func TestNewAnyWithCustomTypeURLWithErrorNoAllocation(t *testing.T) {
	// This test continues to fail inconsistently.
	//
	// Example: https://github.com/cosmos/cosmos-sdk/pull/9246/checks?check_run_id=2643313958#step:6:118
	// Ref: https://github.com/cosmos/cosmos-sdk/issues/9010
	t.SkipNow()
	// make sure we're not in the middle of a GC.
	runtime.GC()
	// Snapshot heap stats immediately before and after the call under
	// test; any growth in HeapAlloc is attributed to NewAnyWithValue.
	var ms1, ms2 runtime.MemStats
	runtime.ReadMemStats(&ms1)
	any, err := types.NewAnyWithValue(eom)
	runtime.ReadMemStats(&ms2)
	// Ensure that no fresh allocation was made.
	// NOTE(review): HeapAlloc is uint64, so if a GC ran between the two
	// reads the subtraction wraps around to a huge positive "diff".
	// Harmless only while the test is skipped above — confirm before
	// re-enabling.
	if diff := ms2.HeapAlloc - ms1.HeapAlloc; diff > 0 {
		t.Errorf("Unexpected allocation of %d bytes", diff)
	}
	if err == nil {
		t.Fatal("err wasn't returned")
	}
	if any != nil {
		t.Fatalf("Unexpectedly got a non-nil Any value: %v", any)
	}
}
var sink any
// BenchmarkNewAnyWithCustomTypeURLWithErrorReturned measures the error
// path of types.NewAnyWithValue, with allocation reporting enabled so
// regressions of the zero-allocation guarantee show up in benchmark
// output (see the test above).
func BenchmarkNewAnyWithCustomTypeURLWithErrorReturned(b *testing.B) {
	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		got, err := types.NewAnyWithValue(eom)
		if err == nil {
			b.Fatal("err wasn't returned")
		}
		if got != nil {
			b.Fatalf("Unexpectedly got a non-nil Any value: %v", got)
		}
		// Store into the package-level sink so the call cannot be
		// optimized away. got is a typed nil pointer, so the interface
		// value assigned to sink is non-nil.
		sink = got
	}
	if sink == nil {
		b.Fatal("benchmark didn't run")
	}
	// Reset the sink so the retained value doesn't outlive the benchmark.
	sink = (any)(nil)
}

View File

@ -19,5 +19,5 @@ type AminoJSONUnpacker = gogoany.AminoJSONUnpacker
// AminoJSONPacker is an alias for github.com/cosmos/gogoproto/types/any.AminoJSONPacker.
type AminoJSONPacker = gogoany.AminoJSONPacker
// ProtoUnpacker is an alias for github.com/cosmos/gogoproto/types/any.ProtoJSONPacker.
// ProtoJSONPacker is an alias for github.com/cosmos/gogoproto/types/any.ProtoJSONPacker.
type ProtoJSONPacker = gogoany.ProtoJSONPacker

View File

@ -54,14 +54,14 @@ type InterfaceRegistry interface {
//
// Ex:
// registry.RegisterInterface("cosmos.base.v1beta1.Msg", (*sdk.Msg)(nil))
RegisterInterface(protoName string, iface interface{}, impls ...proto.Message)
RegisterInterface(protoName string, iface any, impls ...proto.Message)
// RegisterImplementations registers impls as concrete implementations of
// the interface iface.
//
// Ex:
// registry.RegisterImplementations((*sdk.Msg)(nil), &MsgSend{}, &MsgMultiSend{})
RegisterImplementations(iface interface{}, impls ...proto.Message)
RegisterImplementations(iface any, impls ...proto.Message)
// ListAllInterfaces list the type URLs of all registered interfaces.
ListAllInterfaces() []string
@ -71,7 +71,7 @@ type InterfaceRegistry interface {
ListImplementations(ifaceTypeURL string) []string
// EnsureRegistered ensures there is a registered interface for the given concrete type.
EnsureRegistered(iface interface{}) error
EnsureRegistered(iface any) error
protodesc.Resolver
@ -169,7 +169,7 @@ func NewInterfaceRegistryWithOptions(options InterfaceRegistryOptions) (Interfac
}, nil
}
func (registry *interfaceRegistry) RegisterInterface(protoName string, iface interface{}, impls ...proto.Message) {
func (registry *interfaceRegistry) RegisterInterface(protoName string, iface any, impls ...proto.Message) {
typ := reflect.TypeOf(iface)
if typ.Elem().Kind() != reflect.Interface {
panic(fmt.Errorf("%T is not an interface type", iface))
@ -182,7 +182,7 @@ func (registry *interfaceRegistry) RegisterInterface(protoName string, iface int
// EnsureRegistered ensures there is a registered interface for the given concrete type.
//
// Returns an error if not, and nil if so.
func (registry *interfaceRegistry) EnsureRegistered(impl interface{}) error {
func (registry *interfaceRegistry) EnsureRegistered(impl any) error {
if reflect.ValueOf(impl).Kind() != reflect.Ptr {
return fmt.Errorf("%T is not a pointer", impl)
}
@ -199,7 +199,7 @@ func (registry *interfaceRegistry) EnsureRegistered(impl interface{}) error {
//
// This function PANICs if different concrete types are registered under the
// same typeURL.
func (registry *interfaceRegistry) RegisterImplementations(iface interface{}, impls ...proto.Message) {
func (registry *interfaceRegistry) RegisterImplementations(iface any, impls ...proto.Message) {
for _, impl := range impls {
typeURL := MsgTypeURL(impl)
registry.registerImpl(iface, typeURL, impl)
@ -211,7 +211,7 @@ func (registry *interfaceRegistry) RegisterImplementations(iface interface{}, im
//
// This function PANICs if different concrete types are registered under the
// same typeURL.
func (registry *interfaceRegistry) RegisterCustomTypeURL(iface interface{}, typeURL string, impl proto.Message) {
func (registry *interfaceRegistry) RegisterCustomTypeURL(iface any, typeURL string, impl proto.Message) {
registry.registerImpl(iface, typeURL, impl)
}
@ -220,7 +220,7 @@ func (registry *interfaceRegistry) RegisterCustomTypeURL(iface interface{}, type
//
// This function PANICs if different concrete types are registered under the
// same typeURL.
func (registry *interfaceRegistry) registerImpl(iface interface{}, typeURL string, impl proto.Message) {
func (registry *interfaceRegistry) registerImpl(iface any, typeURL string, impl proto.Message) {
ityp := reflect.TypeOf(iface).Elem()
imap, found := registry.interfaceImpls[ityp]
if !found {
@ -283,7 +283,7 @@ func (registry *interfaceRegistry) ListImplementations(ifaceName string) []strin
return keys
}
func (registry *interfaceRegistry) UnpackAny(any *Any, iface interface{}) error {
func (registry *interfaceRegistry) UnpackAny(any *Any, iface any) error {
unpacker := &statefulUnpacker{
registry: registry,
maxDepth: MaxUnpackAnyRecursionDepth,

View File

@ -22,7 +22,7 @@ func base64Encode(src []byte) []byte {
func base64Decode(src []byte) ([]byte, error) {
numOfEquals := 4 - (len(src) % 4)
for i := 0; i < numOfEquals; i++ {
for range numOfEquals {
src = append(src, '=')
}

View File

@ -191,7 +191,7 @@ func bcrypt(password []byte, cost uint32, salt []byte) ([]byte, error) {
}
for i := 0; i < 24; i += 8 {
for j := 0; j < 64; j++ {
for range 64 {
c.Encrypt(cipherData[i:i+8], cipherData[i:i+8])
}
}
@ -218,9 +218,8 @@ func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cip
return nil, err
}
var i, rounds uint64
rounds = 1 << cost
for i = 0; i < rounds; i++ {
var rounds uint64 = 1 << cost
for range rounds {
blowfish.ExpandKey(ckey, c)
blowfish.ExpandKey(csalt, c)
}
@ -239,7 +238,7 @@ func (p *hashed) Hash() []byte {
}
arr[n] = '$'
n++
copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost)))
copy(arr[n:], fmt.Appendf(nil, "%02d", p.cost))
n += 2
arr[n] = '$'
n++

View File

@ -139,7 +139,7 @@ func TestMarshalAmino(t *testing.T) {
testCases := []struct {
desc string
msg codec.AminoMarshaler
typ interface{}
typ any
expBinary []byte
expJSON string
}{
@ -195,9 +195,9 @@ func TestMarshalAmino_BackwardsCompatibility(t *testing.T) {
testCases := []struct {
desc string
tmKey interface{}
ourKey interface{}
marshalFn func(o interface{}) ([]byte, error)
tmKey any
ourKey any
marshalFn func(o any) ([]byte, error)
}{
{
"ed25519 private key, binary",

View File

@ -68,7 +68,7 @@ func (m *LegacyAminoPubKey) VerifyMultisignature(getSignBytes multisigtypes.GetS
}
// index in the list of signatures which we are concerned with.
sigIndex := 0
for i := 0; i < size; i++ {
for i := range size {
if bitarray.GetIndex(i) {
si := sig.Signatures[sigIndex]
switch si := si.(type) {
@ -108,7 +108,7 @@ func (m *LegacyAminoPubKey) VerifySignature(msg, sig []byte) bool {
func (m *LegacyAminoPubKey) GetPubKeys() []cryptotypes.PubKey {
if m != nil {
pubKeys := make([]cryptotypes.PubKey, len(m.PubKeys))
for i := 0; i < len(m.PubKeys); i++ {
for i := range m.PubKeys {
pubKeys[i] = m.PubKeys[i].GetCachedValue().(cryptotypes.PubKey)
}
return pubKeys
@ -130,7 +130,7 @@ func (m *LegacyAminoPubKey) Equals(key cryptotypes.PubKey) bool {
return false
}
for i := 0; i < len(pubKeys); i++ {
for i := range pubKeys {
if !pubKeys[i].Equals(otherPubKeys[i]) {
return false
}
@ -163,7 +163,7 @@ func (m *LegacyAminoPubKey) UnpackInterfaces(unpacker types.AnyUnpacker) error {
func packPubKeys(pubKeys []cryptotypes.PubKey) ([]*types.Any, error) {
anyPubKeys := make([]*types.Any, len(pubKeys))
for i := 0; i < len(pubKeys); i++ {
for i := range pubKeys {
any, err := types.NewAnyWithValue(pubKeys[i])
if err != nil {
return nil, err

View File

@ -134,7 +134,7 @@ func TestVerifyMultisignature(t *testing.T) {
sig = multisig.NewMultisig(len(pubKeys))
signBytesFn := func(mode signing.SignMode) ([]byte, error) { return msg, nil }
for i := 0; i < k-1; i++ {
for i := range k - 1 {
signingIndex := signingIndices[i]
require.NoError(
multisig.AddSignatureFromPubKey(sig, sigs[signingIndex], pubKeys[signingIndex], pubKeys),
@ -298,7 +298,7 @@ func TestPubKeyMultisigThresholdAminoToIface(t *testing.T) {
func generatePubKeys(n int) []cryptotypes.PubKey {
pks := make([]cryptotypes.PubKey, n)
for i := 0; i < n; i++ {
for i := range n {
pks[i] = secp256k1.GenPrivKey().PubKey()
}
return pks
@ -308,7 +308,7 @@ func generatePubKeysAndSignatures(n int, msg []byte) (pubKeys []cryptotypes.PubK
pubKeys = make([]cryptotypes.PubKey, n)
signatures = make([]signing.SignatureData, n)
for i := 0; i < n; i++ {
for i := range n {
privkey := secp256k1.GenPrivKey()
pubKeys[i] = privkey.PubKey()
@ -322,10 +322,10 @@ func generateNestedMultiSignature(n int, msg []byte) (multisig.PubKey, *signing.
pubKeys := make([]cryptotypes.PubKey, n)
signatures := make([]signing.SignatureData, n)
bitArray := cryptotypes.NewCompactBitArray(n)
for i := 0; i < n; i++ {
for i := range n {
nestedPks, nestedSigs := generatePubKeysAndSignatures(5, msg)
nestedBitArray := cryptotypes.NewCompactBitArray(5)
for j := 0; j < 5; j++ {
for j := range 5 {
nestedBitArray.SetIndex(j, true)
}
nestedSig := &signing.MultiSignatureData{

View File

@ -148,7 +148,7 @@ func TestRandomMessagesWithRandomKeys(t *testing.T) {
}
func signAndRecoverWithRandomMessages(t *testing.T, keys func() ([]byte, []byte)) {
for i := 0; i < TestCount; i++ {
for range TestCount {
pubkey1, seckey := keys()
msg := csprngEntropy(32)
sig, err := Sign(msg, seckey)
@ -180,7 +180,7 @@ func TestRecoveryOfRandomSignature(t *testing.T) {
pubkey1, _ := generateKeyPair()
msg := csprngEntropy(32)
for i := 0; i < TestCount; i++ {
for i := range TestCount {
// recovery can sometimes work, but if so should always give wrong pubkey
pubkey2, _ := RecoverPubkey(msg, randSig())
if bytes.Equal(pubkey1, pubkey2) {
@ -194,7 +194,7 @@ func TestRandomMessagesAgainstValidSig(t *testing.T) {
msg := csprngEntropy(32)
sig, _ := Sign(msg, seckey)
for i := 0; i < TestCount; i++ {
for i := range TestCount {
msg = csprngEntropy(32)
pubkey2, _ := RecoverPubkey(msg, sig)
// recovery can sometimes work, but if so should always give wrong pubkey

View File

@ -15,7 +15,7 @@ import (
// Note: run with CGO_ENABLED=0 or go test -tags !cgo.
func TestSignatureVerificationAndRejectUpperS(t *testing.T) {
msg := []byte("We have lingered long enough on the shores of the cosmic ocean.")
for i := 0; i < 500; i++ {
for range 500 {
priv := GenPrivKey()
sigStr, err := priv.Sign(msg)
require.NoError(t, err)

View File

@ -220,7 +220,7 @@ func TestSignAndValidateSecp256k1(t *testing.T) {
// in creating the privkey.
func TestSecp256k1LoadPrivkeyAndSerializeIsIdentity(t *testing.T) {
numberOfTests := 256
for i := 0; i < numberOfTests; i++ {
for range numberOfTests {
// Seed the test case with some random bytes
privKeyBytes := [32]byte{}
copy(privKeyBytes[:], crypto.CRandBytes(32))
@ -352,7 +352,7 @@ func TestMarshalAmino(t *testing.T) {
testCases := []struct {
desc string
msg codec.AminoMarshaler
typ interface{}
typ any
expBinary []byte
expJSON string
}{
@ -408,9 +408,9 @@ func TestMarshalAmino_BackwardsCompatibility(t *testing.T) {
testCases := []struct {
desc string
tmKey interface{}
ourKey interface{}
marshalFn func(o interface{}) ([]byte, error)
tmKey any
ourKey any
marshalFn func(o any) ([]byte, error)
}{
{
"secp256k1 private key, binary",

View File

@ -8,7 +8,7 @@ import (
cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
)
func checkAminoJSON(t *testing.T, src, dst interface{}, isNil bool) {
func checkAminoJSON(t *testing.T, src, dst any, isNil bool) {
t.Helper()
// Marshal to JSON bytes.

View File

@ -147,7 +147,7 @@ func (bA *CompactBitArray) StringIndented(indent string) string {
lines := []string{}
bits := ""
size := bA.Count()
for i := 0; i < size; i++ {
for i := range size {
if bA.GetIndex(i) {
bits += "x"
} else {
@ -184,7 +184,7 @@ func (bA *CompactBitArray) MarshalJSON() ([]byte, error) {
bits := `"`
size := bA.Count()
for i := 0; i < size; i++ {
for i := range size {
if bA.GetIndex(i) {
bits += `x`
} else {
@ -222,7 +222,7 @@ func (bA *CompactBitArray) UnmarshalJSON(bz []byte) error {
// Construct new CompactBitArray and copy over.
numBits := len(bits)
bA2 := NewCompactBitArray(numBits)
for i := 0; i < numBits; i++ {
for i := range numBits {
if bits[i] == 'x' {
bA2.SetIndex(i, true)
}

View File

@ -18,7 +18,7 @@ func randCompactBitArray(bits int) (*CompactBitArray, []byte) {
src := unsafe.Bytes((bits + 7) / 8)
bA := NewCompactBitArray(bits)
for i := 0; i < numBytes-1; i++ {
for i := range numBytes - 1 {
for j := uint8(0); j < 8; j++ {
bA.SetIndex(i*8+int(j), src[i]&(uint8(1)<<(8-j)) > 0)
}
@ -211,7 +211,7 @@ func TestCompactBitArrayNumOfTrueBitsBefore(t *testing.T) {
err := json.Unmarshal([]byte(tc.marshalledBA), &bA)
require.NoError(t, err)
for i := 0; i < len(tc.bAIndex); i++ {
for i := range tc.bAIndex {
require.Equal(t, tc.trueValueIndex[i], bA.NumTrueBitsBefore(tc.bAIndex[i]), "tc %d, i %d", tcIndex, i)
}
})
@ -222,11 +222,11 @@ func TestCompactBitArrayGetSetIndex(t *testing.T) {
r := rand.New(rand.NewSource(100))
numTests := 10
numBitsPerArr := 100
for i := 0; i < numTests; i++ {
for range numTests {
bits := r.Intn(1000)
bA, _ := randCompactBitArray(bits)
for j := 0; j < numBitsPerArr; j++ {
for range numBitsPerArr {
copy := bA.Copy()
index := r.Intn(bits)
val := (r.Int63() % 2) == 0

View File

@ -27,7 +27,7 @@ func NewMultisig(n int) *signing.MultiSignatureData {
// GetIndex returns the index of pk in keys. Returns -1 if not found
func getIndex(pk types.PubKey, keys []types.PubKey) int {
for i := 0; i < len(keys); i++ {
for i := range keys {
if pk.Equals(keys[i]) {
return i
}

View File

@ -22,7 +22,7 @@ func unsafeConvertStr() []byte {
func (s *StringSuite) TestUnsafeStrToBytes() {
// we convert in other function to trigger GC. We want to check that
// the underlying array in []bytes is accessible after GC will finish swapping.
for i := 0; i < 5; i++ {
for range 5 {
b := unsafeConvertStr()
runtime.GC()
<-time.NewTimer(2 * time.Millisecond).C
@ -39,7 +39,7 @@ func unsafeConvertBytes() string {
func (s *StringSuite) TestUnsafeBytesToStr() {
// we convert in other function to trigger GC. We want to check that
// the underlying array in []bytes is accessible after GC will finish swapping.
for i := 0; i < 5; i++ {
for range 5 {
str := unsafeConvertBytes()
runtime.GC()
<-time.NewTimer(2 * time.Millisecond).C

View File

@ -25,7 +25,7 @@ type AutoCLIQueryService struct {
}
// NewAutoCLIQueryService returns a AutoCLIQueryService for the provided modules.
func NewAutoCLIQueryService(appModules map[string]interface{}) *AutoCLIQueryService {
func NewAutoCLIQueryService(appModules map[string]any) *AutoCLIQueryService {
return &AutoCLIQueryService{
moduleOptions: ExtractAutoCLIOptions(appModules),
}
@ -36,7 +36,7 @@ func NewAutoCLIQueryService(appModules map[string]interface{}) *AutoCLIQueryServ
// Example Usage:
//
// ExtractAutoCLIOptions(ModuleManager.Modules)
func ExtractAutoCLIOptions(appModules map[string]interface{}) map[string]*autocliv1.ModuleOptions {
func ExtractAutoCLIOptions(appModules map[string]any) map[string]*autocliv1.ModuleOptions {
moduleOptions := map[string]*autocliv1.ModuleOptions{}
for modName, mod := range appModules {
if autoCliMod, ok := mod.(interface {
@ -113,7 +113,7 @@ func (a *autocliConfigurator) RegisterMigration(string, uint64, module.Migration
return nil
}
func (a *autocliConfigurator) RegisterService(sd *grpc.ServiceDesc, ss interface{}) {
func (a *autocliConfigurator) RegisterService(sd *grpc.ServiceDesc, ss any) {
if a.registryCache == nil {
a.registryCache, a.err = proto.MergedRegistry()
}
@ -137,7 +137,7 @@ type autocliServiceRegistrar struct {
serviceName string
}
func (a *autocliServiceRegistrar) RegisterService(sd *grpc.ServiceDesc, _ interface{}) {
func (a *autocliServiceRegistrar) RegisterService(sd *grpc.ServiceDesc, _ any) {
a.serviceName = sd.ServiceName
}

View File

@ -163,9 +163,9 @@ func TestGlobalLabelsEventsMarshalling(t *testing.T) {
func TestGlobalLabelsWriteRead(t *testing.T) {
expected := [][]string{{"labelname3", "labelvalue3"}, {"labelname4", "labelvalue4"}}
expectedRaw := make([]interface{}, len(expected))
expectedRaw := make([]any, len(expected))
for i, exp := range expected {
pair := make([]interface{}, len(exp))
pair := make([]any, len(exp))
for j, s := range exp {
pair[j] = s
}

View File

@ -283,7 +283,7 @@ func SetConfigTemplate(customTemplate string) {
// WriteConfigFile renders config using the template and writes it to
// configFilePath.
func WriteConfigFile(configFilePath string, config interface{}) {
func WriteConfigFile(configFilePath string, config any) {
var buffer bytes.Buffer
if err := configTemplate.Execute(&buffer, config); err != nil {

View File

@ -309,7 +309,7 @@ func (s *serverReflectionServer) fileDescEncodingByFilename(name string, sentFil
// For SupportPackageIsVersion4, m is the name of the proto file, we
// call proto.FileDescriptor to get the byte slice.
// For SupportPackageIsVersion3, m is a byte slice itself.
func parseMetadata(meta interface{}) ([]byte, bool) {
func parseMetadata(meta any) ([]byte, bool) {
// Check if meta is the file name.
if fileNameForMeta, ok := meta.(string); ok {
return getFileDescriptor(fileNameForMeta), true

View File

@ -17,7 +17,7 @@ type CometLoggerWrapper struct {
// With returns a new wrapped logger with additional context provided by a set
// of key/value tuples. The number of tuples must be even and the key of the
// tuple must be a string.
func (cmt CometLoggerWrapper) With(keyVals ...interface{}) cmtlog.Logger {
func (cmt CometLoggerWrapper) With(keyVals ...any) cmtlog.Logger {
logger := cmt.Logger.With(keyVals...)
return CometLoggerWrapper{logger}
}

View File

@ -153,7 +153,7 @@ type MsgServerImpl struct {
capKeyMainStore *storetypes.KVStoreKey
}
func MsgTestHandler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
func MsgTestHandler(srv any, ctx context.Context, dec func(any) error, interceptor grpc.UnaryServerInterceptor) (any, error) {
in := new(KVStoreTx)
if err := dec(in); err != nil {
return nil, err
@ -165,7 +165,7 @@ func MsgTestHandler(srv interface{}, ctx context.Context, dec func(interface{})
Server: srv,
FullMethod: "/KVStoreTx",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
handler := func(ctx context.Context, req any) (any, error) {
return srv.(MsgServer).Test(ctx, req.(*KVStoreTx))
}
return interceptor(ctx, in, info, handler)

View File

@ -829,7 +829,7 @@ func testnetify(ctx *Context, testnetAppCreator types.AppCreator, db dbm.DB, tra
state.AppHash = appHash
} else {
// Node was likely stopped via SIGTERM, delete the next block's seen commit
err := blockStoreDB.Delete([]byte(fmt.Sprintf("SC:%v", blockStore.Height()+1)))
err := blockStoreDB.Delete(fmt.Appendf(nil, "SC:%v", blockStore.Height()+1))
if err != nil {
return nil, err
}
@ -928,19 +928,19 @@ func testnetify(ctx *Context, testnetAppCreator types.AppCreator, db dbm.DB, tra
}
// Modfiy Validators stateDB entry.
err = stateDB.Set([]byte(fmt.Sprintf("validatorsKey:%v", blockStore.Height())), buf)
err = stateDB.Set(fmt.Appendf(nil, "validatorsKey:%v", blockStore.Height()), buf)
if err != nil {
return nil, err
}
// Modify LastValidators stateDB entry.
err = stateDB.Set([]byte(fmt.Sprintf("validatorsKey:%v", blockStore.Height()-1)), buf)
err = stateDB.Set(fmt.Appendf(nil, "validatorsKey:%v", blockStore.Height()-1), buf)
if err != nil {
return nil, err
}
// Modify NextValidators stateDB entry.
err = stateDB.Set([]byte(fmt.Sprintf("validatorsKey:%v", blockStore.Height()+1)), buf)
err = stateDB.Set(fmt.Appendf(nil, "validatorsKey:%v", blockStore.Height()+1), buf)
if err != nil {
return nil, err
}

View File

@ -28,7 +28,7 @@ type (
// the expected types and could result in type assertion errors. It is recommend
// to either use the cast package or perform manual conversion for safety.
AppOptions interface {
Get(string) interface{}
Get(string) any
}
// Application defines an application interface that wraps abci.Application.

View File

@ -99,7 +99,7 @@ func bindFlags(basename string, cmd *cobra.Command, v *viper.Viper) (err error)
// InterceptConfigsPreRunHandler is identical to InterceptConfigsAndCreateContext
// except it also sets the server context on the command and the server logger.
func InterceptConfigsPreRunHandler(cmd *cobra.Command, customAppConfigTemplate string, customAppConfig interface{}, cmtConfig *cmtcfg.Config) error {
func InterceptConfigsPreRunHandler(cmd *cobra.Command, customAppConfigTemplate string, customAppConfig any, cmtConfig *cmtcfg.Config) error {
serverCtx, err := InterceptConfigsAndCreateContext(cmd, customAppConfigTemplate, customAppConfig, cmtConfig)
if err != nil {
return err
@ -126,7 +126,7 @@ func InterceptConfigsPreRunHandler(cmd *cobra.Command, customAppConfigTemplate s
// is used to read and parse the application configuration. Command handlers can
// fetch the server Context to get the CometBFT configuration or to get access
// to Viper.
func InterceptConfigsAndCreateContext(cmd *cobra.Command, customAppConfigTemplate string, customAppConfig interface{}, cmtConfig *cmtcfg.Config) (*Context, error) {
func InterceptConfigsAndCreateContext(cmd *cobra.Command, customAppConfigTemplate string, customAppConfig any, cmtConfig *cmtcfg.Config) (*Context, error) {
serverCtx := NewDefaultContext()
// Get the executable name and configure the viper instance so that environmental
@ -231,7 +231,7 @@ func SetCmdServerContext(cmd *cobra.Command, serverCtx *Context) error {
// configuration file. The CometBFT configuration file is parsed given a root
// Viper object, whereas the application is parsed with the private package-aware
// viperCfg object.
func interceptConfigs(rootViper *viper.Viper, customAppTemplate string, customConfig interface{}, cmtConfig *cmtcfg.Config) (*cmtcfg.Config, error) {
func interceptConfigs(rootViper *viper.Viper, customAppTemplate string, customConfig any, cmtConfig *cmtcfg.Config) (*cmtcfg.Config, error) {
rootDir := rootViper.GetString(flags.FlagHome)
configPath := filepath.Join(rootDir, "config")
cmtCfgFile := filepath.Join(configPath, "config.toml")

View File

@ -460,9 +460,9 @@ func TestEmptyMinGasPrices(t *testing.T) {
require.Errorf(t, err, sdkerrors.ErrAppConfig.Error())
}
type mapGetter map[string]interface{}
type mapGetter map[string]any
func (m mapGetter) Get(key string) interface{} {
func (m mapGetter) Get(key string) any {
return m[key]
}

View File

@ -65,7 +65,7 @@ func DeliverSimsMsg(
}
accountNumbers := make([]uint64, len(senders))
sequenceNumbers := make([]uint64, len(senders))
for i := 0; i < len(senders); i++ {
for i := range senders {
acc := ak.GetAccount(ctx, senders[i].Address)
accountNumbers[i] = acc.GetAccountNumber()
sequenceNumbers[i] = acc.GetSequence()

View File

@ -23,7 +23,7 @@ func Collect[T, E any](source []T, f func(a T) E) []E {
// First returns the first element in the slice that matches the condition
func First[T any](source []T, f func(a T) bool) *T {
for i := 0; i < len(source); i++ {
for i := range source {
if f(source[i]) {
return &source[i]
}

View File

@ -32,12 +32,12 @@ func TestMetrics_InMem(t *testing.T) {
require.NoError(t, err)
require.Equal(t, gr.ContentType, "application/json")
jsonMetrics := make(map[string]interface{})
jsonMetrics := make(map[string]any)
require.NoError(t, json.Unmarshal(gr.Metrics, &jsonMetrics))
counters := jsonMetrics["Counters"].([]interface{})
require.Equal(t, counters[0].(map[string]interface{})["Count"].(float64), 10.0)
require.Equal(t, counters[0].(map[string]interface{})["Name"].(string), "test.dummy_counter")
counters := jsonMetrics["Counters"].([]any)
require.Equal(t, counters[0].(map[string]any)["Count"].(float64), 10.0)
require.Equal(t, counters[0].(map[string]any)["Name"].(string), "test.dummy_counter")
}
func TestMetrics_Prom(t *testing.T) {

View File

@ -14,7 +14,7 @@ import (
// Takes a network, wait for two blocks and fetch the transaction from its hash
func CheckTxCode(network *network.Network, clientCtx client.Context, txHash string, expectedCode uint32) error {
// wait for 2 blocks
for i := 0; i < 2; i++ {
for range 2 {
if err := network.WaitForNextBlock(); err != nil {
return fmt.Errorf("failed to wait for next block: %w", err)
}
@ -42,7 +42,7 @@ func CheckTxCode(network *network.Network, clientCtx client.Context, txHash stri
// Takes a network, wait for two blocks and fetch the transaction from its hash
func GetTxResponse(network *network.Network, clientCtx client.Context, txHash string) (sdk.TxResponse, error) {
// wait for 2 blocks
for i := 0; i < 2; i++ {
for range 2 {
if err := network.WaitForNextBlock(); err != nil {
return sdk.TxResponse{}, fmt.Errorf("failed to wait for next block: %w", err)
}

View File

@ -10,7 +10,7 @@ import (
// RequireProtoDeepEqual fails the test t if p1 and p2 are not equivalent protobuf messages.
// Where p1 and p2 are proto.Message or slices of proto.Message.
func RequireProtoDeepEqual(t *testing.T, p1, p2 interface{}) {
func RequireProtoDeepEqual(t *testing.T, p1, p2 any) {
t.Helper()
require.Empty(t, cmp.Diff(p1, p2, protocmp.Transform()))
}

View File

@ -47,7 +47,7 @@ func (m *MockAnteDecorator) AnteHandle(ctx types.Context, tx types.Tx, simulate
}
// AnteHandle indicates an expected call of AnteHandle.
func (mr *MockAnteDecoratorMockRecorder) AnteHandle(ctx, tx, simulate, next interface{}) *gomock.Call {
func (mr *MockAnteDecoratorMockRecorder) AnteHandle(ctx, tx, simulate, next any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AnteHandle", reflect.TypeOf((*MockAnteDecorator)(nil).AnteHandle), ctx, tx, simulate, next)
}
@ -84,7 +84,7 @@ func (m *MockPostDecorator) PostHandle(ctx types.Context, tx types.Tx, simulate,
}
// PostHandle indicates an expected call of PostHandle.
func (mr *MockPostDecoratorMockRecorder) PostHandle(ctx, tx, simulate, success, next interface{}) *gomock.Call {
func (mr *MockPostDecoratorMockRecorder) PostHandle(ctx, tx, simulate, success, next any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PostHandle", reflect.TypeOf((*MockPostDecorator)(nil).PostHandle), ctx, tx, simulate, success, next)
}

View File

@ -70,7 +70,7 @@ var (
func init() {
closeFns := []func() error{}
for i := 0; i < 200; i++ {
for range 200 {
_, port, closeFn, err := FreeTCPAddr()
if err != nil {
panic(err)
@ -304,8 +304,8 @@ type (
// Logger is a network logger interface that exposes testnet-level Log() methods for an in-process testing network
// This is not to be confused with logging that may happen at an individual node or validator level
Logger interface {
Log(args ...interface{})
Logf(format string, args ...interface{})
Log(args ...any)
Logf(format string, args ...any)
}
)
@ -329,12 +329,12 @@ type CLILogger struct {
}
// Log logs given args.
func (s CLILogger) Log(args ...interface{}) {
func (s CLILogger) Log(args ...any) {
s.cmd.Println(args...)
}
// Logf logs given args according to a format specifier.
func (s CLILogger) Logf(format string, args ...interface{}) {
func (s CLILogger) Logf(format string, args ...any) {
s.cmd.Printf(format, args...)
}
@ -371,7 +371,7 @@ func New(l Logger, baseDir string, cfg Config) (*Network, error) {
buf := bufio.NewReader(os.Stdin)
// generate private keys, node IDs, and initial transactions
for i := 0; i < cfg.NumValidators; i++ {
for i := range cfg.NumValidators {
appCfg := srvconfig.DefaultConfig()
appCfg.Pruning = cfg.PruningStrategy
appCfg.MinGasPrices = cfg.MinGasPrices
@ -757,7 +757,7 @@ func (n *Network) WaitForHeightWithTimeout(h int64, t time.Duration) (int64, err
// It will do this until the function returns a nil error or until the number of
// blocks has been reached.
func (n *Network) RetryForBlocks(retryFunc func() error, blocks int) error {
for i := 0; i < blocks; i++ {
for i := range blocks {
_ = n.WaitForNextBlock() // ignore the error as we use the retry for validation
err := retryFunc()
if err == nil {

View File

@ -128,7 +128,7 @@ func startInProcess(cfg Config, val *Validator) error {
func collectGenFiles(cfg Config, vals []*Validator, outputDir string) error {
genTime := cmttime.Now()
for i := 0; i < cfg.NumValidators; i++ {
for i := range cfg.NumValidators {
cmtCfg := vals[i].Ctx.Config
nodeDir := filepath.Join(outputDir, vals[i].Moniker, "simd")
@ -194,7 +194,7 @@ func initGenFiles(cfg Config, genAccounts []authtypes.GenesisAccount, genBalance
}
// generate empty genesis files for each validator and save
for i := 0; i < cfg.NumValidators; i++ {
for i := range cfg.NumValidators {
if err := appGenesis.SaveAs(genFiles[i]); err != nil {
return err
}

View File

@ -101,7 +101,7 @@ func CreateIncrementalAccounts(accNum int) []sdk.AccAddress {
// CreateRandomAccounts is a strategy used by addTestAddrs() in order to generated addresses in random order.
func CreateRandomAccounts(accNum int) []sdk.AccAddress {
testAddrs := make([]sdk.AccAddress, accNum)
for i := 0; i < accNum; i++ {
for i := range accNum {
pk := ed25519.GenPrivKey().PubKey()
testAddrs[i] = sdk.AccAddress(pk.Address())
}

View File

@ -96,13 +96,13 @@ func DefaultStartUpConfig() StartupConfig {
// Setup initializes a new runtime.App and can inject values into extraOutputs.
// It uses SetupWithConfiguration under the hood.
func Setup(appConfig depinject.Config, extraOutputs ...interface{}) (*runtime.App, error) {
func Setup(appConfig depinject.Config, extraOutputs ...any) (*runtime.App, error) {
return SetupWithConfiguration(appConfig, DefaultStartUpConfig(), extraOutputs...)
}
// SetupAtGenesis initializes a new runtime.App at genesis and can inject values into extraOutputs.
// It uses SetupWithConfiguration under the hood.
func SetupAtGenesis(appConfig depinject.Config, extraOutputs ...interface{}) (*runtime.App, error) {
func SetupAtGenesis(appConfig depinject.Config, extraOutputs ...any) (*runtime.App, error) {
cfg := DefaultStartUpConfig()
cfg.AtGenesis = true
return SetupWithConfiguration(appConfig, cfg, extraOutputs...)
@ -136,7 +136,7 @@ func NextBlock(app *runtime.App, ctx sdk.Context, jumpTime time.Duration) (sdk.C
// SetupWithConfiguration initializes a new runtime.App. A Nop logger is set in runtime.App.
// appConfig defines the application configuration (f.e. app_config.go).
// extraOutputs defines the extra outputs to be assigned by the dependency injector (depinject).
func SetupWithConfiguration(appConfig depinject.Config, startupConfig StartupConfig, extraOutputs ...interface{}) (*runtime.App, error) {
func SetupWithConfiguration(appConfig depinject.Config, startupConfig StartupConfig, extraOutputs ...any) (*runtime.App, error) {
// create the app with depinject
var (
app *runtime.App
@ -285,17 +285,17 @@ func GenesisStateWithValSet(
type EmptyAppOptions struct{}
// Get implements AppOptions
func (ao EmptyAppOptions) Get(o string) interface{} {
func (ao EmptyAppOptions) Get(o string) any {
return nil
}
// AppOptionsMap is a stub implementing AppOptions which can get data from a map
type AppOptionsMap map[string]interface{}
type AppOptionsMap map[string]any
func (m AppOptionsMap) Get(key string) interface{} {
func (m AppOptionsMap) Get(key string) any {
v, ok := m[key]
if !ok {
return interface{}(nil)
return any(nil)
}
return v

View File

@ -123,7 +123,7 @@ func PrintStats(db dbm.DB) {
// GetSimulationLog unmarshals the KVPair's Value to the corresponding type based on the
// each's module store key and the prefix bytes of the KVPair's key.
func GetSimulationLog(storeName string, sdr simtypes.StoreDecoderRegistry, kvAs, kvBs []kv.Pair) (log string) {
for i := 0; i < len(kvAs); i++ {
for i := range kvAs {
if len(kvAs[i].Value) == 0 && len(kvBs[i].Value) == 0 {
// skip if the value doesn't have any bytes
continue

View File

@ -65,7 +65,7 @@ func AppStateFnWithExtendedCbs(
cdc codec.JSONCodec,
simManager *module.SimulationManager,
genesisState map[string]json.RawMessage,
moduleStateCb func(moduleName string, genesisState interface{}),
moduleStateCb func(moduleName string, genesisState any),
rawStateCb func(rawState map[string]json.RawMessage),
) simtypes.AppStateFn {
return func(

View File

@ -244,7 +244,7 @@ func (aa AccAddress) MarshalJSON() ([]byte, error) {
}
// MarshalYAML marshals to YAML using Bech32.
func (aa AccAddress) MarshalYAML() (interface{}, error) {
func (aa AccAddress) MarshalYAML() (any, error) {
return aa.String(), nil
}
@ -406,7 +406,7 @@ func (va ValAddress) MarshalJSON() ([]byte, error) {
}
// MarshalYAML marshals to YAML using Bech32.
func (va ValAddress) MarshalYAML() (interface{}, error) {
func (va ValAddress) MarshalYAML() (any, error) {
return va.String(), nil
}
@ -566,7 +566,7 @@ func (ca ConsAddress) MarshalJSON() ([]byte, error) {
}
// MarshalYAML marshals to YAML using Bech32.
func (ca ConsAddress) MarshalYAML() (interface{}, error) {
func (ca ConsAddress) MarshalYAML() (any, error) {
return ca.String(), nil
}

View File

@ -2,6 +2,7 @@ package address
import (
"crypto/sha256"
"slices"
"testing"
"github.com/cometbft/cometbft/crypto/tmhash"
@ -40,7 +41,7 @@ func (suite *AddressSuite) TestComposed() {
assert.Len(ac, Len)
// check if optimizations work
checkingKey := append([]byte{}, a1.AddressWithLen(suite.T())...)
checkingKey := slices.Clone(a1.AddressWithLen(suite.T()))
checkingKey = append(checkingKey, a2.AddressWithLen(suite.T())...)
ac2 := Hash(typ, checkingKey)
assert.Equal(ac, ac2, "NewComposed works correctly")

View File

@ -52,7 +52,7 @@ func (s *addressTestSuite) TestAddressRace() {
close(cancel)
// cleanup
for i := 0; i < 4; i++ {
for range 4 {
<-done
}
}

View File

@ -44,7 +44,7 @@ var invalidStrs = []string{
types.Bech32PrefixConsPub + "6789",
}
func (s *addressTestSuite) testMarshal(original, res interface{}, marshal func() ([]byte, error), unmarshal func([]byte) error) {
func (s *addressTestSuite) testMarshal(original, res any, marshal func() ([]byte, error), unmarshal func([]byte) error) {
bz, err := marshal()
s.Require().Nil(err)
s.Require().Nil(unmarshal(bz))
@ -94,7 +94,7 @@ func (s *addressTestSuite) TestRandBech32AccAddrConsistency() {
pubBz := make([]byte, ed25519.PubKeySize)
pub := &ed25519.PubKey{Key: pubBz}
for i := 0; i < 1000; i++ {
for range 1000 {
_, err := rand.Read(pub.Key)
s.Require().NoError(err)
@ -207,7 +207,7 @@ func (s *addressTestSuite) TestValAddr() {
pubBz := make([]byte, ed25519.PubKeySize)
pub := &ed25519.PubKey{Key: pubBz}
for i := 0; i < 20; i++ {
for range 20 {
_, err := rand.Read(pub.Key)
s.Require().NoError(err)
@ -249,7 +249,7 @@ func (s *addressTestSuite) TestConsAddress() {
pubBz := make([]byte, ed25519.PubKeySize)
pub := &ed25519.PubKey{Key: pubBz}
for i := 0; i < 20; i++ {
for range 20 {
_, err := rand.Read(pub.Key)
s.Require().NoError(err)

View File

@ -676,7 +676,7 @@ func (coins Coins) Equal(coinsB Coins) bool {
coins = coins.Sort()
coinsB = coinsB.Sort()
for i := 0; i < len(coins); i++ {
for i := range coins {
if !coins[i].Equal(coinsB[i]) {
return false
}

View File

@ -20,10 +20,10 @@ func BenchmarkCoinsAdditionIntersect(b *testing.B) {
coinsA := Coins(make([]Coin, numCoinsA))
coinsB := Coins(make([]Coin, numCoinsB))
for i := 0; i < numCoinsA; i++ {
for i := range numCoinsA {
coinsA[i] = NewCoin(coinName(i), math.NewInt(int64(i)))
}
for i := 0; i < numCoinsB; i++ {
for i := range numCoinsB {
coinsB[i] = NewCoin(coinName(i), math.NewInt(int64(i)))
}
@ -36,7 +36,7 @@ func BenchmarkCoinsAdditionIntersect(b *testing.B) {
}
benchmarkSizes := [][]int{{1, 1}, {5, 5}, {5, 20}, {1, 1000}, {2, 1000}}
for i := 0; i < len(benchmarkSizes); i++ {
for i := range benchmarkSizes {
sizeA := benchmarkSizes[i][0]
sizeB := benchmarkSizes[i][1]
b.Run(fmt.Sprintf("sizes: A_%d, B_%d", sizeA, sizeB), benchmarkingFunc(sizeA, sizeB))
@ -52,10 +52,10 @@ func BenchmarkCoinsAdditionNoIntersect(b *testing.B) {
coinsA := Coins(make([]Coin, numCoinsA))
coinsB := Coins(make([]Coin, numCoinsB))
for i := 0; i < numCoinsA; i++ {
for i := range numCoinsA {
coinsA[i] = NewCoin(coinName(numCoinsB+i), math.NewInt(int64(i)))
}
for i := 0; i < numCoinsB; i++ {
for i := range numCoinsB {
coinsB[i] = NewCoin(coinName(i), math.NewInt(int64(i)))
}
@ -68,7 +68,7 @@ func BenchmarkCoinsAdditionNoIntersect(b *testing.B) {
}
benchmarkSizes := [][]int{{1, 1}, {5, 5}, {5, 20}, {1, 1000}, {2, 1000}, {1000, 2}}
for i := 0; i < len(benchmarkSizes); i++ {
for i := range benchmarkSizes {
sizeA := benchmarkSizes[i][0]
sizeB := benchmarkSizes[i][1]
b.Run(fmt.Sprintf("sizes: A_%d, B_%d", sizeA, sizeB), benchmarkingFunc(sizeA, sizeB))
@ -87,14 +87,14 @@ func BenchmarkSumOfCoinAdds(b *testing.B) {
addCoins := make([]Coins, numAdds)
nonIntersectingCoins := coinsPerAdd - numIntersectingCoins
for i := 0; i < numAdds; i++ {
for i := range numAdds {
intersectCoins := make([]Coin, numIntersectingCoins)
num := math.NewInt(int64(i))
for j := 0; j < numIntersectingCoins; j++ {
for j := range numIntersectingCoins {
intersectCoins[j] = NewCoin(coinName(j+1_000_000_000), num)
}
addCoins[i] = intersectCoins
for j := 0; j < nonIntersectingCoins; j++ {
for j := range nonIntersectingCoins {
addCoins[i] = addCoins[i].Add(NewCoin(coinName(i*nonIntersectingCoins+j), num))
}
}
@ -131,8 +131,8 @@ func BenchmarkSumOfCoinAdds(b *testing.B) {
}{
{"MapCoins", MapCoinsSumFn}, {"Coins", CoinsSumFn},
}
for i := 0; i < len(benchmarkSizes); i++ {
for j := 0; j < 2; j++ {
for i := range benchmarkSizes {
for j := range 2 {
coinsPerAdd := benchmarkSizes[i][0]
intersectingCoinsPerAdd := benchmarkSizes[i][1]
numAdds := benchmarkSizes[i][2]

View File

@ -322,12 +322,12 @@ func (c Context) IsZero() bool {
return c.ms == nil
}
func (c Context) WithValue(key, value interface{}) Context {
func (c Context) WithValue(key, value any) Context {
c.baseCtx = context.WithValue(c.baseCtx, key, value)
return c
}
func (c Context) Value(key interface{}) interface{} {
func (c Context) Value(key any) any {
if key == SdkContextKey {
return c
}

View File

@ -490,7 +490,7 @@ func (coins DecCoins) Equal(coinsB DecCoins) bool {
coins = coins.Sort()
coinsB = coinsB.Sort()
for i := 0; i < len(coins); i++ {
for i := range coins {
if !coins[i].Equal(coinsB[i]) {
return false
}

View File

@ -90,7 +90,7 @@ func (s *eventsTestSuite) TestEventManager() {
func (s *eventsTestSuite) TestEmitTypedEvent() {
s.Run("deterministic key-value order", func() {
for i := 0; i < 10; i++ {
for range 10 {
em := sdk.NewEventManager()
coin := sdk.NewCoin("fakedenom", math.NewInt(1999999))
s.Require().NoError(em.EmitTypedEvent(&coin))

View File

@ -42,7 +42,7 @@ func ChainAnteDecorators(chain ...AnteDecorator) AnteHandler {
handlerChain[len(chain)] = func(ctx Context, tx Tx, simulate bool) (Context, error) {
return ctx, nil
}
for i := 0; i < len(chain); i++ {
for i := range chain {
ii := i
handlerChain[ii] = func(ctx Context, tx Tx, simulate bool) (Context, error) {
return chain[ii].AnteHandle(ctx, tx, simulate, handlerChain[ii+1])
@ -70,7 +70,7 @@ func ChainPostDecorators(chain ...PostDecorator) PostHandler {
handlerChain[len(chain)] = func(ctx Context, tx Tx, simulate, success bool) (Context, error) {
return ctx, nil
}
for i := 0; i < len(chain); i++ {
for i := range chain {
ii := i
handlerChain[ii] = func(ctx Context, tx Tx, simulate, success bool) (Context, error) {
return chain[ii].PostHandle(ctx, tx, simulate, success, handlerChain[ii+1])

View File

@ -153,7 +153,7 @@ func (s *MempoolTestSuite) TestDefaultMempool() {
txCount := 1000
var txs []testTx
for i := 0; i < txCount; i++ {
for i := range txCount {
acc := accounts[i%len(accounts)]
tx := testTx{
nonce: 0,

View File

@ -497,7 +497,7 @@ func (s *MempoolTestSuite) TestPriorityTies() {
{p: 10, n: 2, a: sa},
}
for i := 0; i < 100; i++ {
for range 100 {
s.mempool = mempool.DefaultPriorityMempool()
var shuffled []txSpec
for _, t := range txSet {
@ -528,7 +528,7 @@ func (s *MempoolTestSuite) TestPriorityTies() {
}
func (s *MempoolTestSuite) TestRandomTxOrderManyTimes() {
for i := 0; i < 3; i++ {
for range 3 {
s.Run("TestRandomGeneratedTxs", func() {
s.TestRandomGeneratedTxs()
})
@ -663,7 +663,7 @@ func (s *MempoolTestSuite) TestRandomWalkTxs() {
require.Equal(t, len(ordered), len(selected))
var orderedStr, selectedStr string
for i := 0; i < s.numTxs; i++ {
for i := range s.numTxs {
otx := ordered[i]
stx := selected[i].(testTx)
orderedStr = fmt.Sprintf("%s\n%s, %d, %d; %d",
@ -693,7 +693,7 @@ func genRandomTxs(seed int64, countTx, countAccount int) (res []testTx) {
accountNonces[account.Address.String()] = 0
}
for i := 0; i < countTx; i++ {
for i := range countTx {
addr := accounts[r.Intn(countAccount)].Address
priority := int64(r.Intn(maxPriority + 1))
nonce := accountNonces[addr.String()]

View File

@ -59,7 +59,7 @@ type configurator struct {
}
// RegisterService implements the grpc.Server interface.
func (c *configurator) RegisterService(sd *googlegrpc.ServiceDesc, ss interface{}) {
func (c *configurator) RegisterService(sd *googlegrpc.ServiceDesc, ss any) {
desc, err := c.cdc.InterfaceRegistry().FindDescriptorByName(protoreflect.FullName(sd.ServiceName))
if err != nil {
c.err = err

View File

@ -276,7 +276,7 @@ func (gam GenesisOnlyAppModule) ConsensusVersion() uint64 { return 1 }
// Manager defines a module manager that provides the high level utility for managing and executing
// operations for a group of modules
type Manager struct {
Modules map[string]interface{} // interface{} is used now to support the legacy AppModule as well as new core appmodule.AppModule.
Modules map[string]any // interface{} is used now to support the legacy AppModule as well as new core appmodule.AppModule.
OrderInitGenesis []string
OrderExportGenesis []string
OrderPreBlockers []string
@ -289,7 +289,7 @@ type Manager struct {
// NewManager creates a new Manager object.
func NewManager(modules ...AppModule) *Manager {
moduleMap := make(map[string]interface{})
moduleMap := make(map[string]any)
modulesStr := make([]string, 0, len(modules))
preBlockModulesStr := make([]string, 0)
for _, module := range modules {
@ -319,7 +319,7 @@ func NewManager(modules ...AppModule) *Manager {
// NewManagerFromMap creates a new Manager object from a map of module names to module implementations.
// This method should be used for apps and modules which have migrated to the cosmossdk.io/core.appmodule.AppModule API.
func NewManagerFromMap(moduleMap map[string]appmodule.AppModule) *Manager {
simpleModuleMap := make(map[string]interface{})
simpleModuleMap := make(map[string]any)
modulesStr := make([]string, 0, len(simpleModuleMap))
preBlockModulesStr := make([]string, 0)
for name, module := range moduleMap {

View File

@ -17,7 +17,7 @@ type TestSuite struct {
func (s *TestSuite) TestAssertNoForgottenModules() {
m := Manager{
Modules: map[string]interface{}{"a": nil, "b": nil},
Modules: map[string]any{"a": nil, "b": nil},
}
tcs := []struct {
name string
@ -42,7 +42,7 @@ func (s *TestSuite) TestAssertNoForgottenModules() {
func (s *TestSuite) TestModuleNames() {
m := Manager{
Modules: map[string]interface{}{"a": nil, "b": nil},
Modules: map[string]any{"a": nil, "b": nil},
}
ms := m.ModuleNames()
sort.Strings(ms)

View File

@ -61,7 +61,7 @@ func NewSimulationManager(modules ...AppModuleSimulation) *SimulationManager {
// with the same moduleName.
// Then it attempts to cast every provided AppModule into an AppModuleSimulation.
// If the cast succeeds, its included, otherwise it is excluded.
func NewSimulationManagerFromAppModules(modules map[string]interface{}, overrideModules map[string]AppModuleSimulation) *SimulationManager {
func NewSimulationManagerFromAppModules(modules map[string]any, overrideModules map[string]AppModuleSimulation) *SimulationManager {
simModules := []AppModuleSimulation{}
appModuleNamesSorted := make([]string, 0, len(modules))
for moduleName := range modules {

View File

@ -41,7 +41,7 @@ func RegisterMsgServiceDesc(registry codectypes.InterfaceRegistry, sd *grpc.Serv
}
prefSd := fd.Services().ByName(protoreflect.FullName(sd.ServiceName).Name())
for i := 0; i < prefSd.Methods().Len(); i++ {
for i := range prefSd.Methods().Len() {
md := prefSd.Methods().Get(i)
requestDesc := md.Input()
responseDesc := md.Output()

View File

@ -26,7 +26,7 @@ func ValidateProtoAnnotations(protoFiles signing.ProtoFileResolver) error {
var serviceErrs []error
protoFiles.RangeFiles(func(fd protoreflect.FileDescriptor) bool {
for i := 0; i < fd.Services().Len(); i++ {
for i := range fd.Services().Len() {
sd := fd.Services().Get(i)
if sd.Name() == "Msg" {
// We use the heuristic that services name Msg are exactly the

View File

@ -19,12 +19,12 @@ var addr1 = sdk.AccAddress([]byte("addr1"))
func (s *paginationTestSuite) TestFilteredPaginations() {
var balances sdk.Coins
for i := 0; i < numBalances; i++ {
for i := range numBalances {
denom := fmt.Sprintf("foo%ddenom", i)
balances = append(balances, sdk.NewInt64Coin(denom, 100))
}
for i := 0; i < 4; i++ {
for i := range 4 {
denom := fmt.Sprintf("test%ddenom", i)
balances = append(balances, sdk.NewInt64Coin(denom, 250))
}
@ -92,12 +92,12 @@ func (s *paginationTestSuite) TestFilteredPaginations() {
func (s *paginationTestSuite) TestReverseFilteredPaginations() {
var balances sdk.Coins
for i := 0; i < numBalances; i++ {
for i := range numBalances {
denom := fmt.Sprintf("foo%ddenom", i)
balances = append(balances, sdk.NewInt64Coin(denom, 100))
}
for i := 0; i < 10; i++ {
for i := range 10 {
denom := fmt.Sprintf("test%ddenom", i)
balances = append(balances, sdk.NewInt64Coin(denom, 250))
}
@ -170,12 +170,12 @@ func (s *paginationTestSuite) TestReverseFilteredPaginations() {
func (s *paginationTestSuite) TestFilteredPaginate() {
var balances sdk.Coins
for i := 0; i < numBalances; i++ {
for i := range numBalances {
denom := fmt.Sprintf("foo%ddenom", i)
balances = append(balances, sdk.NewInt64Coin(denom, 100))
}
for i := 0; i < 5; i++ {
for i := range 5 {
denom := fmt.Sprintf("test%ddenom", i)
balances = append(balances, sdk.NewInt64Coin(denom, 250))
}

View File

@ -40,7 +40,7 @@ func FuzzPagination(f *testing.F) {
}
// 1.5. Use the inprocess fuzzer to mutate variables.
for i := 0; i < 1000; i++ {
for range 1000 {
qr := new(query.PageRequest)
gf.Fuzz(qr)
seeds = append(seeds, qr)
@ -57,7 +57,7 @@ func FuzzPagination(f *testing.F) {
// 3. Setup the keystore.
var balances sdk.Coins
for i := 0; i < 5; i++ {
for i := range 5 {
denom := fmt.Sprintf("foo%ddenom", i)
balances = append(balances, sdk.NewInt64Coin(denom, int64(100+i)))
}

View File

@ -112,7 +112,7 @@ func (s *paginationTestSuite) TestPagination() {
var balances sdk.Coins
for i := 0; i < numBalances; i++ {
for i := range numBalances {
denom := fmt.Sprintf("foo%ddenom", i)
balances = append(balances, sdk.NewInt64Coin(denom, 100))
}
@ -220,7 +220,7 @@ func (s *paginationTestSuite) TestReversePagination() {
var balances sdk.Coins
for i := 0; i < numBalances; i++ {
for i := range numBalances {
denom := fmt.Sprintf("foo%ddenom", i)
balances = append(balances, sdk.NewInt64Coin(denom, 100))
}
@ -339,7 +339,7 @@ func (s *paginationTestSuite) TestReversePagination() {
func (s *paginationTestSuite) TestPaginate() {
var balances sdk.Coins
for i := 0; i < 2; i++ {
for i := range 2 {
denom := fmt.Sprintf("foo%ddenom", i)
balances = append(balances, sdk.NewInt64Coin(denom, 100))
}

View File

@ -157,7 +157,7 @@ type AppParams map[string]json.RawMessage
// object. If it exists, it'll be decoded and returned. Otherwise, the provided
// ParamSimulator is used to generate a random value or default value (eg: in the
// case of operation weights where Rand is not used).
func (sp AppParams) GetOrGenerate(key string, ptr interface{}, r *rand.Rand, ps ParamSimulator) {
func (sp AppParams) GetOrGenerate(key string, ptr any, r *rand.Rand, ps ParamSimulator) {
if v, ok := sp[key]; ok && v != nil {
err := json.Unmarshal(v, ptr)
if err != nil {

View File

@ -5,7 +5,7 @@ import (
)
// TxExtensionOptionI defines the interface for tx extension options
type TxExtensionOptionI interface{}
type TxExtensionOptionI any
// unpackTxExtensionOptionsI unpacks Any's to TxExtensionOptionI's.
func unpackTxExtensionOptionsI(unpacker types.AnyUnpacker, anys []*types.Any) error {

View File

@ -14,7 +14,7 @@ const (
// MsgResponse is the interface all Msg server handlers' response types need to
// implement. It's the interface that's representing all Msg responses packed
// in Anys.
type MsgResponse interface{}
type MsgResponse any
// SetMsgs takes a slice of sdk.Msg's and turn them into Any's.
func SetMsgs(msgs []sdk.Msg) ([]*types.Any, error) {

View File

@ -18,7 +18,7 @@ import (
// If the passed JSON isn't valid it will return an error.
// Deprecated: SortJSON was used for GetSignbytes, this is now automatic with amino signing
func SortJSON(toSortJSON []byte) ([]byte, error) {
var c interface{}
var c any
err := json.Unmarshal(toSortJSON, &c)
if err != nil {
return nil, err

View File

@ -2,6 +2,7 @@ package types_test
import (
"bytes"
"slices"
"testing"
"time"
@ -134,7 +135,7 @@ func (s *utilsTestSuite) TestAppendParseBytes() {
testByte2 := []byte(test2)
combinedBytes := sdk.AppendLengthPrefixedBytes(address.MustLengthPrefix(testByte1), address.MustLengthPrefix(testByte2))
testCombineBytes := append([]byte{}, address.MustLengthPrefix(testByte1)...)
testCombineBytes := slices.Clone(address.MustLengthPrefix(testByte1))
testCombineBytes = append(testCombineBytes, address.MustLengthPrefix(testByte2)...)
s.Require().Equal(combinedBytes, testCombineBytes)

View File

@ -122,5 +122,5 @@ func (d buildDep) String() string {
return fmt.Sprintf("%s@%s", d.Path, d.Version)
}
func (d buildDep) MarshalJSON() ([]byte, error) { return json.Marshal(d.String()) }
func (d buildDep) MarshalYAML() (interface{}, error) { return d.String(), nil }
func (d buildDep) MarshalJSON() ([]byte, error) { return json.Marshal(d.String()) }
func (d buildDep) MarshalYAML() (any, error) { return d.String(), nil }

View File

@ -1195,7 +1195,7 @@ func TestAnteHandlerSetPubKey(t *testing.T) {
func generatePubKeysAndSignatures(n int, msg []byte, _ bool) (pubkeys []cryptotypes.PubKey, signatures [][]byte) {
pubkeys = make([]cryptotypes.PubKey, n)
signatures = make([][]byte, n)
for i := 0; i < n; i++ {
for i := range n {
var privkey cryptotypes.PrivKey = secp256k1.GenPrivKey()
// TODO: also generate ed25519 keys as below when ed25519 keys are
@ -1232,7 +1232,7 @@ func expectedGasCostByKeys(pubkeys []cryptotypes.PubKey) uint64 {
func TestCountSubkeys(t *testing.T) {
genPubKeys := func(n int) []cryptotypes.PubKey {
var ret []cryptotypes.PubKey
for i := 0; i < n; i++ {
for range n {
ret = append(ret, secp256k1.GenPrivKey().PubKey())
}
return ret
@ -1274,7 +1274,7 @@ func TestAnteHandlerSigLimitExceeded(t *testing.T) {
addrs []sdk.AccAddress
privs []cryptotypes.PrivKey
)
for i := 0; i < 8; i++ {
for i := range 8 {
addrs = append(addrs, accs[i].acc.GetAddress())
privs = append(privs, accs[i].priv)
}

View File

@ -472,7 +472,7 @@ func ConsumeMultisignatureVerificationGas(
size := sig.BitArray.Count()
sigIndex := 0
for i := 0; i < size; i++ {
for i := range size {
if !sig.BitArray.GetIndex(i) {
continue
}

View File

@ -83,7 +83,7 @@ func TestConsumeSignatureVerificationGas(t *testing.T) {
multisigKey1 := kmultisig.NewLegacyAminoPubKey(2, pkSet1)
multisignature1 := multisig.NewMultisig(len(pkSet1))
expectedCost1 := expectedGasCostByKeys(pkSet1)
for i := 0; i < len(pkSet1); i++ {
for i := range pkSet1 {
stdSig := legacytx.StdSignature{PubKey: pkSet1[i], Signature: sigSet1[i]} //nolint:staticcheck // SA1019: legacytx.StdSignature is deprecated
sigV2, err := legacytx.StdSignatureToSignatureV2(suite.clientCtx.LegacyAmino, stdSig)
require.NoError(t, err)

View File

@ -117,7 +117,7 @@ func SetupTestSuite(t *testing.T, isCheckTx bool) *AnteTestSuite {
func (suite *AnteTestSuite) CreateTestAccounts(numAccs int) []TestAccount {
var accounts []TestAccount
for i := 0; i < numAccs; i++ {
for i := range numAccs {
priv, _, addr := testdata.KeyTestPubAddr()
acc := suite.accountKeeper.NewAccountWithAddress(suite.ctx, addr)
err := acc.SetAccountNumber(uint64(i + 1000))

View File

@ -106,7 +106,7 @@ func (suite *DeterministicTestSuite) createAndSetAccounts(t *rapid.T, count int)
accNums[i] += lane * 1000
}
for i := 0; i < count; i++ {
for i := range count {
pub := pubkeyGenerator(t).Draw(t, "pubkey")
addr := sdk.AccAddress(pub.Address())
accNum := accNums[i]
@ -154,7 +154,7 @@ func (suite *DeterministicTestSuite) TestGRPCQueryAccounts() {
req := &types.QueryAccountsRequest{Pagination: testdata.PaginationGenerator(t, uint64(numAccs)).Draw(t, "accounts")}
testdata.DeterministicIterations(suite.ctx, suite.T(), req, suite.queryClient.Accounts, 0, true)
for i := 0; i < numAccs; i++ {
for i := range numAccs {
suite.accountKeeper.RemoveAccount(suite.ctx, accs[i])
}
})
@ -270,12 +270,12 @@ func (suite *DeterministicTestSuite) TestGRPCQueryModuleAccounts() {
maccsCount := rapid.IntRange(1, 10).Draw(t, "accounts")
maccs := make([]string, maccsCount)
for i := 0; i < maccsCount; i++ {
for i := range maccsCount {
maccs[i] = rapid.StringMatching(`[a-z]{5,}`).Draw(t, "module-name")
}
maccPerms := make(map[string][]string)
for i := 0; i < maccsCount; i++ {
for i := range maccsCount {
mPerms := make([]string, 0, 4)
for _, permission := range permissions {
if rapid.Bool().Draw(t, "permissions") {

View File

@ -39,7 +39,7 @@ func MultiSignatureDataToAminoMultisignature(cdc *codec.LegacyAmino, mSig *signi
n := len(mSig.Signatures)
sigs := make([][]byte, n)
for i := 0; i < n; i++ {
for i := range n {
var err error
sigs[i], err = SignatureDataToAminoSignature(cdc, mSig.Signatures[i])
if err != nil {

View File

@ -109,7 +109,7 @@ func (ss StdSignature) GetPubKey() cryptotypes.PubKey {
}
// MarshalYAML returns the YAML representation of the signature.
func (ss StdSignature) MarshalYAML() (interface{}, error) {
func (ss StdSignature) MarshalYAML() (any, error) {
pk := ""
if ss.PubKey != nil {
pk = ss.String()
@ -168,7 +168,7 @@ func pubKeySigToSigData(cdc *codec.LegacyAmino, key cryptotypes.PubKey, sig []by
n := multiSig.BitArray.Count()
signatures := multisig.NewMultisig(n)
sigIdx := 0
for i := 0; i < n; i++ {
for i := range n {
if bitArray.GetIndex(i) {
data, err := pubKeySigToSigData(cdc, pubKeys[i], multiSig.Sigs[sigIdx])
if err != nil {

View File

@ -136,7 +136,7 @@ func SignatureV2ToStdSignature(cdc *codec.LegacyAmino, sig signing.SignatureV2)
}
// Unmarshaler is a generic type for Unmarshal functions
type Unmarshaler func(bytes []byte, ptr interface{}) error
type Unmarshaler func(bytes []byte, ptr any) error
// DefaultTxEncoder logic for standard transaction encoding
func DefaultTxEncoder(cdc *codec.LegacyAmino) sdk.TxEncoder {

View File

@ -4,6 +4,7 @@ import (
"encoding/binary"
"fmt"
"math"
"slices"
"testing"
"github.com/stretchr/testify/require"
@ -189,14 +190,14 @@ func TestRejectNonADR027(t *testing.T) {
//
// Consume "BodyBytes" field.
_, _, m := protowire.ConsumeField(txBz)
bodyBz = append([]byte{}, txBz[:m]...)
bodyBz = slices.Clone(txBz[:m])
txBz = txBz[m:] // Skip over "BodyBytes" bytes.
// Consume "AuthInfoBytes" field.
_, _, m = protowire.ConsumeField(txBz)
authInfoBz = append([]byte{}, txBz[:m]...)
authInfoBz = slices.Clone(txBz[:m])
txBz = txBz[m:] // Skip over "AuthInfoBytes" bytes.
// Consume "Signature" field, it's the remaining bytes.
sigsBz := append([]byte{}, txBz...)
sigsBz := slices.Clone(txBz)
// bodyBz's length prefix is 5, with `5` as varint encoding. We also try a
// longer varint encoding for 5: `133 00`.

Some files were not shown because too many files have changed in this diff Show More