refactor(x/upgrade)!: use KVStoreService and context.Context (#16227)
parent 5089313a0a
commit bbab968ee2
@@ -137,6 +137,7 @@ Ref: https://keepachangelog.com/en/1.0.0/

### API Breaking Changes

* (x/slashing) [#16246](https://github.com/cosmos/cosmos-sdk/issues/16246) `NewKeeper` now takes a `KVStoreService` instead of a `StoreKey`, and methods in the `Keeper` now take a `context.Context` instead of a `sdk.Context` and return an `error`. `GetValidatorSigningInfo` now returns an error instead of a `found bool`; the error can be `nil` (found), `ErrNoSigningInfoFound` (not found), or any other error.
* (module) [#16227](https://github.com/cosmos/cosmos-sdk/issues/16227) `manager.RunMigrations()` now takes a `context.Context` instead of a `sdk.Context` (see the sketch after this list).
* (x/mint) [#16179](https://github.com/cosmos/cosmos-sdk/issues/16179) `NewKeeper` now takes a `KVStoreService` instead of a `StoreKey`, and methods in the `Keeper` now take a `context.Context` instead of a `sdk.Context` and return an `error`.
* (x/crisis) [#16216](https://github.com/cosmos/cosmos-sdk/issues/16216) `NewKeeper` now takes a `KVStoreService` instead of a `StoreKey`, and methods in the `Keeper` now take a `context.Context` instead of a `sdk.Context` and return an `error` instead of panicking.
* (x/gov) [#15988](https://github.com/cosmos/cosmos-sdk/issues/15988) `NewKeeper` now takes a `KVStoreService` instead of a `StoreKey`, and methods in the `Keeper` now take a `context.Context` instead of a `sdk.Context` and return an `error` (instead of panicking or returning a `found bool`). Iterator callback functions now return an `error` instead of a `bool`.
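To make the `RunMigrations` entry concrete: an upgrade handler written against the new API receives a plain `context.Context`. This sketch mirrors the SimApp handler updated later in this commit; the surrounding `app` wiring is assumed.

```go
app.UpgradeKeeper.SetUpgradeHandler(
	UpgradeName,
	func(ctx context.Context, _ upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
		// RunMigrations now accepts the context.Context directly.
		return app.ModuleManager.RunMigrations(ctx, app.Configurator(), fromVM)
	},
)
```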
@@ -89,6 +89,7 @@ The following modules `NewKeeper` function now take a `KVStoreService` instead o

* `x/mint`
* `x/nft`
* `x/slashing`
* `x/upgrade`

Users manually wiring their chain need to use the `runtime.NewKVStoreService` method to create a `KVStoreService` from a `StoreKey`:
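The referenced snippet sits outside this hunk; as a sketch of the manual wiring (it mirrors the SimApp change later in this commit, so `keys`, `appCodec`, and `homePath` are assumed from the app builder):

```go
// Before: pass the StoreKey directly.
// app.UpgradeKeeper = upgradekeeper.NewKeeper(skipUpgradeHeights, keys[upgradetypes.StoreKey], ...)

// After: wrap the StoreKey in a KVStoreService.
app.UpgradeKeeper = upgradekeeper.NewKeeper(
	skipUpgradeHeights,
	runtime.NewKVStoreService(keys[upgradetypes.StoreKey]),
	appCodec,
	homePath,
	app.BaseApp,
	authtypes.NewModuleAddress(govtypes.ModuleName).String(),
)
```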
@@ -111,6 +112,7 @@ The following modules' `Keeper` methods now take in a `context.Context` instead

* `x/evidence`
* `x/gov`
* `x/slashing`
* `x/upgrade`

**Users using depinject do not need any changes; this is automatically done for them.**
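Callers of these keepers now pass a `context.Context` and must handle the returned error. A self-contained sketch based on the `x/upgrade` changes in this commit (`reportUpgrade` is a hypothetical helper, not part of the SDK):

```go
package main

import (
	"context"
	"errors"
	"fmt"

	upgradekeeper "cosmossdk.io/x/upgrade/keeper"
	upgradetypes "cosmossdk.io/x/upgrade/types"
)

// reportUpgrade shows the new calling convention: context.Context in, error out.
func reportUpgrade(ctx context.Context, k *upgradekeeper.Keeper) error {
	plan, err := k.GetUpgradePlan(ctx)
	if errors.Is(err, upgradetypes.ErrNoUpgradePlanFound) {
		return nil // nothing scheduled; not a failure
	}
	if err != nil {
		return err // a real store or decoding error
	}
	fmt.Printf("upgrade %q scheduled for height %d\n", plan.Name, plan.Height)
	return nil
}
```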
@@ -358,7 +358,7 @@ func NewSimApp(
}

homePath := cast.ToString(appOpts.Get(flags.FlagHome))
// set the governance module account as the authority for conducting upgrades
app.UpgradeKeeper = upgradekeeper.NewKeeper(skipUpgradeHeights, keys[upgradetypes.StoreKey], appCodec, homePath, app.BaseApp, authtypes.NewModuleAddress(govtypes.ModuleName).String())
app.UpgradeKeeper = upgradekeeper.NewKeeper(skipUpgradeHeights, runtime.NewKVStoreService(keys[upgradetypes.StoreKey]), appCodec, homePath, app.BaseApp, authtypes.NewModuleAddress(govtypes.ModuleName).String())

// Register the proposal types
// Deprecated: Avoid adding new handlers, instead use the new proposal flow
@@ -270,7 +270,8 @@ func TestUpgradeStateOnGenesis(t *testing.T) {

// make sure the upgrade keeper has version map in state
ctx := app.NewContext(false)
vm := app.UpgradeKeeper.GetModuleVersionMap(ctx)
vm, err := app.UpgradeKeeper.GetModuleVersionMap(ctx)
require.NoError(t, err)
for v, i := range app.ModuleManager.Modules {
if i, ok := i.(module.HasConsensusVersion); ok {
require.Equal(t, vm[v], i.ConsensusVersion())
@@ -1,10 +1,11 @@
package simapp

import (
"context"

storetypes "cosmossdk.io/store/types"
upgradetypes "cosmossdk.io/x/upgrade/types"

sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/module"
)

@@ -19,7 +20,7 @@ const UpgradeName = "v047-to-v050"
func (app SimApp) RegisterUpgradeHandlers() {
app.UpgradeKeeper.SetUpgradeHandler(
UpgradeName,
func(ctx sdk.Context, _ upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
func(ctx context.Context, _ upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
return app.ModuleManager.RunMigrations(ctx, app.Configurator(), fromVM)
},
)
@@ -76,10 +76,13 @@ func (s *E2ETestSuite) TestModuleVersionsCLI() {
// avoid printing as yaml from CLI command
clientCtx.OutputFormat = "JSON"

vm := s.upgradeKeeper.GetModuleVersionMap(s.ctx)
mv := s.upgradeKeeper.GetModuleVersions(s.ctx)
vm, err := s.upgradeKeeper.GetModuleVersionMap(s.ctx)
s.Require().NoError(err)
s.Require().NotEmpty(vm)

mv, err := s.upgradeKeeper.GetModuleVersions(s.ctx)
s.Require().NoError(err)

for _, tc := range testCases {
s.Run(fmt.Sprintf("Case %s", tc.msg), func() {
expect := mv
@@ -598,7 +598,7 @@ type VersionMap map[string]uint64
// Example:
//
// cfg := module.NewConfigurator(...)
// app.UpgradeKeeper.SetUpgradeHandler("my-plan", func(ctx sdk.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
// app.UpgradeKeeper.SetUpgradeHandler("my-plan", func(ctx context.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
// return app.mm.RunMigrations(ctx, cfg, fromVM)
// })
//

@@ -625,7 +625,7 @@ type VersionMap map[string]uint64
// Example:
//
// cfg := module.NewConfigurator(...)
// app.UpgradeKeeper.SetUpgradeHandler("my-plan", func(ctx sdk.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
// app.UpgradeKeeper.SetUpgradeHandler("my-plan", func(ctx context.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
// // Assume "foo" is a new module.
// // `fromVM` is fetched from existing x/upgrade store. Since foo didn't exist
// // before this upgrade, `v, exists := fromVM["foo"]; exists == false`, and RunMigration will by default

@@ -638,7 +638,7 @@ type VersionMap map[string]uint64
// })
//
// Please also refer to https://docs.cosmos.network/main/core/upgrade for more information.
func (m Manager) RunMigrations(ctx sdk.Context, cfg Configurator, fromVM VersionMap) (VersionMap, error) {
func (m Manager) RunMigrations(ctx context.Context, cfg Configurator, fromVM VersionMap) (VersionMap, error) {
c, ok := cfg.(*configurator)
if !ok {
return nil, errorsmod.Wrapf(sdkerrors.ErrInvalidType, "expected %T, got %T", &configurator{}, cfg)

@@ -648,6 +648,7 @@ func (m Manager) RunMigrations(ctx sdk.Context, cfg Configurator, fromVM Version
modules = DefaultMigrationsOrder(m.ModuleNames())
}

sdkCtx := sdk.UnwrapSDKContext(ctx)
updatedVM := VersionMap{}
for _, moduleName := range modules {
module := m.Modules[moduleName]

@@ -666,14 +667,14 @@ func (m Manager) RunMigrations(ctx sdk.Context, cfg Configurator, fromVM Version
// 2. An existing chain is upgrading from version < 0.43 to v0.43+ for the first time.
// In this case, all modules have yet to be added to x/upgrade's VersionMap store.
if exists {
err := c.runModuleMigrations(ctx, moduleName, fromVersion, toVersion)
err := c.runModuleMigrations(sdkCtx, moduleName, fromVersion, toVersion)
if err != nil {
return nil, err
}
} else {
ctx.Logger().Info(fmt.Sprintf("adding a new module: %s", moduleName))
sdkCtx.Logger().Info(fmt.Sprintf("adding a new module: %s", moduleName))
if module, ok := m.Modules[moduleName].(HasGenesis); ok {
moduleValUpdates := module.InitGenesis(ctx, c.cdc, module.DefaultGenesis(c.cdc))
moduleValUpdates := module.InitGenesis(sdkCtx, c.cdc, module.DefaultGenesis(c.cdc))
// The module manager assumes only one module will update the
// validator set, and it can't be a new module.
if len(moduleValUpdates) > 0 {
@@ -29,3 +29,7 @@ Ref: https://keepachangelog.com/en/1.0.0/

* [#14880](https://github.com/cosmos/cosmos-sdk/pull/14880) Switch from using gov v1beta1 to gov v1 in upgrade CLIs.
* [#14764](https://github.com/cosmos/cosmos-sdk/pull/14764) The `x/upgrade` module is extracted to have a separate go.mod file, which allows it to be a standalone module.

### API Breaking

* (x/upgrade) [#16227](https://github.com/cosmos/cosmos-sdk/issues/16227) `NewKeeper` now takes a `KVStoreService` instead of a `StoreKey`, methods in the `Keeper` now take a `context.Context` instead of a `sdk.Context` and return an `error`. `UpgradeHandler` now receives a `context.Context`. `GetUpgradedClient`, `GetUpgradedConsensusState`, and `GetUpgradePlan` now return a specific error for "not found".
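A sketch of consuming the new sentinel errors named in this entry (variable names are illustrative; the error values are the ones introduced in this diff):

```go
bz, err := k.GetUpgradedClient(ctx, lastHeight)
switch {
case errors.Is(err, types.ErrNoUpgradedClientFound):
	// no upgraded client recorded for this height; not a failure
case err != nil:
	return err // unexpected store error
default:
	_ = bz // use the upgraded client bytes
}
```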
@@ -1,6 +1,8 @@
package upgrade

import (
"context"
"errors"
"fmt"
"time"

@@ -20,10 +22,16 @@ import (
// The purpose is to ensure the binary is switched EXACTLY at the desired block, and to allow
// a migration to be executed if needed upon this switch (migration defined in the new binary)
// skipUpgradeHeightArray is a set of block heights for which the upgrade must be skipped
func BeginBlocker(k *keeper.Keeper, ctx sdk.Context) {
func BeginBlocker(ctx context.Context, k *keeper.Keeper) error {
defer telemetry.ModuleMeasureSince(types.ModuleName, time.Now(), telemetry.MetricKeyBeginBlocker)

plan, found := k.GetUpgradePlan(ctx)
sdkCtx := sdk.UnwrapSDKContext(ctx)
blockHeight := sdkCtx.HeaderInfo().Height
plan, err := k.GetUpgradePlan(ctx)
if err != nil && !errors.Is(err, types.ErrNoUpgradePlanFound) {
return err
}
found := err == nil

if !k.DowngradeVerified() {
k.SetDowngradeVerified(true)

@@ -32,66 +40,75 @@ func BeginBlocker(k *keeper.Keeper, ctx sdk.Context) {
// 1. If there is no scheduled upgrade.
// 2. If the plan is not ready.
// 3. If the plan is ready and skip upgrade height is set for current height.
if !found || !plan.ShouldExecute(ctx) || (plan.ShouldExecute(ctx) && k.IsSkipHeight(ctx.HeaderInfo().Height)) {
lastAppliedPlan, _ := k.GetLastCompletedUpgrade(ctx)
if !found || !plan.ShouldExecute(blockHeight) || (plan.ShouldExecute(blockHeight) && k.IsSkipHeight(blockHeight)) {
lastAppliedPlan, _, err := k.GetLastCompletedUpgrade(ctx)
if err != nil {
return err
}

if lastAppliedPlan != "" && !k.HasHandler(lastAppliedPlan) {
var appVersion uint64

cp := ctx.ConsensusParams()
cp := sdkCtx.ConsensusParams()
if cp.Version != nil {
appVersion = cp.Version.App
}

panic(fmt.Sprintf("Wrong app version %d, upgrade handler is missing for %s upgrade plan", appVersion, lastAppliedPlan))
return fmt.Errorf("wrong app version %d, upgrade handler is missing for %s upgrade plan", appVersion, lastAppliedPlan)
}
}
}

if !found {
return
return nil
}
logger := ctx.Logger()

logger := k.Logger(ctx)

// To make sure clear upgrade is executed at the same block
if plan.ShouldExecute(ctx) {
if plan.ShouldExecute(blockHeight) {
// If skip upgrade has been set for current height, we clear the upgrade plan
if k.IsSkipHeight(ctx.HeaderInfo().Height) {
if k.IsSkipHeight(blockHeight) {
skipUpgradeMsg := fmt.Sprintf("UPGRADE \"%s\" SKIPPED at %d: %s", plan.Name, plan.Height, plan.Info)
logger.Info(skipUpgradeMsg)

// Clear the upgrade plan at current height
k.ClearUpgradePlan(ctx)
return
return k.ClearUpgradePlan(ctx)
}

// Prepare shutdown if we don't have an upgrade handler for this upgrade name (meaning this software is out of date)
if !k.HasHandler(plan.Name) {
// Write the upgrade info to disk. The UpgradeStoreLoader uses this info to perform or skip
// store migrations.
err := k.DumpUpgradeInfoToDisk(ctx.HeaderInfo().Height, plan)
err := k.DumpUpgradeInfoToDisk(blockHeight, plan)
if err != nil {
panic(fmt.Errorf("unable to write upgrade info to filesystem: %s", err.Error()))
return fmt.Errorf("unable to write upgrade info to filesystem: %s", err.Error())
}

upgradeMsg := BuildUpgradeNeededMsg(plan)
logger.Error(upgradeMsg)
panic(upgradeMsg)

// Returning an error will end up in a panic
return fmt.Errorf(upgradeMsg)
}

// We have an upgrade handler for this upgrade name, so apply the upgrade
ctx.Logger().Info(fmt.Sprintf("applying upgrade \"%s\" at %s", plan.Name, plan.DueAt()))
ctx = ctx.WithBlockGasMeter(storetypes.NewInfiniteGasMeter())
k.ApplyUpgrade(ctx, plan)
return
logger.Info(fmt.Sprintf("applying upgrade \"%s\" at %s", plan.Name, plan.DueAt()))
sdkCtx = sdkCtx.WithBlockGasMeter(storetypes.NewInfiniteGasMeter())
return k.ApplyUpgrade(sdkCtx, plan)
}

// if we have a pending upgrade, but it is not yet time, make sure we did not
// set the handler already
if k.HasHandler(plan.Name) {
downgradeMsg := fmt.Sprintf("BINARY UPDATED BEFORE TRIGGER! UPGRADE \"%s\" - in binary but not executed on chain. Downgrade your binary", plan.Name)
ctx.Logger().Error(downgradeMsg)
panic(downgradeMsg)
logger.Error(downgradeMsg)

// Returning an error will end up in a panic
return fmt.Errorf(downgradeMsg)
}

return nil
}

// BuildUpgradeNeededMsg prints the message that notifies that an upgrade is needed.
@@ -1,6 +1,7 @@
package upgrade_test

import (
"context"
"errors"
"os"
"testing"

@@ -10,8 +11,12 @@ import (
"cosmossdk.io/core/header"
"cosmossdk.io/log"
storetypes "cosmossdk.io/store/types"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"

"github.com/cosmos/cosmos-sdk/baseapp"
addresscodec "github.com/cosmos/cosmos-sdk/codec/address"
"github.com/cosmos/cosmos-sdk/runtime"
"github.com/cosmos/cosmos-sdk/testutil"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"

@@ -20,8 +25,6 @@ import (
authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
govtypesv1beta1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"

"cosmossdk.io/x/upgrade"
"cosmossdk.io/x/upgrade/keeper"

@@ -44,6 +47,7 @@ var s TestSuite
func setupTest(t *testing.T, height int64, skip map[int64]bool) *TestSuite {
s.encCfg = moduletestutil.MakeTestEncodingConfig(upgrade.AppModuleBasic{})
key := storetypes.NewKVStoreKey(types.StoreKey)
storeService := runtime.NewKVStoreService(key)
testCtx := testutil.DefaultContextWithDB(s.T(), key, storetypes.NewTransientStoreKey("transient_test"))

s.baseApp = baseapp.NewBaseApp(

@@ -53,7 +57,7 @@ func setupTest(t *testing.T, height int64, skip map[int64]bool) *TestSuite {
s.encCfg.TxConfig.TxDecoder(),
)

s.keeper = keeper.NewKeeper(skip, key, s.encCfg.Codec, t.TempDir(), nil, authtypes.NewModuleAddress(govtypes.ModuleName).String())
s.keeper = keeper.NewKeeper(skip, storeService, s.encCfg.Codec, t.TempDir(), nil, authtypes.NewModuleAddress(govtypes.ModuleName).String())
s.keeper.SetVersionSetter(s.baseApp)

s.ctx = testCtx.Ctx.WithHeaderInfo(header.Info{Time: time.Now(), Height: height})
@@ -102,34 +106,33 @@ func VerifyDoUpgrade(t *testing.T) {
t.Log("Verify that a panic happens at the upgrade height")
newCtx := s.ctx.WithHeaderInfo(header.Info{Height: s.ctx.HeaderInfo().Height + 1, Time: time.Now()})

require.Panics(t, func() {
s.module.BeginBlock(newCtx)
})
err := s.module.BeginBlock(newCtx)
require.ErrorContains(t, err, "UPGRADE \"test\" NEEDED at height: 11: ")

t.Log("Verify that the upgrade can be successfully applied with a handler")
s.keeper.SetUpgradeHandler("test", func(ctx sdk.Context, plan types.Plan, vm module.VersionMap) (module.VersionMap, error) {
s.keeper.SetUpgradeHandler("test", func(ctx context.Context, plan types.Plan, vm module.VersionMap) (module.VersionMap, error) {
return vm, nil
})
require.NotPanics(t, func() {
s.module.BeginBlock(newCtx)
})

err = s.module.BeginBlock(newCtx)
require.NoError(t, err)

VerifyCleared(t, newCtx)
}

func VerifyDoUpgradeWithCtx(t *testing.T, newCtx sdk.Context, proposalName string) {
t.Log("Verify that a panic happens at the upgrade height")
require.Panics(t, func() {
s.module.BeginBlock(newCtx)
})

err := s.module.BeginBlock(newCtx)
require.ErrorContains(t, err, "UPGRADE \""+proposalName+"\" NEEDED at height: ")

t.Log("Verify that the upgrade can be successfully applied with a handler")
s.keeper.SetUpgradeHandler(proposalName, func(ctx sdk.Context, plan types.Plan, vm module.VersionMap) (module.VersionMap, error) {
s.keeper.SetUpgradeHandler(proposalName, func(ctx context.Context, plan types.Plan, vm module.VersionMap) (module.VersionMap, error) {
return vm, nil
})
require.NotPanics(t, func() {
s.module.BeginBlock(newCtx)
})

err = s.module.BeginBlock(newCtx)
require.NoError(t, err)

VerifyCleared(t, newCtx)
}
@@ -138,31 +141,30 @@ func TestHaltIfTooNew(t *testing.T) {
s := setupTest(t, 10, map[int64]bool{})
t.Log("Verify that we don't panic with registered plan not in database at all")
var called int
s.keeper.SetUpgradeHandler("future", func(_ sdk.Context, _ types.Plan, vm module.VersionMap) (module.VersionMap, error) {
s.keeper.SetUpgradeHandler("future", func(_ context.Context, _ types.Plan, vm module.VersionMap) (module.VersionMap, error) {
called++
return vm, nil
})

newCtx := s.ctx.WithHeaderInfo(header.Info{Height: s.ctx.HeaderInfo().Height + 1, Time: time.Now()})
require.NotPanics(t, func() {
s.module.BeginBlock(newCtx)
})

err := s.module.BeginBlock(newCtx)
require.NoError(t, err)
require.Equal(t, 0, called)

t.Log("Verify we panic if we have a registered handler ahead of time")
err := s.handler(s.ctx, &types.SoftwareUpgradeProposal{Title: "prop", Plan: types.Plan{Name: "future", Height: s.ctx.HeaderInfo().Height + 3}}) //nolint:staticcheck // we're testing deprecated code
t.Log("Verify we error if we have a registered handler ahead of time")
err = s.handler(s.ctx, &types.SoftwareUpgradeProposal{Title: "prop", Plan: types.Plan{Name: "future", Height: s.ctx.HeaderInfo().Height + 3}}) //nolint:staticcheck // we're testing deprecated code
require.NoError(t, err)
require.Panics(t, func() {
s.module.BeginBlock(newCtx)
})

err = s.module.BeginBlock(newCtx)
require.EqualError(t, err, "BINARY UPDATED BEFORE TRIGGER! UPGRADE \"future\" - in binary but not executed on chain. Downgrade your binary")
require.Equal(t, 0, called)

t.Log("Verify we no longer panic if the plan is on time")

futCtx := s.ctx.WithHeaderInfo(header.Info{Height: s.ctx.HeaderInfo().Height + 3, Time: time.Now()})
require.NotPanics(t, func() {
s.module.BeginBlock(futCtx)
})
err = s.module.BeginBlock(futCtx)
require.NoError(t, err)
require.Equal(t, 1, called)

VerifyCleared(t, futCtx)
@@ -170,9 +172,8 @@ func TestHaltIfTooNew(t *testing.T) {

func VerifyCleared(t *testing.T, newCtx sdk.Context) {
t.Log("Verify that the upgrade plan has been cleared")
plan, _ := s.keeper.GetUpgradePlan(newCtx)
expected := types.Plan{}
require.Equal(t, plan, expected)
_, err := s.keeper.GetUpgradePlan(newCtx)
require.ErrorIs(t, err, types.ErrNoUpgradePlanFound)
}

func TestCanClear(t *testing.T) {
@@ -202,9 +203,8 @@ func TestCantApplySameUpgradeTwice(t *testing.T) {
func TestNoSpuriousUpgrades(t *testing.T) {
s := setupTest(t, 10, map[int64]bool{})
t.Log("Verify that no upgrade panic is triggered in the BeginBlocker when we haven't scheduled an upgrade")
require.NotPanics(t, func() {
s.module.BeginBlock(s.ctx)
})
err := s.module.BeginBlock(s.ctx)
require.NoError(t, err)
}

func TestPlanStringer(t *testing.T) {
@@ -214,14 +214,16 @@ func TestPlanStringer(t *testing.T) {

func VerifyNotDone(t *testing.T, newCtx sdk.Context, name string) {
t.Log("Verify that upgrade was not done")
height := s.keeper.GetDoneHeight(newCtx, name)
height, err := s.keeper.GetDoneHeight(newCtx, name)
require.Zero(t, height)
require.NoError(t, err)
}

func VerifyDone(t *testing.T, newCtx sdk.Context, name string) {
t.Log("Verify that the upgrade plan has been executed")
height := s.keeper.GetDoneHeight(newCtx, name)
height, err := s.keeper.GetDoneHeight(newCtx, name)
require.NotZero(t, height)
require.NoError(t, err)
}

func VerifySet(t *testing.T, skipUpgradeHeights map[int64]bool) {
@@ -260,18 +262,16 @@ func TestSkipUpgradeSkippingAll(t *testing.T) {
VerifySet(t, map[int64]bool{skipOne: true, skipTwo: true})

newCtx = newCtx.WithHeaderInfo(header.Info{Height: skipOne})
require.NotPanics(t, func() {
s.module.BeginBlock(newCtx)
})
err = s.module.BeginBlock(newCtx)
require.NoError(t, err)

t.Log("Verify a second proposal also is being cleared")
err = s.handler(s.ctx, &types.SoftwareUpgradeProposal{Title: "prop2", Plan: types.Plan{Name: "test2", Height: skipTwo}}) //nolint:staticcheck // we're testing deprecated code
require.NoError(t, err)

newCtx = newCtx.WithHeaderInfo(header.Info{Height: skipTwo})
require.NotPanics(t, func() {
s.module.BeginBlock(newCtx)
})
err = s.module.BeginBlock(newCtx)
require.NoError(t, err)

// To ensure verification is being done only after both upgrades are cleared
t.Log("Verify if both proposals are cleared")
@@ -297,9 +297,8 @@ func TestUpgradeSkippingOne(t *testing.T) {

// Setting block height of proposal test
newCtx = newCtx.WithHeaderInfo(header.Info{Height: skipOne})
require.NotPanics(t, func() {
s.module.BeginBlock(newCtx)
})
err = s.module.BeginBlock(newCtx)
require.NoError(t, err)

t.Log("Verify the second proposal is not skipped")
err = s.handler(s.ctx, &types.SoftwareUpgradeProposal{Title: "prop2", Plan: types.Plan{Name: "test2", Height: skipTwo}}) //nolint:staticcheck // we're testing deprecated code
@@ -331,18 +330,16 @@ func TestUpgradeSkippingOnlyTwo(t *testing.T) {

// Setting block height of proposal test
newCtx = newCtx.WithHeaderInfo(header.Info{Height: skipOne})
require.NotPanics(t, func() {
s.module.BeginBlock(newCtx)
})
err = s.module.BeginBlock(newCtx)
require.NoError(t, err)

// A new proposal with height in skipUpgradeHeights
err = s.handler(s.ctx, &types.SoftwareUpgradeProposal{Title: "prop2", Plan: types.Plan{Name: "test2", Height: skipTwo}}) //nolint:staticcheck // we're testing deprecated code
require.NoError(t, err)
// Setting block height of proposal test2
newCtx = newCtx.WithHeaderInfo(header.Info{Height: skipTwo})
require.NotPanics(t, func() {
s.module.BeginBlock(newCtx)
})
err = s.module.BeginBlock(newCtx)
require.NoError(t, err)

t.Log("Verify a new proposal is not skipped")
err = s.handler(s.ctx, &types.SoftwareUpgradeProposal{Title: "prop3", Plan: types.Plan{Name: "test3", Height: skipThree}}) //nolint:staticcheck // we're testing deprecated code
@@ -362,9 +359,9 @@ func TestUpgradeWithoutSkip(t *testing.T) {
err := s.handler(s.ctx, &types.SoftwareUpgradeProposal{Title: "prop", Plan: types.Plan{Name: "test", Height: s.ctx.HeaderInfo().Height + 1}}) //nolint:staticcheck // we're testing deprecated code
require.NoError(t, err)
t.Log("Verify if upgrade happens without skip upgrade")
require.Panics(t, func() {
s.module.BeginBlock(newCtx)
})

err = s.module.BeginBlock(newCtx)
require.ErrorContains(t, err, "UPGRADE \"test\" NEEDED at height:")

VerifyDoUpgrade(t)
VerifyDone(t, s.ctx, "test")
@@ -409,7 +406,7 @@ func TestBinaryVersion(t *testing.T) {
testCases := []struct {
name string
preRun func() sdk.Context
expectPanic bool
expectError bool
}{
{
"test not panic: no scheduled upgrade or applied upgrade is present",

@@ -421,7 +418,7 @@ func TestBinaryVersion(t *testing.T) {
{
"test not panic: upgrade handler is present for last applied upgrade",
func() sdk.Context {
s.keeper.SetUpgradeHandler("test0", func(_ sdk.Context, _ types.Plan, vm module.VersionMap) (module.VersionMap, error) {
s.keeper.SetUpgradeHandler("test0", func(_ context.Context, _ types.Plan, vm module.VersionMap) (module.VersionMap, error) {
return vm, nil
})

@@ -453,14 +450,11 @@ func TestBinaryVersion(t *testing.T) {

for _, tc := range testCases {
ctx := tc.preRun()
if tc.expectPanic {
require.Panics(t, func() {
s.module.BeginBlock(ctx)
})
err := s.module.BeginBlock(ctx)
if tc.expectError {
require.Error(t, err)
} else {
require.NotPanics(t, func() {
s.module.BeginBlock(ctx)
})
require.NoError(t, err)
}
}
}
@@ -470,12 +464,13 @@ func TestDowngradeVerification(t *testing.T) {
// for the two keepers.
encCfg := moduletestutil.MakeTestEncodingConfig(upgrade.AppModuleBasic{})
key := storetypes.NewKVStoreKey(types.StoreKey)
storeService := runtime.NewKVStoreService(key)
testCtx := testutil.DefaultContextWithDB(s.T(), key, storetypes.NewTransientStoreKey("transient_test"))
ctx := testCtx.Ctx.WithHeaderInfo(header.Info{Time: time.Now(), Height: 10})

skip := map[int64]bool{}
tempDir := t.TempDir()
k := keeper.NewKeeper(skip, key, encCfg.Codec, tempDir, nil, authtypes.NewModuleAddress(govtypes.ModuleName).String())
k := keeper.NewKeeper(skip, storeService, encCfg.Codec, tempDir, nil, authtypes.NewModuleAddress(govtypes.ModuleName).String())
m := upgrade.NewAppModule(k, addresscodec.NewBech32Codec("cosmos"))
handler := upgrade.NewSoftwareUpgradeProposalHandler(k)

@@ -486,23 +481,21 @@ func TestDowngradeVerification(t *testing.T) {
ctx = ctx.WithHeaderInfo(header.Info{Height: ctx.HeaderInfo().Height + 1})

// set the handler.
k.SetUpgradeHandler(planName, func(ctx sdk.Context, plan types.Plan, vm module.VersionMap) (module.VersionMap, error) {
k.SetUpgradeHandler(planName, func(_ context.Context, _ types.Plan, vm module.VersionMap) (module.VersionMap, error) {
return vm, nil
})

// successful upgrade.
require.NotPanics(t, func() {
m.BeginBlock(ctx)
})
require.NoError(t, m.BeginBlock(ctx))
ctx = ctx.WithHeaderInfo(header.Info{Height: ctx.HeaderInfo().Height + 1})

testCases := map[string]struct {
preRun func(*keeper.Keeper, sdk.Context, string)
expectPanic bool
expectError bool
}{
"valid binary": {
preRun: func(k *keeper.Keeper, ctx sdk.Context, name string) {
k.SetUpgradeHandler(planName, func(ctx sdk.Context, plan types.Plan, vm module.VersionMap) (module.VersionMap, error) {
k.SetUpgradeHandler(planName, func(ctx context.Context, plan types.Plan, vm module.VersionMap) (module.VersionMap, error) {
return vm, nil
})
},

@@ -513,10 +506,10 @@ func TestDowngradeVerification(t *testing.T) {
err := handler(ctx, &types.SoftwareUpgradeProposal{Title: "test", Plan: types.Plan{Name: "another" + planName, Height: ctx.HeaderInfo().Height + 1}}) //nolint:staticcheck // we're testing deprecated code
require.NoError(t, err, name)
},
expectPanic: true,
expectError: true,
},
"downgrade without any active plan": {
expectPanic: true,
expectError: true,
},
}

@@ -524,29 +517,27 @@ func TestDowngradeVerification(t *testing.T) {
ctx, _ := ctx.CacheContext()

// downgrade. now keeper does not have the handler.
k := keeper.NewKeeper(skip, key, encCfg.Codec, tempDir, nil, authtypes.NewModuleAddress(govtypes.ModuleName).String())
k := keeper.NewKeeper(skip, storeService, encCfg.Codec, tempDir, nil, authtypes.NewModuleAddress(govtypes.ModuleName).String())
m := upgrade.NewAppModule(k, addresscodec.NewBech32Codec("cosmos"))

// assertions
lastAppliedPlan, _ := k.GetLastCompletedUpgrade(ctx)
lastAppliedPlan, _, err := k.GetLastCompletedUpgrade(ctx)
require.NoError(t, err)
require.Equal(t, planName, lastAppliedPlan)
require.False(t, k.HasHandler(planName))
require.False(t, k.DowngradeVerified())
_, found := k.GetUpgradePlan(ctx)
require.False(t, found)
_, err = k.GetUpgradePlan(ctx)
require.ErrorIs(t, err, types.ErrNoUpgradePlanFound)

if tc.preRun != nil {
tc.preRun(k, ctx, name)
}

if tc.expectPanic {
require.Panics(t, func() {
m.BeginBlock(ctx)
}, name)
err = m.BeginBlock(ctx)
if tc.expectError {
require.Error(t, err, name)
} else {
require.NotPanics(t, func() {
m.BeginBlock(ctx)
}, name)
require.NoError(t, err, name)
}
}
}
@@ -38,6 +38,5 @@ func handleSoftwareUpgradeProposal(ctx sdk.Context, k *keeper.Keeper, p *types.S

//nolint:staticcheck // we are intentionally using a deprecated proposal here.
func handleCancelSoftwareUpgradeProposal(ctx sdk.Context, k *keeper.Keeper, _ *types.CancelSoftwareUpgradeProposal) error {
k.ClearUpgradePlan(ctx)
return nil
return k.ClearUpgradePlan(ctx)
}
@@ -2,12 +2,12 @@ package keeper

import (
"context"
"errors"

errorsmod "cosmossdk.io/errors"
"cosmossdk.io/x/upgrade/types"

sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/errors"
)

var _ types.QueryServer = Keeper{}

@@ -16,9 +16,13 @@ var _ types.QueryServer = Keeper{}
func (k Keeper) CurrentPlan(c context.Context, req *types.QueryCurrentPlanRequest) (*types.QueryCurrentPlanResponse, error) {
ctx := sdk.UnwrapSDKContext(c)

plan, found := k.GetUpgradePlan(ctx)
if !found {
return &types.QueryCurrentPlanResponse{}, nil
plan, err := k.GetUpgradePlan(ctx)
if err != nil {
if errors.Is(err, types.ErrNoUpgradePlanFound) {
return &types.QueryCurrentPlanResponse{}, nil
}

return nil, err
}

return &types.QueryCurrentPlanResponse{Plan: &plan}, nil

@@ -28,21 +32,22 @@ func (k Keeper) CurrentPlan(c context.Context, req *types.QueryCurrentPlanReques
func (k Keeper) AppliedPlan(c context.Context, req *types.QueryAppliedPlanRequest) (*types.QueryAppliedPlanResponse, error) {
ctx := sdk.UnwrapSDKContext(c)

applied := k.GetDoneHeight(ctx, req.Name)
if applied == 0 {
return &types.QueryAppliedPlanResponse{}, nil
}
applied, err := k.GetDoneHeight(ctx, req.Name)

return &types.QueryAppliedPlanResponse{Height: applied}, nil
return &types.QueryAppliedPlanResponse{Height: applied}, err
}

// UpgradedConsensusState implements the Query/UpgradedConsensusState gRPC method
func (k Keeper) UpgradedConsensusState(c context.Context, req *types.QueryUpgradedConsensusStateRequest) (*types.QueryUpgradedConsensusStateResponse, error) { //nolint:staticcheck // we're using a deprecated call for compatibility
ctx := sdk.UnwrapSDKContext(c)

consState, found := k.GetUpgradedConsensusState(ctx, req.LastHeight)
if !found {
return &types.QueryUpgradedConsensusStateResponse{}, nil //nolint:staticcheck // we're using a deprecated call for compatibility
consState, err := k.GetUpgradedConsensusState(ctx, req.LastHeight)
if err != nil {
if errors.Is(err, types.ErrNoUpgradedConsensusStateFound) {
return &types.QueryUpgradedConsensusStateResponse{}, nil //nolint:staticcheck // we're using a deprecated call for compatibility
}

return nil, err
}

return &types.QueryUpgradedConsensusStateResponse{ //nolint:staticcheck // we're using a deprecated call for compatibility

@@ -56,17 +61,23 @@ func (k Keeper) ModuleVersions(c context.Context, req *types.QueryModuleVersions

// check if a specific module was requested
if len(req.ModuleName) > 0 {
if version, ok := k.getModuleVersion(ctx, req.ModuleName); ok {
// return the requested module
res := []*types.ModuleVersion{{Name: req.ModuleName, Version: version}}
return &types.QueryModuleVersionsResponse{ModuleVersions: res}, nil
version, err := k.getModuleVersion(ctx, req.ModuleName)
if err != nil {
// module requested, but not found or error happened
return nil, errorsmod.Wrapf(err, "x/upgrade: QueryModuleVersions module %s not found", req.ModuleName)
}
// module requested, but not found
return nil, errorsmod.Wrapf(errors.ErrNotFound, "x/upgrade: QueryModuleVersions module %s not found", req.ModuleName)

// return the requested module
res := []*types.ModuleVersion{{Name: req.ModuleName, Version: version}}
return &types.QueryModuleVersionsResponse{ModuleVersions: res}, nil
}

// if no module requested return all module versions from state
mv := k.GetModuleVersions(ctx)
mv, err := k.GetModuleVersions(ctx)
if err != nil {
return nil, err
}

return &types.QueryModuleVersionsResponse{
ModuleVersions: mv,
}, nil
@@ -14,6 +14,7 @@ import (
"cosmossdk.io/x/upgrade/types"

"github.com/cosmos/cosmos-sdk/baseapp"
"github.com/cosmos/cosmos-sdk/runtime"
"github.com/cosmos/cosmos-sdk/testutil"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/module"

@@ -34,12 +35,13 @@ type UpgradeTestSuite struct {
func (suite *UpgradeTestSuite) SetupTest() {
suite.encCfg = moduletestutil.MakeTestEncodingConfig(upgrade.AppModuleBasic{})
key := storetypes.NewKVStoreKey(types.StoreKey)
storeService := runtime.NewKVStoreService(key)
testCtx := testutil.DefaultContextWithDB(suite.T(), key, storetypes.NewTransientStoreKey("transient_test"))
suite.ctx = testCtx.Ctx

skipUpgradeHeights := make(map[int64]bool)

suite.upgradeKeeper = keeper.NewKeeper(skipUpgradeHeights, key, suite.encCfg.Codec, "", nil, authtypes.NewModuleAddress(govtypes.ModuleName).String())
suite.upgradeKeeper = keeper.NewKeeper(skipUpgradeHeights, storeService, suite.encCfg.Codec, "", nil, authtypes.NewModuleAddress(govtypes.ModuleName).String())
suite.upgradeKeeper.SetModuleVersionMap(suite.ctx, module.VersionMap{
"bank": 0,
})

@@ -128,7 +130,7 @@ func (suite *UpgradeTestSuite) TestAppliedCurrentPlan() {
suite.upgradeKeeper.ScheduleUpgrade(suite.ctx, plan)

suite.ctx = suite.ctx.WithHeaderInfo(header.Info{Height: expHeight})
suite.upgradeKeeper.SetUpgradeHandler(planName, func(ctx sdk.Context, plan types.Plan, vm module.VersionMap) (module.VersionMap, error) {
suite.upgradeKeeper.SetUpgradeHandler(planName, func(ctx context.Context, plan types.Plan, vm module.VersionMap) (module.VersionMap, error) {
return vm, nil
})
suite.upgradeKeeper.ApplyUpgrade(suite.ctx, plan)

@@ -185,8 +187,11 @@ func (suite *UpgradeTestSuite) TestModuleVersions() {
},
}

vm := suite.upgradeKeeper.GetModuleVersionMap(suite.ctx)
mv := suite.upgradeKeeper.GetModuleVersions(suite.ctx)
vm, err := suite.upgradeKeeper.GetModuleVersionMap(suite.ctx)
suite.Require().NoError(err)

mv, err := suite.upgradeKeeper.GetModuleVersions(suite.ctx)
suite.Require().NoError(err)

for _, tc := range testCases {
suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
@ -1,8 +1,10 @@
|
||||
package keeper
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
@ -10,6 +12,7 @@ import (
|
||||
"sort"
|
||||
"strconv"
|
||||
|
||||
corestore "cosmossdk.io/core/store"
|
||||
errorsmod "cosmossdk.io/errors"
|
||||
"cosmossdk.io/log"
|
||||
"cosmossdk.io/store/prefix"
|
||||
@ -18,7 +21,9 @@ import (
|
||||
"cosmossdk.io/x/upgrade/types"
|
||||
|
||||
"github.com/armon/go-metrics"
|
||||
|
||||
"github.com/cosmos/cosmos-sdk/codec"
|
||||
"github.com/cosmos/cosmos-sdk/runtime"
|
||||
"github.com/cosmos/cosmos-sdk/telemetry"
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
|
||||
@ -33,7 +38,7 @@ const UpgradeInfoFileName string = "upgrade-info.json"
|
||||
type Keeper struct {
|
||||
homePath string // root directory of app config
|
||||
skipUpgradeHeights map[int64]bool // map of heights to skip for an upgrade
|
||||
storeKey storetypes.StoreKey // key to access x/upgrade store
|
||||
storeService corestore.KVStoreService // key to access x/upgrade store
|
||||
cdc codec.BinaryCodec // App-wide binary codec
|
||||
upgradeHandlers map[string]types.UpgradeHandler // map of plan name to upgrade handler
|
||||
versionSetter xp.ProtocolVersionSetter // implements setting the protocol version field on BaseApp
|
||||
@ -48,11 +53,11 @@ type Keeper struct {
|
||||
// cdc - the app-wide binary codec
|
||||
// homePath - root directory of the application's config
|
||||
// vs - the interface implemented by baseapp which allows setting baseapp's protocol version field
|
||||
func NewKeeper(skipUpgradeHeights map[int64]bool, storeKey storetypes.StoreKey, cdc codec.BinaryCodec, homePath string, vs xp.ProtocolVersionSetter, authority string) *Keeper {
|
||||
func NewKeeper(skipUpgradeHeights map[int64]bool, storeService corestore.KVStoreService, cdc codec.BinaryCodec, homePath string, vs xp.ProtocolVersionSetter, authority string) *Keeper {
|
||||
k := &Keeper{
|
||||
homePath: homePath,
|
||||
skipUpgradeHeights: skipUpgradeHeights,
|
||||
storeKey: storeKey,
|
||||
storeService: storeService,
|
||||
cdc: cdc,
|
||||
upgradeHandlers: map[string]types.UpgradeHandler{},
|
||||
versionSetter: vs,
|
||||
@ -96,31 +101,38 @@ func (k Keeper) SetUpgradeHandler(name string, upgradeHandler types.UpgradeHandl
|
||||
}
|
||||
|
||||
// setProtocolVersion sets the protocol version to state
|
||||
func (k Keeper) setProtocolVersion(ctx sdk.Context, v uint64) {
|
||||
store := ctx.KVStore(k.storeKey)
|
||||
func (k Keeper) setProtocolVersion(ctx context.Context, v uint64) error {
|
||||
store := k.storeService.OpenKVStore(ctx)
|
||||
versionBytes := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(versionBytes, v)
|
||||
store.Set([]byte{types.ProtocolVersionByte}, versionBytes)
|
||||
return store.Set([]byte{types.ProtocolVersionByte}, versionBytes)
|
||||
}
|
||||
|
||||
// getProtocolVersion gets the protocol version from state
|
||||
func (k Keeper) getProtocolVersion(ctx sdk.Context) uint64 {
|
||||
store := ctx.KVStore(k.storeKey)
|
||||
ok := store.Has([]byte{types.ProtocolVersionByte})
|
||||
if ok {
|
||||
pvBytes := store.Get([]byte{types.ProtocolVersionByte})
|
||||
protocolVersion := binary.BigEndian.Uint64(pvBytes)
|
||||
func (k Keeper) getProtocolVersion(ctx context.Context) (uint64, error) {
|
||||
store := k.storeService.OpenKVStore(ctx)
|
||||
ok, err := store.Has([]byte{types.ProtocolVersionByte})
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return protocolVersion
|
||||
if ok {
|
||||
pvBytes, err := store.Get([]byte{types.ProtocolVersionByte})
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
protocolVersion := binary.BigEndian.Uint64(pvBytes)
|
||||
return protocolVersion, nil
|
||||
}
|
||||
// default value
|
||||
return 0
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// SetModuleVersionMap saves a given version map to state
|
||||
func (k Keeper) SetModuleVersionMap(ctx sdk.Context, vm module.VersionMap) {
|
||||
func (k Keeper) SetModuleVersionMap(ctx context.Context, vm module.VersionMap) error {
|
||||
if len(vm) > 0 {
|
||||
store := ctx.KVStore(k.storeKey)
|
||||
store := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx))
|
||||
versionStore := prefix.NewStore(store, []byte{types.VersionMapByte})
|
||||
// Even though the underlying store (cachekv) store is sorted, we still
|
||||
// prefer a deterministic iteration order of the map, to avoid undesired
|
||||
@ -140,16 +152,22 @@ func (k Keeper) SetModuleVersionMap(ctx sdk.Context, vm module.VersionMap) {
|
||||
versionStore.Set(nameBytes, verBytes)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetModuleVersionMap returns a map of key module name and value module consensus version
|
||||
// as defined in ADR-041.
|
||||
func (k Keeper) GetModuleVersionMap(ctx sdk.Context) module.VersionMap {
|
||||
store := ctx.KVStore(k.storeKey)
|
||||
it := storetypes.KVStorePrefixIterator(store, []byte{types.VersionMapByte})
|
||||
func (k Keeper) GetModuleVersionMap(ctx context.Context) (module.VersionMap, error) {
|
||||
store := k.storeService.OpenKVStore(ctx)
|
||||
prefix := []byte{types.VersionMapByte}
|
||||
it, err := store.Iterator(prefix, storetypes.PrefixEndBytes(prefix))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer it.Close()
|
||||
|
||||
vm := make(module.VersionMap)
|
||||
defer it.Close()
|
||||
for ; it.Valid(); it.Next() {
|
||||
moduleBytes := it.Key()
|
||||
// first byte is prefix key, so we remove it here
|
||||
@ -158,13 +176,17 @@ func (k Keeper) GetModuleVersionMap(ctx sdk.Context) module.VersionMap {
|
||||
vm[name] = moduleVersion
|
||||
}
|
||||
|
||||
return vm
|
||||
return vm, nil
|
||||
}
|
||||
|
||||
// GetModuleVersions gets a slice of module consensus versions
|
||||
func (k Keeper) GetModuleVersions(ctx sdk.Context) []*types.ModuleVersion {
|
||||
store := ctx.KVStore(k.storeKey)
|
||||
it := storetypes.KVStorePrefixIterator(store, []byte{types.VersionMapByte})
|
||||
func (k Keeper) GetModuleVersions(ctx context.Context) ([]*types.ModuleVersion, error) {
|
||||
store := k.storeService.OpenKVStore(ctx)
|
||||
prefix := []byte{types.VersionMapByte}
|
||||
it, err := store.Iterator(prefix, storetypes.PrefixEndBytes(prefix))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer it.Close()
|
||||
|
||||
mv := make([]*types.ModuleVersion, 0)
|
||||
@ -177,55 +199,82 @@ func (k Keeper) GetModuleVersions(ctx sdk.Context) []*types.ModuleVersion {
|
||||
Version: moduleVersion,
|
||||
})
|
||||
}
|
||||
return mv
|
||||
|
||||
return mv, nil
|
||||
}
|
||||
|
||||
// getModuleVersion gets the version for a given module, and returns true if it exists, false otherwise
|
||||
func (k Keeper) getModuleVersion(ctx sdk.Context, name string) (uint64, bool) {
|
||||
store := ctx.KVStore(k.storeKey)
|
||||
it := storetypes.KVStorePrefixIterator(store, []byte{types.VersionMapByte})
|
||||
// getModuleVersion gets the version for a given module. If it doesn't exist it returns ErrNoModuleVersionFound, other
|
||||
// errors may be returned if there is an error reading from the store.
|
||||
func (k Keeper) getModuleVersion(ctx context.Context, name string) (uint64, error) {
|
||||
store := k.storeService.OpenKVStore(ctx)
|
||||
prefix := []byte{types.VersionMapByte}
|
||||
it, err := store.Iterator(prefix, storetypes.PrefixEndBytes(prefix))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer it.Close()
|
||||
|
||||
for ; it.Valid(); it.Next() {
|
||||
moduleName := string(it.Key()[1:])
|
||||
if moduleName == name {
|
||||
version := binary.BigEndian.Uint64(it.Value())
|
||||
return version, true
|
||||
return version, nil
|
||||
}
|
||||
}
|
||||
return 0, false
|
||||
|
||||
return 0, types.ErrNoModuleVersionFound
|
||||
}
|
||||
|
||||
// ScheduleUpgrade schedules an upgrade based on the specified plan.
|
||||
// If there is another Plan already scheduled, it will cancel and overwrite it.
|
||||
// ScheduleUpgrade will also write the upgraded IBC ClientState to the upgraded client
|
||||
// path if it is specified in the plan.
|
||||
func (k Keeper) ScheduleUpgrade(ctx sdk.Context, plan types.Plan) error {
|
||||
func (k Keeper) ScheduleUpgrade(ctx context.Context, plan types.Plan) error {
|
||||
if err := plan.ValidateBasic(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// NOTE: allow for the possibility of chains to schedule upgrades in begin block of the same block
|
||||
// as a strategy for emergency hard fork recoveries
|
||||
fmt.Println(plan.Height, ctx.HeaderInfo().Height)
|
||||
if plan.Height < ctx.HeaderInfo().Height {
|
||||
sdkCtx := sdk.UnwrapSDKContext(ctx)
|
||||
if plan.Height < sdkCtx.HeaderInfo().Height {
|
||||
return errorsmod.Wrap(sdkerrors.ErrInvalidRequest, "upgrade cannot be scheduled in the past")
|
||||
}
|
||||
|
||||
if k.GetDoneHeight(ctx, plan.Name) != 0 {
|
||||
doneHeight, err := k.GetDoneHeight(ctx, plan.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if doneHeight != 0 {
|
||||
return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "upgrade with name %s has already been completed", plan.Name)
|
||||
}
|
||||
|
||||
store := ctx.KVStore(k.storeKey)
|
||||
store := k.storeService.OpenKVStore(ctx)
|
||||
|
||||
// clear any old IBC state stored by previous plan
|
||||
oldPlan, found := k.GetUpgradePlan(ctx)
|
||||
if found {
|
||||
k.ClearIBCState(ctx, oldPlan.Height)
|
||||
oldPlan, err := k.GetUpgradePlan(ctx)
|
||||
// if there's an error but it's not ErrNoUpgradePlanFound, return error
|
||||
if err != nil && !errors.Is(err, types.ErrNoUpgradePlanFound) {
|
||||
return err
|
||||
}
|
||||
|
||||
bz := k.cdc.MustMarshal(&plan)
|
||||
store.Set(types.PlanKey(), bz)
|
||||
if err == nil {
|
||||
err = k.ClearIBCState(ctx, oldPlan.Height)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
bz, err := k.cdc.Marshal(&plan)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = store.Set(types.PlanKey(), bz)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
telemetry.SetGaugeWithLabels([]string{"server", "info"}, 1, []metrics.Label{telemetry.NewLabel("upgrade_height", strconv.FormatInt(plan.Height, 10))})
|
||||
|
||||
@ -233,52 +282,66 @@ func (k Keeper) ScheduleUpgrade(ctx sdk.Context, plan types.Plan) error {
|
||||
}
|
||||
|
||||
// SetUpgradedClient sets the expected upgraded client for the next version of this chain at the last height the current chain will commit.
|
||||
func (k Keeper) SetUpgradedClient(ctx sdk.Context, planHeight int64, bz []byte) error {
|
||||
store := ctx.KVStore(k.storeKey)
|
||||
store.Set(types.UpgradedClientKey(planHeight), bz)
|
||||
return nil
|
||||
func (k Keeper) SetUpgradedClient(ctx context.Context, planHeight int64, bz []byte) error {
|
||||
store := k.storeService.OpenKVStore(ctx)
|
||||
return store.Set(types.UpgradedClientKey(planHeight), bz)
|
||||
}
|
||||
|
||||
// GetUpgradedClient gets the expected upgraded client for the next version of this chain
|
||||
func (k Keeper) GetUpgradedClient(ctx sdk.Context, height int64) ([]byte, bool) {
|
||||
store := ctx.KVStore(k.storeKey)
|
||||
bz := store.Get(types.UpgradedClientKey(height))
|
||||
if len(bz) == 0 {
|
||||
return nil, false
|
||||
// GetUpgradedClient gets the expected upgraded client for the next version of this chain. If not found it returns
|
||||
// ErrNoUpgradedClientFound, but other errors may be returned if there is an error reading from the store.
|
||||
func (k Keeper) GetUpgradedClient(ctx context.Context, height int64) ([]byte, error) {
|
||||
store := k.storeService.OpenKVStore(ctx)
|
||||
bz, err := store.Get(types.UpgradedClientKey(height))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return bz, true
|
||||
if bz == nil {
|
||||
return nil, types.ErrNoUpgradedClientFound
|
||||
}
|
||||
|
||||
return bz, nil
|
||||
}
|
||||
|
||||
// SetUpgradedConsensusState sets the expected upgraded consensus state for the next version of this chain
|
||||
// using the last height committed on this chain.
|
||||
func (k Keeper) SetUpgradedConsensusState(ctx sdk.Context, planHeight int64, bz []byte) error {
|
||||
store := ctx.KVStore(k.storeKey)
|
||||
store.Set(types.UpgradedConsStateKey(planHeight), bz)
|
||||
return nil
|
||||
func (k Keeper) SetUpgradedConsensusState(ctx context.Context, planHeight int64, bz []byte) error {
|
||||
store := k.storeService.OpenKVStore(ctx)
|
||||
return store.Set(types.UpgradedConsStateKey(planHeight), bz)
|
||||
}
|
||||
|
||||
// GetUpgradedConsensusState gets the expected upgraded consensus state for the next version of this chain
|
||||
func (k Keeper) GetUpgradedConsensusState(ctx sdk.Context, lastHeight int64) ([]byte, bool) {
|
||||
store := ctx.KVStore(k.storeKey)
|
||||
bz := store.Get(types.UpgradedConsStateKey(lastHeight))
|
||||
if len(bz) == 0 {
|
||||
return nil, false
|
||||
// GetUpgradedConsensusState gets the expected upgraded consensus state for the next version of this chain. If not found
|
||||
// it returns ErrNoUpgradedConsensusStateFound, but other errors may be returned if there is an error reading from the store.
|
||||
func (k Keeper) GetUpgradedConsensusState(ctx context.Context, lastHeight int64) ([]byte, error) {
|
||||
store := k.storeService.OpenKVStore(ctx)
|
||||
bz, err := store.Get(types.UpgradedConsStateKey(lastHeight))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return bz, true
|
||||
if bz == nil {
|
||||
return nil, types.ErrNoUpgradedConsensusStateFound
|
||||
}
|
||||
|
||||
return bz, nil
|
||||
}
|
||||
|
||||
// GetLastCompletedUpgrade returns the last applied upgrade name and height.
|
||||
func (k Keeper) GetLastCompletedUpgrade(ctx sdk.Context) (string, int64) {
|
||||
iter := storetypes.KVStoreReversePrefixIterator(ctx.KVStore(k.storeKey), []byte{types.DoneByte})
|
||||
defer iter.Close()
|
||||
func (k Keeper) GetLastCompletedUpgrade(ctx context.Context) (string, int64, error) {
|
||||
store := k.storeService.OpenKVStore(ctx)
|
||||
prefix := []byte{types.DoneByte}
|
||||
it, err := store.ReverseIterator(prefix, storetypes.PrefixEndBytes(prefix))
|
||||
if err != nil {
|
||||
return "", 0, err
|
||||
}
|
||||
defer it.Close()
|
||||
|
||||
if iter.Valid() {
|
||||
return parseDoneKey(iter.Key())
|
||||
if it.Valid() {
|
||||
name, height := parseDoneKey(it.Key())
|
||||
return name, height, nil
|
||||
}
|
||||
|
||||
return "", 0
|
||||
return "", 0, nil
|
||||
}
|
||||
|
||||
// parseDoneKey - split upgrade name and height from the done key
|
||||
@ -299,61 +362,90 @@ func encodeDoneKey(name string, height int64) []byte {
|
||||
}
|
||||
|
||||
// GetDoneHeight returns the height at which the given upgrade was executed
|
||||
func (k Keeper) GetDoneHeight(ctx sdk.Context, name string) int64 {
|
||||
iter := storetypes.KVStorePrefixIterator(ctx.KVStore(k.storeKey), []byte{types.DoneByte})
|
||||
defer iter.Close()
|
||||
func (k Keeper) GetDoneHeight(ctx context.Context, name string) (int64, error) {
|
||||
store := k.storeService.OpenKVStore(ctx)
|
||||
prefix := []byte{types.DoneByte}
|
||||
it, err := store.Iterator(prefix, storetypes.PrefixEndBytes(prefix))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer it.Close()
|
||||
|
||||
for ; iter.Valid(); iter.Next() {
|
||||
upgradeName, height := parseDoneKey(iter.Key())
|
||||
for ; it.Valid(); it.Next() {
|
||||
upgradeName, height := parseDoneKey(it.Key())
|
||||
if upgradeName == name {
|
||||
return height
|
||||
return height, nil
|
||||
}
|
||||
}
|
||||
return 0
|
||||
|
||||
return 0, nil
|
||||
}

// ClearIBCState clears any planned IBC state
func (k Keeper) ClearIBCState(ctx sdk.Context, lastHeight int64) {
func (k Keeper) ClearIBCState(ctx context.Context, lastHeight int64) error {
	// delete IBC client and consensus state from store if this is IBC plan
	store := ctx.KVStore(k.storeKey)
	store.Delete(types.UpgradedClientKey(lastHeight))
	store.Delete(types.UpgradedConsStateKey(lastHeight))
	store := k.storeService.OpenKVStore(ctx)
	err := store.Delete(types.UpgradedClientKey(lastHeight))
	if err != nil {
		return err
	}

	return store.Delete(types.UpgradedConsStateKey(lastHeight))
}

// ClearUpgradePlan clears any scheduled upgrade and associated IBC states.
func (k Keeper) ClearUpgradePlan(ctx sdk.Context) {
	// clear IBC states everytime upgrade plan is removed
	oldPlan, found := k.GetUpgradePlan(ctx)
	if found {
		k.ClearIBCState(ctx, oldPlan.Height)
func (k Keeper) ClearUpgradePlan(ctx context.Context) error {
	// clear IBC states every time upgrade plan is removed
	oldPlan, err := k.GetUpgradePlan(ctx)
	if err != nil {
		// if there's no upgrade plan, return nil to match previous behavior
		if errors.Is(err, types.ErrNoUpgradePlanFound) {
			return nil
		}
		return err
	}

	store := ctx.KVStore(k.storeKey)
	store.Delete(types.PlanKey())
	err = k.ClearIBCState(ctx, oldPlan.Height)
	if err != nil {
		return err
	}

	store := k.storeService.OpenKVStore(ctx)
	return store.Delete(types.PlanKey())
}

// Logger returns a module-specific logger.
func (k Keeper) Logger(ctx sdk.Context) log.Logger {
	return ctx.Logger().With("module", "x/"+types.ModuleName)
func (k Keeper) Logger(ctx context.Context) log.Logger {
	sdkCtx := sdk.UnwrapSDKContext(ctx)
	return sdkCtx.Logger().With("module", "x/"+types.ModuleName)
}

// GetUpgradePlan returns the currently scheduled Plan if any, setting havePlan to true if there is a scheduled
// upgrade or false if there is none
func (k Keeper) GetUpgradePlan(ctx sdk.Context) (plan types.Plan, havePlan bool) {
	store := ctx.KVStore(k.storeKey)
	bz := store.Get(types.PlanKey())
	if bz == nil {
		return plan, false
// GetUpgradePlan returns the currently scheduled Plan if any. If not found it returns
// ErrNoUpgradePlanFound, but other errors may be returned if there is an error reading from the store.
func (k Keeper) GetUpgradePlan(ctx context.Context) (plan types.Plan, err error) {
	store := k.storeService.OpenKVStore(ctx)
	bz, err := store.Get(types.PlanKey())
	if err != nil {
		return plan, err
	}

	k.cdc.MustUnmarshal(bz, &plan)
	return plan, true
	if bz == nil {
		return plan, types.ErrNoUpgradePlanFound
	}

	err = k.cdc.Unmarshal(bz, &plan)
	if err != nil {
		return plan, err
	}

	return plan, err
}
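
Callers that previously branched on the found bool now branch on the sentinel error, via the standard library errors package; a hedged migration sketch:

	plan, err := k.GetUpgradePlan(ctx)
	switch {
	case errors.Is(err, types.ErrNoUpgradePlanFound):
		// nothing scheduled; this replaces the old found == false branch
	case err != nil:
		return err // unexpected store or decoding error
	default:
		k.Logger(ctx).Info("upgrade scheduled", "name", plan.Name)
	}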

// setDone marks this upgrade name as being done so the name can't be reused accidentally
func (k Keeper) setDone(ctx sdk.Context, name string) {
	store := ctx.KVStore(k.storeKey)
	store.Set(encodeDoneKey(name, ctx.HeaderInfo().Height), []byte{1})
func (k Keeper) setDone(ctx context.Context, name string) error {
	store := k.storeService.OpenKVStore(ctx)
	sdkCtx := sdk.UnwrapSDKContext(ctx)
	return store.Set(encodeDoneKey(name, sdkCtx.HeaderInfo().Height), []byte{1})
}

// HasHandler returns true iff there is a handler registered for this name
@ -363,22 +455,38 @@ func (k Keeper) HasHandler(name string) bool {
}

// ApplyUpgrade will execute the handler associated with the Plan and mark the plan as done.
func (k Keeper) ApplyUpgrade(ctx sdk.Context, plan types.Plan) {
func (k Keeper) ApplyUpgrade(ctx context.Context, plan types.Plan) error {
	handler := k.upgradeHandlers[plan.Name]
	if handler == nil {
		panic("ApplyUpgrade should never be called without first checking HasHandler")
		return fmt.Errorf("ApplyUpgrade should never be called without first checking HasHandler")
	}

	updatedVM, err := handler(ctx, plan, k.GetModuleVersionMap(ctx))
	vm, err := k.GetModuleVersionMap(ctx)
	if err != nil {
		panic(err)
		return err
	}

	k.SetModuleVersionMap(ctx, updatedVM)
	updatedVM, err := handler(ctx, plan, vm)
	if err != nil {
		return err
	}

	err = k.SetModuleVersionMap(ctx, updatedVM)
	if err != nil {
		return err
	}

	// increment the protocol version and set it in state and baseapp
	nextProtocolVersion := k.getProtocolVersion(ctx) + 1
	k.setProtocolVersion(ctx, nextProtocolVersion)
	nextProtocolVersion, err := k.getProtocolVersion(ctx)
	if err != nil {
		return err
	}
	nextProtocolVersion++
	err = k.setProtocolVersion(ctx, nextProtocolVersion)
	if err != nil {
		return err
	}

	if k.versionSetter != nil {
		// set protocol version on BaseApp
		k.versionSetter.SetProtocolVersion(nextProtocolVersion)
@ -386,9 +494,17 @@ func (k Keeper) ApplyUpgrade(ctx sdk.Context, plan types.Plan) {

	// Must clear IBC state after upgrade is applied as it is stored separately from the upgrade plan.
	// This will prevent resubmission of upgrade msg after upgrade is already completed.
	k.ClearIBCState(ctx, plan.Height)
	k.ClearUpgradePlan(ctx)
	k.setDone(ctx, plan.Name)
	err = k.ClearIBCState(ctx, plan.Height)
	if err != nil {
		return err
	}

	err = k.ClearUpgradePlan(ctx)
	if err != nil {
		return err
	}

	return k.setDone(ctx, plan.Name)
}
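
Since ApplyUpgrade now returns an error instead of panicking, call sites have to propagate it; a minimal hypothetical call site:

	if err := k.ApplyUpgrade(ctx, plan); err != nil {
		return err // previously this path would have panicked
	}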

// IsSkipHeight checks if the given height is part of skipUpgradeHeights

@ -1,6 +1,7 @@
package keeper_test

import (
	"context"
	"path/filepath"
	"testing"
	"time"
@ -15,6 +16,7 @@ import (
	"cosmossdk.io/x/upgrade/types"

	"github.com/cosmos/cosmos-sdk/baseapp"
	"github.com/cosmos/cosmos-sdk/runtime"
	"github.com/cosmos/cosmos-sdk/testutil"
	simtestutil "github.com/cosmos/cosmos-sdk/testutil/sims"
	sdk "github.com/cosmos/cosmos-sdk/types"
@ -40,6 +42,7 @@ type KeeperTestSuite struct {
func (s *KeeperTestSuite) SetupTest() {
	s.encCfg = moduletestutil.MakeTestEncodingConfig(upgrade.AppModuleBasic{})
	key := storetypes.NewKVStoreKey(types.StoreKey)
	storeService := runtime.NewKVStoreService(key)
	s.key = key
	testCtx := testutil.DefaultContextWithDB(s.T(), key, storetypes.NewTransientStoreKey("transient_test"))

@ -53,7 +56,7 @@ func (s *KeeperTestSuite) SetupTest() {
	skipUpgradeHeights := make(map[int64]bool)

	homeDir := filepath.Join(s.T().TempDir(), "x_upgrade_keeper_test")
	s.upgradeKeeper = keeper.NewKeeper(skipUpgradeHeights, key, s.encCfg.Codec, homeDir, nil, authtypes.NewModuleAddress(govtypes.ModuleName).String())
	s.upgradeKeeper = keeper.NewKeeper(skipUpgradeHeights, storeService, s.encCfg.Codec, homeDir, nil, authtypes.NewModuleAddress(govtypes.ModuleName).String())
	s.upgradeKeeper.SetVersionSetter(s.baseApp)

	vs := s.upgradeKeeper.GetVersionSetter()
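
The SetupTest hunk above is the general pattern for wiring the keeper by hand; a condensed, hypothetical sketch (the encoding config and authority string are assumed, not part of this diff):

	key := storetypes.NewKVStoreKey(types.StoreKey)
	storeService := runtime.NewKVStoreService(key) // wrap the StoreKey
	k := keeper.NewKeeper(map[int64]bool{}, storeService, // KVStoreService, not StoreKey
		encCfg.Codec, t.TempDir(), nil, authority)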

@ -146,7 +149,7 @@ func (s *KeeperTestSuite) TestScheduleUpgrade() {
				Height: 123450000,
			},
			setup: func() {
				s.upgradeKeeper.SetUpgradeHandler("all-good", func(ctx sdk.Context, plan types.Plan, vm module.VersionMap) (module.VersionMap, error) {
				s.upgradeKeeper.SetUpgradeHandler("all-good", func(ctx context.Context, plan types.Plan, vm module.VersionMap) (module.VersionMap, error) {
					return vm, nil
				})
				s.upgradeKeeper.ApplyUpgrade(s.ctx, types.Plan{
@ -212,13 +215,14 @@ func (s *KeeperTestSuite) TestSetUpgradedClient() {
		// setup test case
		tc.setup()

		gotCs, exists := s.upgradeKeeper.GetUpgradedClient(s.ctx, tc.height)
		gotCs, err := s.upgradeKeeper.GetUpgradedClient(s.ctx, tc.height)

		if tc.exists {
			s.Require().Equal(cs, gotCs, "valid case: %s did not retrieve correct client state", tc.name)
			s.Require().True(exists, "valid case: %s did not retrieve client state", tc.name)
			s.Require().NoError(err, "valid case: %s did not retrieve client state", tc.name)
		} else {
			s.Require().Nil(gotCs, "invalid case: %s retrieved valid client state", tc.name)
			s.Require().False(exists, "invalid case: %s retrieved valid client state", tc.name)
			s.Require().Error(err, "invalid case: %s retrieved valid client state", tc.name)
		}
	}
}
@ -228,7 +232,8 @@ func (s *KeeperTestSuite) TestIsSkipHeight() {
	ok := s.upgradeKeeper.IsSkipHeight(11)
	s.Require().False(ok)
	skip := map[int64]bool{skipOne: true}
	upgradeKeeper := keeper.NewKeeper(skip, s.key, s.encCfg.Codec, s.T().TempDir(), nil, authtypes.NewModuleAddress(govtypes.ModuleName).String())
	storeService := runtime.NewKVStoreService(s.key)
	upgradeKeeper := keeper.NewKeeper(skip, storeService, s.encCfg.Codec, s.T().TempDir(), nil, authtypes.NewModuleAddress(govtypes.ModuleName).String())
	upgradeKeeper.SetVersionSetter(s.baseApp)
	s.Require().True(upgradeKeeper.IsSkipHeight(9))
	s.Require().False(upgradeKeeper.IsSkipHeight(10))
@ -237,9 +242,9 @@ func (s *KeeperTestSuite) TestIsSkipHeight() {
func (s *KeeperTestSuite) TestUpgradedConsensusState() {
	cs := []byte("IBC consensus state")
	s.Require().NoError(s.upgradeKeeper.SetUpgradedConsensusState(s.ctx, 10, cs))
	bz, ok := s.upgradeKeeper.GetUpgradedConsensusState(s.ctx, 10)
	s.Require().True(ok)
	bz, err := s.upgradeKeeper.GetUpgradedConsensusState(s.ctx, 10)
	s.Require().Equal(cs, bz)
	s.Require().NoError(err)
}

func (s *KeeperTestSuite) TestDowngradeVerified() {
@ -259,13 +264,11 @@ func (s *KeeperTestSuite) TestIncrementProtocolVersion() {
		Info:   "some text here",
		Height: 100,
	}
	s.Require().PanicsWithValue("ApplyUpgrade should never be called without first checking HasHandler",
		func() {
			s.upgradeKeeper.ApplyUpgrade(s.ctx, dummyPlan)
		},
	)
	s.upgradeKeeper.SetUpgradeHandler("dummy", func(_ sdk.Context, _ types.Plan, vm module.VersionMap) (module.VersionMap, error) { return vm, nil })
	err := s.upgradeKeeper.ApplyUpgrade(s.ctx, dummyPlan)
	s.Require().EqualError(err, "ApplyUpgrade should never be called without first checking HasHandler")

	s.upgradeKeeper.SetUpgradeHandler("dummy", func(_ context.Context, _ types.Plan, vm module.VersionMap) (module.VersionMap, error) { return vm, nil })
	s.upgradeKeeper.ApplyUpgrade(s.ctx, dummyPlan)
	upgradedProtocolVersion := s.baseApp.AppVersion()

@ -277,8 +280,10 @@ func (s *KeeperTestSuite) TestIncrementProtocolVersion() {
func (s *KeeperTestSuite) TestMigrations() {
	initialVM := module.VersionMap{"bank": uint64(1)}
	s.upgradeKeeper.SetModuleVersionMap(s.ctx, initialVM)
	vmBefore := s.upgradeKeeper.GetModuleVersionMap(s.ctx)
	s.upgradeKeeper.SetUpgradeHandler("dummy", func(_ sdk.Context, _ types.Plan, vm module.VersionMap) (module.VersionMap, error) {
	vmBefore, err := s.upgradeKeeper.GetModuleVersionMap(s.ctx)
	s.Require().NoError(err)

	s.upgradeKeeper.SetUpgradeHandler("dummy", func(_ context.Context, _ types.Plan, vm module.VersionMap) (module.VersionMap, error) {
		// simulate upgrading the bank module
		vm["bank"]++
		return vm, nil
@ -290,8 +295,9 @@ func (s *KeeperTestSuite) TestMigrations() {
	}

	s.upgradeKeeper.ApplyUpgrade(s.ctx, dummyPlan)
	vm := s.upgradeKeeper.GetModuleVersionMap(s.ctx)
	vm, err := s.upgradeKeeper.GetModuleVersionMap(s.ctx)
	s.Require().Equal(vmBefore["bank"]+1, vm["bank"])
	s.Require().NoError(err)
}

func (s *KeeperTestSuite) TestLastCompletedUpgrade() {
@ -299,11 +305,12 @@ func (s *KeeperTestSuite) TestLastCompletedUpgrade() {
	require := s.Require()

	s.T().Log("verify empty name if applied upgrades are empty")
	name, height := keeper.GetLastCompletedUpgrade(s.ctx)
	name, height, err := keeper.GetLastCompletedUpgrade(s.ctx)
	require.Equal("", name)
	require.Equal(int64(0), height)
	require.NoError(err)

	keeper.SetUpgradeHandler("test0", func(_ sdk.Context, _ types.Plan, vm module.VersionMap) (module.VersionMap, error) {
	keeper.SetUpgradeHandler("test0", func(_ context.Context, _ types.Plan, vm module.VersionMap) (module.VersionMap, error) {
		return vm, nil
	})

@ -313,11 +320,12 @@ func (s *KeeperTestSuite) TestLastCompletedUpgrade() {
	})

	s.T().Log("verify valid upgrade name and height")
	name, height = keeper.GetLastCompletedUpgrade(s.ctx)
	name, height, err = keeper.GetLastCompletedUpgrade(s.ctx)
	require.Equal("test0", name)
	require.Equal(int64(10), height)
	require.NoError(err)

	keeper.SetUpgradeHandler("test1", func(_ sdk.Context, _ types.Plan, vm module.VersionMap) (module.VersionMap, error) {
	keeper.SetUpgradeHandler("test1", func(_ context.Context, _ types.Plan, vm module.VersionMap) (module.VersionMap, error) {
		return vm, nil
	})

@ -328,9 +336,10 @@ func (s *KeeperTestSuite) TestLastCompletedUpgrade() {
	})

	s.T().Log("verify valid upgrade name and height with multiple upgrades")
	name, height = keeper.GetLastCompletedUpgrade(newCtx)
	name, height, err = keeper.GetLastCompletedUpgrade(newCtx)
	require.Equal("test1", name)
	require.Equal(int64(15), height)
	require.NoError(err)
}

// This test ensures that `GetLastDoneUpgrade` always returns the last upgrade according to the block height
@ -340,7 +349,7 @@ func (s *KeeperTestSuite) TestLastCompletedUpgradeOrdering() {
	require := s.Require()

	// apply first upgrade
	keeper.SetUpgradeHandler("test-v0.9", func(_ sdk.Context, _ types.Plan, vm module.VersionMap) (module.VersionMap, error) {
	keeper.SetUpgradeHandler("test-v0.9", func(_ context.Context, _ types.Plan, vm module.VersionMap) (module.VersionMap, error) {
		return vm, nil
	})

@ -349,24 +358,27 @@ func (s *KeeperTestSuite) TestLastCompletedUpgradeOrdering() {
		Height: 10,
	})

	name, height := keeper.GetLastCompletedUpgrade(s.ctx)
	name, height, err := keeper.GetLastCompletedUpgrade(s.ctx)
	require.Equal("test-v0.9", name)
	require.Equal(int64(10), height)
	require.NoError(err)

	// apply second upgrade
	keeper.SetUpgradeHandler("test-v0.10", func(_ sdk.Context, _ types.Plan, vm module.VersionMap) (module.VersionMap, error) {
	keeper.SetUpgradeHandler("test-v0.10", func(_ context.Context, _ types.Plan, vm module.VersionMap) (module.VersionMap, error) {
		return vm, nil
	})

	newCtx := s.ctx.WithHeaderInfo(header.Info{Height: 15})
	keeper.ApplyUpgrade(newCtx, types.Plan{
	err = keeper.ApplyUpgrade(newCtx, types.Plan{
		Name:   "test-v0.10",
		Height: 15,
	})
	require.NoError(err)

	name, height = keeper.GetLastCompletedUpgrade(newCtx)
	name, height, err = keeper.GetLastCompletedUpgrade(newCtx)
	require.Equal("test-v0.10", name)
	require.Equal(int64(15), height)
	require.NoError(err)
}

func TestKeeperTestSuite(t *testing.T) {

@ -3,10 +3,11 @@ package keeper
import (
	"encoding/binary"

	storetypes "cosmossdk.io/core/store"
	"cosmossdk.io/store/prefix"
	storetypes "cosmossdk.io/store/types"
	"cosmossdk.io/x/upgrade/types"

	"github.com/cosmos/cosmos-sdk/runtime"
	sdk "github.com/cosmos/cosmos-sdk/types"
)

@ -22,12 +23,12 @@ func NewMigrator(keeper *Keeper) Migrator {

// Migrate1to2 migrates from version 1 to 2.
func (m Migrator) Migrate1to2(ctx sdk.Context) error {
	return migrateDoneUpgradeKeys(ctx, m.keeper.storeKey)
	return migrateDoneUpgradeKeys(ctx, m.keeper.storeService)
}

func migrateDoneUpgradeKeys(ctx sdk.Context, storeKey storetypes.StoreKey) error {
	store := ctx.KVStore(storeKey)
	oldDoneStore := prefix.NewStore(store, []byte{types.DoneByte})
func migrateDoneUpgradeKeys(ctx sdk.Context, storeService storetypes.KVStoreService) error {
	store := storeService.OpenKVStore(ctx)
	oldDoneStore := prefix.NewStore(runtime.KVStoreAdapter(store), []byte{types.DoneByte})
	oldDoneStoreIter := oldDoneStore.Iterator(nil, nil)
	defer oldDoneStoreIter.Close()

@ -37,7 +38,11 @@ func migrateDoneUpgradeKeys(ctx sdk.Context, storeKey storetypes.StoreKey) error
		upgradeHeight := int64(binary.BigEndian.Uint64(oldDoneStoreIter.Value()))
		newKey := encodeDoneKey(upgradeName, upgradeHeight)

		store.Set(newKey, []byte{1})
		err := store.Set(newKey, []byte{1})
		if err != nil {
			return err
		}

		oldDoneStore.Delete(oldKey)
	}
	return nil
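
Since prefix.NewStore still expects a KVStore from cosmossdk.io/store/types, runtime.KVStoreAdapter bridges the core KVStore returned by the service back to that interface; a minimal sketch of the pattern used above:

	store := storeService.OpenKVStore(ctx)   // core/store KVStore
	adapted := runtime.KVStoreAdapter(store) // adapt to store/types KVStore
	doneStore := prefix.NewStore(adapted, []byte{types.DoneByte})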

@ -8,6 +8,7 @@ import (
	"cosmossdk.io/x/upgrade/types"
	"github.com/stretchr/testify/require"

	"github.com/cosmos/cosmos-sdk/runtime"
	"github.com/cosmos/cosmos-sdk/testutil"
)

@ -22,8 +23,9 @@ func encodeOldDoneKey(upgrade storedUpgrade) []byte {

func TestMigrateDoneUpgradeKeys(t *testing.T) {
	upgradeKey := storetypes.NewKVStoreKey("upgrade")
	storeService := runtime.NewKVStoreService(upgradeKey)
	ctx := testutil.DefaultContext(upgradeKey, storetypes.NewTransientStoreKey("transient_test"))
	store := ctx.KVStore(upgradeKey)
	store := storeService.OpenKVStore(ctx)

	testCases := []struct {
		name string
@ -44,17 +46,22 @@ func TestMigrateDoneUpgradeKeys(t *testing.T) {
			bz := make([]byte, 8)
			binary.BigEndian.PutUint64(bz, uint64(upgrade.height))
			oldKey := encodeOldDoneKey(upgrade)
			store.Set(oldKey, bz)
			require.NoError(t, store.Set(oldKey, bz))
		}

		err := migrateDoneUpgradeKeys(ctx, upgradeKey)
		err := migrateDoneUpgradeKeys(ctx, storeService)
		require.NoError(t, err)

		for _, upgrade := range tc.upgrades {
			newKey := encodeDoneKey(upgrade.name, upgrade.height)
			oldKey := encodeOldDoneKey(upgrade)
			require.Nil(t, store.Get(oldKey))
			require.Equal(t, []byte{1}, store.Get(newKey))
			v, err := store.Get(oldKey)
			require.Nil(t, v)
			require.NoError(t, err)

			nv, err := store.Get(newKey)
			require.Equal(t, []byte{1}, nv)
			require.NoError(t, err)
		}
	}
}

@ -50,7 +50,10 @@ func (k msgServer) CancelUpgrade(ctx context.Context, msg *types.MsgCancelUpgrad
	}

	sdkCtx := sdk.UnwrapSDKContext(ctx)
	k.ClearUpgradePlan(sdkCtx)
	err := k.ClearUpgradePlan(sdkCtx)
	if err != nil {
		return nil, err
	}

	return &types.MsgCancelUpgradeResponse{}, nil
}

@ -2,6 +2,7 @@ package keeper_test

import (
	"cosmossdk.io/x/upgrade/types"

	sdk "github.com/cosmos/cosmos-sdk/types"
	"github.com/cosmos/cosmos-sdk/types/address"
)
@ -73,8 +74,8 @@ func (s *KeeperTestSuite) TestSoftwareUpgrade() {
				s.Require().Contains(err.Error(), tc.errMsg)
			} else {
				s.Require().NoError(err)
				plan, found := s.upgradeKeeper.GetUpgradePlan(s.ctx)
				s.Require().Equal(true, found)
				plan, err := s.upgradeKeeper.GetUpgradePlan(s.ctx)
				s.Require().NoError(err)
				s.Require().Equal(tc.req.Plan, plan)
			}
		})
@ -130,8 +131,8 @@ func (s *KeeperTestSuite) TestCancelUpgrade() {
				s.Require().Contains(err.Error(), tc.errMsg)
			} else {
				s.Require().NoError(err)
				_, found := s.upgradeKeeper.GetUpgradePlan(s.ctx)
				s.Require().Equal(false, found)
				_, err := s.upgradeKeeper.GetUpgradePlan(s.ctx)
				s.Require().ErrorIs(err, types.ErrNoUpgradePlanFound)
			}
		})
	}

@ -15,7 +15,7 @@ import (
	"cosmossdk.io/core/appmodule"
	"cosmossdk.io/depinject"

	store "cosmossdk.io/store/types"
	"cosmossdk.io/core/store"
	"cosmossdk.io/x/upgrade/client/cli"
	"cosmossdk.io/x/upgrade/keeper"
	"cosmossdk.io/x/upgrade/types"
@ -124,13 +124,21 @@ func (am AppModule) InitGenesis(ctx sdk.Context, _ codec.JSONCodec, _ json.RawMe
	if versionMap := am.keeper.GetInitVersionMap(); versionMap != nil {
		// chains can still use a custom init chainer for setting the version map
		// this means that we need to combine the manually wired modules version map with app wiring enabled modules version map
		for name, version := range am.keeper.GetModuleVersionMap(ctx) {
		moduleVM, err := am.keeper.GetModuleVersionMap(ctx)
		if err != nil {
			panic(err)
		}

		for name, version := range moduleVM {
			if _, ok := versionMap[name]; !ok {
				versionMap[name] = version
			}
		}

		am.keeper.SetModuleVersionMap(ctx, versionMap)
		err = am.keeper.SetModuleVersionMap(ctx, versionMap)
		if err != nil {
			panic(err)
		}
	}

	return []abci.ValidatorUpdate{}
@ -158,9 +166,7 @@ func (AppModule) ConsensusVersion() uint64 { return ConsensusVersion }
//
// CONTRACT: this is registered in BeginBlocker *before* all other modules' BeginBlock functions
func (am AppModule) BeginBlock(ctx context.Context) error {
	c := sdk.UnwrapSDKContext(ctx)
	BeginBlocker(am.keeper, c)
	return nil
	return BeginBlocker(ctx, am.keeper)
}

//
@ -178,7 +184,7 @@ type ModuleInputs struct {
	depinject.In

	Config       *modulev1.Module
	Key          *store.KVStoreKey
	StoreService store.KVStoreService
	Cdc          codec.Codec
	AddressCodec address.Codec

@ -215,7 +221,7 @@ func ProvideModule(in ModuleInputs) ModuleOutputs {
	}

	// set the governance module account as the authority for conducting upgrades
	k := keeper.NewKeeper(skipUpgradeHeights, in.Key, in.Cdc, homePath, nil, authority.String())
	k := keeper.NewKeeper(skipUpgradeHeights, in.StoreService, in.Cdc, homePath, nil, authority.String())
	baseappOpt := func(app *baseapp.BaseApp) {
		k.SetVersionSetter(app)
	}

x/upgrade/types/errors.go (new file)
@ -0,0 +1,17 @@
package types

import (
	"cosmossdk.io/errors"
)

// x/upgrade module sentinel errors
var (
	// ErrNoModuleVersionFound error if there is no version found in the module-version map
	ErrNoModuleVersionFound = errors.Register(ModuleName, 2, "module version not found")
	// ErrNoUpgradePlanFound error if there is no scheduled upgrade plan found
	ErrNoUpgradePlanFound = errors.Register(ModuleName, 3, "upgrade plan not found")
	// ErrNoUpgradedClientFound error if there is no upgraded client for the next version
	ErrNoUpgradedClientFound = errors.Register(ModuleName, 4, "upgraded client not found")
	// ErrNoUpgradedConsensusStateFound error if there is no upgraded consensus state for the next version
	ErrNoUpgradedConsensusStateFound = errors.Register(ModuleName, 5, "upgraded consensus state not found")
)

@ -1,7 +1,8 @@
package types

import (
	sdk "github.com/cosmos/cosmos-sdk/types"
	context "context"

	"github.com/cosmos/cosmos-sdk/types/module"
)

@ -23,4 +24,4 @@ import (
// function.
//
// Please also refer to docs/core/upgrade.md for more information.
type UpgradeHandler func(ctx sdk.Context, plan Plan, fromVM module.VersionMap) (module.VersionMap, error)
type UpgradeHandler func(ctx context.Context, plan Plan, fromVM module.VersionMap) (module.VersionMap, error)
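
A handler written against the new signature receives a plain context.Context and can unwrap it where sdk.Context features are still needed; a hedged sketch (the "my-upgrade" name and the app wiring are placeholders, not part of this diff):

	app.UpgradeKeeper.SetUpgradeHandler("my-upgrade",
		func(ctx context.Context, plan types.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
			sdkCtx := sdk.UnwrapSDKContext(ctx) // unwrap only where required
			sdkCtx.Logger().Info("applying upgrade", "name", plan.Name)
			return app.ModuleManager.RunMigrations(ctx, app.Configurator(), fromVM)
		})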

@ -5,7 +5,6 @@ import (

	errorsmod "cosmossdk.io/errors"

	sdk "github.com/cosmos/cosmos-sdk/types"
	sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
)

@ -30,12 +29,9 @@ func (p Plan) ValidateBasic() error {
	return nil
}

// ShouldExecute returns true if the Plan is ready to execute given the current context
func (p Plan) ShouldExecute(ctx sdk.Context) bool {
	if p.Height > 0 {
		return p.Height <= ctx.HeaderInfo().Height
	}
	return false
// ShouldExecute returns true if the Plan is ready to execute given the current block height
func (p Plan) ShouldExecute(blockHeight int64) bool {
	return p.Height > 0 && p.Height <= blockHeight
}
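
Call sites now pass the block height explicitly instead of a context; a hypothetical caller that still holds an sdk.Context:

	sdkCtx := sdk.UnwrapSDKContext(ctx)
	if plan.ShouldExecute(sdkCtx.HeaderInfo().Height) {
		// the plan is due at or before this block
	}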

// DueAt is a string representation of when this plan is due to be executed

@ -4,16 +4,12 @@ import (
	"testing"
	"time"

	cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"cosmossdk.io/core/header"
	"cosmossdk.io/log"
	"cosmossdk.io/x/upgrade/types"

	codectypes "github.com/cosmos/cosmos-sdk/codec/types"
	sdk "github.com/cosmos/cosmos-sdk/types"
)

func mustParseTime(s string) time.Time {
@ -148,12 +144,7 @@ func TestShouldExecute(t *testing.T) {
	for name, tc := range cases {
		tc := tc // copy to local variable for scopelint
		t.Run(name, func(t *testing.T) {
			ctx := sdk.NewContext(nil, cmtproto.Header{Height: tc.ctxHeight, Time: tc.ctxTime}, false, log.NewNopLogger())
			ctx = ctx.WithHeaderInfo(header.Info{
				Height: tc.ctxHeight,
				Time:   tc.ctxTime,
			})
			should := tc.p.ShouldExecute(ctx)
			should := tc.p.ShouldExecute(tc.ctxHeight)
			assert.Equal(t, tc.expected, should)
		})
	}