wire up network upgrade logic for v2 actors upgrade
commit d1000e38d7
parent c88d124954
@@ -14,6 +14,7 @@ const BreezeGasTampingDuration = 0
const UpgradeSmokeHeight = -1
const UpgradeIgnitionHeight = -2
const UpgradeLiftoffHeight = -3
const UpgradeActorsV2 = 10

var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
    0: DrandMainnet,
@@ -77,6 +77,7 @@ var (
    UpgradeSmokeHeight    abi.ChainEpoch = -1
    UpgradeIgnitionHeight abi.ChainEpoch = -2
    UpgradeLiftoffHeight  abi.ChainEpoch = -3
    UpgradeActorsV2       abi.ChainEpoch = 10

    DrandSchedule = map[abi.ChainEpoch]DrandEnum{
        0: DrandMainnet,
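
Together with the heights above, UpgradeActorsV2 is the single constant the rest of the code can branch on to know which actors state version is live at a given epoch. Below is a minimal sketch of such a check, assuming the heights declared in the build package; the helper name ActorsVersionAtEpoch and its return convention are illustrative and not part of this commit:

// Illustrative only: derive the live actors state version from the
// upgrade heights in the build package. Not part of this commit.
package build

import "github.com/filecoin-project/go-state-types/abi" // import path may differ by lotus revision

// ActorsVersionAtEpoch is a hypothetical helper: state written before
// UpgradeActorsV2 uses the original actors state tree, state written at
// or after that epoch uses the v2 tree produced by the migration below.
func ActorsVersionAtEpoch(epoch abi.ChainEpoch) int {
    if epoch >= UpgradeActorsV2 {
        return 2
    }
    return 0
}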
@@ -23,6 +23,8 @@ const UpgradeSmokeHeight = 51000

const UpgradeIgnitionHeight = 94000

const UpgradeActorsV2 = 128888

// This signals our tentative epoch for mainnet launch. Can make it later, but not earlier.
// Miners, clients, developers, custodians all need time to prepare.
// We still have upgrades and state changes to do, but can happen after signaling timing here.
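
For a sense of scale, an upgrade epoch converts to wall-clock time via the 30-second epoch duration. The small sketch below does that arithmetic with a placeholder genesis timestamp; it is purely illustrative and not part of the commit:

// Illustrative only: rough wall-clock offset of an upgrade epoch,
// assuming the standard 30-second Filecoin epoch duration.
package main

import (
    "fmt"
    "time"
)

func main() {
    const epochDurationSeconds = 30 // standard epoch duration
    const upgradeActorsV2 = 128888  // epoch set in the params above

    offset := time.Duration(upgradeActorsV2*epochDurationSeconds) * time.Second
    genesis := time.Unix(0, 0) // placeholder, not the network's actual genesis
    fmt.Printf("epoch %d is ~%.1f days after genesis (%s)\n",
        upgradeActorsV2, offset.Hours()/24, genesis.Add(offset).UTC())
}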
@@ -21,8 +21,10 @@ import (
    adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"

    "github.com/filecoin-project/specs-actors/actors/migration/nv3"
    m2 "github.com/filecoin-project/specs-actors/v2/actors/migration"

    "github.com/filecoin-project/lotus/build"
    "github.com/filecoin-project/lotus/chain/actors"
    "github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
    "github.com/filecoin-project/lotus/chain/state"
    "github.com/filecoin-project/lotus/chain/types"
@@ -32,6 +34,7 @@ import (
var ForksAtHeight = map[abi.ChainEpoch]func(context.Context, *StateManager, ExecCallback, cid.Cid, *types.TipSet) (cid.Cid, error){
    build.UpgradeBreezeHeight:   UpgradeFaucetBurnRecovery,
    build.UpgradeIgnitionHeight: UpgradeIgnition,
    build.UpgradeActorsV2:       UpgradeActorsV2,
    build.UpgradeLiftoffHeight:  UpgradeLiftoff,
}
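
ForksAtHeight is the dispatch table consulted when execution crosses an upgrade epoch: if an entry exists for the current height, the corresponding migration rewrites the state root before normal execution continues. A minimal sketch of that lookup follows, assuming the surrounding stmgr package imports; the method name handleFork and its exact call site are illustrative, not the state manager's actual fork-handling entry point:

// Illustrative only: how a fork table like ForksAtHeight could be
// consulted. handleFork and its signature are assumptions.
func (sm *StateManager) handleFork(ctx context.Context, root cid.Cid, height abi.ChainEpoch, cb ExecCallback, ts *types.TipSet) (cid.Cid, error) {
    migration, ok := ForksAtHeight[height]
    if !ok {
        // No upgrade scheduled at this epoch: pass the state root through unchanged.
        return root, nil
    }
    newRoot, err := migration(ctx, sm, cb, root, ts)
    if err != nil {
        return cid.Undef, xerrors.Errorf("running fork migration at epoch %d: %w", height, err)
    }
    return newRoot, nil
}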
@@ -414,6 +417,43 @@ func UpgradeIgnition(ctx context.Context, sm *StateManager, cb ExecCallback, roo
    return tree.Flush(ctx)
}

func UpgradeActorsV2(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, ts *types.TipSet) (cid.Cid, error) {
    store := sm.cs.Store(ctx)

    info, err := store.Put(ctx, new(types.StateInfo))
    if err != nil {
        return cid.Undef, xerrors.Errorf("failed to create new state info for actors v2: %w", err)
    }

    newHamtRoot, err := m2.MigrateStateTree(ctx, store, root)
    if err != nil {
        return cid.Undef, xerrors.Errorf("upgrading to actors v2: %w", err)
    }

    newRoot, err := store.Put(ctx, &types.StateRoot{
        // TODO: ActorUpgrade: should be state-tree specific, not just the actors version.
        Version: actors.Version2,
        Actors:  newHamtRoot,
        Info:    info,
    })
    if err != nil {
        return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
    }

    // perform some basic sanity checks.
    if newSm, err := state.LoadStateTree(store, newRoot); err != nil {
        return cid.Undef, xerrors.Errorf("state tree sanity load failed: %w", err)
    } else if newRoot2, err := newSm.Flush(ctx); err != nil {
        return cid.Undef, xerrors.Errorf("state tree sanity flush failed: %w", err)
    } else if newRoot2 != newRoot {
        return cid.Undef, xerrors.Errorf("state-root mismatch: %s != %s", newRoot, newRoot2)
    } else if _, err := newSm.GetActor(builtin0.InitActorAddr); err != nil {
        return cid.Undef, xerrors.Errorf("failed to load init actor after upgrade: %w", err)
    }

    return newRoot, nil
}

func UpgradeLiftoff(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, ts *types.TipSet) (cid.Cid, error) {
    tree, err := sm.StateTree(root)
    if err != nil {
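
The TODO inside UpgradeActorsV2 flags the structural change this migration introduces: before the upgrade the state root CID points directly at the actors HAMT, while afterwards it points at a types.StateRoot wrapper carrying Version, Actors, and Info. Below is a sketch of how a reader could cope with both shapes, assuming the adt0 store and types imports already present in this file; the helper actorsRootFor and its decode-then-fallback strategy are illustrative, not lotus's actual state-tree loading logic:

// Illustrative only: resolve the actors HAMT CID from either a bare
// pre-upgrade root or a post-upgrade versioned types.StateRoot.
func actorsRootFor(ctx context.Context, store adt0.Store, root cid.Cid) cid.Cid {
    var wrapped types.StateRoot
    if err := store.Get(ctx, root, &wrapped); err == nil {
        // Versioned root written by UpgradeActorsV2: the HAMT lives behind Actors.
        return wrapped.Actors
    }
    // Decoding as a StateRoot failed; treat the CID as a bare pre-upgrade HAMT root.
    return root
}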