From 8c9321ce7bb81b5a78c41940e3451561bd1bbe37 Mon Sep 17 00:00:00 2001
From: ZenGround0 <ZenGround0@users.noreply.github.com>
Date: Tue, 10 Aug 2021 15:50:37 -0400
Subject: [PATCH] revert pricelist by version to pricelist by epoch

---
 api/version.go                              |    4 +-
 blockstore/badger/blockstore.go             |    2 +-
 build/openrpc/worker.json.gz                |  Bin 2710 -> 2710 bytes
 build/params_2k.go                          |    1 -
 build/params_butterfly.go                   |    1 -
 build/params_calibnet.go                    |    2 -
 build/params_interop.go                     |    1 -
 build/params_mainnet.go                     |    1 -
 build/params_nerpanet.go                    |    1 -
 build/params_testground.go                  |   31 +-
 chain/actors/version.go                     |    2 +-
 chain/messagepool/check.go                  |    2 +-
 chain/messagepool/messagepool.go            |    3 +-
 chain/messagepool/selection.go              |    2 +-
 chain/stmgr/forks.go                        | 1088 +-----------------
 chain/stmgr/upgrades.go                     | 1090 +++++++++++++++++++
 chain/sync.go                               |    2 +-
 chain/vm/gas.go                             |   28 +-
 chain/vm/mkactor.go                         |    2 +-
 chain/vm/vm.go                              |    4 +-
 cli/cmd.go                                  |    2 +-
 cmd/lotus-miner/main.go                     |    2 +-
 documentation/en/api-v0-methods-miner.md    |    2 +-
 documentation/en/api-v0-methods-worker.md   |    2 +-
 documentation/en/api-v0-methods.md          |    2 +-
 documentation/en/api-v1-unstable-methods.md |    2 +-
 26 files changed, 1146 insertions(+), 1133 deletions(-)
 create mode 100644 chain/stmgr/upgrades.go

diff --git a/api/version.go b/api/version.go
index ef59dd104..687f5135a 100644
--- a/api/version.go
+++ b/api/version.go
@@ -54,8 +54,8 @@ func VersionForType(nodeType NodeType) (Version, error) {
 
 // semver versions of the rpc api exposed
 var (
-	FullAPIVersion0 = newVer(1, 3, 1)
-	FullAPIVersion1 = newVer(2, 1, 1)
+	FullAPIVersion0 = newVer(1, 3, 0)
+	FullAPIVersion1 = newVer(2, 1, 0)
 
 	MinerAPIVersion0  = newVer(1, 2, 0)
 	WorkerAPIVersion0 = newVer(1, 1, 0)
diff --git a/blockstore/badger/blockstore.go b/blockstore/badger/blockstore.go
index 59fd18d1a..a0b51d8df 100644
--- a/blockstore/badger/blockstore.go
+++ b/blockstore/badger/blockstore.go
@@ -982,4 +982,4 @@ func (b *Blockstore) StorageKey(dst []byte, cid cid.Cid) []byte {
 // WARNING: THIS IS COMPLETELY UNSAFE; DONT USE THIS IN PRODUCTION CODE
 func (b *Blockstore) DB() *badger.DB {
 	return b.db
-}
\ No newline at end of file
+}
diff --git a/build/openrpc/worker.json.gz b/build/openrpc/worker.json.gz
index 9fa491548c83ad0c8ae481a77e828d4089534cba..c1b533b72b946065dbdc580248dfd393126945d0 100644
GIT binary patch
delta 100
zcmV-q0Gt1o6_ypSItpBztsO^qR3VQ`ww?b;sQPz6K^dy#oUTwQOOs!7ve)Lm()|~!
zU((gU<@SR_0aMgPY77Dp5rA0qlujZMFcv=Bd!@#{yJXa93s0wa)Bgnk0RR8b4U|P$
GdH?`aJua#M

delta 100
zcmV-q0Gt1o6_ypSItpBTSUZmHs6rl>Y&-vxQ1$PCf-+ReIbESrmL|XEWUtMArTZ^d
zzoe^y%k2k=0;Z^o)EERHA^@@IDV;<jU@UyL_eza@cgd*J7M@P;rvD270RR8*=vVbw
GdH?`>-!T0E

diff --git a/build/params_2k.go b/build/params_2k.go
index a1ccb0ce3..efa38dc0c 100644
--- a/build/params_2k.go
+++ b/build/params_2k.go
@@ -28,7 +28,6 @@ var UpgradeAssemblyHeight = abi.ChainEpoch(-5)
 var UpgradeLiftoffHeight = abi.ChainEpoch(-6)
 
 var UpgradeKumquatHeight = abi.ChainEpoch(-7)
-var UpgradePricelistOopsHeight = abi.ChainEpoch(-8)
 var UpgradeCalicoHeight = abi.ChainEpoch(-9)
 var UpgradePersianHeight = abi.ChainEpoch(-10)
 var UpgradeOrangeHeight = abi.ChainEpoch(-11)
diff --git a/build/params_butterfly.go b/build/params_butterfly.go
index 835c7dc7a..258f6ab0f 100644
--- a/build/params_butterfly.go
+++ b/build/params_butterfly.go
@@ -28,7 +28,6 @@ var UpgradeAssemblyHeight = abi.ChainEpoch(30)
 const UpgradeTapeHeight = 60
 const UpgradeLiftoffHeight = -5
 const UpgradeKumquatHeight = 90
-const UpgradePricelistOopsHeight = 119
 const UpgradeCalicoHeight = 120
 const UpgradePersianHeight = 150
 const UpgradeClausHeight = 180
diff --git a/build/params_calibnet.go b/build/params_calibnet.go
index fe871bcca..df334a516 100644
--- a/build/params_calibnet.go
+++ b/build/params_calibnet.go
@@ -33,8 +33,6 @@ const UpgradeLiftoffHeight = -5
 
 const UpgradeKumquatHeight = 90
 
-const UpgradePricelistOopsHeight = 119
-
 const UpgradeCalicoHeight = 120
 const UpgradePersianHeight = UpgradeCalicoHeight + (builtin2.EpochsInHour * 1)
 
diff --git a/build/params_interop.go b/build/params_interop.go
index b5e49577d..921dd0981 100644
--- a/build/params_interop.go
+++ b/build/params_interop.go
@@ -31,7 +31,6 @@ var UpgradeAssemblyHeight = abi.ChainEpoch(-5)
 var UpgradeLiftoffHeight = abi.ChainEpoch(-6)
 
 var UpgradeKumquatHeight = abi.ChainEpoch(-7)
-var UpgradePricelistOopsHeight = abi.ChainEpoch(-8)
 var UpgradeCalicoHeight = abi.ChainEpoch(-9)
 var UpgradePersianHeight = abi.ChainEpoch(-10)
 var UpgradeOrangeHeight = abi.ChainEpoch(-11)
diff --git a/build/params_mainnet.go b/build/params_mainnet.go
index c9750b6e6..1c9b69462 100644
--- a/build/params_mainnet.go
+++ b/build/params_mainnet.go
@@ -45,7 +45,6 @@ const UpgradeLiftoffHeight = 148888
 
 const UpgradeKumquatHeight = 170000
 
-const UpgradePricelistOopsHeight = 265199
 const UpgradeCalicoHeight = 265200
 const UpgradePersianHeight = UpgradeCalicoHeight + (builtin2.EpochsInHour * 60)
 
diff --git a/build/params_nerpanet.go b/build/params_nerpanet.go
index 8879d01b5..3cba64ae2 100644
--- a/build/params_nerpanet.go
+++ b/build/params_nerpanet.go
@@ -32,7 +32,6 @@ const UpgradeTapeHeight = 60
 
 const UpgradeKumquatHeight = 90
 
-const UpgradePricelistOopsHeight = 99
 const UpgradeCalicoHeight = 100
 const UpgradePersianHeight = UpgradeCalicoHeight + (builtin2.EpochsInHour * 1)
 
diff --git a/build/params_testground.go b/build/params_testground.go
index b12df11e7..204c74e67 100644
--- a/build/params_testground.go
+++ b/build/params_testground.go
@@ -82,22 +82,21 @@ var (
 	UpgradeBreezeHeight      abi.ChainEpoch = -1
 	BreezeGasTampingDuration abi.ChainEpoch = 0
 
-	UpgradeSmokeHeight         abi.ChainEpoch = -1
-	UpgradeIgnitionHeight      abi.ChainEpoch = -2
-	UpgradeRefuelHeight        abi.ChainEpoch = -3
-	UpgradeTapeHeight          abi.ChainEpoch = -4
-	UpgradeAssemblyHeight      abi.ChainEpoch = 10
-	UpgradeLiftoffHeight       abi.ChainEpoch = -5
-	UpgradeKumquatHeight       abi.ChainEpoch = -6
-	UpgradePricelistOopsHeight abi.ChainEpoch = -7
-	UpgradeCalicoHeight        abi.ChainEpoch = -8
-	UpgradePersianHeight       abi.ChainEpoch = -9
-	UpgradeOrangeHeight        abi.ChainEpoch = -10
-	UpgradeClausHeight         abi.ChainEpoch = -11
-	UpgradeTrustHeight         abi.ChainEpoch = -12
-	UpgradeNorwegianHeight     abi.ChainEpoch = -13
-	UpgradeTurboHeight         abi.ChainEpoch = -14
-	UpgradeHyperdriveHeight    abi.ChainEpoch = -15
+	UpgradeSmokeHeight      abi.ChainEpoch = -1
+	UpgradeIgnitionHeight   abi.ChainEpoch = -2
+	UpgradeRefuelHeight     abi.ChainEpoch = -3
+	UpgradeTapeHeight       abi.ChainEpoch = -4
+	UpgradeAssemblyHeight   abi.ChainEpoch = 10
+	UpgradeLiftoffHeight    abi.ChainEpoch = -5
+	UpgradeKumquatHeight    abi.ChainEpoch = -6
+	UpgradeCalicoHeight     abi.ChainEpoch = -8
+	UpgradePersianHeight    abi.ChainEpoch = -9
+	UpgradeOrangeHeight     abi.ChainEpoch = -10
+	UpgradeClausHeight      abi.ChainEpoch = -11
+	UpgradeTrustHeight      abi.ChainEpoch = -12
+	UpgradeNorwegianHeight  abi.ChainEpoch = -13
+	UpgradeTurboHeight      abi.ChainEpoch = -14
+	UpgradeHyperdriveHeight abi.ChainEpoch = -15
 
 	DrandSchedule = map[abi.ChainEpoch]DrandEnum{
 		0: DrandMainnet,
diff --git a/chain/actors/version.go b/chain/actors/version.go
index 36e8d17b4..8787089af 100644
--- a/chain/actors/version.go
+++ b/chain/actors/version.go
@@ -25,7 +25,7 @@ func VersionForNetwork(version network.Version) (Version, error) {
 	switch version {
 	case network.Version0, network.Version1, network.Version2, network.Version3:
 		return Version0, nil
-	case network.Version4, network.Version5, network.Version6, network.Version6AndAHalf, network.Version7, network.Version8, network.Version9:
+	case network.Version4, network.Version5, network.Version6, network.Version7, network.Version8, network.Version9:
 		return Version2, nil
 	case network.Version10, network.Version11:
 		return Version3, nil
diff --git a/chain/messagepool/check.go b/chain/messagepool/check.go
index 9a55c283c..283c0d119 100644
--- a/chain/messagepool/check.go
+++ b/chain/messagepool/check.go
@@ -281,7 +281,7 @@ func (mp *MessagePool) checkMessages(ctx context.Context, msgs []*types.Message,
 		// gas checks
 
 		// 4. Min Gas
-		minGas := vm.PricelistByVersion(nv).OnChainMessage(m.ChainLength())
+		minGas := vm.PricelistByEpoch(epoch).OnChainMessage(m.ChainLength())
 
 		check = api.MessageCheckStatus{
 			Cid: m.Cid(),
diff --git a/chain/messagepool/messagepool.go b/chain/messagepool/messagepool.go
index 175cda9ff..ee2518ed9 100644
--- a/chain/messagepool/messagepool.go
+++ b/chain/messagepool/messagepool.go
@@ -616,7 +616,8 @@ func (mp *MessagePool) addLocal(ctx context.Context, m *types.SignedMessage) err
 // For non local messages, if the message cannot be included in the next 20 blocks it returns
 // a (soft) validation error.
 func (mp *MessagePool) verifyMsgBeforeAdd(m *types.SignedMessage, curTs *types.TipSet, local bool) (bool, error) {
-	minGas := vm.PricelistByVersion(build.NewestNetworkVersion).OnChainMessage(m.ChainLength())
+	epoch := curTs.Height()
+	minGas := vm.PricelistByEpoch(epoch).OnChainMessage(m.ChainLength())
 
 	if err := m.VMMessage().ValidForBlockInclusion(minGas.Total(), build.NewestNetworkVersion); err != nil {
 		return false, xerrors.Errorf("message will not be included in a block: %w", err)
diff --git a/chain/messagepool/selection.go b/chain/messagepool/selection.go
index 60d75a841..611ab8e5f 100644
--- a/chain/messagepool/selection.go
+++ b/chain/messagepool/selection.go
@@ -749,7 +749,7 @@ func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint6
 		}
 		curNonce++
 
-		minGas := vm.PricelistByVersion(build.NewestNetworkVersion).OnChainMessage(m.ChainLength()).Total()
+		minGas := vm.PricelistByEpoch(ts.Height()).OnChainMessage(m.ChainLength()).Total()
 		if m.Message.GasLimit < minGas {
 			break
 		}
diff --git a/chain/stmgr/forks.go b/chain/stmgr/forks.go
index fb8e407ed..212272a95 100644
--- a/chain/stmgr/forks.go
+++ b/chain/stmgr/forks.go
@@ -4,42 +4,27 @@ import (
 	"bytes"
 	"context"
 	"encoding/binary"
-	"runtime"
 	"sort"
 	"sync"
 	"time"
 
-	"github.com/filecoin-project/specs-actors/v5/actors/migration/nv13"
-
-	"github.com/filecoin-project/go-state-types/rt"
+	"github.com/ipfs/go-cid"
+	"golang.org/x/xerrors"
 
 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/go-state-types/big"
 	"github.com/filecoin-project/go-state-types/network"
-	"github.com/filecoin-project/lotus/blockstore"
-	"github.com/filecoin-project/lotus/build"
+	"github.com/filecoin-project/go-state-types/rt"
+
+	"github.com/filecoin-project/specs-actors/v3/actors/migration/nv10"
+
 	"github.com/filecoin-project/lotus/chain/actors/adt"
 	"github.com/filecoin-project/lotus/chain/actors/builtin"
 	init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
-	"github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
 	"github.com/filecoin-project/lotus/chain/state"
-	"github.com/filecoin-project/lotus/chain/store"
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/chain/vm"
-	builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
-	miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
-	multisig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
-	power0 "github.com/filecoin-project/specs-actors/actors/builtin/power"
-	"github.com/filecoin-project/specs-actors/actors/migration/nv3"
-	adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
-	"github.com/filecoin-project/specs-actors/v2/actors/migration/nv4"
-	"github.com/filecoin-project/specs-actors/v2/actors/migration/nv7"
-	"github.com/filecoin-project/specs-actors/v3/actors/migration/nv10"
-	"github.com/filecoin-project/specs-actors/v4/actors/migration/nv12"
-	"github.com/ipfs/go-cid"
-	cbor "github.com/ipfs/go-ipld-cbor"
-	"golang.org/x/xerrors"
 )
 
 // MigrationCache can be used to cache information used by a migration. This is primarily useful to
@@ -125,121 +110,6 @@ func (ml migrationLogger) Log(level rt.LogLevel, msg string, args ...interface{}
 	}
 }
 
-func DefaultUpgradeSchedule() UpgradeSchedule {
-	var us UpgradeSchedule
-
-	updates := []Upgrade{{
-		Height:    build.UpgradeBreezeHeight,
-		Network:   network.Version1,
-		Migration: UpgradeFaucetBurnRecovery,
-	}, {
-		Height:    build.UpgradeSmokeHeight,
-		Network:   network.Version2,
-		Migration: nil,
-	}, {
-		Height:    build.UpgradeIgnitionHeight,
-		Network:   network.Version3,
-		Migration: UpgradeIgnition,
-	}, {
-		Height:    build.UpgradeRefuelHeight,
-		Network:   network.Version3,
-		Migration: UpgradeRefuel,
-	}, {
-		Height:    build.UpgradeAssemblyHeight,
-		Network:   network.Version4,
-		Expensive: true,
-		Migration: UpgradeActorsV2,
-	}, {
-		Height:    build.UpgradeTapeHeight,
-		Network:   network.Version5,
-		Migration: nil,
-	}, {
-		Height:    build.UpgradeLiftoffHeight,
-		Network:   network.Version5,
-		Migration: UpgradeLiftoff,
-	}, {
-		Height:    build.UpgradeKumquatHeight,
-		Network:   network.Version6,
-		Migration: nil,
-	}, {
-		Height:    build.UpgradePricelistOopsHeight,
-		Network:   network.Version6AndAHalf,
-		Migration: nil,
-	}, {
-		Height:    build.UpgradeCalicoHeight,
-		Network:   network.Version7,
-		Migration: UpgradeCalico,
-	}, {
-		Height:    build.UpgradePersianHeight,
-		Network:   network.Version8,
-		Migration: nil,
-	}, {
-		Height:    build.UpgradeOrangeHeight,
-		Network:   network.Version9,
-		Migration: nil,
-	}, {
-		Height:    build.UpgradeTrustHeight,
-		Network:   network.Version10,
-		Migration: UpgradeActorsV3,
-		PreMigrations: []PreMigration{{
-			PreMigration:    PreUpgradeActorsV3,
-			StartWithin:     120,
-			DontStartWithin: 60,
-			StopWithin:      35,
-		}, {
-			PreMigration:    PreUpgradeActorsV3,
-			StartWithin:     30,
-			DontStartWithin: 15,
-			StopWithin:      5,
-		}},
-		Expensive: true,
-	}, {
-		Height:    build.UpgradeNorwegianHeight,
-		Network:   network.Version11,
-		Migration: nil,
-	}, {
-		Height:    build.UpgradeTurboHeight,
-		Network:   network.Version12,
-		Migration: UpgradeActorsV4,
-		PreMigrations: []PreMigration{{
-			PreMigration:    PreUpgradeActorsV4,
-			StartWithin:     120,
-			DontStartWithin: 60,
-			StopWithin:      35,
-		}, {
-			PreMigration:    PreUpgradeActorsV4,
-			StartWithin:     30,
-			DontStartWithin: 15,
-			StopWithin:      5,
-		}},
-		Expensive: true,
-	}, {
-		Height:    build.UpgradeHyperdriveHeight,
-		Network:   network.Version13,
-		Migration: UpgradeActorsV5,
-		PreMigrations: []PreMigration{{
-			PreMigration:    PreUpgradeActorsV5,
-			StartWithin:     120,
-			DontStartWithin: 60,
-			StopWithin:      35,
-		}, {
-			PreMigration:    PreUpgradeActorsV5,
-			StartWithin:     30,
-			DontStartWithin: 15,
-			StopWithin:      5,
-		}},
-		Expensive: true}}
-
-	for _, u := range updates {
-		if u.Height < 0 {
-			// upgrade disabled
-			continue
-		}
-		us = append(us, u)
-	}
-	return us
-}
-
 func (us UpgradeSchedule) Validate() error {
 	// Make sure each upgrade is valid.
 	for _, u := range us {
@@ -488,469 +358,6 @@ func doTransfer(tree types.StateTree, from, to address.Address, amt abi.TokenAmo
 	return nil
 }
 
-func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ MigrationCache, em ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
-	// Some initial parameters
-	FundsForMiners := types.FromFil(1_000_000)
-	LookbackEpoch := abi.ChainEpoch(32000)
-	AccountCap := types.FromFil(0)
-	BaseMinerBalance := types.FromFil(20)
-	DesiredReimbursementBalance := types.FromFil(5_000_000)
-
-	isSystemAccount := func(addr address.Address) (bool, error) {
-		id, err := address.IDFromAddress(addr)
-		if err != nil {
-			return false, xerrors.Errorf("id address: %w", err)
-		}
-
-		if id < 1000 {
-			return true, nil
-		}
-		return false, nil
-	}
-
-	minerFundsAlloc := func(pow, tpow abi.StoragePower) abi.TokenAmount {
-		return types.BigDiv(types.BigMul(pow, FundsForMiners), tpow)
-	}
-
-	// Grab lookback state for account checks
-	lbts, err := sm.ChainStore().GetTipsetByHeight(ctx, LookbackEpoch, ts, false)
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("failed to get tipset at lookback height: %w", err)
-	}
-
-	lbtree, err := sm.ParentState(lbts)
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("loading state tree failed: %w", err)
-	}
-
-	tree, err := sm.StateTree(root)
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("getting state tree: %w", err)
-	}
-
-	type transfer struct {
-		From address.Address
-		To   address.Address
-		Amt  abi.TokenAmount
-	}
-
-	var transfers []transfer
-	subcalls := make([]types.ExecutionTrace, 0)
-	transferCb := func(trace types.ExecutionTrace) {
-		subcalls = append(subcalls, trace)
-	}
-
-	// Take all excess funds away, put them into the reserve account
-	err = tree.ForEach(func(addr address.Address, act *types.Actor) error {
-		switch act.Code {
-		case builtin0.AccountActorCodeID, builtin0.MultisigActorCodeID, builtin0.PaymentChannelActorCodeID:
-			sysAcc, err := isSystemAccount(addr)
-			if err != nil {
-				return xerrors.Errorf("checking system account: %w", err)
-			}
-
-			if !sysAcc {
-				transfers = append(transfers, transfer{
-					From: addr,
-					To:   builtin.ReserveAddress,
-					Amt:  act.Balance,
-				})
-			}
-		case builtin0.StorageMinerActorCodeID:
-			var st miner0.State
-			if err := sm.ChainStore().ActorStore(ctx).Get(ctx, act.Head, &st); err != nil {
-				return xerrors.Errorf("failed to load miner state: %w", err)
-			}
-
-			var available abi.TokenAmount
-			{
-				defer func() {
-					if err := recover(); err != nil {
-						log.Warnf("Get available balance failed (%s, %s, %s): %s", addr, act.Head, act.Balance, err)
-					}
-					available = abi.NewTokenAmount(0)
-				}()
-				// this panics if the miner doesnt have enough funds to cover their locked pledge
-				available = st.GetAvailableBalance(act.Balance)
-			}
-
-			if !available.IsZero() {
-				transfers = append(transfers, transfer{
-					From: addr,
-					To:   builtin.ReserveAddress,
-					Amt:  available,
-				})
-			}
-		}
-		return nil
-	})
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("foreach over state tree failed: %w", err)
-	}
-
-	// Execute transfers from previous step
-	for _, t := range transfers {
-		if err := doTransfer(tree, t.From, t.To, t.Amt, transferCb); err != nil {
-			return cid.Undef, xerrors.Errorf("transfer %s %s->%s failed: %w", t.Amt, t.From, t.To, err)
-		}
-	}
-
-	// pull up power table to give miners back some funds proportional to their power
-	var ps power0.State
-	powAct, err := tree.GetActor(builtin0.StoragePowerActorAddr)
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("failed to load power actor: %w", err)
-	}
-
-	cst := cbor.NewCborStore(sm.ChainStore().StateBlockstore())
-	if err := cst.Get(ctx, powAct.Head, &ps); err != nil {
-		return cid.Undef, xerrors.Errorf("failed to get power actor state: %w", err)
-	}
-
-	totalPower := ps.TotalBytesCommitted
-
-	var transfersBack []transfer
-	// Now, we return some funds to places where they are needed
-	err = tree.ForEach(func(addr address.Address, act *types.Actor) error {
-		lbact, err := lbtree.GetActor(addr)
-		if err != nil {
-			if !xerrors.Is(err, types.ErrActorNotFound) {
-				return xerrors.Errorf("failed to get actor in lookback state")
-			}
-		}
-
-		prevBalance := abi.NewTokenAmount(0)
-		if lbact != nil {
-			prevBalance = lbact.Balance
-		}
-
-		switch act.Code {
-		case builtin0.AccountActorCodeID, builtin0.MultisigActorCodeID, builtin0.PaymentChannelActorCodeID:
-			nbalance := big.Min(prevBalance, AccountCap)
-			if nbalance.Sign() != 0 {
-				transfersBack = append(transfersBack, transfer{
-					From: builtin.ReserveAddress,
-					To:   addr,
-					Amt:  nbalance,
-				})
-			}
-		case builtin0.StorageMinerActorCodeID:
-			var st miner0.State
-			if err := sm.ChainStore().ActorStore(ctx).Get(ctx, act.Head, &st); err != nil {
-				return xerrors.Errorf("failed to load miner state: %w", err)
-			}
-
-			var minfo miner0.MinerInfo
-			if err := cst.Get(ctx, st.Info, &minfo); err != nil {
-				return xerrors.Errorf("failed to get miner info: %w", err)
-			}
-
-			sectorsArr, err := adt0.AsArray(sm.ChainStore().ActorStore(ctx), st.Sectors)
-			if err != nil {
-				return xerrors.Errorf("failed to load sectors array: %w", err)
-			}
-
-			slen := sectorsArr.Length()
-
-			power := types.BigMul(types.NewInt(slen), types.NewInt(uint64(minfo.SectorSize)))
-
-			mfunds := minerFundsAlloc(power, totalPower)
-			transfersBack = append(transfersBack, transfer{
-				From: builtin.ReserveAddress,
-				To:   minfo.Worker,
-				Amt:  mfunds,
-			})
-
-			// Now make sure to give each miner who had power at the lookback some FIL
-			lbact, err := lbtree.GetActor(addr)
-			if err == nil {
-				var lbst miner0.State
-				if err := sm.ChainStore().ActorStore(ctx).Get(ctx, lbact.Head, &lbst); err != nil {
-					return xerrors.Errorf("failed to load miner state: %w", err)
-				}
-
-				lbsectors, err := adt0.AsArray(sm.ChainStore().ActorStore(ctx), lbst.Sectors)
-				if err != nil {
-					return xerrors.Errorf("failed to load lb sectors array: %w", err)
-				}
-
-				if lbsectors.Length() > 0 {
-					transfersBack = append(transfersBack, transfer{
-						From: builtin.ReserveAddress,
-						To:   minfo.Worker,
-						Amt:  BaseMinerBalance,
-					})
-				}
-
-			} else {
-				log.Warnf("failed to get miner in lookback state: %s", err)
-			}
-		}
-		return nil
-	})
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("foreach over state tree failed: %w", err)
-	}
-
-	for _, t := range transfersBack {
-		if err := doTransfer(tree, t.From, t.To, t.Amt, transferCb); err != nil {
-			return cid.Undef, xerrors.Errorf("transfer %s %s->%s failed: %w", t.Amt, t.From, t.To, err)
-		}
-	}
-
-	// transfer all burnt funds back to the reserve account
-	burntAct, err := tree.GetActor(builtin0.BurntFundsActorAddr)
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("failed to load burnt funds actor: %w", err)
-	}
-	if err := doTransfer(tree, builtin0.BurntFundsActorAddr, builtin.ReserveAddress, burntAct.Balance, transferCb); err != nil {
-		return cid.Undef, xerrors.Errorf("failed to unburn funds: %w", err)
-	}
-
-	// Top up the reimbursement service
-	reimbAddr, err := address.NewFromString("t0111")
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("failed to parse reimbursement service address")
-	}
-
-	reimb, err := tree.GetActor(reimbAddr)
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("failed to load reimbursement account actor: %w", err)
-	}
-
-	difference := types.BigSub(DesiredReimbursementBalance, reimb.Balance)
-	if err := doTransfer(tree, builtin.ReserveAddress, reimbAddr, difference, transferCb); err != nil {
-		return cid.Undef, xerrors.Errorf("failed to top up reimbursement account: %w", err)
-	}
-
-	// Now, a final sanity check to make sure the balances all check out
-	total := abi.NewTokenAmount(0)
-	err = tree.ForEach(func(addr address.Address, act *types.Actor) error {
-		total = types.BigAdd(total, act.Balance)
-		return nil
-	})
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("checking final state balance failed: %w", err)
-	}
-
-	exp := types.FromFil(build.FilBase)
-	if !exp.Equals(total) {
-		return cid.Undef, xerrors.Errorf("resultant state tree account balance was not correct: %s", total)
-	}
-
-	if em != nil {
-		// record the transfer in execution traces
-
-		fakeMsg := makeFakeMsg(builtin.SystemActorAddr, builtin.SystemActorAddr, big.Zero(), uint64(epoch))
-
-		if err := em.MessageApplied(ctx, ts, fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{
-			MessageReceipt: *makeFakeRct(),
-			ActorErr:       nil,
-			ExecutionTrace: types.ExecutionTrace{
-				Msg:        fakeMsg,
-				MsgRct:     makeFakeRct(),
-				Error:      "",
-				Duration:   0,
-				GasCharges: nil,
-				Subcalls:   subcalls,
-			},
-			Duration: 0,
-			GasCosts: nil,
-		}, false); err != nil {
-			return cid.Undef, xerrors.Errorf("recording transfers: %w", err)
-		}
-	}
-
-	return tree.Flush(ctx)
-}
-
-func UpgradeIgnition(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
-	store := sm.cs.ActorStore(ctx)
-
-	if build.UpgradeLiftoffHeight <= epoch {
-		return cid.Undef, xerrors.Errorf("liftoff height must be beyond ignition height")
-	}
-
-	nst, err := nv3.MigrateStateTree(ctx, store, root, epoch)
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("migrating actors state: %w", err)
-	}
-
-	tree, err := sm.StateTree(nst)
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("getting state tree: %w", err)
-	}
-
-	err = setNetworkName(ctx, store, tree, "ignition")
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("setting network name: %w", err)
-	}
-
-	split1, err := address.NewFromString("t0115")
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("first split address: %w", err)
-	}
-
-	split2, err := address.NewFromString("t0116")
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("second split address: %w", err)
-	}
-
-	err = resetGenesisMsigs0(ctx, sm, store, tree, build.UpgradeLiftoffHeight)
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("resetting genesis msig start epochs: %w", err)
-	}
-
-	err = splitGenesisMultisig0(ctx, cb, split1, store, tree, 50, epoch, ts)
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("splitting first msig: %w", err)
-	}
-
-	err = splitGenesisMultisig0(ctx, cb, split2, store, tree, 50, epoch, ts)
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("splitting second msig: %w", err)
-	}
-
-	err = nv3.CheckStateTree(ctx, store, nst, epoch, builtin0.TotalFilecoin)
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("sanity check after ignition upgrade failed: %w", err)
-	}
-
-	return tree.Flush(ctx)
-}
-
-func UpgradeRefuel(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
-
-	store := sm.cs.ActorStore(ctx)
-	tree, err := sm.StateTree(root)
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("getting state tree: %w", err)
-	}
-
-	err = resetMultisigVesting0(ctx, store, tree, builtin.SaftAddress, 0, 0, big.Zero())
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("tweaking msig vesting: %w", err)
-	}
-
-	err = resetMultisigVesting0(ctx, store, tree, builtin.ReserveAddress, 0, 0, big.Zero())
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("tweaking msig vesting: %w", err)
-	}
-
-	err = resetMultisigVesting0(ctx, store, tree, builtin.RootVerifierAddress, 0, 0, big.Zero())
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("tweaking msig vesting: %w", err)
-	}
-
-	return tree.Flush(ctx)
-}
-
-func UpgradeActorsV2(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
-	buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync())
-	store := store.ActorStore(ctx, buf)
-
-	info, err := store.Put(ctx, new(types.StateInfo0))
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("failed to create new state info for actors v2: %w", err)
-	}
-
-	newHamtRoot, err := nv4.MigrateStateTree(ctx, store, root, epoch, nv4.DefaultConfig())
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("upgrading to actors v2: %w", err)
-	}
-
-	newRoot, err := store.Put(ctx, &types.StateRoot{
-		Version: types.StateTreeVersion1,
-		Actors:  newHamtRoot,
-		Info:    info,
-	})
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
-	}
-
-	// perform some basic sanity checks to make sure everything still works.
-	if newSm, err := state.LoadStateTree(store, newRoot); err != nil {
-		return cid.Undef, xerrors.Errorf("state tree sanity load failed: %w", err)
-	} else if newRoot2, err := newSm.Flush(ctx); err != nil {
-		return cid.Undef, xerrors.Errorf("state tree sanity flush failed: %w", err)
-	} else if newRoot2 != newRoot {
-		return cid.Undef, xerrors.Errorf("state-root mismatch: %s != %s", newRoot, newRoot2)
-	} else if _, err := newSm.GetActor(builtin0.InitActorAddr); err != nil {
-		return cid.Undef, xerrors.Errorf("failed to load init actor after upgrade: %w", err)
-	}
-
-	{
-		from := buf
-		to := buf.Read()
-
-		if err := vm.Copy(ctx, from, to, newRoot); err != nil {
-			return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err)
-		}
-	}
-
-	return newRoot, nil
-}
-
-func UpgradeLiftoff(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
-	tree, err := sm.StateTree(root)
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("getting state tree: %w", err)
-	}
-
-	err = setNetworkName(ctx, sm.cs.ActorStore(ctx), tree, "mainnet")
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("setting network name: %w", err)
-	}
-
-	return tree.Flush(ctx)
-}
-
-func UpgradeCalico(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
-	if build.BuildType != build.BuildMainnet {
-		return root, nil
-	}
-
-	store := sm.cs.ActorStore(ctx)
-	var stateRoot types.StateRoot
-	if err := store.Get(ctx, root, &stateRoot); err != nil {
-		return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
-	}
-
-	if stateRoot.Version != types.StateTreeVersion1 {
-		return cid.Undef, xerrors.Errorf(
-			"expected state root version 1 for calico upgrade, got %d",
-			stateRoot.Version,
-		)
-	}
-
-	newHamtRoot, err := nv7.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, nv7.DefaultConfig())
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("running nv7 migration: %w", err)
-	}
-
-	newRoot, err := store.Put(ctx, &types.StateRoot{
-		Version: stateRoot.Version,
-		Actors:  newHamtRoot,
-		Info:    stateRoot.Info,
-	})
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
-	}
-
-	// perform some basic sanity checks to make sure everything still works.
-	if newSm, err := state.LoadStateTree(store, newRoot); err != nil {
-		return cid.Undef, xerrors.Errorf("state tree sanity load failed: %w", err)
-	} else if newRoot2, err := newSm.Flush(ctx); err != nil {
-		return cid.Undef, xerrors.Errorf("state tree sanity flush failed: %w", err)
-	} else if newRoot2 != newRoot {
-		return cid.Undef, xerrors.Errorf("state-root mismatch: %s != %s", newRoot, newRoot2)
-	} else if _, err := newSm.GetActor(builtin0.InitActorAddr); err != nil {
-		return cid.Undef, xerrors.Errorf("failed to load init actor after upgrade: %w", err)
-	}
-
-	return newRoot, nil
-}
-
 func terminateActor(ctx context.Context, tree *state.StateTree, addr address.Address, em ExecMonitor, epoch abi.ChainEpoch, ts *types.TipSet) error {
 	a, err := tree.GetActor(addr)
 	if xerrors.Is(err, types.ErrActorNotFound) {
@@ -1011,282 +418,8 @@ func terminateActor(ctx context.Context, tree *state.StateTree, addr address.Add
 	return tree.SetActor(init_.Address, ia)
 }
 
-func UpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
-	// Use all the CPUs except 3.
-	workerCount := runtime.NumCPU() - 3
-	if workerCount <= 0 {
-		workerCount = 1
-	}
-
-	config := nv10.Config{
-		MaxWorkers:        uint(workerCount),
-		JobQueueSize:      1000,
-		ResultQueueSize:   100,
-		ProgressLogPeriod: 10 * time.Second,
-	}
-	newRoot, err := upgradeActorsV3Common(ctx, sm, cache, root, epoch, ts, config)
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("migrating actors v3 state: %w", err)
-	}
-
-	tree, err := sm.StateTree(newRoot)
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("getting state tree: %w", err)
-	}
-
-	if build.BuildType == build.BuildMainnet {
-		err := terminateActor(ctx, tree, build.ZeroAddress, cb, epoch, ts)
-		if err != nil && !xerrors.Is(err, types.ErrActorNotFound) {
-			return cid.Undef, xerrors.Errorf("deleting zero bls actor: %w", err)
-		}
-
-		newRoot, err = tree.Flush(ctx)
-		if err != nil {
-			return cid.Undef, xerrors.Errorf("flushing state tree: %w", err)
-		}
-	}
-
-	return newRoot, nil
-}
-
-func PreUpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
-	// Use half the CPUs for pre-migration, but leave at least 3.
-	workerCount := runtime.NumCPU()
-	if workerCount <= 4 {
-		workerCount = 1
-	} else {
-		workerCount /= 2
-	}
-	config := nv10.Config{MaxWorkers: uint(workerCount)}
-	_, err := upgradeActorsV3Common(ctx, sm, cache, root, epoch, ts, config)
-	return err
-}
-
-func upgradeActorsV3Common(
-	ctx context.Context, sm *StateManager, cache MigrationCache,
-	root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
-	config nv10.Config,
-) (cid.Cid, error) {
-	buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync())
-	store := store.ActorStore(ctx, buf)
-
-	// Load the state root.
-	var stateRoot types.StateRoot
-	if err := store.Get(ctx, root, &stateRoot); err != nil {
-		return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
-	}
-
-	if stateRoot.Version != types.StateTreeVersion1 {
-		return cid.Undef, xerrors.Errorf(
-			"expected state root version 1 for actors v3 upgrade, got %d",
-			stateRoot.Version,
-		)
-	}
-
-	// Perform the migration
-	newHamtRoot, err := nv10.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache)
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("upgrading to actors v3: %w", err)
-	}
-
-	// Persist the result.
-	newRoot, err := store.Put(ctx, &types.StateRoot{
-		Version: types.StateTreeVersion2,
-		Actors:  newHamtRoot,
-		Info:    stateRoot.Info,
-	})
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
-	}
-
-	// Persist the new tree.
-
-	{
-		from := buf
-		to := buf.Read()
-
-		if err := vm.Copy(ctx, from, to, newRoot); err != nil {
-			return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err)
-		}
-	}
-
-	return newRoot, nil
-}
-
-func UpgradeActorsV4(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
-	// Use all the CPUs except 3.
-	workerCount := runtime.NumCPU() - 3
-	if workerCount <= 0 {
-		workerCount = 1
-	}
-
-	config := nv12.Config{
-		MaxWorkers:        uint(workerCount),
-		JobQueueSize:      1000,
-		ResultQueueSize:   100,
-		ProgressLogPeriod: 10 * time.Second,
-	}
-
-	newRoot, err := upgradeActorsV4Common(ctx, sm, cache, root, epoch, ts, config)
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("migrating actors v4 state: %w", err)
-	}
-
-	return newRoot, nil
-}
-
-func PreUpgradeActorsV4(ctx context.Context, sm *StateManager, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
-	// Use half the CPUs for pre-migration, but leave at least 3.
-	workerCount := runtime.NumCPU()
-	if workerCount <= 4 {
-		workerCount = 1
-	} else {
-		workerCount /= 2
-	}
-	config := nv12.Config{MaxWorkers: uint(workerCount)}
-	_, err := upgradeActorsV4Common(ctx, sm, cache, root, epoch, ts, config)
-	return err
-}
-
-func upgradeActorsV4Common(
-	ctx context.Context, sm *StateManager, cache MigrationCache,
-	root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
-	config nv12.Config,
-) (cid.Cid, error) {
-	buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync())
-	store := store.ActorStore(ctx, buf)
-
-	// Load the state root.
-	var stateRoot types.StateRoot
-	if err := store.Get(ctx, root, &stateRoot); err != nil {
-		return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
-	}
-
-	if stateRoot.Version != types.StateTreeVersion2 {
-		return cid.Undef, xerrors.Errorf(
-			"expected state root version 2 for actors v4 upgrade, got %d",
-			stateRoot.Version,
-		)
-	}
-
-	// Perform the migration
-	newHamtRoot, err := nv12.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache)
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("upgrading to actors v4: %w", err)
-	}
-
-	// Persist the result.
-	newRoot, err := store.Put(ctx, &types.StateRoot{
-		Version: types.StateTreeVersion3,
-		Actors:  newHamtRoot,
-		Info:    stateRoot.Info,
-	})
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
-	}
-
-	// Persist the new tree.
-
-	{
-		from := buf
-		to := buf.Read()
-
-		if err := vm.Copy(ctx, from, to, newRoot); err != nil {
-			return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err)
-		}
-	}
-
-	return newRoot, nil
-}
-
-func UpgradeActorsV5(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
-	// Use all the CPUs except 3.
-	workerCount := runtime.NumCPU() - 3
-	if workerCount <= 0 {
-		workerCount = 1
-	}
-
-	config := nv13.Config{
-		MaxWorkers:        uint(workerCount),
-		JobQueueSize:      1000,
-		ResultQueueSize:   100,
-		ProgressLogPeriod: 10 * time.Second,
-	}
-
-	newRoot, err := upgradeActorsV5Common(ctx, sm, cache, root, epoch, ts, config)
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("migrating actors v5 state: %w", err)
-	}
-
-	return newRoot, nil
-}
-
-func PreUpgradeActorsV5(ctx context.Context, sm *StateManager, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
-	// Use half the CPUs for pre-migration, but leave at least 3.
-	workerCount := runtime.NumCPU()
-	if workerCount <= 4 {
-		workerCount = 1
-	} else {
-		workerCount /= 2
-	}
-	config := nv13.Config{MaxWorkers: uint(workerCount)}
-	_, err := upgradeActorsV5Common(ctx, sm, cache, root, epoch, ts, config)
-	return err
-}
-
-func upgradeActorsV5Common(
-	ctx context.Context, sm *StateManager, cache MigrationCache,
-	root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
-	config nv13.Config,
-) (cid.Cid, error) {
-	buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync())
-	store := store.ActorStore(ctx, buf)
-
-	// Load the state root.
-	var stateRoot types.StateRoot
-	if err := store.Get(ctx, root, &stateRoot); err != nil {
-		return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
-	}
-
-	if stateRoot.Version != types.StateTreeVersion3 {
-		return cid.Undef, xerrors.Errorf(
-			"expected state root version 3 for actors v5 upgrade, got %d",
-			stateRoot.Version,
-		)
-	}
-
-	// Perform the migration
-	newHamtRoot, err := nv13.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache)
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("upgrading to actors v5: %w", err)
-	}
-
-	// Persist the result.
-	newRoot, err := store.Put(ctx, &types.StateRoot{
-		Version: types.StateTreeVersion4,
-		Actors:  newHamtRoot,
-		Info:    stateRoot.Info,
-	})
-	if err != nil {
-		return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
-	}
-
-	// Persist the new tree.
-
-	{
-		from := buf
-		to := buf.Read()
-
-		if err := vm.Copy(ctx, from, to, newRoot); err != nil {
-			return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err)
-		}
-	}
-
-	return newRoot, nil
-}
-
 func setNetworkName(ctx context.Context, store adt.Store, tree *state.StateTree, name string) error {
-	ia, err := tree.GetActor(builtin0.InitActorAddr)
+	ia, err := tree.GetActor(init_.Address)
 	if err != nil {
 		return xerrors.Errorf("getting init actor: %w", err)
 	}
@@ -1305,136 +438,13 @@ func setNetworkName(ctx context.Context, store adt.Store, tree *state.StateTree,
 		return xerrors.Errorf("writing new init state: %w", err)
 	}
 
-	if err := tree.SetActor(builtin0.InitActorAddr, ia); err != nil {
+	if err := tree.SetActor(init_.Address, ia); err != nil {
 		return xerrors.Errorf("setting init actor: %w", err)
 	}
 
 	return nil
 }
 
-func splitGenesisMultisig0(ctx context.Context, em ExecMonitor, addr address.Address, store adt0.Store, tree *state.StateTree, portions uint64, epoch abi.ChainEpoch, ts *types.TipSet) error {
-	if portions < 1 {
-		return xerrors.Errorf("cannot split into 0 portions")
-	}
-
-	mact, err := tree.GetActor(addr)
-	if err != nil {
-		return xerrors.Errorf("getting msig actor: %w", err)
-	}
-
-	mst, err := multisig.Load(store, mact)
-	if err != nil {
-		return xerrors.Errorf("getting msig state: %w", err)
-	}
-
-	signers, err := mst.Signers()
-	if err != nil {
-		return xerrors.Errorf("getting msig signers: %w", err)
-	}
-
-	thresh, err := mst.Threshold()
-	if err != nil {
-		return xerrors.Errorf("getting msig threshold: %w", err)
-	}
-
-	ibal, err := mst.InitialBalance()
-	if err != nil {
-		return xerrors.Errorf("getting msig initial balance: %w", err)
-	}
-
-	se, err := mst.StartEpoch()
-	if err != nil {
-		return xerrors.Errorf("getting msig start epoch: %w", err)
-	}
-
-	ud, err := mst.UnlockDuration()
-	if err != nil {
-		return xerrors.Errorf("getting msig unlock duration: %w", err)
-	}
-
-	pending, err := adt0.MakeEmptyMap(store).Root()
-	if err != nil {
-		return xerrors.Errorf("failed to create empty map: %w", err)
-	}
-
-	newIbal := big.Div(ibal, types.NewInt(portions))
-	newState := &multisig0.State{
-		Signers:               signers,
-		NumApprovalsThreshold: thresh,
-		NextTxnID:             0,
-		InitialBalance:        newIbal,
-		StartEpoch:            se,
-		UnlockDuration:        ud,
-		PendingTxns:           pending,
-	}
-
-	scid, err := store.Put(ctx, newState)
-	if err != nil {
-		return xerrors.Errorf("storing new state: %w", err)
-	}
-
-	newActor := types.Actor{
-		Code:    builtin0.MultisigActorCodeID,
-		Head:    scid,
-		Nonce:   0,
-		Balance: big.Zero(),
-	}
-
-	i := uint64(0)
-	subcalls := make([]types.ExecutionTrace, 0, portions)
-	transferCb := func(trace types.ExecutionTrace) {
-		subcalls = append(subcalls, trace)
-	}
-
-	for i < portions {
-		keyAddr, err := makeKeyAddr(addr, i)
-		if err != nil {
-			return xerrors.Errorf("creating key address: %w", err)
-		}
-
-		idAddr, err := tree.RegisterNewAddress(keyAddr)
-		if err != nil {
-			return xerrors.Errorf("registering new address: %w", err)
-		}
-
-		err = tree.SetActor(idAddr, &newActor)
-		if err != nil {
-			return xerrors.Errorf("setting new msig actor state: %w", err)
-		}
-
-		if err := doTransfer(tree, addr, idAddr, newIbal, transferCb); err != nil {
-			return xerrors.Errorf("transferring split msig balance: %w", err)
-		}
-
-		i++
-	}
-
-	if em != nil {
-		// record the transfer in execution traces
-
-		fakeMsg := makeFakeMsg(builtin.SystemActorAddr, addr, big.Zero(), uint64(epoch))
-
-		if err := em.MessageApplied(ctx, ts, fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{
-			MessageReceipt: *makeFakeRct(),
-			ActorErr:       nil,
-			ExecutionTrace: types.ExecutionTrace{
-				Msg:        fakeMsg,
-				MsgRct:     makeFakeRct(),
-				Error:      "",
-				Duration:   0,
-				GasCharges: nil,
-				Subcalls:   subcalls,
-			},
-			Duration: 0,
-			GasCosts: nil,
-		}, false); err != nil {
-			return xerrors.Errorf("recording transfers: %w", err)
-		}
-	}
-
-	return nil
-}
-
 func makeKeyAddr(splitAddr address.Address, count uint64) (address.Address, error) {
 	var b bytes.Buffer
 	if err := splitAddr.MarshalCBOR(&b); err != nil {
@@ -1457,88 +467,6 @@ func makeKeyAddr(splitAddr address.Address, count uint64) (address.Address, erro
 	return addr, nil
 }
 
-// TODO: After the Liftoff epoch, refactor this to use resetMultisigVesting
-func resetGenesisMsigs0(ctx context.Context, sm *StateManager, store adt0.Store, tree *state.StateTree, startEpoch abi.ChainEpoch) error {
-	gb, err := sm.cs.GetGenesis()
-	if err != nil {
-		return xerrors.Errorf("getting genesis block: %w", err)
-	}
-
-	gts, err := types.NewTipSet([]*types.BlockHeader{gb})
-	if err != nil {
-		return xerrors.Errorf("getting genesis tipset: %w", err)
-	}
-
-	cst := cbor.NewCborStore(sm.cs.StateBlockstore())
-	genesisTree, err := state.LoadStateTree(cst, gts.ParentState())
-	if err != nil {
-		return xerrors.Errorf("loading state tree: %w", err)
-	}
-
-	err = genesisTree.ForEach(func(addr address.Address, genesisActor *types.Actor) error {
-		if genesisActor.Code == builtin0.MultisigActorCodeID {
-			currActor, err := tree.GetActor(addr)
-			if err != nil {
-				return xerrors.Errorf("loading actor: %w", err)
-			}
-
-			var currState multisig0.State
-			if err := store.Get(ctx, currActor.Head, &currState); err != nil {
-				return xerrors.Errorf("reading multisig state: %w", err)
-			}
-
-			currState.StartEpoch = startEpoch
-
-			currActor.Head, err = store.Put(ctx, &currState)
-			if err != nil {
-				return xerrors.Errorf("writing new multisig state: %w", err)
-			}
-
-			if err := tree.SetActor(addr, currActor); err != nil {
-				return xerrors.Errorf("setting multisig actor: %w", err)
-			}
-		}
-		return nil
-	})
-
-	if err != nil {
-		return xerrors.Errorf("iterating over genesis actors: %w", err)
-	}
-
-	return nil
-}
-
-func resetMultisigVesting0(ctx context.Context, store adt0.Store, tree *state.StateTree, addr address.Address, startEpoch abi.ChainEpoch, duration abi.ChainEpoch, balance abi.TokenAmount) error {
-	act, err := tree.GetActor(addr)
-	if err != nil {
-		return xerrors.Errorf("getting actor: %w", err)
-	}
-
-	if !builtin.IsMultisigActor(act.Code) {
-		return xerrors.Errorf("actor wasn't msig: %w", err)
-	}
-
-	var msigState multisig0.State
-	if err := store.Get(ctx, act.Head, &msigState); err != nil {
-		return xerrors.Errorf("reading multisig state: %w", err)
-	}
-
-	msigState.StartEpoch = startEpoch
-	msigState.UnlockDuration = duration
-	msigState.InitialBalance = balance
-
-	act.Head, err = store.Put(ctx, &msigState)
-	if err != nil {
-		return xerrors.Errorf("writing new multisig state: %w", err)
-	}
-
-	if err := tree.SetActor(addr, act); err != nil {
-		return xerrors.Errorf("setting multisig actor: %w", err)
-	}
-
-	return nil
-}
-
 func makeFakeMsg(from address.Address, to address.Address, amt abi.TokenAmount, nonce uint64) *types.Message {
 	return &types.Message{
 		From:  from,
diff --git a/chain/stmgr/upgrades.go b/chain/stmgr/upgrades.go
new file mode 100644
index 000000000..d2ccbad39
--- /dev/null
+++ b/chain/stmgr/upgrades.go
@@ -0,0 +1,1090 @@
+package stmgr
+
+import (
+	"context"
+	"runtime"
+	"time"
+
+	"github.com/ipfs/go-cid"
+	cbor "github.com/ipfs/go-ipld-cbor"
+	"golang.org/x/xerrors"
+
+	"github.com/filecoin-project/go-address"
+	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/go-state-types/big"
+	"github.com/filecoin-project/go-state-types/network"
+
+	builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+	miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
+	multisig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
+	power0 "github.com/filecoin-project/specs-actors/actors/builtin/power"
+	"github.com/filecoin-project/specs-actors/actors/migration/nv3"
+	adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
+	"github.com/filecoin-project/specs-actors/v2/actors/migration/nv4"
+	"github.com/filecoin-project/specs-actors/v2/actors/migration/nv7"
+	"github.com/filecoin-project/specs-actors/v3/actors/migration/nv10"
+	"github.com/filecoin-project/specs-actors/v4/actors/migration/nv12"
+	"github.com/filecoin-project/specs-actors/v5/actors/migration/nv13"
+
+	"github.com/filecoin-project/lotus/blockstore"
+	"github.com/filecoin-project/lotus/build"
+	"github.com/filecoin-project/lotus/chain/actors/builtin"
+	"github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
+	"github.com/filecoin-project/lotus/chain/state"
+	"github.com/filecoin-project/lotus/chain/store"
+	"github.com/filecoin-project/lotus/chain/types"
+	"github.com/filecoin-project/lotus/chain/vm"
+)
+
+func DefaultUpgradeSchedule() UpgradeSchedule {
+	var us UpgradeSchedule
+
+	updates := []Upgrade{{
+		Height:    build.UpgradeBreezeHeight,
+		Network:   network.Version1,
+		Migration: UpgradeFaucetBurnRecovery,
+	}, {
+		Height:    build.UpgradeSmokeHeight,
+		Network:   network.Version2,
+		Migration: nil,
+	}, {
+		Height:    build.UpgradeIgnitionHeight,
+		Network:   network.Version3,
+		Migration: UpgradeIgnition,
+	}, {
+		Height:    build.UpgradeRefuelHeight,
+		Network:   network.Version3,
+		Migration: UpgradeRefuel,
+	}, {
+		Height:    build.UpgradeAssemblyHeight,
+		Network:   network.Version4,
+		Expensive: true,
+		Migration: UpgradeActorsV2,
+	}, {
+		Height:    build.UpgradeTapeHeight,
+		Network:   network.Version5,
+		Migration: nil,
+	}, {
+		Height:    build.UpgradeLiftoffHeight,
+		Network:   network.Version5,
+		Migration: UpgradeLiftoff,
+	}, {
+		Height:    build.UpgradeKumquatHeight,
+		Network:   network.Version6,
+		Migration: nil,
+	}, {
+		Height:    build.UpgradeCalicoHeight,
+		Network:   network.Version7,
+		Migration: UpgradeCalico,
+	}, {
+		Height:    build.UpgradePersianHeight,
+		Network:   network.Version8,
+		Migration: nil,
+	}, {
+		Height:    build.UpgradeOrangeHeight,
+		Network:   network.Version9,
+		Migration: nil,
+	}, {
+		Height:    build.UpgradeTrustHeight,
+		Network:   network.Version10,
+		Migration: UpgradeActorsV3,
+		PreMigrations: []PreMigration{{
+			PreMigration:    PreUpgradeActorsV3,
+			StartWithin:     120,
+			DontStartWithin: 60,
+			StopWithin:      35,
+		}, {
+			PreMigration:    PreUpgradeActorsV3,
+			StartWithin:     30,
+			DontStartWithin: 15,
+			StopWithin:      5,
+		}},
+		Expensive: true,
+	}, {
+		Height:    build.UpgradeNorwegianHeight,
+		Network:   network.Version11,
+		Migration: nil,
+	}, {
+		Height:    build.UpgradeTurboHeight,
+		Network:   network.Version12,
+		Migration: UpgradeActorsV4,
+		PreMigrations: []PreMigration{{
+			PreMigration:    PreUpgradeActorsV4,
+			StartWithin:     120,
+			DontStartWithin: 60,
+			StopWithin:      35,
+		}, {
+			PreMigration:    PreUpgradeActorsV4,
+			StartWithin:     30,
+			DontStartWithin: 15,
+			StopWithin:      5,
+		}},
+		Expensive: true,
+	}, {
+		Height:    build.UpgradeHyperdriveHeight,
+		Network:   network.Version13,
+		Migration: UpgradeActorsV5,
+		PreMigrations: []PreMigration{{
+			PreMigration:    PreUpgradeActorsV5,
+			StartWithin:     120,
+			DontStartWithin: 60,
+			StopWithin:      35,
+		}, {
+			PreMigration:    PreUpgradeActorsV5,
+			StartWithin:     30,
+			DontStartWithin: 15,
+			StopWithin:      5,
+		}},
+		Expensive: true}}
+
+	for _, u := range updates {
+		if u.Height < 0 {
+			// upgrade disabled
+			continue
+		}
+		us = append(us, u)
+	}
+	return us
+}
+
+func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ MigrationCache, em ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+	// Some initial parameters
+	FundsForMiners := types.FromFil(1_000_000)
+	LookbackEpoch := abi.ChainEpoch(32000)
+	AccountCap := types.FromFil(0)
+	BaseMinerBalance := types.FromFil(20)
+	DesiredReimbursementBalance := types.FromFil(5_000_000)
+
+	isSystemAccount := func(addr address.Address) (bool, error) {
+		id, err := address.IDFromAddress(addr)
+		if err != nil {
+			return false, xerrors.Errorf("id address: %w", err)
+		}
+
+		if id < 1000 {
+			return true, nil
+		}
+		return false, nil
+	}
+
+	minerFundsAlloc := func(pow, tpow abi.StoragePower) abi.TokenAmount {
+		return types.BigDiv(types.BigMul(pow, FundsForMiners), tpow)
+	}
+
+	// Grab lookback state for account checks
+	lbts, err := sm.ChainStore().GetTipsetByHeight(ctx, LookbackEpoch, ts, false)
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("failed to get tipset at lookback height: %w", err)
+	}
+
+	lbtree, err := sm.ParentState(lbts)
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("loading state tree failed: %w", err)
+	}
+
+	tree, err := sm.StateTree(root)
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("getting state tree: %w", err)
+	}
+
+	type transfer struct {
+		From address.Address
+		To   address.Address
+		Amt  abi.TokenAmount
+	}
+
+	var transfers []transfer
+	subcalls := make([]types.ExecutionTrace, 0)
+	transferCb := func(trace types.ExecutionTrace) {
+		subcalls = append(subcalls, trace)
+	}
+
+	// Take all excess funds away, put them into the reserve account
+	err = tree.ForEach(func(addr address.Address, act *types.Actor) error {
+		switch act.Code {
+		case builtin0.AccountActorCodeID, builtin0.MultisigActorCodeID, builtin0.PaymentChannelActorCodeID:
+			sysAcc, err := isSystemAccount(addr)
+			if err != nil {
+				return xerrors.Errorf("checking system account: %w", err)
+			}
+
+			if !sysAcc {
+				transfers = append(transfers, transfer{
+					From: addr,
+					To:   builtin.ReserveAddress,
+					Amt:  act.Balance,
+				})
+			}
+		case builtin0.StorageMinerActorCodeID:
+			var st miner0.State
+			if err := sm.ChainStore().ActorStore(ctx).Get(ctx, act.Head, &st); err != nil {
+				return xerrors.Errorf("failed to load miner state: %w", err)
+			}
+
+			var available abi.TokenAmount
+			{
+				defer func() {
+					if err := recover(); err != nil {
+						log.Warnf("Get available balance failed (%s, %s, %s): %s", addr, act.Head, act.Balance, err)
+					}
+					available = abi.NewTokenAmount(0)
+				}()
+				// this panics if the miner doesn't have enough funds to cover their locked pledge
+				available = st.GetAvailableBalance(act.Balance)
+			}
+
+			if !available.IsZero() {
+				transfers = append(transfers, transfer{
+					From: addr,
+					To:   builtin.ReserveAddress,
+					Amt:  available,
+				})
+			}
+		}
+		return nil
+	})
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("foreach over state tree failed: %w", err)
+	}
+
+	// Execute transfers from previous step
+	for _, t := range transfers {
+		if err := doTransfer(tree, t.From, t.To, t.Amt, transferCb); err != nil {
+			return cid.Undef, xerrors.Errorf("transfer %s %s->%s failed: %w", t.Amt, t.From, t.To, err)
+		}
+	}
+
+	// pull up power table to give miners back some funds proportional to their power
+	var ps power0.State
+	powAct, err := tree.GetActor(builtin0.StoragePowerActorAddr)
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("failed to load power actor: %w", err)
+	}
+
+	cst := cbor.NewCborStore(sm.ChainStore().StateBlockstore())
+	if err := cst.Get(ctx, powAct.Head, &ps); err != nil {
+		return cid.Undef, xerrors.Errorf("failed to get power actor state: %w", err)
+	}
+
+	totalPower := ps.TotalBytesCommitted
+
+	var transfersBack []transfer
+	// Now, we return some funds to places where they are needed
+	err = tree.ForEach(func(addr address.Address, act *types.Actor) error {
+		lbact, err := lbtree.GetActor(addr)
+		if err != nil {
+			if !xerrors.Is(err, types.ErrActorNotFound) {
+				return xerrors.Errorf("failed to get actor in lookback state")
+			}
+		}
+
+		prevBalance := abi.NewTokenAmount(0)
+		if lbact != nil {
+			prevBalance = lbact.Balance
+		}
+
+		switch act.Code {
+		case builtin0.AccountActorCodeID, builtin0.MultisigActorCodeID, builtin0.PaymentChannelActorCodeID:
+			nbalance := big.Min(prevBalance, AccountCap)
+			if nbalance.Sign() != 0 {
+				transfersBack = append(transfersBack, transfer{
+					From: builtin.ReserveAddress,
+					To:   addr,
+					Amt:  nbalance,
+				})
+			}
+		case builtin0.StorageMinerActorCodeID:
+			var st miner0.State
+			if err := sm.ChainStore().ActorStore(ctx).Get(ctx, act.Head, &st); err != nil {
+				return xerrors.Errorf("failed to load miner state: %w", err)
+			}
+
+			var minfo miner0.MinerInfo
+			if err := cst.Get(ctx, st.Info, &minfo); err != nil {
+				return xerrors.Errorf("failed to get miner info: %w", err)
+			}
+
+			sectorsArr, err := adt0.AsArray(sm.ChainStore().ActorStore(ctx), st.Sectors)
+			if err != nil {
+				return xerrors.Errorf("failed to load sectors array: %w", err)
+			}
+
+			slen := sectorsArr.Length()
+
+			power := types.BigMul(types.NewInt(slen), types.NewInt(uint64(minfo.SectorSize)))
+
+			mfunds := minerFundsAlloc(power, totalPower)
+			transfersBack = append(transfersBack, transfer{
+				From: builtin.ReserveAddress,
+				To:   minfo.Worker,
+				Amt:  mfunds,
+			})
+
+			// Now make sure to give each miner who had power at the lookback some FIL
+			lbact, err := lbtree.GetActor(addr)
+			if err == nil {
+				var lbst miner0.State
+				if err := sm.ChainStore().ActorStore(ctx).Get(ctx, lbact.Head, &lbst); err != nil {
+					return xerrors.Errorf("failed to load miner state: %w", err)
+				}
+
+				lbsectors, err := adt0.AsArray(sm.ChainStore().ActorStore(ctx), lbst.Sectors)
+				if err != nil {
+					return xerrors.Errorf("failed to load lb sectors array: %w", err)
+				}
+
+				if lbsectors.Length() > 0 {
+					transfersBack = append(transfersBack, transfer{
+						From: builtin.ReserveAddress,
+						To:   minfo.Worker,
+						Amt:  BaseMinerBalance,
+					})
+				}
+
+			} else {
+				log.Warnf("failed to get miner in lookback state: %s", err)
+			}
+		}
+		return nil
+	})
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("foreach over state tree failed: %w", err)
+	}
+
+	for _, t := range transfersBack {
+		if err := doTransfer(tree, t.From, t.To, t.Amt, transferCb); err != nil {
+			return cid.Undef, xerrors.Errorf("transfer %s %s->%s failed: %w", t.Amt, t.From, t.To, err)
+		}
+	}
+
+	// transfer all burnt funds back to the reserve account
+	burntAct, err := tree.GetActor(builtin0.BurntFundsActorAddr)
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("failed to load burnt funds actor: %w", err)
+	}
+	if err := doTransfer(tree, builtin0.BurntFundsActorAddr, builtin.ReserveAddress, burntAct.Balance, transferCb); err != nil {
+		return cid.Undef, xerrors.Errorf("failed to unburn funds: %w", err)
+	}
+
+	// Top up the reimbursement service
+	reimbAddr, err := address.NewFromString("t0111")
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("failed to parse reimbursement service address")
+	}
+
+	reimb, err := tree.GetActor(reimbAddr)
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("failed to load reimbursement account actor: %w", err)
+	}
+
+	difference := types.BigSub(DesiredReimbursementBalance, reimb.Balance)
+	if err := doTransfer(tree, builtin.ReserveAddress, reimbAddr, difference, transferCb); err != nil {
+		return cid.Undef, xerrors.Errorf("failed to top up reimbursement account: %w", err)
+	}
+
+	// Now, a final sanity check to make sure the balances all check out
+	total := abi.NewTokenAmount(0)
+	err = tree.ForEach(func(addr address.Address, act *types.Actor) error {
+		total = types.BigAdd(total, act.Balance)
+		return nil
+	})
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("checking final state balance failed: %w", err)
+	}
+
+	exp := types.FromFil(build.FilBase)
+	if !exp.Equals(total) {
+		return cid.Undef, xerrors.Errorf("resultant state tree account balance was not correct: %s", total)
+	}
+
+	if em != nil {
+		// record the transfer in execution traces
+
+		fakeMsg := makeFakeMsg(builtin.SystemActorAddr, builtin.SystemActorAddr, big.Zero(), uint64(epoch))
+
+		if err := em.MessageApplied(ctx, ts, fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{
+			MessageReceipt: *makeFakeRct(),
+			ActorErr:       nil,
+			ExecutionTrace: types.ExecutionTrace{
+				Msg:        fakeMsg,
+				MsgRct:     makeFakeRct(),
+				Error:      "",
+				Duration:   0,
+				GasCharges: nil,
+				Subcalls:   subcalls,
+			},
+			Duration: 0,
+			GasCosts: nil,
+		}, false); err != nil {
+			return cid.Undef, xerrors.Errorf("recording transfers: %w", err)
+		}
+	}
+
+	return tree.Flush(ctx)
+}
+
+func UpgradeIgnition(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+	store := sm.cs.ActorStore(ctx)
+
+	if build.UpgradeLiftoffHeight <= epoch {
+		return cid.Undef, xerrors.Errorf("liftoff height must be beyond ignition height")
+	}
+
+	nst, err := nv3.MigrateStateTree(ctx, store, root, epoch)
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("migrating actors state: %w", err)
+	}
+
+	tree, err := sm.StateTree(nst)
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("getting state tree: %w", err)
+	}
+
+	err = setNetworkName(ctx, store, tree, "ignition")
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("setting network name: %w", err)
+	}
+
+	split1, err := address.NewFromString("t0115")
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("first split address: %w", err)
+	}
+
+	split2, err := address.NewFromString("t0116")
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("second split address: %w", err)
+	}
+
+	err = resetGenesisMsigs0(ctx, sm, store, tree, build.UpgradeLiftoffHeight)
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("resetting genesis msig start epochs: %w", err)
+	}
+
+	err = splitGenesisMultisig0(ctx, cb, split1, store, tree, 50, epoch, ts)
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("splitting first msig: %w", err)
+	}
+
+	err = splitGenesisMultisig0(ctx, cb, split2, store, tree, 50, epoch, ts)
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("splitting second msig: %w", err)
+	}
+
+	err = nv3.CheckStateTree(ctx, store, nst, epoch, builtin0.TotalFilecoin)
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("sanity check after ignition upgrade failed: %w", err)
+	}
+
+	return tree.Flush(ctx)
+}
+
+func splitGenesisMultisig0(ctx context.Context, em ExecMonitor, addr address.Address, store adt0.Store, tree *state.StateTree, portions uint64, epoch abi.ChainEpoch, ts *types.TipSet) error {
+	if portions < 1 {
+		return xerrors.Errorf("cannot split into 0 portions")
+	}
+
+	mact, err := tree.GetActor(addr)
+	if err != nil {
+		return xerrors.Errorf("getting msig actor: %w", err)
+	}
+
+	mst, err := multisig.Load(store, mact)
+	if err != nil {
+		return xerrors.Errorf("getting msig state: %w", err)
+	}
+
+	signers, err := mst.Signers()
+	if err != nil {
+		return xerrors.Errorf("getting msig signers: %w", err)
+	}
+
+	thresh, err := mst.Threshold()
+	if err != nil {
+		return xerrors.Errorf("getting msig threshold: %w", err)
+	}
+
+	ibal, err := mst.InitialBalance()
+	if err != nil {
+		return xerrors.Errorf("getting msig initial balance: %w", err)
+	}
+
+	se, err := mst.StartEpoch()
+	if err != nil {
+		return xerrors.Errorf("getting msig start epoch: %w", err)
+	}
+
+	ud, err := mst.UnlockDuration()
+	if err != nil {
+		return xerrors.Errorf("getting msig unlock duration: %w", err)
+	}
+
+	pending, err := adt0.MakeEmptyMap(store).Root()
+	if err != nil {
+		return xerrors.Errorf("failed to create empty map: %w", err)
+	}
+
+	newIbal := big.Div(ibal, types.NewInt(portions))
+	newState := &multisig0.State{
+		Signers:               signers,
+		NumApprovalsThreshold: thresh,
+		NextTxnID:             0,
+		InitialBalance:        newIbal,
+		StartEpoch:            se,
+		UnlockDuration:        ud,
+		PendingTxns:           pending,
+	}
+
+	scid, err := store.Put(ctx, newState)
+	if err != nil {
+		return xerrors.Errorf("storing new state: %w", err)
+	}
+
+	newActor := types.Actor{
+		Code:    builtin0.MultisigActorCodeID,
+		Head:    scid,
+		Nonce:   0,
+		Balance: big.Zero(),
+	}
+
+	i := uint64(0)
+	subcalls := make([]types.ExecutionTrace, 0, portions)
+	transferCb := func(trace types.ExecutionTrace) {
+		subcalls = append(subcalls, trace)
+	}
+
+	for i < portions {
+		keyAddr, err := makeKeyAddr(addr, i)
+		if err != nil {
+			return xerrors.Errorf("creating key address: %w", err)
+		}
+
+		idAddr, err := tree.RegisterNewAddress(keyAddr)
+		if err != nil {
+			return xerrors.Errorf("registering new address: %w", err)
+		}
+
+		err = tree.SetActor(idAddr, &newActor)
+		if err != nil {
+			return xerrors.Errorf("setting new msig actor state: %w", err)
+		}
+
+		if err := doTransfer(tree, addr, idAddr, newIbal, transferCb); err != nil {
+			return xerrors.Errorf("transferring split msig balance: %w", err)
+		}
+
+		i++
+	}
+
+	if em != nil {
+		// record the transfer in execution traces
+
+		fakeMsg := makeFakeMsg(builtin.SystemActorAddr, addr, big.Zero(), uint64(epoch))
+
+		if err := em.MessageApplied(ctx, ts, fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{
+			MessageReceipt: *makeFakeRct(),
+			ActorErr:       nil,
+			ExecutionTrace: types.ExecutionTrace{
+				Msg:        fakeMsg,
+				MsgRct:     makeFakeRct(),
+				Error:      "",
+				Duration:   0,
+				GasCharges: nil,
+				Subcalls:   subcalls,
+			},
+			Duration: 0,
+			GasCosts: nil,
+		}, false); err != nil {
+			return xerrors.Errorf("recording transfers: %w", err)
+		}
+	}
+
+	return nil
+}
+
+// TODO: After the Liftoff epoch, refactor this to use resetMultisigVesting
+func resetGenesisMsigs0(ctx context.Context, sm *StateManager, store adt0.Store, tree *state.StateTree, startEpoch abi.ChainEpoch) error {
+	gb, err := sm.cs.GetGenesis()
+	if err != nil {
+		return xerrors.Errorf("getting genesis block: %w", err)
+	}
+
+	gts, err := types.NewTipSet([]*types.BlockHeader{gb})
+	if err != nil {
+		return xerrors.Errorf("getting genesis tipset: %w", err)
+	}
+
+	cst := cbor.NewCborStore(sm.cs.StateBlockstore())
+	genesisTree, err := state.LoadStateTree(cst, gts.ParentState())
+	if err != nil {
+		return xerrors.Errorf("loading state tree: %w", err)
+	}
+
+	err = genesisTree.ForEach(func(addr address.Address, genesisActor *types.Actor) error {
+		if genesisActor.Code == builtin0.MultisigActorCodeID {
+			currActor, err := tree.GetActor(addr)
+			if err != nil {
+				return xerrors.Errorf("loading actor: %w", err)
+			}
+
+			var currState multisig0.State
+			if err := store.Get(ctx, currActor.Head, &currState); err != nil {
+				return xerrors.Errorf("reading multisig state: %w", err)
+			}
+
+			currState.StartEpoch = startEpoch
+
+			currActor.Head, err = store.Put(ctx, &currState)
+			if err != nil {
+				return xerrors.Errorf("writing new multisig state: %w", err)
+			}
+
+			if err := tree.SetActor(addr, currActor); err != nil {
+				return xerrors.Errorf("setting multisig actor: %w", err)
+			}
+		}
+		return nil
+	})
+
+	if err != nil {
+		return xerrors.Errorf("iterating over genesis actors: %w", err)
+	}
+
+	return nil
+}
+
+func resetMultisigVesting0(ctx context.Context, store adt0.Store, tree *state.StateTree, addr address.Address, startEpoch abi.ChainEpoch, duration abi.ChainEpoch, balance abi.TokenAmount) error {
+	act, err := tree.GetActor(addr)
+	if err != nil {
+		return xerrors.Errorf("getting actor: %w", err)
+	}
+
+	if !builtin.IsMultisigActor(act.Code) {
+		return xerrors.Errorf("actor %s was not a multisig", addr)
+	}
+
+	var msigState multisig0.State
+	if err := store.Get(ctx, act.Head, &msigState); err != nil {
+		return xerrors.Errorf("reading multisig state: %w", err)
+	}
+
+	msigState.StartEpoch = startEpoch
+	msigState.UnlockDuration = duration
+	msigState.InitialBalance = balance
+
+	act.Head, err = store.Put(ctx, &msigState)
+	if err != nil {
+		return xerrors.Errorf("writing new multisig state: %w", err)
+	}
+
+	if err := tree.SetActor(addr, act); err != nil {
+		return xerrors.Errorf("setting multisig actor: %w", err)
+	}
+
+	return nil
+}
+
+func UpgradeRefuel(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+
+	store := sm.cs.ActorStore(ctx)
+	tree, err := sm.StateTree(root)
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("getting state tree: %w", err)
+	}
+
+	err = resetMultisigVesting0(ctx, store, tree, builtin.SaftAddress, 0, 0, big.Zero())
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("tweaking saft msig vesting: %w", err)
+	}
+
+	err = resetMultisigVesting0(ctx, store, tree, builtin.ReserveAddress, 0, 0, big.Zero())
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("tweaking reserve msig vesting: %w", err)
+	}
+
+	err = resetMultisigVesting0(ctx, store, tree, builtin.RootVerifierAddress, 0, 0, big.Zero())
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("tweaking root verifier msig vesting: %w", err)
+	}
+
+	return tree.Flush(ctx)
+}
+
+func UpgradeActorsV2(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+	buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync())
+	store := store.ActorStore(ctx, buf)
+
+	info, err := store.Put(ctx, new(types.StateInfo0))
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("failed to create new state info for actors v2: %w", err)
+	}
+
+	newHamtRoot, err := nv4.MigrateStateTree(ctx, store, root, epoch, nv4.DefaultConfig())
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("upgrading to actors v2: %w", err)
+	}
+
+	newRoot, err := store.Put(ctx, &types.StateRoot{
+		Version: types.StateTreeVersion1,
+		Actors:  newHamtRoot,
+		Info:    info,
+	})
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
+	}
+
+	// perform some basic sanity checks to make sure everything still works.
+	if newSm, err := state.LoadStateTree(store, newRoot); err != nil {
+		return cid.Undef, xerrors.Errorf("state tree sanity load failed: %w", err)
+	} else if newRoot2, err := newSm.Flush(ctx); err != nil {
+		return cid.Undef, xerrors.Errorf("state tree sanity flush failed: %w", err)
+	} else if newRoot2 != newRoot {
+		return cid.Undef, xerrors.Errorf("state-root mismatch: %s != %s", newRoot, newRoot2)
+	} else if _, err := newSm.GetActor(builtin0.InitActorAddr); err != nil {
+		return cid.Undef, xerrors.Errorf("failed to load init actor after upgrade: %w", err)
+	}
+
+	{
+		from := buf
+		to := buf.Read()
+
+		if err := vm.Copy(ctx, from, to, newRoot); err != nil {
+			return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err)
+		}
+	}
+
+	return newRoot, nil
+}
+
+func UpgradeLiftoff(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+	tree, err := sm.StateTree(root)
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("getting state tree: %w", err)
+	}
+
+	err = setNetworkName(ctx, sm.cs.ActorStore(ctx), tree, "mainnet")
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("setting network name: %w", err)
+	}
+
+	return tree.Flush(ctx)
+}
+
+func UpgradeCalico(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+	if build.BuildType != build.BuildMainnet {
+		return root, nil
+	}
+
+	store := sm.cs.ActorStore(ctx)
+	var stateRoot types.StateRoot
+	if err := store.Get(ctx, root, &stateRoot); err != nil {
+		return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
+	}
+
+	if stateRoot.Version != types.StateTreeVersion1 {
+		return cid.Undef, xerrors.Errorf(
+			"expected state root version 1 for calico upgrade, got %d",
+			stateRoot.Version,
+		)
+	}
+
+	newHamtRoot, err := nv7.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, nv7.DefaultConfig())
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("running nv7 migration: %w", err)
+	}
+
+	newRoot, err := store.Put(ctx, &types.StateRoot{
+		Version: stateRoot.Version,
+		Actors:  newHamtRoot,
+		Info:    stateRoot.Info,
+	})
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
+	}
+
+	// perform some basic sanity checks to make sure everything still works.
+	if newSm, err := state.LoadStateTree(store, newRoot); err != nil {
+		return cid.Undef, xerrors.Errorf("state tree sanity load failed: %w", err)
+	} else if newRoot2, err := newSm.Flush(ctx); err != nil {
+		return cid.Undef, xerrors.Errorf("state tree sanity flush failed: %w", err)
+	} else if newRoot2 != newRoot {
+		return cid.Undef, xerrors.Errorf("state-root mismatch: %s != %s", newRoot, newRoot2)
+	} else if _, err := newSm.GetActor(builtin0.InitActorAddr); err != nil {
+		return cid.Undef, xerrors.Errorf("failed to load init actor after upgrade: %w", err)
+	}
+
+	return newRoot, nil
+}
+
+func UpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+	// Use all the CPUs except 3.
+	workerCount := runtime.NumCPU() - 3
+	if workerCount <= 0 {
+		workerCount = 1
+	}
+
+	config := nv10.Config{
+		MaxWorkers:        uint(workerCount),
+		JobQueueSize:      1000,
+		ResultQueueSize:   100,
+		ProgressLogPeriod: 10 * time.Second,
+	}
+	newRoot, err := upgradeActorsV3Common(ctx, sm, cache, root, epoch, ts, config)
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("migrating actors v3 state: %w", err)
+	}
+
+	tree, err := sm.StateTree(newRoot)
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("getting state tree: %w", err)
+	}
+
+	if build.BuildType == build.BuildMainnet {
+		err := terminateActor(ctx, tree, build.ZeroAddress, cb, epoch, ts)
+		if err != nil && !xerrors.Is(err, types.ErrActorNotFound) {
+			return cid.Undef, xerrors.Errorf("deleting zero bls actor: %w", err)
+		}
+
+		newRoot, err = tree.Flush(ctx)
+		if err != nil {
+			return cid.Undef, xerrors.Errorf("flushing state tree: %w", err)
+		}
+	}
+
+	return newRoot, nil
+}
+
+func PreUpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
+	// Use half the CPUs for pre-migration, but leave at least 3.
+	workerCount := runtime.NumCPU()
+	if workerCount <= 4 {
+		workerCount = 1
+	} else {
+		workerCount /= 2
+	}
+	config := nv10.Config{MaxWorkers: uint(workerCount)}
+	_, err := upgradeActorsV3Common(ctx, sm, cache, root, epoch, ts, config)
+	return err
+}
+
+func upgradeActorsV3Common(
+	ctx context.Context, sm *StateManager, cache MigrationCache,
+	root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
+	config nv10.Config,
+) (cid.Cid, error) {
+	buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync())
+	store := store.ActorStore(ctx, buf)
+
+	// Load the state root.
+	var stateRoot types.StateRoot
+	if err := store.Get(ctx, root, &stateRoot); err != nil {
+		return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
+	}
+
+	if stateRoot.Version != types.StateTreeVersion1 {
+		return cid.Undef, xerrors.Errorf(
+			"expected state root version 1 for actors v3 upgrade, got %d",
+			stateRoot.Version,
+		)
+	}
+
+	// Perform the migration
+	newHamtRoot, err := nv10.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache)
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("upgrading to actors v3: %w", err)
+	}
+
+	// Persist the result.
+	newRoot, err := store.Put(ctx, &types.StateRoot{
+		Version: types.StateTreeVersion2,
+		Actors:  newHamtRoot,
+		Info:    stateRoot.Info,
+	})
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
+	}
+
+	// Persist the new tree.
+
+	{
+		from := buf
+		to := buf.Read()
+
+		if err := vm.Copy(ctx, from, to, newRoot); err != nil {
+			return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err)
+		}
+	}
+
+	return newRoot, nil
+}
+
+func UpgradeActorsV4(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+	// Use all the CPUs except 3.
+	workerCount := runtime.NumCPU() - 3
+	if workerCount <= 0 {
+		workerCount = 1
+	}
+
+	config := nv12.Config{
+		MaxWorkers:        uint(workerCount),
+		JobQueueSize:      1000,
+		ResultQueueSize:   100,
+		ProgressLogPeriod: 10 * time.Second,
+	}
+
+	newRoot, err := upgradeActorsV4Common(ctx, sm, cache, root, epoch, ts, config)
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("migrating actors v4 state: %w", err)
+	}
+
+	return newRoot, nil
+}
+
+func PreUpgradeActorsV4(ctx context.Context, sm *StateManager, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
+	// Use half the CPUs for pre-migration, but leave at least 3.
+	workerCount := runtime.NumCPU()
+	if workerCount <= 4 {
+		workerCount = 1
+	} else {
+		workerCount /= 2
+	}
+	config := nv12.Config{MaxWorkers: uint(workerCount)}
+	_, err := upgradeActorsV4Common(ctx, sm, cache, root, epoch, ts, config)
+	return err
+}
+
+func upgradeActorsV4Common(
+	ctx context.Context, sm *StateManager, cache MigrationCache,
+	root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
+	config nv12.Config,
+) (cid.Cid, error) {
+	buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync())
+	store := store.ActorStore(ctx, buf)
+
+	// Load the state root.
+	var stateRoot types.StateRoot
+	if err := store.Get(ctx, root, &stateRoot); err != nil {
+		return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
+	}
+
+	if stateRoot.Version != types.StateTreeVersion2 {
+		return cid.Undef, xerrors.Errorf(
+			"expected state root version 2 for actors v4 upgrade, got %d",
+			stateRoot.Version,
+		)
+	}
+
+	// Perform the migration
+	newHamtRoot, err := nv12.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache)
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("upgrading to actors v4: %w", err)
+	}
+
+	// Persist the result.
+	newRoot, err := store.Put(ctx, &types.StateRoot{
+		Version: types.StateTreeVersion3,
+		Actors:  newHamtRoot,
+		Info:    stateRoot.Info,
+	})
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
+	}
+
+	// Persist the new tree.
+
+	{
+		from := buf
+		to := buf.Read()
+
+		if err := vm.Copy(ctx, from, to, newRoot); err != nil {
+			return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err)
+		}
+	}
+
+	return newRoot, nil
+}
+
+func UpgradeActorsV5(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+	// Use all the CPUs except 3.
+	workerCount := runtime.NumCPU() - 3
+	if workerCount <= 0 {
+		workerCount = 1
+	}
+
+	config := nv13.Config{
+		MaxWorkers:        uint(workerCount),
+		JobQueueSize:      1000,
+		ResultQueueSize:   100,
+		ProgressLogPeriod: 10 * time.Second,
+	}
+
+	newRoot, err := upgradeActorsV5Common(ctx, sm, cache, root, epoch, ts, config)
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("migrating actors v5 state: %w", err)
+	}
+
+	return newRoot, nil
+}
+
+func PreUpgradeActorsV5(ctx context.Context, sm *StateManager, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
+	// Use half the CPUs for pre-migration, but leave at least 3.
+	workerCount := runtime.NumCPU()
+	if workerCount <= 4 {
+		workerCount = 1
+	} else {
+		workerCount /= 2
+	}
+	config := nv13.Config{MaxWorkers: uint(workerCount)}
+	_, err := upgradeActorsV5Common(ctx, sm, cache, root, epoch, ts, config)
+	return err
+}
+
+func upgradeActorsV5Common(
+	ctx context.Context, sm *StateManager, cache MigrationCache,
+	root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
+	config nv13.Config,
+) (cid.Cid, error) {
+	buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync())
+	store := store.ActorStore(ctx, buf)
+
+	// Load the state root.
+	var stateRoot types.StateRoot
+	if err := store.Get(ctx, root, &stateRoot); err != nil {
+		return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
+	}
+
+	if stateRoot.Version != types.StateTreeVersion3 {
+		return cid.Undef, xerrors.Errorf(
+			"expected state root version 3 for actors v5 upgrade, got %d",
+			stateRoot.Version,
+		)
+	}
+
+	// Perform the migration
+	newHamtRoot, err := nv13.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache)
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("upgrading to actors v5: %w", err)
+	}
+
+	// Persist the result.
+	newRoot, err := store.Put(ctx, &types.StateRoot{
+		Version: types.StateTreeVersion4,
+		Actors:  newHamtRoot,
+		Info:    stateRoot.Info,
+	})
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
+	}
+
+	// Persist the new tree.
+
+	{
+		from := buf
+		to := buf.Read()
+
+		if err := vm.Copy(ctx, from, to, newRoot); err != nil {
+			return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err)
+		}
+	}
+
+	return newRoot, nil
+}
diff --git a/chain/sync.go b/chain/sync.go
index 5d3c1d992..7914cc8d5 100644
--- a/chain/sync.go
+++ b/chain/sync.go
@@ -1060,7 +1060,7 @@ func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock
 	}
 
 	nv := syncer.sm.GetNtwkVersion(ctx, b.Header.Height)
-	pl := vm.PricelistByVersion(nv)
+	pl := vm.PricelistByEpoch(baseTs.Height())
 	var sumGasLimit int64
 	checkMsg := func(msg types.ChainMsg) error {
 		m := msg.VMMessage()
diff --git a/chain/vm/gas.go b/chain/vm/gas.go
index b848550f3..206a55d36 100644
--- a/chain/vm/gas.go
+++ b/chain/vm/gas.go
@@ -7,7 +7,7 @@ import (
 	addr "github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/go-state-types/crypto"
-	"github.com/filecoin-project/go-state-types/network"
+	"github.com/filecoin-project/lotus/build"
 	vmr5 "github.com/filecoin-project/specs-actors/v5/actors/runtime"
 	proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
 	"github.com/ipfs/go-cid"
@@ -79,8 +79,8 @@ type Pricelist interface {
 	OnVerifyConsensusFault() GasCharge
 }
 
-var prices = map[network.Version]Pricelist{
-	network.Version0: &pricelistV0{
+var prices = map[abi.ChainEpoch]Pricelist{
+	abi.ChainEpoch(0): &pricelistV0{
 		computeGasMulti: 1,
 		storageGasMulti: 1000,
 
@@ -129,7 +129,7 @@ var prices = map[network.Version]Pricelist{
 		verifyPostDiscount:   true,
 		verifyConsensusFault: 495422,
 	},
-	network.Version6AndAHalf: &pricelistV0{
+	abi.ChainEpoch(build.UpgradeCalicoHeight): &pricelistV0{
 		computeGasMulti: 1,
 		storageGasMulti: 1300,
 
@@ -207,19 +207,21 @@ var prices = map[network.Version]Pricelist{
 	},
 }
 
-// PricelistByVersion finds the latest prices for the given network version
-func PricelistByVersion(version network.Version) Pricelist {
-	bestVersion := network.Version0
-	bestPrice := prices[bestVersion]
-	for nv, pl := range prices {
-		// if `nv > bestVersion` and `nv <= version`
-		if nv > bestVersion && nv <= version {
-			bestVersion = nv
+// PricelistByEpoch finds the latest prices for the given epoch
+func PricelistByEpoch(epoch abi.ChainEpoch) Pricelist {
+	// since we are storing the prices as a map of epoch to price
+	// we need to get the price with the highest epoch that is lower or equal to the `epoch` arg
+	bestEpoch := abi.ChainEpoch(0)
+	bestPrice := prices[bestEpoch]
+	for e, pl := range prices {
+		// if `e` happened after `bestEpoch` and `e` is earlier or equal to the target `epoch`
+		if e > bestEpoch && e <= epoch {
+			bestEpoch = e
 			bestPrice = pl
 		}
 	}
 	if bestPrice == nil {
-		panic(fmt.Sprintf("bad setup: no gas prices available for version %d", version))
+		panic(fmt.Sprintf("bad setup: no gas prices available for epoch %d", epoch))
 	}
 	return bestPrice
 }
diff --git a/chain/vm/mkactor.go b/chain/vm/mkactor.go
index 4beee5ce6..e461a2b4c 100644
--- a/chain/vm/mkactor.go
+++ b/chain/vm/mkactor.go
@@ -41,7 +41,7 @@ var EmptyObjectCid cid.Cid
 
 // TryCreateAccountActor creates account actors from only BLS/SECP256K1 addresses.
 func TryCreateAccountActor(rt *Runtime, addr address.Address) (*types.Actor, address.Address, aerrors.ActorError) {
-	if err := rt.chargeGasSafe(PricelistByVersion(rt.NetworkVersion()).OnCreateActor()); err != nil {
+	if err := rt.chargeGasSafe(PricelistByEpoch(rt.height).OnCreateActor()); err != nil {
 		return nil, address.Undef, err
 	}
 
diff --git a/chain/vm/vm.go b/chain/vm/vm.go
index 2746d5f17..5a31187b7 100644
--- a/chain/vm/vm.go
+++ b/chain/vm/vm.go
@@ -135,7 +135,7 @@ func (vm *VM) makeRuntime(ctx context.Context, msg *types.Message, parent *Runti
 		gasAvailable:     msg.GasLimit,
 		depth:            0,
 		numActorsCreated: 0,
-		pricelist:        PricelistByVersion(vm.ntwkVersion(ctx, vm.blockHeight)),
+		pricelist:        PricelistByEpoch(vm.blockHeight),
 		allowInternal:    true,
 		callerValidated:  false,
 		executionTrace:   types.ExecutionTrace{Msg: msg},
@@ -424,7 +424,7 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet,
 		return nil, err
 	}
 
-	pl := PricelistByVersion(vm.ntwkVersion(ctx, vm.blockHeight))
+	pl := PricelistByEpoch(vm.blockHeight)
 
 	msgGas := pl.OnChainMessage(cmsg.ChainLength())
 	msgGasCost := msgGas.Total()
diff --git a/cli/cmd.go b/cli/cmd.go
index 775911f42..71524d787 100644
--- a/cli/cmd.go
+++ b/cli/cmd.go
@@ -90,4 +90,4 @@ var Commands = []*cli.Command{
 func WithCategory(cat string, cmd *cli.Command) *cli.Command {
 	cmd.Category = strings.ToUpper(cat)
 	return cmd
-}
\ No newline at end of file
+}
diff --git a/cmd/lotus-miner/main.go b/cmd/lotus-miner/main.go
index 3c432a962..9cee61b03 100644
--- a/cmd/lotus-miner/main.go
+++ b/cmd/lotus-miner/main.go
@@ -171,4 +171,4 @@ func getActorAddress(ctx context.Context, cctx *cli.Context) (maddr address.Addr
 	}
 
 	return maddr, nil
-}
\ No newline at end of file
+}
diff --git a/documentation/en/api-v0-methods-miner.md b/documentation/en/api-v0-methods-miner.md
index 3b6d5ac51..86cf62bbc 100644
--- a/documentation/en/api-v0-methods-miner.md
+++ b/documentation/en/api-v0-methods-miner.md
@@ -201,7 +201,7 @@ Response:
 ```json
 {
   "Version": "string value",
-  "APIVersion": 131329,
+  "APIVersion": 131328,
   "BlockDelay": 42
 }
 ```
diff --git a/documentation/en/api-v0-methods-worker.md b/documentation/en/api-v0-methods-worker.md
index 341846759..c620113f4 100644
--- a/documentation/en/api-v0-methods-worker.md
+++ b/documentation/en/api-v0-methods-worker.md
@@ -144,7 +144,7 @@ Perms: admin
 
 Inputs: `null`
 
-Response: `131329`
+Response: `131328`
 
 ## Add
 
diff --git a/documentation/en/api-v0-methods.md b/documentation/en/api-v0-methods.md
index bc67382d6..6b09ffc2a 100644
--- a/documentation/en/api-v0-methods.md
+++ b/documentation/en/api-v0-methods.md
@@ -280,7 +280,7 @@ Response:
 ```json
 {
   "Version": "string value",
-  "APIVersion": 131329,
+  "APIVersion": 131328,
   "BlockDelay": 42
 }
 ```
diff --git a/documentation/en/api-v1-unstable-methods.md b/documentation/en/api-v1-unstable-methods.md
index cbaed82af..20528712a 100644
--- a/documentation/en/api-v1-unstable-methods.md
+++ b/documentation/en/api-v1-unstable-methods.md
@@ -284,7 +284,7 @@ Response:
 ```json
 {
   "Version": "string value",
-  "APIVersion": 131329,
+  "APIVersion": 131328,
   "BlockDelay": 42
 }
 ```