Merge pull request #10282 from filecoin-project/feat/cache-migrated-stateroot
feat: stmgr: cache migrated stateroots
Commit: f641139bf2
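This change persists the result of each network-upgrade state migration in the node's metadata datastore, keyed by the pre-migration state root. When HandleStateForks is asked to migrate the same root again (for example after a chain revert or while replaying), it returns the cached post-migration root instead of re-running the migration. To thread the datastore through, NewStateManager and its callers (updated throughout the diff below) now take an extra metadata-datastore argument.

Below is a minimal, standalone sketch of the caching pattern the change introduces. It mirrors the migrationResultCache added in the stmgr package but is not the Lotus API itself: the package name, the resultCache type, and runWithCache are illustrative, and it relies only on go-datastore and go-cid calls that the diff itself uses.

package migrationcache

import (
	"context"
	"fmt"

	"github.com/ipfs/go-cid"
	"github.com/ipfs/go-datastore"
)

// resultCache persists migration results keyed by the pre-migration state root.
// Illustrative stand-in for the migrationResultCache added in this PR.
type resultCache struct {
	ds        datastore.Batching
	keyPrefix string // e.g. "/migration-cache/nv<version>"
}

func (c *resultCache) key(root cid.Cid) datastore.Key {
	return datastore.NewKey(fmt.Sprintf("%s/%s", c.keyPrefix, root))
}

// Get returns the cached post-migration root for root, if one was stored.
func (c *resultCache) Get(ctx context.Context, root cid.Cid) (cid.Cid, bool, error) {
	bs, err := c.ds.Get(ctx, c.key(root))
	if err == datastore.ErrNotFound {
		return cid.Undef, false, nil
	} else if err != nil {
		return cid.Undef, false, err
	}
	out, err := cid.Cast(bs)
	if err != nil {
		return cid.Undef, false, err
	}
	return out, true, nil
}

// Store records the post-migration root computed for root.
func (c *resultCache) Store(ctx context.Context, root, result cid.Cid) error {
	return c.ds.Put(ctx, c.key(root), result.Bytes())
}

// runWithCache consults the cache before invoking an expensive migration,
// mirroring the lookup/store that HandleStateForks now performs.
func runWithCache(ctx context.Context, c *resultCache, root cid.Cid,
	migrate func(context.Context, cid.Cid) (cid.Cid, error)) (cid.Cid, error) {
	if cached, ok, err := c.Get(ctx, root); err == nil && ok {
		return cached, nil // cache hit: skip the migration entirely
	}
	out, err := migrate(ctx, root)
	if err != nil {
		return cid.Undef, err
	}
	// Stored only when the migration actually ran, so an untouched root is
	// never mapped to itself.
	if err := c.Store(ctx, root, out); err != nil {
		return cid.Undef, err
	}
	return out, nil
}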
@@ -256,7 +256,7 @@ func NewGeneratorWithSectorsAndUpgradeSchedule(numSectors int, us stmgr.UpgradeS
 	//return nil, xerrors.Errorf("creating drand beacon: %w", err)
 	//}

-	sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), sys, us, beac)
+	sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), sys, us, beac, ds)
 	if err != nil {
 		return nil, xerrors.Errorf("initing stmgr: %w", err)
 	}
@@ -174,9 +174,16 @@ func (us UpgradeSchedule) GetNtwkVersion(e abi.ChainEpoch) (network.Version, err

 func (sm *StateManager) HandleStateForks(ctx context.Context, root cid.Cid, height abi.ChainEpoch, cb ExecMonitor, ts *types.TipSet) (cid.Cid, error) {
 	retCid := root
-	var err error
 	u := sm.stateMigrations[height]
 	if u != nil && u.upgrade != nil {
+		migCid, ok, err := u.migrationResultCache.Get(ctx, root)
+		if err == nil && ok {
+			log.Infow("CACHED migration", "height", height, "from", root, "to", migCid)
+			return migCid, nil
+		} else if err != nil {
+			log.Errorw("failed to lookup previous migration result", "err", err)
+		}
+
 		startTime := time.Now()
 		log.Warnw("STARTING migration", "height", height, "from", root)
 		// Yes, we clone the cache, even for the final upgrade epoch. Why? Reverts. We may
@@ -197,6 +204,11 @@ func (sm *StateManager) HandleStateForks(ctx context.Context, root cid.Cid, heig
 			"to", retCid,
 			"duration", time.Since(startTime),
 		)
+
+		// Only set if migration ran, we do not want a root => root mapping
+		if err := u.migrationResultCache.Store(ctx, root, retCid); err != nil {
+			log.Errorw("failed to store migration result", "err", err)
+		}
 	}

 	return retCid, nil
@@ -10,6 +10,7 @@ import (
 	"testing"

 	"github.com/ipfs/go-cid"
+	"github.com/ipfs/go-datastore"
 	ipldcbor "github.com/ipfs/go-ipld-cbor"
 	logging "github.com/ipfs/go-log/v2"
 	"github.com/stretchr/testify/require"
@@ -35,6 +36,7 @@ import (
 	"github.com/filecoin-project/lotus/chain/consensus"
 	"github.com/filecoin-project/lotus/chain/consensus/filcns"
 	"github.com/filecoin-project/lotus/chain/gen"
+	"github.com/filecoin-project/lotus/chain/stmgr"
 	. "github.com/filecoin-project/lotus/chain/stmgr"
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/chain/vm"
@@ -166,7 +168,7 @@ func TestForkHeightTriggers(t *testing.T) {
 			}

 			return st.Flush(ctx)
-		}}}, cg.BeaconSchedule())
+		}}}, cg.BeaconSchedule(), datastore.NewMapDatastore())
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -284,7 +286,7 @@ func testForkRefuseCall(t *testing.T, nullsBefore, nullsAfter int) {
 		root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
 			migrationCount++
 			return root, nil
-		}}}, cg.BeaconSchedule())
+		}}}, cg.BeaconSchedule(), datastore.NewMapDatastore())
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -502,7 +504,7 @@ func TestForkPreMigration(t *testing.T) {
 				return nil
 			},
 		}}},
-	}, cg.BeaconSchedule())
+	}, cg.BeaconSchedule(), datastore.NewMapDatastore())
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -576,6 +578,7 @@ func TestDisablePreMigration(t *testing.T) {
 			}}},
 		},
 		cg.BeaconSchedule(),
+		datastore.NewMapDatastore(),
 	)
 	require.NoError(t, err)
 	require.NoError(t, sm.Start(context.Background()))
@@ -603,3 +606,102 @@ func TestDisablePreMigration(t *testing.T) {

 	require.Equal(t, 1, len(counter))
 }
+
+func TestMigrtionCache(t *testing.T) {
+	logging.SetAllLoggers(logging.LevelInfo)
+
+	cg, err := gen.NewGenerator()
+	require.NoError(t, err)
+
+	counter := make(chan struct{}, 10)
+	metadataDs := datastore.NewMapDatastore()
+
+	sm, err := NewStateManager(
+		cg.ChainStore(),
+		consensus.NewTipSetExecutor(filcns.RewardFunc),
+		cg.StateManager().VMSys(),
+		UpgradeSchedule{{
+			Network: network.Version1,
+			Height: testForkHeight,
+			Migration: func(_ context.Context, _ *StateManager, _ MigrationCache, _ ExecMonitor,
+				root cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) (cid.Cid, error) {
+
+				counter <- struct{}{}
+
+				return root, nil
+			}},
+		},
+		cg.BeaconSchedule(),
+		metadataDs,
+	)
+	require.NoError(t, err)
+	require.NoError(t, sm.Start(context.Background()))
+	defer func() {
+		require.NoError(t, sm.Stop(context.Background()))
+	}()
+
+	inv := consensus.NewActorRegistry()
+	registry := builtin.MakeRegistryLegacy([]rtt.VMActor{testActor{}})
+	inv.Register(actorstypes.Version0, nil, registry)
+
+	sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (vm.Interface, error) {
+		nvm, err := vm.NewLegacyVM(ctx, vmopt)
+		require.NoError(t, err)
+		nvm.SetInvoker(inv)
+		return nvm, nil
+	})

+	cg.SetStateManager(sm)
+
+	for i := 0; i < 50; i++ {
+		_, err := cg.NextTipSet()
+		require.NoError(t, err)
+	}
+
+	ts, err := cg.ChainStore().GetTipsetByHeight(context.Background(), testForkHeight, nil, false)
+	require.NoError(t, err)
+
+	root, _, err := stmgr.ComputeState(context.Background(), sm, testForkHeight+1, []*types.Message{}, ts)
+	require.NoError(t, err)
+	t.Log(root)
+
+	require.Equal(t, 1, len(counter))
+
+	{
+		sm, err := NewStateManager(
+			cg.ChainStore(),
+			consensus.NewTipSetExecutor(filcns.RewardFunc),
+			cg.StateManager().VMSys(),
+			UpgradeSchedule{{
+				Network: network.Version1,
+				Height: testForkHeight,
+				Migration: func(_ context.Context, _ *StateManager, _ MigrationCache, _ ExecMonitor,
+					root cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) (cid.Cid, error) {
+
+					counter <- struct{}{}
+
+					return root, nil
+				}},
+			},
+			cg.BeaconSchedule(),
+			metadataDs,
+		)
+		require.NoError(t, err)
+		sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (vm.Interface, error) {
+			nvm, err := vm.NewLegacyVM(ctx, vmopt)
+			require.NoError(t, err)
+			nvm.SetInvoker(inv)
+			return nvm, nil
+		})
+
+		ctx := context.Background()
+
+		base, _, err := sm.ExecutionTrace(ctx, ts)
+		require.NoError(t, err)
+		_, err = sm.HandleStateForks(context.Background(), base, ts.Height(), nil, ts)
+		require.NoError(t, err)
+
+		// Should not have increased as we should be using the cached results in the metadataDs
+		require.Equal(t, 1, len(counter))
+	}
+}
@@ -2,10 +2,13 @@ package stmgr

 import (
 	"context"
+	"fmt"
 	"sync"

 	"github.com/ipfs/go-cid"
+	dstore "github.com/ipfs/go-datastore"
 	cbor "github.com/ipfs/go-ipld-cbor"
+	ipld "github.com/ipfs/go-ipld-format"
 	logging "github.com/ipfs/go-log/v2"
 	"golang.org/x/xerrors"

@@ -54,6 +57,44 @@ type migration struct {
 	upgrade MigrationFunc
 	preMigrations []PreMigration
 	cache *nv16.MemMigrationCache
+	migrationResultCache *migrationResultCache
+}
+
+type migrationResultCache struct {
+	ds dstore.Batching
+	keyPrefix string
+}
+
+func (m *migrationResultCache) keyForMigration(root cid.Cid) dstore.Key {
+	kStr := fmt.Sprintf("%s/%s", m.keyPrefix, root)
+	return dstore.NewKey(kStr)
+}
+
+func (m *migrationResultCache) Get(ctx context.Context, root cid.Cid) (cid.Cid, bool, error) {
+	k := m.keyForMigration(root)
+
+	bs, err := m.ds.Get(ctx, k)
+	if ipld.IsNotFound(err) {
+		return cid.Undef, false, nil
+	} else if err != nil {
+		return cid.Undef, false, xerrors.Errorf("error loading migration result: %w", err)
+	}
+
+	c, err := cid.Parse(bs)
+	if err != nil {
+		return cid.Undef, false, xerrors.Errorf("error parsing migration result: %w", err)
+	}
+
+	return c, true, nil
+}
+
+func (m *migrationResultCache) Store(ctx context.Context, root cid.Cid, resultCid cid.Cid) error {
+	k := m.keyForMigration(root)
+	if err := m.ds.Put(ctx, k, resultCid.Bytes()); err != nil {
+		return err
+	}
+
+	return nil
 }

 type Executor interface {
@@ -103,7 +144,7 @@ type treeCache struct {
 	tree *state.StateTree
 }

-func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, beacon beacon.Schedule) (*StateManager, error) {
+func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, beacon beacon.Schedule, metadataDs dstore.Batching) (*StateManager, error) {
 	// If we have upgrades, make sure they're in-order and make sense.
 	if err := us.Validate(); err != nil {
 		return nil, err
@@ -122,12 +163,18 @@ func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder,
 				upgrade: upgrade.Migration,
 				preMigrations: upgrade.PreMigrations,
 				cache: nv16.NewMemMigrationCache(),
+				migrationResultCache: &migrationResultCache{
+					keyPrefix: fmt.Sprintf("/migration-cache/nv%d", upgrade.Network),
+					ds: metadataDs,
+				},
 			}

 			stateMigrations[upgrade.Height] = migration
 		}
 		if upgrade.Expensive {
 			expensiveUpgrades[upgrade.Height] = struct{}{}
 		}

 		networkVersions = append(networkVersions, versionSpec{
 			networkVersion: lastVersion,
 			atOrBelow: upgrade.Height,
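For reference: combined with keyForMigration above, each cached result lives under a key of the form <keyPrefix>/<pre-migration root>, e.g. something like /migration-cache/nv17/bafy... for a hypothetical nv17 upgrade. An entry is only written after a migration has actually run, so an untouched root is never mapped to itself.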
@@ -155,8 +202,8 @@ func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder,
 	}, nil
 }

-func NewStateManagerWithUpgradeScheduleAndMonitor(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, b beacon.Schedule, em ExecMonitor) (*StateManager, error) {
-	sm, err := NewStateManager(cs, exec, sys, us, b)
+func NewStateManagerWithUpgradeScheduleAndMonitor(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, b beacon.Schedule, em ExecMonitor, metadataDs dstore.Batching) (*StateManager, error) {
+	sm, err := NewStateManager(cs, exec, sys, us, b, metadataDs)
 	if err != nil {
 		return nil, err
 	}
@@ -196,7 +196,8 @@ func TestChainExportImportFull(t *testing.T) {
 	}

 	nbs := blockstore.NewMemorySync()
-	cs := store.NewChainStore(nbs, nbs, datastore.NewMapDatastore(), filcns.Weight, nil)
+	ds := datastore.NewMapDatastore()
+	cs := store.NewChainStore(nbs, nbs, ds, filcns.Weight, nil)
 	defer cs.Close() //nolint:errcheck

 	root, err := cs.Import(context.TODO(), buf)
@@ -213,7 +214,7 @@ func TestChainExportImportFull(t *testing.T) {
 		t.Fatal("imported chain differed from exported chain")
 	}

-	sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), nil, filcns.DefaultUpgradeSchedule(), cg.BeaconSchedule())
+	sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), nil, filcns.DefaultUpgradeSchedule(), cg.BeaconSchedule(), ds)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -229,7 +229,7 @@ var importBenchCmd = &cli.Command{
 		defer cs.Close() //nolint:errcheck

 		// TODO: We need to supply the actual beacon after v14
-		stm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(verifier), filcns.DefaultUpgradeSchedule(), nil)
+		stm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(verifier), filcns.DefaultUpgradeSchedule(), nil, metadataDs)
 		if err != nil {
 			return err
 		}
@@ -513,7 +513,7 @@ var chainBalanceStateCmd = &cli.Command{
 		cst := cbor.NewCborStore(bs)
 		store := adt.WrapStore(ctx, cst)

-		sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil)
+		sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds)
 		if err != nil {
 			return err
 		}
@@ -737,7 +737,7 @@ var chainPledgeCmd = &cli.Command{
 		cst := cbor.NewCborStore(bs)
 		store := adt.WrapStore(ctx, cst)

-		sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil)
+		sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds)
 		if err != nil {
 			return err
 		}
@@ -111,7 +111,7 @@ var gasTraceCmd = &cli.Command{
 		cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
 		defer cs.Close() //nolint:errcheck

-		sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd)
+		sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd, mds)
 		if err != nil {
 			return err
 		}
@@ -212,7 +212,7 @@ var replayOfflineCmd = &cli.Command{
 		cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
 		defer cs.Close() //nolint:errcheck

-		sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd)
+		sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd, mds)
 		if err != nil {
 			return err
 		}
@@ -90,7 +90,7 @@ var invariantsCmd = &cli.Command{
 		cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
 		defer cs.Close() //nolint:errcheck

-		sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil)
+		sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds)
 		if err != nil {
 			return err
 		}
@@ -8,6 +8,7 @@ import (
 	"time"

 	"github.com/ipfs/go-cid"
+	"github.com/ipfs/go-datastore"
 	"github.com/urfave/cli/v2"
 	cbg "github.com/whyrusleeping/cbor-gen"
 	"golang.org/x/xerrors"
@@ -121,7 +122,8 @@ var migrationsCmd = &cli.Command{
 		cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
 		defer cs.Close() //nolint:errcheck

-		sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil)
+		// Note: we use a map datastore for the metadata to avoid writing / using cached migration results in the metadata store
+		sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, datastore.NewMapDatastore())
 		if err != nil {
 			return err
 		}
@@ -308,7 +308,7 @@ to reduce the number of decode operations performed by caching the decoded objec
 		}

 		tsExec := consensus.NewTipSetExecutor(filcns.RewardFunc)
-		sm, err := stmgr.NewStateManager(cs, tsExec, vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil)
+		sm, err := stmgr.NewStateManager(cs, tsExec, vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds)
 		if err != nil {
 			return err
 		}
@@ -106,7 +106,7 @@ func (nd *Node) LoadSim(ctx context.Context, name string) (*Simulation, error) {
 	if err != nil {
 		return nil, xerrors.Errorf("failed to create upgrade schedule for simulation %s: %w", name, err)
 	}
-	sim.StateManager, err = stmgr.NewStateManager(nd.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(mock.Verifier), us, nil)
+	sim.StateManager, err = stmgr.NewStateManager(nd.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(mock.Verifier), us, nil, nd.MetadataDS)
 	if err != nil {
 		return nil, xerrors.Errorf("failed to create state manager for simulation %s: %w", name, err)
 	}
@@ -125,7 +125,7 @@ func (nd *Node) CreateSim(ctx context.Context, name string, head *types.TipSet)
 	if err != nil {
 		return nil, err
 	}
-	sm, err := stmgr.NewStateManager(nd.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(mock.Verifier), filcns.DefaultUpgradeSchedule(), nil)
+	sm, err := stmgr.NewStateManager(nd.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(mock.Verifier), filcns.DefaultUpgradeSchedule(), nil, nd.MetadataDS)
 	if err != nil {
 		return nil, xerrors.Errorf("creating state manager: %w", err)
 	}
@@ -201,7 +201,7 @@ func (sim *Simulation) SetUpgradeHeight(nv network.Version, epoch abi.ChainEpoch
 	if err != nil {
 		return err
 	}
-	sm, err := stmgr.NewStateManager(sim.Node.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(mock.Verifier), newUpgradeSchedule, nil)
+	sm, err := stmgr.NewStateManager(sim.Node.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(mock.Verifier), newUpgradeSchedule, nil, sim.Node.MetadataDS)
 	if err != nil {
 		return err
 	}
@@ -540,7 +540,7 @@ func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool)
 	}

 	// TODO: We need to supply the actual beacon after v14
-	stm, err := stmgr.NewStateManager(cst, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil)
+	stm, err := stmgr.NewStateManager(cst, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds)
 	if err != nil {
 		return err
 	}
@@ -108,7 +108,7 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, params

 		cs = store.NewChainStore(bs, bs, ds, filcns.Weight, nil)
 		tse = consensus.NewTipSetExecutor(filcns.RewardFunc)
-		sm, err = stmgr.NewStateManager(cs, tse, syscalls, filcns.DefaultUpgradeSchedule(), nil)
+		sm, err = stmgr.NewStateManager(cs, tse, syscalls, filcns.DefaultUpgradeSchedule(), nil, ds)
 	)
 	if err != nil {
 		return nil, err
@@ -123,7 +123,7 @@ func NetworkName(mctx helpers.MetricsCtx,

 	ctx := helpers.LifecycleCtx(mctx, lc)

-	sm, err := stmgr.NewStateManager(cs, tsexec, syscalls, us, nil)
+	sm, err := stmgr.NewStateManager(cs, tsexec, syscalls, us, nil, nil)
 	if err != nil {
 		return "", err
 	}
@@ -7,10 +7,11 @@ import (
 	"github.com/filecoin-project/lotus/chain/stmgr"
 	"github.com/filecoin-project/lotus/chain/store"
 	"github.com/filecoin-project/lotus/chain/vm"
+	"github.com/filecoin-project/lotus/node/modules/dtypes"
 )

-func StateManager(lc fx.Lifecycle, cs *store.ChainStore, exec stmgr.Executor, sys vm.SyscallBuilder, us stmgr.UpgradeSchedule, b beacon.Schedule) (*stmgr.StateManager, error) {
-	sm, err := stmgr.NewStateManager(cs, exec, sys, us, b)
+func StateManager(lc fx.Lifecycle, cs *store.ChainStore, exec stmgr.Executor, sys vm.SyscallBuilder, us stmgr.UpgradeSchedule, b beacon.Schedule, metadataDs dtypes.MetadataDS) (*stmgr.StateManager, error) {
+	sm, err := stmgr.NewStateManager(cs, exec, sys, us, b, metadataDs)
 	if err != nil {
 		return nil, err
 	}