diff --git a/api/api_storage.go b/api/api_storage.go index 1131f45a0..0ccfbd88f 100644 --- a/api/api_storage.go +++ b/api/api_storage.go @@ -24,6 +24,7 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" "github.com/filecoin-project/lotus/extern/sector-storage/stores" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" + "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" ) // MODIFYING THE API INTERFACE @@ -91,6 +92,16 @@ type StorageMiner interface { // SectorTerminatePending returns a list of pending sector terminations to be sent in the next batch message SectorTerminatePending(ctx context.Context) ([]abi.SectorID, error) //perm:admin SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber) error //perm:admin + // SectorPreCommitFlush immediately sends a PreCommit message with sectors batched for PreCommit. + // Returns null if message wasn't sent + SectorPreCommitFlush(ctx context.Context) ([]sealiface.PreCommitBatchRes, error) //perm:admin + // SectorPreCommitPending returns a list of pending PreCommit sectors to be sent in the next batch message + SectorPreCommitPending(ctx context.Context) ([]abi.SectorID, error) //perm:admin + // SectorCommitFlush immediately sends a Commit message with sectors aggregated for Commit. + // Returns null if message wasn't sent + SectorCommitFlush(ctx context.Context) ([]sealiface.CommitBatchRes, error) //perm:admin + // SectorCommitPending returns a list of pending Commit sectors to be sent in the next aggregate message + SectorCommitPending(ctx context.Context) ([]abi.SectorID, error) //perm:admin // WorkerConnect tells the node to connect to workers RPC WorkerConnect(context.Context, string) error //perm:admin retry:true diff --git a/api/proxy_gen.go b/api/proxy_gen.go index 79d7b0ac6..4fa51a4af 100644 --- a/api/proxy_gen.go +++ b/api/proxy_gen.go @@ -27,6 +27,7 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" "github.com/filecoin-project/lotus/extern/sector-storage/stores" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" + "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" marketevents "github.com/filecoin-project/lotus/markets/loggers" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/specs-storage/storage" @@ -659,12 +660,20 @@ type StorageMinerStruct struct { SealingSchedDiag func(p0 context.Context, p1 bool) (interface{}, error) `perm:"admin"` + SectorCommitFlush func(p0 context.Context) ([]sealiface.CommitBatchRes, error) `perm:"admin"` + + SectorCommitPending func(p0 context.Context) ([]abi.SectorID, error) `perm:"admin"` + SectorGetExpectedSealDuration func(p0 context.Context) (time.Duration, error) `perm:"read"` SectorGetSealDelay func(p0 context.Context) (time.Duration, error) `perm:"read"` SectorMarkForUpgrade func(p0 context.Context, p1 abi.SectorNumber) error `perm:"admin"` + SectorPreCommitFlush func(p0 context.Context) ([]sealiface.PreCommitBatchRes, error) `perm:"admin"` + + SectorPreCommitPending func(p0 context.Context) ([]abi.SectorID, error) `perm:"admin"` + SectorRemove func(p0 context.Context, p1 abi.SectorNumber) error `perm:"admin"` SectorSetExpectedSealDuration func(p0 context.Context, p1 time.Duration) error `perm:"write"` @@ -3117,6 +3126,22 @@ func (s *StorageMinerStub) SealingSchedDiag(p0 context.Context, p1 bool) (interf return nil, xerrors.New("method not supported") } +func (s *StorageMinerStruct) SectorCommitFlush(p0 context.Context) 
([]sealiface.CommitBatchRes, error) { + return s.Internal.SectorCommitFlush(p0) +} + +func (s *StorageMinerStub) SectorCommitFlush(p0 context.Context) ([]sealiface.CommitBatchRes, error) { + return *new([]sealiface.CommitBatchRes), xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) SectorCommitPending(p0 context.Context) ([]abi.SectorID, error) { + return s.Internal.SectorCommitPending(p0) +} + +func (s *StorageMinerStub) SectorCommitPending(p0 context.Context) ([]abi.SectorID, error) { + return *new([]abi.SectorID), xerrors.New("method not supported") +} + func (s *StorageMinerStruct) SectorGetExpectedSealDuration(p0 context.Context) (time.Duration, error) { return s.Internal.SectorGetExpectedSealDuration(p0) } @@ -3141,6 +3166,22 @@ func (s *StorageMinerStub) SectorMarkForUpgrade(p0 context.Context, p1 abi.Secto return xerrors.New("method not supported") } +func (s *StorageMinerStruct) SectorPreCommitFlush(p0 context.Context) ([]sealiface.PreCommitBatchRes, error) { + return s.Internal.SectorPreCommitFlush(p0) +} + +func (s *StorageMinerStub) SectorPreCommitFlush(p0 context.Context) ([]sealiface.PreCommitBatchRes, error) { + return *new([]sealiface.PreCommitBatchRes), xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) SectorPreCommitPending(p0 context.Context) ([]abi.SectorID, error) { + return s.Internal.SectorPreCommitPending(p0) +} + +func (s *StorageMinerStub) SectorPreCommitPending(p0 context.Context) ([]abi.SectorID, error) { + return *new([]abi.SectorID), xerrors.New("method not supported") +} + func (s *StorageMinerStruct) SectorRemove(p0 context.Context, p1 abi.SectorNumber) error { return s.Internal.SectorRemove(p0, p1) } diff --git a/api/test/deadlines.go b/api/test/deadlines.go index 43fa731be..987bfb3ae 100644 --- a/api/test/deadlines.go +++ b/api/test/deadlines.go @@ -63,7 +63,7 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - n, sn := b(t, []FullNodeOpts{FullNodeWithLatestActorsAt(upgradeH)}, OneMiner) + n, sn := b(t, []FullNodeOpts{FullNodeWithNetworkUpgradeAt(network.Version12, upgradeH)}, OneMiner) client := n[0].FullNode.(*impl.FullNodeAPI) minerA := sn[0] diff --git a/api/test/deals.go b/api/test/deals.go index d93001692..aa7e23bcc 100644 --- a/api/test/deals.go +++ b/api/test/deals.go @@ -500,6 +500,8 @@ func startSealingWaiting(t *testing.T, ctx context.Context, miner TestStorageNod require.NoError(t, miner.SectorStartSealing(ctx, snum)) } } + + flushSealingBatches(t, ctx, miner) } func testRetrieval(t *testing.T, ctx context.Context, client api.FullNode, fcid cid.Cid, piece *cid.Cid, carExport bool, data []byte) { diff --git a/api/test/pledge.go b/api/test/pledge.go new file mode 100644 index 000000000..08548dc60 --- /dev/null +++ b/api/test/pledge.go @@ -0,0 +1,389 @@ +package test + +import ( + "context" + "fmt" + "sort" + "strings" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/stmgr" + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + bminer "github.com/filecoin-project/lotus/miner" + "github.com/filecoin-project/lotus/node" + "github.com/filecoin-project/lotus/node/impl" +) + +func 
TestSDRUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + n, sn := b(t, []FullNodeOpts{FullNodeWithSDRAt(500, 1000)}, OneMiner) + client := n[0].FullNode.(*impl.FullNodeAPI) + miner := sn[0] + + addrinfo, err := client.NetAddrsListen(ctx) + if err != nil { + t.Fatal(err) + } + + if err := miner.NetConnect(ctx, addrinfo); err != nil { + t.Fatal(err) + } + build.Clock.Sleep(time.Second) + + pledge := make(chan struct{}) + mine := int64(1) + done := make(chan struct{}) + go func() { + defer close(done) + round := 0 + for atomic.LoadInt64(&mine) != 0 { + build.Clock.Sleep(blocktime) + if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) { + + }}); err != nil { + t.Error(err) + } + + // 3 sealing rounds: before, during after. + if round >= 3 { + continue + } + + head, err := client.ChainHead(ctx) + assert.NoError(t, err) + + // rounds happen every 100 blocks, with a 50 block offset. + if head.Height() >= abi.ChainEpoch(round*500+50) { + round++ + pledge <- struct{}{} + + ver, err := client.StateNetworkVersion(ctx, head.Key()) + assert.NoError(t, err) + switch round { + case 1: + assert.Equal(t, network.Version6, ver) + case 2: + assert.Equal(t, network.Version7, ver) + case 3: + assert.Equal(t, network.Version8, ver) + } + } + + } + }() + + // before. + pledgeSectors(t, ctx, miner, 9, 0, pledge) + + s, err := miner.SectorsList(ctx) + require.NoError(t, err) + sort.Slice(s, func(i, j int) bool { + return s[i] < s[j] + }) + + for i, id := range s { + info, err := miner.SectorsStatus(ctx, id, true) + require.NoError(t, err) + expectProof := abi.RegisteredSealProof_StackedDrg2KiBV1 + if i >= 3 { + // after + expectProof = abi.RegisteredSealProof_StackedDrg2KiBV1_1 + } + assert.Equal(t, expectProof, info.SealProof, "sector %d, id %d", i, id) + } + + atomic.StoreInt64(&mine, 0) + <-done +} + +func TestPledgeBatching(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + n, sn := b(t, []FullNodeOpts{FullNodeWithLatestActorsAt(-1)}, OneMiner) + client := n[0].FullNode.(*impl.FullNodeAPI) + miner := sn[0] + + addrinfo, err := client.NetAddrsListen(ctx) + if err != nil { + t.Fatal(err) + } + + if err := miner.NetConnect(ctx, addrinfo); err != nil { + t.Fatal(err) + } + build.Clock.Sleep(time.Second) + + mine := int64(1) + done := make(chan struct{}) + go func() { + defer close(done) + for atomic.LoadInt64(&mine) != 0 { + build.Clock.Sleep(blocktime) + if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) { + + }}); err != nil { + t.Error(err) + } + } + }() + + for { + h, err := client.ChainHead(ctx) + require.NoError(t, err) + if h.Height() > 10 { + break + } + } + + toCheck := startPledge(t, ctx, miner, nSectors, 0, nil) + + for len(toCheck) > 0 { + states := map[api.SectorState]int{} + + for n := range toCheck { + st, err := miner.SectorsStatus(ctx, n, false) + require.NoError(t, err) + states[st.State]++ + if st.State == api.SectorState(sealing.Proving) { + delete(toCheck, n) + } + if strings.Contains(string(st.State), "Fail") { + t.Fatal("sector in a failed state", st.State) + } + } + if states[api.SectorState(sealing.SubmitPreCommitBatch)] == nSectors || + (states[api.SectorState(sealing.SubmitPreCommitBatch)] > 0 && states[api.SectorState(sealing.PreCommit1)] == 0 && states[api.SectorState(sealing.PreCommit2)] == 0) { + pcb, err := 
miner.SectorPreCommitFlush(ctx) + require.NoError(t, err) + if pcb != nil { + fmt.Printf("PRECOMMIT BATCH: %+v\n", pcb) + } + } + + if states[api.SectorState(sealing.SubmitCommitAggregate)] == nSectors || + (states[api.SectorState(sealing.SubmitCommitAggregate)] > 0 && states[api.SectorState(sealing.WaitSeed)] == 0 && states[api.SectorState(sealing.Committing)] == 0) { + cb, err := miner.SectorCommitFlush(ctx) + require.NoError(t, err) + if cb != nil { + fmt.Printf("COMMIT BATCH: %+v\n", cb) + } + } + + build.Clock.Sleep(100 * time.Millisecond) + fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states) + } + + atomic.StoreInt64(&mine, 0) + <-done +} + +func TestPledgeBeforeNv13(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + n, sn := b(t, []FullNodeOpts{ + { + Opts: func(nodes []TestNode) node.Option { + return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{ + Network: network.Version9, + Height: 1, + Migration: stmgr.UpgradeActorsV2, + }, { + Network: network.Version10, + Height: 2, + Migration: stmgr.UpgradeActorsV3, + }, { + Network: network.Version12, + Height: 3, + Migration: stmgr.UpgradeActorsV4, + }, { + Network: network.Version13, + Height: 1000000000, + Migration: stmgr.UpgradeActorsV5, + }}) + }, + }, + }, OneMiner) + client := n[0].FullNode.(*impl.FullNodeAPI) + miner := sn[0] + + addrinfo, err := client.NetAddrsListen(ctx) + if err != nil { + t.Fatal(err) + } + + if err := miner.NetConnect(ctx, addrinfo); err != nil { + t.Fatal(err) + } + build.Clock.Sleep(time.Second) + + mine := int64(1) + done := make(chan struct{}) + go func() { + defer close(done) + for atomic.LoadInt64(&mine) != 0 { + build.Clock.Sleep(blocktime) + if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) { + + }}); err != nil { + t.Error(err) + } + } + }() + + for { + h, err := client.ChainHead(ctx) + require.NoError(t, err) + if h.Height() > 10 { + break + } + } + + toCheck := startPledge(t, ctx, miner, nSectors, 0, nil) + + for len(toCheck) > 0 { + states := map[api.SectorState]int{} + + for n := range toCheck { + st, err := miner.SectorsStatus(ctx, n, false) + require.NoError(t, err) + states[st.State]++ + if st.State == api.SectorState(sealing.Proving) { + delete(toCheck, n) + } + if strings.Contains(string(st.State), "Fail") { + t.Fatal("sector in a failed state", st.State) + } + } + + build.Clock.Sleep(100 * time.Millisecond) + fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states) + } + + atomic.StoreInt64(&mine, 0) + <-done +} + +func TestPledgeSector(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + n, sn := b(t, OneFull, OneMiner) + client := n[0].FullNode.(*impl.FullNodeAPI) + miner := sn[0] + + addrinfo, err := client.NetAddrsListen(ctx) + if err != nil { + t.Fatal(err) + } + + if err := miner.NetConnect(ctx, addrinfo); err != nil { + t.Fatal(err) + } + build.Clock.Sleep(time.Second) + + mine := int64(1) + done := make(chan struct{}) + go func() { + defer close(done) + for atomic.LoadInt64(&mine) != 0 { + build.Clock.Sleep(blocktime) + if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) { + + }}); err != nil { + t.Error(err) + } + } + }() + + pledgeSectors(t, ctx, miner, nSectors, 0, nil) + + atomic.StoreInt64(&mine, 0) + <-done +} + +func flushSealingBatches(t *testing.T, ctx context.Context, miner TestStorageNode) { + pcb, 
err := miner.SectorPreCommitFlush(ctx) + require.NoError(t, err) + if pcb != nil { + fmt.Printf("PRECOMMIT BATCH: %+v\n", pcb) + } + + cb, err := miner.SectorCommitFlush(ctx) + require.NoError(t, err) + if cb != nil { + fmt.Printf("COMMIT BATCH: %+v\n", cb) + } +} + +func startPledge(t *testing.T, ctx context.Context, miner TestStorageNode, n, existing int, blockNotif <-chan struct{}) map[abi.SectorNumber]struct{} { + for i := 0; i < n; i++ { + if i%3 == 0 && blockNotif != nil { + <-blockNotif + log.Errorf("WAIT") + } + log.Errorf("PLEDGING %d", i) + _, err := miner.PledgeSector(ctx) + require.NoError(t, err) + } + + for { + s, err := miner.SectorsList(ctx) // Note - the test builder doesn't import genesis sectors into FSM + require.NoError(t, err) + fmt.Printf("Sectors: %d\n", len(s)) + if len(s) >= n+existing { + break + } + + build.Clock.Sleep(100 * time.Millisecond) + } + + fmt.Printf("All sectors is fsm\n") + + s, err := miner.SectorsList(ctx) + require.NoError(t, err) + + toCheck := map[abi.SectorNumber]struct{}{} + for _, number := range s { + toCheck[number] = struct{}{} + } + + return toCheck +} + +func pledgeSectors(t *testing.T, ctx context.Context, miner TestStorageNode, n, existing int, blockNotif <-chan struct{}) { + toCheck := startPledge(t, ctx, miner, n, existing, blockNotif) + + for len(toCheck) > 0 { + flushSealingBatches(t, ctx, miner) + + states := map[api.SectorState]int{} + for n := range toCheck { + st, err := miner.SectorsStatus(ctx, n, false) + require.NoError(t, err) + states[st.State]++ + if st.State == api.SectorState(sealing.Proving) { + delete(toCheck, n) + } + if strings.Contains(string(st.State), "Fail") { + t.Fatal("sector in a failed state", st.State) + } + } + + build.Clock.Sleep(100 * time.Millisecond) + fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states) + } +} diff --git a/api/test/test.go b/api/test/test.go index d09827f5e..64062e4ff 100644 --- a/api/test/test.go +++ b/api/test/test.go @@ -122,26 +122,46 @@ var OneFull = DefaultFullOpts(1) var TwoFull = DefaultFullOpts(2) var FullNodeWithLatestActorsAt = func(upgradeHeight abi.ChainEpoch) FullNodeOpts { - if upgradeHeight == -1 { - upgradeHeight = 3 + // Attention: Update this when introducing new actor versions or your tests will be sad + return FullNodeWithNetworkUpgradeAt(network.Version13, upgradeHeight) +} + +var FullNodeWithNetworkUpgradeAt = func(version network.Version, upgradeHeight abi.ChainEpoch) FullNodeOpts { + fullSchedule := stmgr.UpgradeSchedule{{ + // prepare for upgrade. + Network: network.Version9, + Height: 1, + Migration: stmgr.UpgradeActorsV2, + }, { + Network: network.Version10, + Height: 2, + Migration: stmgr.UpgradeActorsV3, + }, { + Network: network.Version12, + Height: 3, + Migration: stmgr.UpgradeActorsV4, + }, { + Network: network.Version13, + Height: 4, + Migration: stmgr.UpgradeActorsV5, + }} + + schedule := stmgr.UpgradeSchedule{} + for _, upgrade := range fullSchedule { + if upgrade.Network > version { + break + } + + schedule = append(schedule, upgrade) + } + + if upgradeHeight > 0 { + schedule[len(schedule)-1].Height = upgradeHeight } return FullNodeOpts{ Opts: func(nodes []TestNode) node.Option { - return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{ - // prepare for upgrade. 
- Network: network.Version9, - Height: 1, - Migration: stmgr.UpgradeActorsV2, - }, { - Network: network.Version10, - Height: 2, - Migration: stmgr.UpgradeActorsV3, - }, { - Network: network.Version12, - Height: upgradeHeight, - Migration: stmgr.UpgradeActorsV4, - }}) + return node.Override(new(stmgr.UpgradeSchedule), schedule) }, } } diff --git a/api/test/verifreg.go b/api/test/verifreg.go index b66ca1a36..3fc1fb75a 100644 --- a/api/test/verifreg.go +++ b/api/test/verifreg.go @@ -4,6 +4,8 @@ import ( "context" "strings" + "github.com/filecoin-project/go-state-types/network" + lapi "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors" @@ -19,108 +21,120 @@ import ( ) func AddVerifiedClient(t *testing.T, b APIBuilder) { + test := func(nv network.Version, shouldWork bool) func(*testing.T) { + return func(t *testing.T) { - nodes, miners := b(t, []FullNodeOpts{FullNodeWithLatestActorsAt(-1)}, OneMiner) - api := nodes[0].FullNode.(*impl.FullNodeAPI) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + nodes, miners := b(t, []FullNodeOpts{FullNodeWithNetworkUpgradeAt(nv, -1)}, OneMiner) + api := nodes[0].FullNode.(*impl.FullNodeAPI) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - //Get VRH - vrh, err := api.StateVerifiedRegistryRootKey(ctx, types.TipSetKey{}) - if err != nil { - t.Fatal(err) + //Get VRH + vrh, err := api.StateVerifiedRegistryRootKey(ctx, types.TipSetKey{}) + if err != nil { + t.Fatal(err) + } + + //Add verifier + verifier, err := api.WalletDefaultAddress(ctx) + if err != nil { + t.Fatal(err) + } + + params, err := actors.SerializeParams(&verifreg4.AddVerifierParams{Address: verifier, Allowance: big.NewInt(100000000000)}) + if err != nil { + t.Fatal(err) + } + msg := &types.Message{ + To: verifreg.Address, + From: vrh, + Method: verifreg.Methods.AddVerifier, + Params: params, + Value: big.Zero(), + } + + bm := NewBlockMiner(ctx, t, miners[0], 100*time.Millisecond) + bm.MineBlocks() + defer bm.Stop() + + sm, err := api.MpoolPushMessage(ctx, msg, nil) + if err != nil { + t.Fatal("AddVerifier failed: ", err) + } + res, err := api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true) + if err != nil { + t.Fatal(err) + } + if res.Receipt.ExitCode != 0 { + t.Fatal("did not successfully send message") + } + + //Assign datacap to a client + datacap := big.NewInt(10000) + clientAddress, err := api.WalletNew(ctx, types.KTBLS) + if err != nil { + t.Fatal(err) + } + + params, err = actors.SerializeParams(&verifreg4.AddVerifiedClientParams{Address: clientAddress, Allowance: datacap}) + if err != nil { + t.Fatal(err) + } + + msg = &types.Message{ + To: verifreg.Address, + From: verifier, + Method: verifreg.Methods.AddVerifiedClient, + Params: params, + Value: big.Zero(), + } + + sm, err = api.MpoolPushMessage(ctx, msg, nil) + if err != nil { + t.Fatal("AddVerifiedClient faield: ", err) + } + res, err = api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true) + if err != nil { + t.Fatal(err) + } + if res.Receipt.ExitCode != 0 { + t.Fatal("did not successfully send message") + } + + //check datacap balance + dcap, err := api.StateVerifiedClientStatus(ctx, clientAddress, types.EmptyTSK) + if err != nil { + t.Fatal(err) + } + if !dcap.Equals(datacap) { + t.Fatal("") + } + + //try to assign datacap to the same client should fail for actor v4 and below + params, err = actors.SerializeParams(&verifreg4.AddVerifiedClientParams{Address: clientAddress, Allowance: datacap}) + if err != nil { + t.Fatal(err) + 
} + + msg = &types.Message{ + To: verifreg.Address, + From: verifier, + Method: verifreg.Methods.AddVerifiedClient, + Params: params, + Value: big.Zero(), + } + + _, err = api.MpoolPushMessage(ctx, msg, nil) + if shouldWork && err != nil { + t.Fatal("expected nil err", err) + } + + if !shouldWork && (err == nil || !strings.Contains(err.Error(), "verified client already exists")) { + t.Fatal("Add datacap to an existing verified client should fail") + } + } } - //Add verifier - verifier, err := api.WalletDefaultAddress(ctx) - if err != nil { - t.Fatal(err) - } - - params, err := actors.SerializeParams(&verifreg4.AddVerifierParams{Address: verifier, Allowance: big.NewInt(100000000000)}) - if err != nil { - t.Fatal(err) - } - msg := &types.Message{ - To: verifreg.Address, - From: vrh, - Method: verifreg.Methods.AddVerifier, - Params: params, - Value: big.Zero(), - } - - bm := NewBlockMiner(ctx, t, miners[0], 100*time.Millisecond) - bm.MineBlocks() - defer bm.Stop() - - sm, err := api.MpoolPushMessage(ctx, msg, nil) - if err != nil { - t.Fatal("AddVerifier failed: ", err) - } - res, err := api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true) - if err != nil { - t.Fatal(err) - } - if res.Receipt.ExitCode != 0 { - t.Fatal("did not successfully send message") - } - - //Assign datacap to a client - datacap := big.NewInt(10000) - clientAddress, err := api.WalletNew(ctx, types.KTBLS) - if err != nil { - t.Fatal(err) - } - - params, err = actors.SerializeParams(&verifreg4.AddVerifiedClientParams{Address: clientAddress, Allowance: datacap}) - if err != nil { - t.Fatal(err) - } - - msg = &types.Message{ - To: verifreg.Address, - From: verifier, - Method: verifreg.Methods.AddVerifiedClient, - Params: params, - Value: big.Zero(), - } - - sm, err = api.MpoolPushMessage(ctx, msg, nil) - if err != nil { - t.Fatal("AddVerifiedClient faield: ", err) - } - res, err = api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true) - if err != nil { - t.Fatal(err) - } - if res.Receipt.ExitCode != 0 { - t.Fatal("did not successfully send message") - } - - //check datacap balance - dcap, err := api.StateVerifiedClientStatus(ctx, clientAddress, types.EmptyTSK) - if err != nil { - t.Fatal(err) - } - if !dcap.Equals(datacap) { - t.Fatal("") - } - - //try to assign datacap to the same client should fail for actor v4 and below - params, err = actors.SerializeParams(&verifreg4.AddVerifiedClientParams{Address: clientAddress, Allowance: datacap}) - if err != nil { - t.Fatal(err) - } - - msg = &types.Message{ - To: verifreg.Address, - From: verifier, - Method: verifreg.Methods.AddVerifiedClient, - Params: params, - Value: big.Zero(), - } - - if _, err = api.MpoolPushMessage(ctx, msg, nil); !strings.Contains(err.Error(), "verified client already exists") { - t.Fatal("Add datacap to an exist verified client should fail") - } + t.Run("nv12", test(network.Version12, false)) + t.Run("nv13", test(network.Version13, true)) } diff --git a/api/test/window_post.go b/api/test/window_post.go index bb5010b25..6d317676e 100644 --- a/api/test/window_post.go +++ b/api/test/window_post.go @@ -3,14 +3,11 @@ package test import ( "context" "fmt" - "sort" - "sync/atomic" - - "strings" "testing" "time" - "github.com/stretchr/testify/assert" + "github.com/filecoin-project/go-state-types/big" + "github.com/stretchr/testify/require" "github.com/filecoin-project/go-address" @@ -18,7 +15,6 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/crypto" 
"github.com/filecoin-project/go-state-types/dline" - "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/extern/sector-storage/mock" sealing "github.com/filecoin-project/lotus/extern/storage-sealing" proof3 "github.com/filecoin-project/specs-actors/v3/actors/runtime/proof" @@ -29,181 +25,9 @@ import ( "github.com/filecoin-project/lotus/chain/actors" minerActor "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" - bminer "github.com/filecoin-project/lotus/miner" "github.com/filecoin-project/lotus/node/impl" ) -func TestSDRUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, sn := b(t, []FullNodeOpts{FullNodeWithSDRAt(500, 1000)}, OneMiner) - client := n[0].FullNode.(*impl.FullNodeAPI) - miner := sn[0] - - addrinfo, err := client.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := miner.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - build.Clock.Sleep(time.Second) - - pledge := make(chan struct{}) - mine := int64(1) - done := make(chan struct{}) - go func() { - defer close(done) - round := 0 - for atomic.LoadInt64(&mine) != 0 { - build.Clock.Sleep(blocktime) - if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) { - - }}); err != nil { - t.Error(err) - } - - // 3 sealing rounds: before, during after. - if round >= 3 { - continue - } - - head, err := client.ChainHead(ctx) - assert.NoError(t, err) - - // rounds happen every 100 blocks, with a 50 block offset. - if head.Height() >= abi.ChainEpoch(round*500+50) { - round++ - pledge <- struct{}{} - - ver, err := client.StateNetworkVersion(ctx, head.Key()) - assert.NoError(t, err) - switch round { - case 1: - assert.Equal(t, network.Version6, ver) - case 2: - assert.Equal(t, network.Version7, ver) - case 3: - assert.Equal(t, network.Version8, ver) - } - } - - } - }() - - // before. 
- pledgeSectors(t, ctx, miner, 9, 0, pledge) - - s, err := miner.SectorsList(ctx) - require.NoError(t, err) - sort.Slice(s, func(i, j int) bool { - return s[i] < s[j] - }) - - for i, id := range s { - info, err := miner.SectorsStatus(ctx, id, true) - require.NoError(t, err) - expectProof := abi.RegisteredSealProof_StackedDrg2KiBV1 - if i >= 3 { - // after - expectProof = abi.RegisteredSealProof_StackedDrg2KiBV1_1 - } - assert.Equal(t, expectProof, info.SealProof, "sector %d, id %d", i, id) - } - - atomic.StoreInt64(&mine, 0) - <-done -} - -func TestPledgeSector(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, sn := b(t, OneFull, OneMiner) - client := n[0].FullNode.(*impl.FullNodeAPI) - miner := sn[0] - - addrinfo, err := client.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := miner.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - build.Clock.Sleep(time.Second) - - mine := int64(1) - done := make(chan struct{}) - go func() { - defer close(done) - for atomic.LoadInt64(&mine) != 0 { - build.Clock.Sleep(blocktime) - if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) { - - }}); err != nil { - t.Error(err) - } - } - }() - - pledgeSectors(t, ctx, miner, nSectors, 0, nil) - - atomic.StoreInt64(&mine, 0) - <-done -} - -func pledgeSectors(t *testing.T, ctx context.Context, miner TestStorageNode, n, existing int, blockNotif <-chan struct{}) { - for i := 0; i < n; i++ { - if i%3 == 0 && blockNotif != nil { - <-blockNotif - log.Errorf("WAIT") - } - log.Errorf("PLEDGING %d", i) - _, err := miner.PledgeSector(ctx) - require.NoError(t, err) - } - - for { - s, err := miner.SectorsList(ctx) // Note - the test builder doesn't import genesis sectors into FSM - require.NoError(t, err) - fmt.Printf("Sectors: %d\n", len(s)) - if len(s) >= n+existing { - break - } - - build.Clock.Sleep(100 * time.Millisecond) - } - - fmt.Printf("All sectors is fsm\n") - - s, err := miner.SectorsList(ctx) - require.NoError(t, err) - - toCheck := map[abi.SectorNumber]struct{}{} - for _, number := range s { - toCheck[number] = struct{}{} - } - - for len(toCheck) > 0 { - for n := range toCheck { - st, err := miner.SectorsStatus(ctx, n, false) - require.NoError(t, err) - if st.State == api.SectorState(sealing.Proving) { - delete(toCheck, n) - } - if strings.Contains(string(st.State), "Fail") { - t.Fatal("sector in a failed state", st.State) - } - } - - build.Clock.Sleep(100 * time.Millisecond) - fmt.Printf("WaitSeal: %d\n", len(s)) - } -} - func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) { for _, height := range []abi.ChainEpoch{ -1, // before @@ -719,7 +543,7 @@ func TestWindowPostDispute(t *testing.T, b APIBuilder, blocktime time.Duration) for { di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK) require.NoError(t, err) - if di.Index == evilSectorLoc.Deadline { + if di.Index == evilSectorLoc.Deadline && di.CurrentEpoch-di.PeriodStart > 1 { break } build.Clock.Sleep(blocktime) @@ -816,7 +640,7 @@ func TestWindowPostDispute(t *testing.T, b APIBuilder, blocktime time.Duration) for { di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK) require.NoError(t, err) - if di.Index == evilSectorLoc.Deadline { + if di.Index == evilSectorLoc.Deadline && di.CurrentEpoch-di.PeriodStart > 1 { break } build.Clock.Sleep(blocktime) @@ -1024,3 +848,155 @@ waitForProof: require.Contains(t, 
err.Error(), "failed to dispute valid post (RetCode=16)") } } + +func TestWindowPostBaseFeeNoBurn(t *testing.T, b APIBuilder, blocktime time.Duration) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + och := build.UpgradeClausHeight + build.UpgradeClausHeight = 10 + n, sn := b(t, DefaultFullOpts(1), OneMiner) + + client := n[0].FullNode.(*impl.FullNodeAPI) + miner := sn[0] + + { + addrinfo, err := client.NetAddrsListen(ctx) + if err != nil { + t.Fatal(err) + } + + if err := miner.NetConnect(ctx, addrinfo); err != nil { + t.Fatal(err) + } + } + + maddr, err := miner.ActorAddress(ctx) + require.NoError(t, err) + + mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + build.Clock.Sleep(time.Second) + + done := make(chan struct{}) + go func() { + defer close(done) + for ctx.Err() == nil { + build.Clock.Sleep(blocktime) + if err := miner.MineOne(ctx, MineNext); err != nil { + if ctx.Err() != nil { + // context was canceled, ignore the error. + return + } + t.Error(err) + } + } + }() + defer func() { + cancel() + <-done + }() + + pledgeSectors(t, ctx, miner, 10, 0, nil) + wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK) + require.NoError(t, err) + en := wact.Nonce + + // wait for a new message to be sent from worker address, it will be a PoSt + +waitForProof: + for { + wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK) + require.NoError(t, err) + if wact.Nonce > en { + break waitForProof + } + + build.Clock.Sleep(blocktime) + } + + slm, err := client.StateListMessages(ctx, &api.MessageMatch{To: maddr}, types.EmptyTSK, 0) + require.NoError(t, err) + + pmr, err := client.StateReplay(ctx, types.EmptyTSK, slm[0]) + require.NoError(t, err) + + require.Equal(t, pmr.GasCost.BaseFeeBurn, big.Zero()) + + build.UpgradeClausHeight = och +} + +func TestWindowPostBaseFeeBurn(t *testing.T, b APIBuilder, blocktime time.Duration) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + n, sn := b(t, []FullNodeOpts{FullNodeWithLatestActorsAt(-1)}, OneMiner) + + client := n[0].FullNode.(*impl.FullNodeAPI) + miner := sn[0] + + { + addrinfo, err := client.NetAddrsListen(ctx) + if err != nil { + t.Fatal(err) + } + + if err := miner.NetConnect(ctx, addrinfo); err != nil { + t.Fatal(err) + } + } + + maddr, err := miner.ActorAddress(ctx) + require.NoError(t, err) + + mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + build.Clock.Sleep(time.Second) + + done := make(chan struct{}) + go func() { + defer close(done) + for ctx.Err() == nil { + build.Clock.Sleep(blocktime) + if err := miner.MineOne(ctx, MineNext); err != nil { + if ctx.Err() != nil { + // context was canceled, ignore the error. 
+ return + } + t.Error(err) + } + } + }() + defer func() { + cancel() + <-done + }() + + pledgeSectors(t, ctx, miner, 10, 0, nil) + wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK) + require.NoError(t, err) + en := wact.Nonce + + // wait for a new message to be sent from worker address, it will be a PoSt + +waitForProof: + for { + wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK) + require.NoError(t, err) + if wact.Nonce > en { + break waitForProof + } + + build.Clock.Sleep(blocktime) + } + + slm, err := client.StateListMessages(ctx, &api.MessageMatch{To: maddr}, types.EmptyTSK, 0) + require.NoError(t, err) + + pmr, err := client.StateReplay(ctx, types.EmptyTSK, slm[0]) + require.NoError(t, err) + + require.NotEqual(t, pmr.GasCost.BaseFeeBurn, big.Zero()) +} diff --git a/build/openrpc/full.json.gz b/build/openrpc/full.json.gz index 394f1998b..055c13754 100644 Binary files a/build/openrpc/full.json.gz and b/build/openrpc/full.json.gz differ diff --git a/build/openrpc/miner.json.gz b/build/openrpc/miner.json.gz index db17586d0..5d32e910d 100644 Binary files a/build/openrpc/miner.json.gz and b/build/openrpc/miner.json.gz differ diff --git a/build/openrpc/worker.json.gz b/build/openrpc/worker.json.gz index d0a18b43a..fa6f28aff 100644 Binary files a/build/openrpc/worker.json.gz and b/build/openrpc/worker.json.gz differ diff --git a/build/parameters.go b/build/parameters.go index e2626e2c3..9e60f12a6 100644 --- a/build/parameters.go +++ b/build/parameters.go @@ -7,6 +7,13 @@ import ( //go:embed proof-params/parameters.json var params []byte +//go:embed proof-params/srs-inner-product.json +var srs []byte + func ParametersJSON() []byte { return params } + +func SrsJSON() []byte { + return srs +} diff --git a/build/params_2k.go b/build/params_2k.go index 3dd68c9c6..387d2da0b 100644 --- a/build/params_2k.go +++ b/build/params_2k.go @@ -24,7 +24,7 @@ var UpgradeIgnitionHeight = abi.ChainEpoch(-2) var UpgradeRefuelHeight = abi.ChainEpoch(-3) var UpgradeTapeHeight = abi.ChainEpoch(-4) -var UpgradeActorsV2Height = abi.ChainEpoch(-5) +var UpgradeAssemblyHeight = abi.ChainEpoch(-5) var UpgradeLiftoffHeight = abi.ChainEpoch(-6) var UpgradeKumquatHeight = abi.ChainEpoch(-7) @@ -33,11 +33,13 @@ var UpgradePersianHeight = abi.ChainEpoch(-9) var UpgradeOrangeHeight = abi.ChainEpoch(-10) var UpgradeClausHeight = abi.ChainEpoch(-11) -var UpgradeActorsV3Height = abi.ChainEpoch(-12) +var UpgradeTrustHeight = abi.ChainEpoch(-12) var UpgradeNorwegianHeight = abi.ChainEpoch(-13) -var UpgradeActorsV4Height = abi.ChainEpoch(-14) +var UpgradeTurboHeight = abi.ChainEpoch(-14) + +var UpgradeHyperdriveHeight = abi.ChainEpoch(-15) var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ 0: DrandMainnet, @@ -68,16 +70,17 @@ func init() { UpgradeIgnitionHeight = getUpgradeHeight("LOTUS_IGNITION_HEIGHT", UpgradeIgnitionHeight) UpgradeRefuelHeight = getUpgradeHeight("LOTUS_REFUEL_HEIGHT", UpgradeRefuelHeight) UpgradeTapeHeight = getUpgradeHeight("LOTUS_TAPE_HEIGHT", UpgradeTapeHeight) - UpgradeActorsV2Height = getUpgradeHeight("LOTUS_ACTORSV2_HEIGHT", UpgradeActorsV2Height) + UpgradeAssemblyHeight = getUpgradeHeight("LOTUS_ACTORSV2_HEIGHT", UpgradeAssemblyHeight) UpgradeLiftoffHeight = getUpgradeHeight("LOTUS_LIFTOFF_HEIGHT", UpgradeLiftoffHeight) UpgradeKumquatHeight = getUpgradeHeight("LOTUS_KUMQUAT_HEIGHT", UpgradeKumquatHeight) UpgradeCalicoHeight = getUpgradeHeight("LOTUS_CALICO_HEIGHT", UpgradeCalicoHeight) UpgradePersianHeight = getUpgradeHeight("LOTUS_PERSIAN_HEIGHT", UpgradePersianHeight) 
UpgradeOrangeHeight = getUpgradeHeight("LOTUS_ORANGE_HEIGHT", UpgradeOrangeHeight) UpgradeClausHeight = getUpgradeHeight("LOTUS_CLAUS_HEIGHT", UpgradeClausHeight) - UpgradeActorsV3Height = getUpgradeHeight("LOTUS_ACTORSV3_HEIGHT", UpgradeActorsV3Height) + UpgradeTrustHeight = getUpgradeHeight("LOTUS_ACTORSV3_HEIGHT", UpgradeTrustHeight) UpgradeNorwegianHeight = getUpgradeHeight("LOTUS_NORWEGIAN_HEIGHT", UpgradeNorwegianHeight) - UpgradeActorsV4Height = getUpgradeHeight("LOTUS_ACTORSV4_HEIGHT", UpgradeActorsV4Height) + UpgradeTurboHeight = getUpgradeHeight("LOTUS_ACTORSV4_HEIGHT", UpgradeTurboHeight) + UpgradeHyperdriveHeight = getUpgradeHeight("LOTUS_HYPERDRIVE_HEIGHT", UpgradeHyperdriveHeight) BuildType |= Build2k } diff --git a/build/params_butterfly.go b/build/params_butterfly.go index 6daeca502..258f6ab0f 100644 --- a/build/params_butterfly.go +++ b/build/params_butterfly.go @@ -23,7 +23,7 @@ const UpgradeSmokeHeight = -2 const UpgradeIgnitionHeight = -3 const UpgradeRefuelHeight = -4 -var UpgradeActorsV2Height = abi.ChainEpoch(30) +var UpgradeAssemblyHeight = abi.ChainEpoch(30) const UpgradeTapeHeight = 60 const UpgradeLiftoffHeight = -5 @@ -32,9 +32,10 @@ const UpgradeCalicoHeight = 120 const UpgradePersianHeight = 150 const UpgradeClausHeight = 180 const UpgradeOrangeHeight = 210 -const UpgradeActorsV3Height = 240 -const UpgradeNorwegianHeight = UpgradeActorsV3Height + (builtin2.EpochsInHour * 12) -const UpgradeActorsV4Height = 8922 +const UpgradeTrustHeight = 240 +const UpgradeNorwegianHeight = UpgradeTrustHeight + (builtin2.EpochsInHour * 12) +const UpgradeTurboHeight = 8922 +const UpgradeHyperdriveHeight = 9999999 func init() { policy.SetConsensusMinerMinPower(abi.NewStoragePower(2 << 30)) diff --git a/build/params_calibnet.go b/build/params_calibnet.go index 997bb395b..4685ec30c 100644 --- a/build/params_calibnet.go +++ b/build/params_calibnet.go @@ -25,7 +25,7 @@ const UpgradeSmokeHeight = -2 const UpgradeIgnitionHeight = -3 const UpgradeRefuelHeight = -4 -var UpgradeActorsV2Height = abi.ChainEpoch(30) +var UpgradeAssemblyHeight = abi.ChainEpoch(30) const UpgradeTapeHeight = 60 @@ -40,10 +40,12 @@ const UpgradeClausHeight = 250 const UpgradeOrangeHeight = 300 -const UpgradeActorsV3Height = 600 +const UpgradeTrustHeight = 600 const UpgradeNorwegianHeight = 114000 -const UpgradeActorsV4Height = 193789 +const UpgradeTurboHeight = 193789 + +const UpgradeHyperdriveHeight = 9999999 func init() { policy.SetConsensusMinerMinPower(abi.NewStoragePower(32 << 30)) diff --git a/build/params_mainnet.go b/build/params_mainnet.go index fe25b3745..52c622479 100644 --- a/build/params_mainnet.go +++ b/build/params_mainnet.go @@ -8,6 +8,7 @@ package build import ( + "math" "os" "github.com/filecoin-project/go-address" @@ -32,7 +33,7 @@ const UpgradeSmokeHeight = 51000 const UpgradeIgnitionHeight = 94000 const UpgradeRefuelHeight = 130800 -const UpgradeActorsV2Height = 138720 +const UpgradeAssemblyHeight = 138720 const UpgradeTapeHeight = 140760 @@ -49,22 +50,29 @@ const UpgradePersianHeight = UpgradeCalicoHeight + (builtin2.EpochsInHour * 60) const UpgradeOrangeHeight = 336458 // 2020-12-22T02:00:00Z -const UpgradeClausHeight = 343200 +var UpgradeClausHeight = abi.ChainEpoch(343200) // 2021-03-04T00:00:30Z -const UpgradeActorsV3Height = 550321 +const UpgradeTrustHeight = 550321 // 2021-04-12T22:00:00Z const UpgradeNorwegianHeight = 665280 // 2021-04-29T06:00:00Z -const UpgradeActorsV4Height = 712320 +const UpgradeTurboHeight = 712320 + +// ??? 
+var UpgradeHyperdriveHeight = abi.ChainEpoch(9999999) func init() { if os.Getenv("LOTUS_USE_TEST_ADDRESSES") != "1" { SetAddressNetwork(address.Mainnet) } + if os.Getenv("LOTUS_DISABLE_HYPERDRIVE") == "1" { + UpgradeHyperdriveHeight = math.MaxInt64 + } + Devnet = false BuildType = BuildMainnet diff --git a/build/params_nerpanet.go b/build/params_nerpanet.go index 4587e81f8..6663a9162 100644 --- a/build/params_nerpanet.go +++ b/build/params_nerpanet.go @@ -27,7 +27,7 @@ const UpgradeRefuelHeight = -3 const UpgradeLiftoffHeight = -5 -const UpgradeActorsV2Height = 30 // critical: the network can bootstrap from v1 only +const UpgradeAssemblyHeight = 30 // critical: the network can bootstrap from v1 only const UpgradeTapeHeight = 60 const UpgradeKumquatHeight = 90 @@ -39,9 +39,10 @@ const UpgradeClausHeight = 250 const UpgradeOrangeHeight = 300 -const UpgradeActorsV3Height = 600 +const UpgradeTrustHeight = 600 const UpgradeNorwegianHeight = 201000 -const UpgradeActorsV4Height = 203000 +const UpgradeTurboHeight = 203000 +const UpgradeHyperdriveHeight = 999999999 func init() { // Minimum block production power is set to 4 TiB diff --git a/build/params_shared_vals.go b/build/params_shared_vals.go index 6b98b6a9c..e4240ccce 100644 --- a/build/params_shared_vals.go +++ b/build/params_shared_vals.go @@ -25,7 +25,7 @@ const UnixfsLinksPerLevel = 1024 // Consensus / Network const AllowableClockDriftSecs = uint64(1) -const NewestNetworkVersion = network.Version12 +const NewestNetworkVersion = network.Version13 const ActorUpgradeNetworkVersion = network.Version4 // Epochs diff --git a/build/params_testground.go b/build/params_testground.go index 7da3c2272..252d23e75 100644 --- a/build/params_testground.go +++ b/build/params_testground.go @@ -82,20 +82,21 @@ var ( UpgradeBreezeHeight abi.ChainEpoch = -1 BreezeGasTampingDuration abi.ChainEpoch = 0 - UpgradeSmokeHeight abi.ChainEpoch = -1 - UpgradeIgnitionHeight abi.ChainEpoch = -2 - UpgradeRefuelHeight abi.ChainEpoch = -3 - UpgradeTapeHeight abi.ChainEpoch = -4 - UpgradeActorsV2Height abi.ChainEpoch = 10 - UpgradeLiftoffHeight abi.ChainEpoch = -5 - UpgradeKumquatHeight abi.ChainEpoch = -6 - UpgradeCalicoHeight abi.ChainEpoch = -7 - UpgradePersianHeight abi.ChainEpoch = -8 - UpgradeOrangeHeight abi.ChainEpoch = -9 - UpgradeClausHeight abi.ChainEpoch = -10 - UpgradeActorsV3Height abi.ChainEpoch = -11 - UpgradeNorwegianHeight abi.ChainEpoch = -12 - UpgradeActorsV4Height abi.ChainEpoch = -13 + UpgradeSmokeHeight abi.ChainEpoch = -1 + UpgradeIgnitionHeight abi.ChainEpoch = -2 + UpgradeRefuelHeight abi.ChainEpoch = -3 + UpgradeTapeHeight abi.ChainEpoch = -4 + UpgradeAssemblyHeight abi.ChainEpoch = 10 + UpgradeLiftoffHeight abi.ChainEpoch = -5 + UpgradeKumquatHeight abi.ChainEpoch = -6 + UpgradeCalicoHeight abi.ChainEpoch = -7 + UpgradePersianHeight abi.ChainEpoch = -8 + UpgradeOrangeHeight abi.ChainEpoch = -9 + UpgradeClausHeight abi.ChainEpoch = -10 + UpgradeTrustHeight abi.ChainEpoch = -11 + UpgradeNorwegianHeight abi.ChainEpoch = -12 + UpgradeTurboHeight abi.ChainEpoch = -13 + UpgradeHyperdriveHeight abi.ChainEpoch = -13 DrandSchedule = map[abi.ChainEpoch]DrandEnum{ 0: DrandMainnet, diff --git a/build/proof-params/srs-inner-product.json b/build/proof-params/srs-inner-product.json new file mode 100644 index 000000000..8566bf5fd --- /dev/null +++ b/build/proof-params/srs-inner-product.json @@ -0,0 +1,7 @@ +{ + "v28-fil-inner-product-v1.srs": { + "cid": "Qmdq44DjcQnFfU3PJcdX7J49GCqcUYszr1TxMbHtAkvQ3g", + "digest": "ae20310138f5ba81451d723f858e3797", + 
"sector_size": 0 + } +} diff --git a/chain/actors/adt/temp b/chain/actors/adt/temp deleted file mode 100644 index e69de29bb..000000000 diff --git a/chain/actors/aerrors/temp b/chain/actors/aerrors/temp deleted file mode 100644 index e69de29bb..000000000 diff --git a/chain/actors/agen/temp b/chain/actors/agen/temp deleted file mode 100644 index e69de29bb..000000000 diff --git a/chain/actors/builtin/account/account.go b/chain/actors/builtin/account/account.go index 97811d08a..04c82b340 100644 --- a/chain/actors/builtin/account/account.go +++ b/chain/actors/builtin/account/account.go @@ -19,6 +19,8 @@ import ( builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" ) func init() { @@ -38,6 +40,10 @@ func init() { builtin.RegisterActorState(builtin4.AccountActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load4(store, root) }) + + builtin.RegisterActorState(builtin5.AccountActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load5(store, root) + }) } var Methods = builtin4.MethodsAccount @@ -57,6 +63,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case builtin4.AccountActorCodeID: return load4(store, act.Head) + case builtin5.AccountActorCodeID: + return load5(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } @@ -76,6 +85,9 @@ func MakeState(store adt.Store, av actors.Version, addr address.Address) (State, case actors.Version4: return make4(store, addr) + case actors.Version5: + return make5(store, addr) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -95,6 +107,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version4: return builtin4.AccountActorCodeID, nil + case actors.Version5: + return builtin5.AccountActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) diff --git a/chain/actors/builtin/account/temp b/chain/actors/builtin/account/temp deleted file mode 100644 index e69de29bb..000000000 diff --git a/chain/actors/builtin/account/v5.go b/chain/actors/builtin/account/v5.go new file mode 100644 index 000000000..538f56987 --- /dev/null +++ b/chain/actors/builtin/account/v5.go @@ -0,0 +1,40 @@ +package account + +import ( + "github.com/filecoin-project/go-address" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + account5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/account" +) + +var _ State = (*state5)(nil) + +func load5(store adt.Store, root cid.Cid) (State, error) { + out := state5{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make5(store adt.Store, addr address.Address) (State, error) { + out := state5{store: store} + out.State = account5.State{Address: addr} + return &out, nil +} + +type state5 struct { + account5.State + store adt.Store +} + +func (s *state5) PubkeyAddress() (address.Address, error) { + return s.Address, nil +} + +func (s *state5) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/builtin.go b/chain/actors/builtin/builtin.go index 5e34c015a..74d622819 100644 --- a/chain/actors/builtin/builtin.go +++ b/chain/actors/builtin/builtin.go @@ -17,46 +17,49 @@ import ( builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" smoothing4 
"github.com/filecoin-project/specs-actors/v4/actors/util/smoothing" + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + smoothing5 "github.com/filecoin-project/specs-actors/v5/actors/util/smoothing" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/types" - miner4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/miner" - proof4 "github.com/filecoin-project/specs-actors/v4/actors/runtime/proof" + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" ) -var SystemActorAddr = builtin4.SystemActorAddr -var BurntFundsActorAddr = builtin4.BurntFundsActorAddr -var CronActorAddr = builtin4.CronActorAddr +var SystemActorAddr = builtin5.SystemActorAddr +var BurntFundsActorAddr = builtin5.BurntFundsActorAddr +var CronActorAddr = builtin5.CronActorAddr var SaftAddress = makeAddress("t0122") var ReserveAddress = makeAddress("t090") var RootVerifierAddress = makeAddress("t080") var ( - ExpectedLeadersPerEpoch = builtin4.ExpectedLeadersPerEpoch + ExpectedLeadersPerEpoch = builtin5.ExpectedLeadersPerEpoch ) const ( - EpochDurationSeconds = builtin4.EpochDurationSeconds - EpochsInDay = builtin4.EpochsInDay - SecondsInDay = builtin4.SecondsInDay + EpochDurationSeconds = builtin5.EpochDurationSeconds + EpochsInDay = builtin5.EpochsInDay + SecondsInDay = builtin5.SecondsInDay ) const ( - MethodSend = builtin4.MethodSend - MethodConstructor = builtin4.MethodConstructor + MethodSend = builtin5.MethodSend + MethodConstructor = builtin5.MethodConstructor ) // These are all just type aliases across actor versions. In the future, that might change // and we might need to do something fancier. 
-type SectorInfo = proof4.SectorInfo -type PoStProof = proof4.PoStProof +type SectorInfo = proof5.SectorInfo +type PoStProof = proof5.PoStProof type FilterEstimate = smoothing0.FilterEstimate func QAPowerForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.StoragePower { - return miner4.QAPowerForWeight(size, duration, dealWeight, verifiedWeight) + return miner5.QAPowerForWeight(size, duration, dealWeight, verifiedWeight) } func FromV0FilterEstimate(v0 smoothing0.FilterEstimate) FilterEstimate { @@ -83,6 +86,12 @@ func FromV4FilterEstimate(v4 smoothing4.FilterEstimate) FilterEstimate { } +func FromV5FilterEstimate(v5 smoothing5.FilterEstimate) FilterEstimate { + + return (FilterEstimate)(v5) + +} + type ActorStateLoader func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) var ActorStateLoaders = make(map[cid.Cid]ActorStateLoader) @@ -114,6 +123,9 @@ func ActorNameByCode(c cid.Cid) string { case builtin4.IsBuiltinActor(c): return builtin4.ActorNameByCode(c) + case builtin5.IsBuiltinActor(c): + return builtin5.ActorNameByCode(c) + default: return "" } @@ -137,6 +149,10 @@ func IsBuiltinActor(c cid.Cid) bool { return true } + if builtin5.IsBuiltinActor(c) { + return true + } + return false } @@ -158,6 +174,10 @@ func IsAccountActor(c cid.Cid) bool { return true } + if c == builtin5.AccountActorCodeID { + return true + } + return false } @@ -179,6 +199,10 @@ func IsStorageMinerActor(c cid.Cid) bool { return true } + if c == builtin5.StorageMinerActorCodeID { + return true + } + return false } @@ -200,6 +224,10 @@ func IsMultisigActor(c cid.Cid) bool { return true } + if c == builtin5.MultisigActorCodeID { + return true + } + return false } @@ -221,6 +249,10 @@ func IsPaymentChannelActor(c cid.Cid) bool { return true } + if c == builtin5.PaymentChannelActorCodeID { + return true + } + return false } diff --git a/chain/actors/builtin/builtin.go.template b/chain/actors/builtin/builtin.go.template index 6eac2627e..031c05182 100644 --- a/chain/actors/builtin/builtin.go.template +++ b/chain/actors/builtin/builtin.go.template @@ -17,7 +17,7 @@ import ( "github.com/filecoin-project/lotus/chain/types" miner{{.latestVersion}} "github.com/filecoin-project/specs-actors{{import .latestVersion}}actors/builtin/miner" - proof{{.latestVersion}} "github.com/filecoin-project/specs-actors{{import .latestVersion}}actors/runtime/proof" + proof{{.latestVersion}} "github.com/filecoin-project/specs-actors{{import .latestVersion}}actors/runtime/proof" ) var SystemActorAddr = builtin{{.latestVersion}}.SystemActorAddr @@ -33,12 +33,12 @@ var ( const ( EpochDurationSeconds = builtin{{.latestVersion}}.EpochDurationSeconds - EpochsInDay = builtin{{.latestVersion}}.EpochsInDay - SecondsInDay = builtin{{.latestVersion}}.SecondsInDay + EpochsInDay = builtin{{.latestVersion}}.EpochsInDay + SecondsInDay = builtin{{.latestVersion}}.SecondsInDay ) const ( - MethodSend = builtin{{.latestVersion}}.MethodSend + MethodSend = builtin{{.latestVersion}}.MethodSend MethodConstructor = builtin{{.latestVersion}}.MethodConstructor ) diff --git a/chain/actors/builtin/cron/cron.go b/chain/actors/builtin/cron/cron.go index 62fa413a8..2275e747f 100644 --- a/chain/actors/builtin/cron/cron.go +++ b/chain/actors/builtin/cron/cron.go @@ -13,6 +13,8 @@ import ( builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" ) func MakeState(store 
adt.Store, av actors.Version) (State, error) { @@ -30,6 +32,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) { case actors.Version4: return make4(store) + case actors.Version5: + return make5(store) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -49,14 +54,17 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version4: return builtin4.CronActorCodeID, nil + case actors.Version5: + return builtin5.CronActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) } var ( - Address = builtin4.CronActorAddr - Methods = builtin4.MethodsCron + Address = builtin5.CronActorAddr + Methods = builtin5.MethodsCron ) type State interface { diff --git a/chain/actors/builtin/cron/temp b/chain/actors/builtin/cron/temp deleted file mode 100644 index e69de29bb..000000000 diff --git a/chain/actors/builtin/cron/v5.go b/chain/actors/builtin/cron/v5.go new file mode 100644 index 000000000..2bb00dc21 --- /dev/null +++ b/chain/actors/builtin/cron/v5.go @@ -0,0 +1,35 @@ +package cron + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + cron5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/cron" +) + +var _ State = (*state5)(nil) + +func load5(store adt.Store, root cid.Cid) (State, error) { + out := state5{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make5(store adt.Store) (State, error) { + out := state5{store: store} + out.State = *cron5.ConstructState(cron5.BuiltInEntries()) + return &out, nil +} + +type state5 struct { + cron5.State + store adt.Store +} + +func (s *state5) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/init/init.go b/chain/actors/builtin/init/init.go index 2091252ce..e1bd6f371 100644 --- a/chain/actors/builtin/init/init.go +++ b/chain/actors/builtin/init/init.go @@ -21,6 +21,8 @@ import ( builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" ) func init() { @@ -40,11 +42,15 @@ func init() { builtin.RegisterActorState(builtin4.InitActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load4(store, root) }) + + builtin.RegisterActorState(builtin5.InitActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load5(store, root) + }) } var ( - Address = builtin4.InitActorAddr - Methods = builtin4.MethodsInit + Address = builtin5.InitActorAddr + Methods = builtin5.MethodsInit ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -62,6 +68,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case builtin4.InitActorCodeID: return load4(store, act.Head) + case builtin5.InitActorCodeID: + return load5(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } @@ -81,6 +90,9 @@ func MakeState(store adt.Store, av actors.Version, networkName string) (State, e case actors.Version4: return make4(store, networkName) + case actors.Version5: + return make5(store, networkName) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -100,6 +112,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version4: return builtin4.InitActorCodeID, nil + case actors.Version5: + return builtin5.InitActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor 
version %d", av) diff --git a/chain/actors/builtin/init/temp b/chain/actors/builtin/init/temp deleted file mode 100644 index e69de29bb..000000000 diff --git a/chain/actors/builtin/init/v5.go b/chain/actors/builtin/init/v5.go new file mode 100644 index 000000000..107366de5 --- /dev/null +++ b/chain/actors/builtin/init/v5.go @@ -0,0 +1,114 @@ +package init + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/node/modules/dtypes" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + + init5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/init" + adt5 "github.com/filecoin-project/specs-actors/v5/actors/util/adt" +) + +var _ State = (*state5)(nil) + +func load5(store adt.Store, root cid.Cid) (State, error) { + out := state5{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make5(store adt.Store, networkName string) (State, error) { + out := state5{store: store} + + s, err := init5.ConstructState(store, networkName) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state5 struct { + init5.State + store adt.Store +} + +func (s *state5) ResolveAddress(address address.Address) (address.Address, bool, error) { + return s.State.ResolveAddress(s.store, address) +} + +func (s *state5) MapAddressToNewID(address address.Address) (address.Address, error) { + return s.State.MapAddressToNewID(s.store, address) +} + +func (s *state5) ForEachActor(cb func(id abi.ActorID, address address.Address) error) error { + addrs, err := adt5.AsMap(s.store, s.State.AddressMap, builtin5.DefaultHamtBitwidth) + if err != nil { + return err + } + var actorID cbg.CborInt + return addrs.ForEach(&actorID, func(key string) error { + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(abi.ActorID(actorID), addr) + }) +} + +func (s *state5) NetworkName() (dtypes.NetworkName, error) { + return dtypes.NetworkName(s.State.NetworkName), nil +} + +func (s *state5) SetNetworkName(name string) error { + s.State.NetworkName = name + return nil +} + +func (s *state5) SetNextID(id abi.ActorID) error { + s.State.NextID = id + return nil +} + +func (s *state5) Remove(addrs ...address.Address) (err error) { + m, err := adt5.AsMap(s.store, s.State.AddressMap, builtin5.DefaultHamtBitwidth) + if err != nil { + return err + } + for _, addr := range addrs { + if err = m.Delete(abi.AddrKey(addr)); err != nil { + return xerrors.Errorf("failed to delete entry for address: %s; err: %w", addr, err) + } + } + amr, err := m.Root() + if err != nil { + return xerrors.Errorf("failed to get address map root: %w", err) + } + s.State.AddressMap = amr + return nil +} + +func (s *state5) SetAddressMap(mcid cid.Cid) error { + s.State.AddressMap = mcid + return nil +} + +func (s *state5) AddressMap() (adt.Map, error) { + return adt5.AsMap(s.store, s.State.AddressMap, builtin5.DefaultHamtBitwidth) +} + +func (s *state5) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/market/actor.go.template b/chain/actors/builtin/market/actor.go.template index 5b67695e1..f78c84b8f 100644 --- a/chain/actors/builtin/market/actor.go.template +++ b/chain/actors/builtin/market/actor.go.template @@ -104,6 +104,7 @@ type 
DealProposals interface { type PublishStorageDealsParams = market0.PublishStorageDealsParams type PublishStorageDealsReturn = market0.PublishStorageDealsReturn +type VerifyDealsForActivationParams = market0.VerifyDealsForActivationParams type WithdrawBalanceParams = market0.WithdrawBalanceParams type ClientDealProposal = market0.ClientDealProposal @@ -111,7 +112,7 @@ type ClientDealProposal = market0.ClientDealProposal type DealState struct { SectorStartEpoch abi.ChainEpoch // -1 if not yet included in proven sector LastUpdatedEpoch abi.ChainEpoch // -1 if deal state never updated - SlashEpoch abi.ChainEpoch // -1 if deal never slashed + SlashEpoch abi.ChainEpoch // -1 if deal never slashed } type DealProposal struct { diff --git a/chain/actors/builtin/market/market.go b/chain/actors/builtin/market/market.go index ffc826658..026e35d4e 100644 --- a/chain/actors/builtin/market/market.go +++ b/chain/actors/builtin/market/market.go @@ -20,6 +20,8 @@ import ( builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" @@ -43,11 +45,15 @@ func init() { builtin.RegisterActorState(builtin4.StorageMarketActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load4(store, root) }) + + builtin.RegisterActorState(builtin5.StorageMarketActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load5(store, root) + }) } var ( - Address = builtin4.StorageMarketActorAddr - Methods = builtin4.MethodsMarket + Address = builtin5.StorageMarketActorAddr + Methods = builtin5.MethodsMarket ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -65,6 +71,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case builtin4.StorageMarketActorCodeID: return load4(store, act.Head) + case builtin5.StorageMarketActorCodeID: + return load5(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } @@ -84,6 +93,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) { case actors.Version4: return make4(store) + case actors.Version5: + return make5(store) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -103,6 +115,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version4: return builtin4.StorageMarketActorCodeID, nil + case actors.Version5: + return builtin5.StorageMarketActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) @@ -148,6 +163,7 @@ type DealProposals interface { type PublishStorageDealsParams = market0.PublishStorageDealsParams type PublishStorageDealsReturn = market0.PublishStorageDealsReturn +type VerifyDealsForActivationParams = market0.VerifyDealsForActivationParams type WithdrawBalanceParams = market0.WithdrawBalanceParams type ClientDealProposal = market0.ClientDealProposal diff --git a/chain/actors/builtin/market/state.go.template b/chain/actors/builtin/market/state.go.template index 965c8d41f..70b731148 100644 --- a/chain/actors/builtin/market/state.go.template +++ b/chain/actors/builtin/market/state.go.template @@ -235,4 +235,4 @@ func fromV{{.v}}DealProposal(v{{.v}} market{{.v}}.DealProposal) DealProposal { func (s *state{{.v}}) GetState() interface{} { return &s.State -} \ No newline at end of file +} diff --git a/chain/actors/builtin/market/temp 
b/chain/actors/builtin/market/temp deleted file mode 100644 index e69de29bb..000000000 diff --git a/chain/actors/builtin/market/v5.go b/chain/actors/builtin/market/v5.go new file mode 100644 index 000000000..12378c76d --- /dev/null +++ b/chain/actors/builtin/market/v5.go @@ -0,0 +1,226 @@ +package market + +import ( + "bytes" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/types" + + market5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/market" + adt5 "github.com/filecoin-project/specs-actors/v5/actors/util/adt" +) + +var _ State = (*state5)(nil) + +func load5(store adt.Store, root cid.Cid) (State, error) { + out := state5{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make5(store adt.Store) (State, error) { + out := state5{store: store} + + s, err := market5.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state5 struct { + market5.State + store adt.Store +} + +func (s *state5) TotalLocked() (abi.TokenAmount, error) { + fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral) + fml = types.BigAdd(fml, s.TotalClientStorageFee) + return fml, nil +} + +func (s *state5) BalancesChanged(otherState State) (bool, error) { + otherState5, ok := otherState.(*state5) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.EscrowTable.Equals(otherState5.State.EscrowTable) || !s.State.LockedTable.Equals(otherState5.State.LockedTable), nil +} + +func (s *state5) StatesChanged(otherState State) (bool, error) { + otherState5, ok := otherState.(*state5) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.States.Equals(otherState5.State.States), nil +} + +func (s *state5) States() (DealStates, error) { + stateArray, err := adt5.AsArray(s.store, s.State.States, market5.StatesAmtBitwidth) + if err != nil { + return nil, err + } + return &dealStates5{stateArray}, nil +} + +func (s *state5) ProposalsChanged(otherState State) (bool, error) { + otherState5, ok := otherState.(*state5) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.Proposals.Equals(otherState5.State.Proposals), nil +} + +func (s *state5) Proposals() (DealProposals, error) { + proposalArray, err := adt5.AsArray(s.store, s.State.Proposals, market5.ProposalsAmtBitwidth) + if err != nil { + return nil, err + } + return &dealProposals5{proposalArray}, nil +} + +func (s *state5) EscrowTable() (BalanceTable, error) { + bt, err := adt5.AsBalanceTable(s.store, s.State.EscrowTable) + if err != nil { + return nil, err + } + return &balanceTable5{bt}, nil +} + +func (s *state5) LockedTable() (BalanceTable, error) { + bt, err := adt5.AsBalanceTable(s.store, s.State.LockedTable) + if err != nil { + return nil, err + } + return &balanceTable5{bt}, nil +} + +func (s *state5) VerifyDealsForActivation( + minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry 
abi.ChainEpoch, +) (weight, verifiedWeight abi.DealWeight, err error) { + w, vw, _, err := market5.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch) + return w, vw, err +} + +func (s *state5) NextID() (abi.DealID, error) { + return s.State.NextID, nil +} + +type balanceTable5 struct { + *adt5.BalanceTable +} + +func (bt *balanceTable5) ForEach(cb func(address.Address, abi.TokenAmount) error) error { + asMap := (*adt5.Map)(bt.BalanceTable) + var ta abi.TokenAmount + return asMap.ForEach(&ta, func(key string) error { + a, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(a, ta) + }) +} + +type dealStates5 struct { + adt.Array +} + +func (s *dealStates5) Get(dealID abi.DealID) (*DealState, bool, error) { + var deal5 market5.DealState + found, err := s.Array.Get(uint64(dealID), &deal5) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + deal := fromV5DealState(deal5) + return &deal, true, nil +} + +func (s *dealStates5) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { + var ds5 market5.DealState + return s.Array.ForEach(&ds5, func(idx int64) error { + return cb(abi.DealID(idx), fromV5DealState(ds5)) + }) +} + +func (s *dealStates5) decode(val *cbg.Deferred) (*DealState, error) { + var ds5 market5.DealState + if err := ds5.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + ds := fromV5DealState(ds5) + return &ds, nil +} + +func (s *dealStates5) array() adt.Array { + return s.Array +} + +func fromV5DealState(v5 market5.DealState) DealState { + return (DealState)(v5) +} + +type dealProposals5 struct { + adt.Array +} + +func (s *dealProposals5) Get(dealID abi.DealID) (*DealProposal, bool, error) { + var proposal5 market5.DealProposal + found, err := s.Array.Get(uint64(dealID), &proposal5) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + proposal := fromV5DealProposal(proposal5) + return &proposal, true, nil +} + +func (s *dealProposals5) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error { + var dp5 market5.DealProposal + return s.Array.ForEach(&dp5, func(idx int64) error { + return cb(abi.DealID(idx), fromV5DealProposal(dp5)) + }) +} + +func (s *dealProposals5) decode(val *cbg.Deferred) (*DealProposal, error) { + var dp5 market5.DealProposal + if err := dp5.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + dp := fromV5DealProposal(dp5) + return &dp, nil +} + +func (s *dealProposals5) array() adt.Array { + return s.Array +} + +func fromV5DealProposal(v5 market5.DealProposal) DealProposal { + return (DealProposal)(v5) +} + +func (s *state5) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/miner/actor.go.template b/chain/actors/builtin/miner/actor.go.template index c7755ef71..619dc699d 100644 --- a/chain/actors/builtin/miner/actor.go.template +++ b/chain/actors/builtin/miner/actor.go.template @@ -143,26 +143,26 @@ type Partition interface { } type SectorOnChainInfo struct { - SectorNumber abi.SectorNumber - SealProof abi.RegisteredSealProof - SealedCID cid.Cid - DealIDs []abi.DealID - Activation abi.ChainEpoch - Expiration abi.ChainEpoch - DealWeight abi.DealWeight - VerifiedDealWeight abi.DealWeight - InitialPledge abi.TokenAmount - ExpectedDayReward abi.TokenAmount + SectorNumber abi.SectorNumber + SealProof abi.RegisteredSealProof + SealedCID cid.Cid + DealIDs []abi.DealID + Activation abi.ChainEpoch + Expiration 
abi.ChainEpoch + DealWeight abi.DealWeight + VerifiedDealWeight abi.DealWeight + InitialPledge abi.TokenAmount + ExpectedDayReward abi.TokenAmount ExpectedStoragePledge abi.TokenAmount } type SectorPreCommitInfo = miner0.SectorPreCommitInfo type SectorPreCommitOnChainInfo struct { - Info SectorPreCommitInfo + Info SectorPreCommitInfo PreCommitDeposit abi.TokenAmount - PreCommitEpoch abi.ChainEpoch - DealWeight abi.DealWeight + PreCommitEpoch abi.ChainEpoch + DealWeight abi.DealWeight VerifiedDealWeight abi.DealWeight } @@ -231,17 +231,17 @@ func WinningPoStProofTypeFromWindowPoStProofType(nver network.Version, proof abi } type MinerInfo struct { - Owner address.Address // Must be an ID-address. - Worker address.Address // Must be an ID-address. - NewWorker address.Address // Must be an ID-address. - ControlAddresses []address.Address // Must be an ID-addresses. - WorkerChangeEpoch abi.ChainEpoch - PeerId *peer.ID - Multiaddrs []abi.Multiaddrs - WindowPoStProofType abi.RegisteredPoStProof - SectorSize abi.SectorSize + Owner address.Address // Must be an ID-address. + Worker address.Address // Must be an ID-address. + NewWorker address.Address // Must be an ID-address. + ControlAddresses []address.Address // Must be an ID-addresses. + WorkerChangeEpoch abi.ChainEpoch + PeerId *peer.ID + Multiaddrs []abi.Multiaddrs + WindowPoStProofType abi.RegisteredPoStProof + SectorSize abi.SectorSize WindowPoStPartitionSectors uint64 - ConsensusFaultElapsed abi.ChainEpoch + ConsensusFaultElapsed abi.ChainEpoch } func (mi MinerInfo) IsController(addr address.Address) bool { @@ -272,25 +272,25 @@ type SectorLocation struct { } type SectorChanges struct { - Added []SectorOnChainInfo + Added []SectorOnChainInfo Extended []SectorExtensions - Removed []SectorOnChainInfo + Removed []SectorOnChainInfo } type SectorExtensions struct { From SectorOnChainInfo - To SectorOnChainInfo + To SectorOnChainInfo } type PreCommitChanges struct { - Added []SectorPreCommitOnChainInfo + Added []SectorPreCommitOnChainInfo Removed []SectorPreCommitOnChainInfo } type LockedFunds struct { - VestingFunds abi.TokenAmount + VestingFunds abi.TokenAmount InitialPledgeRequirement abi.TokenAmount - PreCommitDeposits abi.TokenAmount + PreCommitDeposits abi.TokenAmount } func (lf LockedFunds) TotalLockedFunds() abi.TokenAmount { diff --git a/chain/actors/builtin/miner/miner.go b/chain/actors/builtin/miner/miner.go index d9b872e3f..6e35d4e9f 100644 --- a/chain/actors/builtin/miner/miner.go +++ b/chain/actors/builtin/miner/miner.go @@ -30,6 +30,8 @@ import ( builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" ) func init() { @@ -50,9 +52,13 @@ func init() { return load4(store, root) }) + builtin.RegisterActorState(builtin5.StorageMinerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load5(store, root) + }) + } -var Methods = builtin4.MethodsMiner +var Methods = builtin5.MethodsMiner // Unchanged between v0, v2, v3, and v4 actors var WPoStProvingPeriod = miner0.WPoStProvingPeriod @@ -83,6 +89,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case builtin4.StorageMinerActorCodeID: return load4(store, act.Head) + case builtin5.StorageMinerActorCodeID: + return load5(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } @@ -102,6 +111,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) { 
case actors.Version4: return make4(store) + case actors.Version5: + return make5(store) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -121,6 +133,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version4: return builtin4.StorageMinerActorCodeID, nil + case actors.Version5: + return builtin5.StorageMinerActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) diff --git a/chain/actors/builtin/miner/state.go.template b/chain/actors/builtin/miner/state.go.template index 270510a8c..b7e5f40df 100644 --- a/chain/actors/builtin/miner/state.go.template +++ b/chain/actors/builtin/miner/state.go.template @@ -74,9 +74,9 @@ func (s *state{{.v}}) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) func (s *state{{.v}}) LockedFunds() (LockedFunds, error) { return LockedFunds{ - VestingFunds: s.State.LockedFunds, + VestingFunds: s.State.LockedFunds, InitialPledgeRequirement: s.State.InitialPledge{{if (le .v 1)}}Requirement{{end}}, - PreCommitDeposits: s.State.PreCommitDeposits, + PreCommitDeposits: s.State.PreCommitDeposits, }, nil } @@ -317,19 +317,19 @@ func (s *state{{.v}}) Info() (MinerInfo, error) { } {{end}} mi := MinerInfo{ - Owner: info.Owner, - Worker: info.Worker, + Owner: info.Owner, + Worker: info.Worker, ControlAddresses: info.ControlAddresses, - NewWorker: address.Undef, + NewWorker: address.Undef, WorkerChangeEpoch: -1, - PeerId: pid, - Multiaddrs: info.Multiaddrs, - WindowPoStProofType: {{if (ge .v 3)}}info.WindowPoStProofType{{else}}wpp{{end}}, - SectorSize: info.SectorSize, + PeerId: pid, + Multiaddrs: info.Multiaddrs, + WindowPoStProofType: {{if (ge .v 3)}}info.WindowPoStProofType{{else}}wpp{{end}}, + SectorSize: info.SectorSize, WindowPoStPartitionSectors: info.WindowPoStPartitionSectors, - ConsensusFaultElapsed: {{if (ge .v 2)}}info.ConsensusFaultElapsed{{else}}-1{{end}}, + ConsensusFaultElapsed: {{if (ge .v 2)}}info.ConsensusFaultElapsed{{else}}-1{{end}}, } if info.PendingWorkerKey != nil { @@ -477,16 +477,16 @@ func (p *partition{{.v}}) RecoveringSectors() (bitfield.BitField, error) { func fromV{{.v}}SectorOnChainInfo(v{{.v}} miner{{.v}}.SectorOnChainInfo) SectorOnChainInfo { {{if (ge .v 2)}} return SectorOnChainInfo{ - SectorNumber: v{{.v}}.SectorNumber, - SealProof: v{{.v}}.SealProof, - SealedCID: v{{.v}}.SealedCID, - DealIDs: v{{.v}}.DealIDs, - Activation: v{{.v}}.Activation, - Expiration: v{{.v}}.Expiration, - DealWeight: v{{.v}}.DealWeight, - VerifiedDealWeight: v{{.v}}.VerifiedDealWeight, - InitialPledge: v{{.v}}.InitialPledge, - ExpectedDayReward: v{{.v}}.ExpectedDayReward, + SectorNumber: v{{.v}}.SectorNumber, + SealProof: v{{.v}}.SealProof, + SealedCID: v{{.v}}.SealedCID, + DealIDs: v{{.v}}.DealIDs, + Activation: v{{.v}}.Activation, + Expiration: v{{.v}}.Expiration, + DealWeight: v{{.v}}.DealWeight, + VerifiedDealWeight: v{{.v}}.VerifiedDealWeight, + InitialPledge: v{{.v}}.InitialPledge, + ExpectedDayReward: v{{.v}}.ExpectedDayReward, ExpectedStoragePledge: v{{.v}}.ExpectedStoragePledge, } {{else}} @@ -497,10 +497,10 @@ func fromV{{.v}}SectorOnChainInfo(v{{.v}} miner{{.v}}.SectorOnChainInfo) SectorO func fromV{{.v}}SectorPreCommitOnChainInfo(v{{.v}} miner{{.v}}.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo { {{if (ge .v 2)}} return SectorPreCommitOnChainInfo{ - Info: (SectorPreCommitInfo)(v{{.v}}.Info), - PreCommitDeposit: v{{.v}}.PreCommitDeposit, - PreCommitEpoch: v{{.v}}.PreCommitEpoch, - DealWeight: v{{.v}}.DealWeight, + Info: (SectorPreCommitInfo)(v{{.v}}.Info), + 
PreCommitDeposit: v{{.v}}.PreCommitDeposit, + PreCommitEpoch: v{{.v}}.PreCommitEpoch, + DealWeight: v{{.v}}.DealWeight, VerifiedDealWeight: v{{.v}}.VerifiedDealWeight, } {{else}} @@ -510,4 +510,4 @@ func fromV{{.v}}SectorPreCommitOnChainInfo(v{{.v}} miner{{.v}}.SectorPreCommitOn func (s *state{{.v}}) GetState() interface{} { return &s.State -} \ No newline at end of file +} diff --git a/chain/actors/builtin/miner/temp b/chain/actors/builtin/miner/temp deleted file mode 100644 index e69de29bb..000000000 diff --git a/chain/actors/builtin/miner/v5.go b/chain/actors/builtin/miner/v5.go new file mode 100644 index 000000000..7996acf32 --- /dev/null +++ b/chain/actors/builtin/miner/v5.go @@ -0,0 +1,496 @@ +package miner + +import ( + "bytes" + "errors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/dline" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p-core/peer" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + adt5 "github.com/filecoin-project/specs-actors/v5/actors/util/adt" +) + +var _ State = (*state5)(nil) + +func load5(store adt.Store, root cid.Cid) (State, error) { + out := state5{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make5(store adt.Store) (State, error) { + out := state5{store: store} + out.State = miner5.State{} + return &out, nil +} + +type state5 struct { + miner5.State + store adt.Store +} + +type deadline5 struct { + miner5.Deadline + store adt.Store +} + +type partition5 struct { + miner5.Partition + store adt.Store +} + +func (s *state5) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmount, err error) { + defer func() { + if r := recover(); r != nil { + err = xerrors.Errorf("failed to get available balance: %w", r) + available = abi.NewTokenAmount(0) + } + }() + // this panics if the miner doesnt have enough funds to cover their locked pledge + available, err = s.GetAvailableBalance(bal) + return available, err +} + +func (s *state5) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.CheckVestedFunds(s.store, epoch) +} + +func (s *state5) LockedFunds() (LockedFunds, error) { + return LockedFunds{ + VestingFunds: s.State.LockedFunds, + InitialPledgeRequirement: s.State.InitialPledge, + PreCommitDeposits: s.State.PreCommitDeposits, + }, nil +} + +func (s *state5) FeeDebt() (abi.TokenAmount, error) { + return s.State.FeeDebt, nil +} + +func (s *state5) InitialPledge() (abi.TokenAmount, error) { + return s.State.InitialPledge, nil +} + +func (s *state5) PreCommitDeposits() (abi.TokenAmount, error) { + return s.State.PreCommitDeposits, nil +} + +func (s *state5) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) { + info, ok, err := s.State.GetSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV5SectorOnChainInfo(*info) + return &ret, nil +} + +func (s *state5) FindSector(num abi.SectorNumber) (*SectorLocation, error) { + dlIdx, partIdx, err := s.State.FindSector(s.store, num) + if err != nil { + return nil, err + } + return &SectorLocation{ + Deadline: dlIdx, + Partition: partIdx, + }, nil +} + +func (s *state5) NumLiveSectors() (uint64, error) { + 
dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return 0, err + } + var total uint64 + if err := dls.ForEach(s.store, func(dlIdx uint64, dl *miner5.Deadline) error { + total += dl.LiveSectors + return nil + }); err != nil { + return 0, err + } + return total, nil +} + +// GetSectorExpiration returns the effective expiration of the given sector. +// +// If the sector does not expire early, the Early expiration field is 0. +func (s *state5) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + // NOTE: this can be optimized significantly. + // 1. If the sector is non-faulty, it will either expire on-time (can be + // learned from the sector info), or in the next quantized expiration + // epoch (i.e., the first element in the partition's expiration queue. + // 2. If it's faulty, it will expire early within the first 14 entries + // of the expiration queue. + stopErr := errors.New("stop") + out := SectorExpiration{} + err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner5.Deadline) error { + partitions, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + quant := s.State.QuantSpecForDeadline(dlIdx) + var part miner5.Partition + return partitions.ForEach(&part, func(partIdx int64) error { + if found, err := part.Sectors.IsSet(uint64(num)); err != nil { + return err + } else if !found { + return nil + } + if found, err := part.Terminated.IsSet(uint64(num)); err != nil { + return err + } else if found { + // already terminated + return stopErr + } + + q, err := miner5.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant, miner5.PartitionExpirationAmtBitwidth) + if err != nil { + return err + } + var exp miner5.ExpirationSet + return q.ForEach(&exp, func(epoch int64) error { + if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil { + return err + } else if early { + out.Early = abi.ChainEpoch(epoch) + return nil + } + if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil { + return err + } else if onTime { + out.OnTime = abi.ChainEpoch(epoch) + return stopErr + } + return nil + }) + }) + }) + if err == stopErr { + err = nil + } + if err != nil { + return nil, err + } + if out.Early == 0 && out.OnTime == 0 { + return nil, xerrors.Errorf("failed to find sector %d", num) + } + return &out, nil +} + +func (s *state5) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOnChainInfo, error) { + info, ok, err := s.State.GetPrecommittedSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV5SectorPreCommitOnChainInfo(*info) + + return &ret, nil +} + +func (s *state5) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) { + sectors, err := miner5.LoadSectors(s.store, s.State.Sectors) + if err != nil { + return nil, err + } + + // If no sector numbers are specified, load all. + if snos == nil { + infos := make([]*SectorOnChainInfo, 0, sectors.Length()) + var info5 miner5.SectorOnChainInfo + if err := sectors.ForEach(&info5, func(_ int64) error { + info := fromV5SectorOnChainInfo(info5) + infos = append(infos, &info) + return nil + }); err != nil { + return nil, err + } + return infos, nil + } + + // Otherwise, load selected. 
+ infos5, err := sectors.Load(*snos) + if err != nil { + return nil, err + } + infos := make([]*SectorOnChainInfo, len(infos5)) + for i, info5 := range infos5 { + info := fromV5SectorOnChainInfo(*info5) + infos[i] = &info + } + return infos, nil +} + +func (s *state5) IsAllocated(num abi.SectorNumber) (bool, error) { + var allocatedSectors bitfield.BitField + if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { + return false, err + } + + return allocatedSectors.IsSet(uint64(num)) +} + +func (s *state5) GetProvingPeriodStart() (abi.ChainEpoch, error) { + return s.State.ProvingPeriodStart, nil +} + +func (s *state5) LoadDeadline(idx uint64) (Deadline, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + dl, err := dls.LoadDeadline(s.store, idx) + if err != nil { + return nil, err + } + return &deadline5{*dl, s.store}, nil +} + +func (s *state5) ForEachDeadline(cb func(uint64, Deadline) error) error { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + return dls.ForEach(s.store, func(i uint64, dl *miner5.Deadline) error { + return cb(i, &deadline5{*dl, s.store}) + }) +} + +func (s *state5) NumDeadlines() (uint64, error) { + return miner5.WPoStPeriodDeadlines, nil +} + +func (s *state5) DeadlinesChanged(other State) (bool, error) { + other5, ok := other.(*state5) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !s.State.Deadlines.Equals(other5.Deadlines), nil +} + +func (s *state5) MinerInfoChanged(other State) (bool, error) { + other0, ok := other.(*state5) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Info.Equals(other0.State.Info), nil +} + +func (s *state5) Info() (MinerInfo, error) { + info, err := s.State.GetInfo(s.store) + if err != nil { + return MinerInfo{}, err + } + + var pid *peer.ID + if peerID, err := peer.IDFromBytes(info.PeerId); err == nil { + pid = &peerID + } + + mi := MinerInfo{ + Owner: info.Owner, + Worker: info.Worker, + ControlAddresses: info.ControlAddresses, + + NewWorker: address.Undef, + WorkerChangeEpoch: -1, + + PeerId: pid, + Multiaddrs: info.Multiaddrs, + WindowPoStProofType: info.WindowPoStProofType, + SectorSize: info.SectorSize, + WindowPoStPartitionSectors: info.WindowPoStPartitionSectors, + ConsensusFaultElapsed: info.ConsensusFaultElapsed, + } + + if info.PendingWorkerKey != nil { + mi.NewWorker = info.PendingWorkerKey.NewWorker + mi.WorkerChangeEpoch = info.PendingWorkerKey.EffectiveAt + } + + return mi, nil +} + +func (s *state5) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) { + return s.State.RecordedDeadlineInfo(epoch), nil +} + +func (s *state5) DeadlineCronActive() (bool, error) { + return s.State.DeadlineCronActive, nil +} + +func (s *state5) sectors() (adt.Array, error) { + return adt5.AsArray(s.store, s.Sectors, miner5.SectorsAmtBitwidth) +} + +func (s *state5) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) { + var si miner5.SectorOnChainInfo + err := si.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return SectorOnChainInfo{}, err + } + + return fromV5SectorOnChainInfo(si), nil +} + +func (s *state5) precommits() (adt.Map, error) { + return adt5.AsMap(s.store, s.PreCommittedSectors, builtin5.DefaultHamtBitwidth) +} + +func (s *state5) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreCommitOnChainInfo, error) { + var sp miner5.SectorPreCommitOnChainInfo + err := 
sp.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return SectorPreCommitOnChainInfo{}, err + } + + return fromV5SectorPreCommitOnChainInfo(sp), nil +} + +func (s *state5) EraseAllUnproven() error { + + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + + err = dls.ForEach(s.store, func(dindx uint64, dl *miner5.Deadline) error { + ps, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + + var part miner5.Partition + err = ps.ForEach(&part, func(pindx int64) error { + _ = part.ActivateUnproven() + return ps.Set(uint64(pindx), &part) + }) + + if err != nil { + return err + } + + dl.Partitions, err = ps.Root() + if err != nil { + return err + } + + return dls.UpdateDeadline(s.store, dindx, dl) + }) + if err != nil { + return err + } + + return s.State.SaveDeadlines(s.store, dls) +} + +func (d *deadline5) LoadPartition(idx uint64) (Partition, error) { + p, err := d.Deadline.LoadPartition(d.store, idx) + if err != nil { + return nil, err + } + return &partition5{*p, d.store}, nil +} + +func (d *deadline5) ForEachPartition(cb func(uint64, Partition) error) error { + ps, err := d.Deadline.PartitionsArray(d.store) + if err != nil { + return err + } + var part miner5.Partition + return ps.ForEach(&part, func(i int64) error { + return cb(uint64(i), &partition5{part, d.store}) + }) +} + +func (d *deadline5) PartitionsChanged(other Deadline) (bool, error) { + other5, ok := other.(*deadline5) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !d.Deadline.Partitions.Equals(other5.Deadline.Partitions), nil +} + +func (d *deadline5) PartitionsPoSted() (bitfield.BitField, error) { + return d.Deadline.PartitionsPoSted, nil +} + +func (d *deadline5) DisputableProofCount() (uint64, error) { + + ops, err := d.OptimisticProofsSnapshotArray(d.store) + if err != nil { + return 0, err + } + + return ops.Length(), nil + +} + +func (p *partition5) AllSectors() (bitfield.BitField, error) { + return p.Partition.Sectors, nil +} + +func (p *partition5) FaultySectors() (bitfield.BitField, error) { + return p.Partition.Faults, nil +} + +func (p *partition5) RecoveringSectors() (bitfield.BitField, error) { + return p.Partition.Recoveries, nil +} + +func fromV5SectorOnChainInfo(v5 miner5.SectorOnChainInfo) SectorOnChainInfo { + + return SectorOnChainInfo{ + SectorNumber: v5.SectorNumber, + SealProof: v5.SealProof, + SealedCID: v5.SealedCID, + DealIDs: v5.DealIDs, + Activation: v5.Activation, + Expiration: v5.Expiration, + DealWeight: v5.DealWeight, + VerifiedDealWeight: v5.VerifiedDealWeight, + InitialPledge: v5.InitialPledge, + ExpectedDayReward: v5.ExpectedDayReward, + ExpectedStoragePledge: v5.ExpectedStoragePledge, + } + +} + +func fromV5SectorPreCommitOnChainInfo(v5 miner5.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo { + + return SectorPreCommitOnChainInfo{ + Info: (SectorPreCommitInfo)(v5.Info), + PreCommitDeposit: v5.PreCommitDeposit, + PreCommitEpoch: v5.PreCommitEpoch, + DealWeight: v5.DealWeight, + VerifiedDealWeight: v5.VerifiedDealWeight, + } + +} + +func (s *state5) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/multisig/actor.go.template b/chain/actors/builtin/multisig/actor.go.template index 3af270c60..77bc13f67 100644 --- a/chain/actors/builtin/multisig/actor.go.template +++ b/chain/actors/builtin/multisig/actor.go.template @@ -12,7 +12,8 @@ import ( "github.com/filecoin-project/go-state-types/cbor" "github.com/ipfs/go-cid" - msig{{.latestVersion}} 
"github.com/filecoin-project/specs-actors/v4/actors/builtin/multisig" + msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" + msig{{.latestVersion}} "github.com/filecoin-project/specs-actors{{import .latestVersion}}actors/builtin/multisig" {{range .versions}} builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" {{end}} @@ -79,7 +80,7 @@ type State interface { GetState() interface{} } -type Transaction = msig{{.latestVersion}}.Transaction +type Transaction = msig0.Transaction var Methods = builtin{{.latestVersion}}.MethodsMultisig @@ -88,7 +89,7 @@ func Message(version actors.Version, from address.Address) MessageBuilder { {{range .versions}} case actors.Version{{.}}: return message{{.}}{{"{"}}{{if (ge . 2)}}message0{from}{{else}}from{{end}}} -{{end}} default: +{{end}} default: panic(fmt.Sprintf("unsupported actors version: %d", version)) } } diff --git a/chain/actors/builtin/multisig/message.go.template b/chain/actors/builtin/multisig/message.go.template index 917e6944b..6bff8983a 100644 --- a/chain/actors/builtin/multisig/message.go.template +++ b/chain/actors/builtin/multisig/message.go.template @@ -43,10 +43,10 @@ func (m message{{.v}}) Create( {{end}} // Set up constructor parameters for multisig msigParams := &multisig{{.v}}.ConstructorParams{ - Signers: signers, + Signers: signers, NumApprovalsThreshold: threshold, - UnlockDuration: unlockDuration,{{if (ge .v 2)}} - StartEpoch: unlockStart,{{end}} + UnlockDuration: unlockDuration,{{if (ge .v 2)}} + StartEpoch: unlockStart,{{end}} } enc, actErr := actors.SerializeParams(msigParams) @@ -56,7 +56,7 @@ func (m message{{.v}}) Create( // new actors are created by invoking 'exec' on the init actor with the constructor params execParams := &init{{.v}}.ExecParams{ - CodeCID: builtin{{.v}}.MultisigActorCodeID, + CodeCID: builtin{{.v}}.MultisigActorCodeID, ConstructorParams: enc, } @@ -66,11 +66,11 @@ func (m message{{.v}}) Create( } return &types.Message{ - To: init_.Address, - From: m.from, + To: init_.Address, + From: m.from, Method: builtin{{.v}}.MethodsInit.Exec, Params: enc, - Value: initialAmount, + Value: initialAmount, }, nil } @@ -96,8 +96,8 @@ func (m message0) Propose(msig, to address.Address, amt abi.TokenAmount, } enc, actErr := actors.SerializeParams(&multisig0.ProposeParams{ - To: to, - Value: amt, + To: to, + Value: amt, Method: method, Params: params, }) @@ -106,9 +106,9 @@ func (m message0) Propose(msig, to address.Address, amt abi.TokenAmount, } return &types.Message{ - To: msig, - From: m.from, - Value: abi.NewTokenAmount(0), + To: msig, + From: m.from, + Value: abi.NewTokenAmount(0), Method: builtin0.MethodsMultisig.Propose, Params: enc, }, nil @@ -121,9 +121,9 @@ func (m message0) Approve(msig address.Address, txID uint64, hashData *ProposalH } return &types.Message{ - To: msig, - From: m.from, - Value: types.NewInt(0), + To: msig, + From: m.from, + Value: types.NewInt(0), Method: builtin0.MethodsMultisig.Approve, Params: enc, }, nil @@ -136,9 +136,9 @@ func (m message0) Cancel(msig address.Address, txID uint64, hashData *ProposalHa } return &types.Message{ - To: msig, - From: m.from, - Value: types.NewInt(0), + To: msig, + From: m.from, + Value: types.NewInt(0), Method: builtin0.MethodsMultisig.Cancel, Params: enc, }, nil diff --git a/chain/actors/builtin/multisig/message5.go b/chain/actors/builtin/multisig/message5.go new file mode 100644 index 000000000..9a8110f2c --- /dev/null +++ b/chain/actors/builtin/multisig/message5.go @@ -0,0 +1,71 @@ +package multisig + +import 
( + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + init5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/init" + multisig5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/multisig" + + "github.com/filecoin-project/lotus/chain/actors" + init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" + "github.com/filecoin-project/lotus/chain/types" +) + +type message5 struct{ message0 } + +func (m message5) Create( + signers []address.Address, threshold uint64, + unlockStart, unlockDuration abi.ChainEpoch, + initialAmount abi.TokenAmount, +) (*types.Message, error) { + + lenAddrs := uint64(len(signers)) + + if lenAddrs < threshold { + return nil, xerrors.Errorf("cannot require signing of more addresses than provided for multisig") + } + + if threshold == 0 { + threshold = lenAddrs + } + + if m.from == address.Undef { + return nil, xerrors.Errorf("must provide source address") + } + + // Set up constructor parameters for multisig + msigParams := &multisig5.ConstructorParams{ + Signers: signers, + NumApprovalsThreshold: threshold, + UnlockDuration: unlockDuration, + StartEpoch: unlockStart, + } + + enc, actErr := actors.SerializeParams(msigParams) + if actErr != nil { + return nil, actErr + } + + // new actors are created by invoking 'exec' on the init actor with the constructor params + execParams := &init5.ExecParams{ + CodeCID: builtin5.MultisigActorCodeID, + ConstructorParams: enc, + } + + enc, actErr = actors.SerializeParams(execParams) + if actErr != nil { + return nil, actErr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Method: builtin5.MethodsInit.Exec, + Params: enc, + Value: initialAmount, + }, nil +} diff --git a/chain/actors/builtin/multisig/multisig.go b/chain/actors/builtin/multisig/multisig.go index fd773f398..ae6a9daf3 100644 --- a/chain/actors/builtin/multisig/multisig.go +++ b/chain/actors/builtin/multisig/multisig.go @@ -12,7 +12,8 @@ import ( "github.com/filecoin-project/go-state-types/cbor" "github.com/ipfs/go-cid" - msig4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/multisig" + msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" + msig5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/multisig" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" @@ -22,6 +23,8 @@ import ( builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" @@ -45,6 +48,10 @@ func init() { builtin.RegisterActorState(builtin4.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load4(store, root) }) + + builtin.RegisterActorState(builtin5.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load5(store, root) + }) } func Load(store adt.Store, act *types.Actor) (State, error) { @@ -62,6 +69,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case builtin4.MultisigActorCodeID: return load4(store, act.Head) + case builtin5.MultisigActorCodeID: + return load5(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } @@ -81,6 +91,9 @@ func MakeState(store adt.Store, av actors.Version, 
signers []address.Address, th case actors.Version4: return make4(store, signers, threshold, startEpoch, unlockDuration, initialBalance) + case actors.Version5: + return make5(store, signers, threshold, startEpoch, unlockDuration, initialBalance) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -100,6 +113,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version4: return builtin4.MultisigActorCodeID, nil + case actors.Version5: + return builtin5.MultisigActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) @@ -123,9 +139,9 @@ type State interface { GetState() interface{} } -type Transaction = msig4.Transaction +type Transaction = msig0.Transaction -var Methods = builtin4.MethodsMultisig +var Methods = builtin5.MethodsMultisig func Message(version actors.Version, from address.Address) MessageBuilder { switch version { @@ -141,6 +157,9 @@ func Message(version actors.Version, from address.Address) MessageBuilder { case actors.Version4: return message4{message0{from}} + + case actors.Version5: + return message5{message0{from}} default: panic(fmt.Sprintf("unsupported actors version: %d", version)) } @@ -164,12 +183,12 @@ type MessageBuilder interface { } // this type is the same between v0 and v2 -type ProposalHashData = msig4.ProposalHashData -type ProposeReturn = msig4.ProposeReturn -type ProposeParams = msig4.ProposeParams +type ProposalHashData = msig5.ProposalHashData +type ProposeReturn = msig5.ProposeReturn +type ProposeParams = msig5.ProposeParams func txnParams(id uint64, data *ProposalHashData) ([]byte, error) { - params := msig4.TxnIDParams{ID: msig4.TxnID(id)} + params := msig5.TxnIDParams{ID: msig5.TxnID(id)} if data != nil { if data.Requester.Protocol() != address.ID { return nil, xerrors.Errorf("proposer address must be an ID address, was %s", data.Requester) diff --git a/chain/actors/builtin/multisig/state.go.template b/chain/actors/builtin/multisig/state.go.template index 067415533..6c0130c09 100644 --- a/chain/actors/builtin/multisig/state.go.template +++ b/chain/actors/builtin/multisig/state.go.template @@ -124,4 +124,4 @@ func (s *state{{.v}}) decodeTransaction(val *cbg.Deferred) (Transaction, error) func (s *state{{.v}}) GetState() interface{} { return &s.State -} \ No newline at end of file +} diff --git a/chain/actors/builtin/multisig/temp b/chain/actors/builtin/multisig/temp deleted file mode 100644 index e69de29bb..000000000 diff --git a/chain/actors/builtin/multisig/v5.go b/chain/actors/builtin/multisig/v5.go new file mode 100644 index 000000000..4ad9aea94 --- /dev/null +++ b/chain/actors/builtin/multisig/v5.go @@ -0,0 +1,119 @@ +package multisig + +import ( + "bytes" + "encoding/binary" + + adt5 "github.com/filecoin-project/specs-actors/v5/actors/util/adt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + + msig5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/multisig" +) + +var _ State = (*state5)(nil) + +func load5(store adt.Store, root cid.Cid) (State, error) { + out := state5{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make5(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, 
unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) { + out := state5{store: store} + out.State = msig5.State{} + out.State.Signers = signers + out.State.NumApprovalsThreshold = threshold + out.State.StartEpoch = startEpoch + out.State.UnlockDuration = unlockDuration + out.State.InitialBalance = initialBalance + + em, err := adt5.StoreEmptyMap(store, builtin5.DefaultHamtBitwidth) + if err != nil { + return nil, err + } + + out.State.PendingTxns = em + + return &out, nil +} + +type state5 struct { + msig5.State + store adt.Store +} + +func (s *state5) LockedBalance(currEpoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.State.AmountLocked(currEpoch - s.State.StartEpoch), nil +} + +func (s *state5) StartEpoch() (abi.ChainEpoch, error) { + return s.State.StartEpoch, nil +} + +func (s *state5) UnlockDuration() (abi.ChainEpoch, error) { + return s.State.UnlockDuration, nil +} + +func (s *state5) InitialBalance() (abi.TokenAmount, error) { + return s.State.InitialBalance, nil +} + +func (s *state5) Threshold() (uint64, error) { + return s.State.NumApprovalsThreshold, nil +} + +func (s *state5) Signers() ([]address.Address, error) { + return s.State.Signers, nil +} + +func (s *state5) ForEachPendingTxn(cb func(id int64, txn Transaction) error) error { + arr, err := adt5.AsMap(s.store, s.State.PendingTxns, builtin5.DefaultHamtBitwidth) + if err != nil { + return err + } + var out msig5.Transaction + return arr.ForEach(&out, func(key string) error { + txid, n := binary.Varint([]byte(key)) + if n <= 0 { + return xerrors.Errorf("invalid pending transaction key: %v", key) + } + return cb(txid, (Transaction)(out)) //nolint:unconvert + }) +} + +func (s *state5) PendingTxnChanged(other State) (bool, error) { + other5, ok := other.(*state5) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.PendingTxns.Equals(other5.PendingTxns), nil +} + +func (s *state5) transactions() (adt.Map, error) { + return adt5.AsMap(s.store, s.PendingTxns, builtin5.DefaultHamtBitwidth) +} + +func (s *state5) decodeTransaction(val *cbg.Deferred) (Transaction, error) { + var tx msig5.Transaction + if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Transaction{}, err + } + return tx, nil +} + +func (s *state5) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/paych/message.go.template b/chain/actors/builtin/paych/message.go.template index cb111d910..4a5ea2331 100644 --- a/chain/actors/builtin/paych/message.go.template +++ b/chain/actors/builtin/paych/message.go.template @@ -21,7 +21,7 @@ func (m message{{.v}}) Create(to address.Address, initialAmount abi.TokenAmount) return nil, aerr } enc, aerr := actors.SerializeParams(&init{{.v}}.ExecParams{ - CodeCID: builtin{{.v}}.PaymentChannelActorCodeID, + CodeCID: builtin{{.v}}.PaymentChannelActorCodeID, ConstructorParams: params, }) if aerr != nil { @@ -29,9 +29,9 @@ func (m message{{.v}}) Create(to address.Address, initialAmount abi.TokenAmount) } return &types.Message{ - To: init_.Address, - From: m.from, - Value: initialAmount, + To: init_.Address, + From: m.from, + Value: initialAmount, Method: builtin{{.v}}.MethodsInit.Exec, Params: enc, }, nil @@ -39,7 +39,7 @@ func (m message{{.v}}) Create(to address.Address, initialAmount abi.TokenAmount) func (m message{{.v}}) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) { params, aerr := actors.SerializeParams(&paych{{.v}}.UpdateChannelStateParams{ - Sv: *sv, + Sv: *sv, 
Secret: secret, }) if aerr != nil { @@ -47,9 +47,9 @@ func (m message{{.v}}) Update(paych address.Address, sv *SignedVoucher, secret [ } return &types.Message{ - To: paych, - From: m.from, - Value: abi.NewTokenAmount(0), + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), Method: builtin{{.v}}.MethodsPaych.UpdateChannelState, Params: params, }, nil @@ -57,18 +57,18 @@ func (m message{{.v}}) Update(paych address.Address, sv *SignedVoucher, secret [ func (m message{{.v}}) Settle(paych address.Address) (*types.Message, error) { return &types.Message{ - To: paych, - From: m.from, - Value: abi.NewTokenAmount(0), + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), Method: builtin{{.v}}.MethodsPaych.Settle, }, nil } func (m message{{.v}}) Collect(paych address.Address) (*types.Message, error) { return &types.Message{ - To: paych, - From: m.from, - Value: abi.NewTokenAmount(0), + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), Method: builtin{{.v}}.MethodsPaych.Collect, }, nil } diff --git a/chain/actors/builtin/paych/message5.go b/chain/actors/builtin/paych/message5.go new file mode 100644 index 000000000..37a2b6f04 --- /dev/null +++ b/chain/actors/builtin/paych/message5.go @@ -0,0 +1,74 @@ +package paych + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + init5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/init" + paych5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/paych" + + "github.com/filecoin-project/lotus/chain/actors" + init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" + "github.com/filecoin-project/lotus/chain/types" +) + +type message5 struct{ from address.Address } + +func (m message5) Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) { + params, aerr := actors.SerializeParams(&paych5.ConstructorParams{From: m.from, To: to}) + if aerr != nil { + return nil, aerr + } + enc, aerr := actors.SerializeParams(&init5.ExecParams{ + CodeCID: builtin5.PaymentChannelActorCodeID, + ConstructorParams: params, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Value: initialAmount, + Method: builtin5.MethodsInit.Exec, + Params: enc, + }, nil +} + +func (m message5) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) { + params, aerr := actors.SerializeParams(&paych5.UpdateChannelStateParams{ + Sv: *sv, + Secret: secret, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin5.MethodsPaych.UpdateChannelState, + Params: params, + }, nil +} + +func (m message5) Settle(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin5.MethodsPaych.Settle, + }, nil +} + +func (m message5) Collect(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin5.MethodsPaych.Collect, + }, nil +} diff --git a/chain/actors/builtin/paych/mock/temp b/chain/actors/builtin/paych/mock/temp deleted file mode 100644 index e69de29bb..000000000 diff --git a/chain/actors/builtin/paych/paych.go b/chain/actors/builtin/paych/paych.go index 63638cda1..d87f70f0c 100644 --- a/chain/actors/builtin/paych/paych.go +++ 
b/chain/actors/builtin/paych/paych.go @@ -23,6 +23,8 @@ import ( builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" @@ -46,6 +48,10 @@ func init() { builtin.RegisterActorState(builtin4.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load4(store, root) }) + + builtin.RegisterActorState(builtin5.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load5(store, root) + }) } // Load returns an abstract copy of payment channel state, irregardless of actor version @@ -64,6 +70,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case builtin4.PaymentChannelActorCodeID: return load4(store, act.Head) + case builtin5.PaymentChannelActorCodeID: + return load5(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } @@ -83,6 +92,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) { case actors.Version4: return make4(store) + case actors.Version5: + return make5(store) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -102,6 +114,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version4: return builtin4.PaymentChannelActorCodeID, nil + case actors.Version5: + return builtin5.PaymentChannelActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) @@ -155,7 +170,7 @@ func DecodeSignedVoucher(s string) (*SignedVoucher, error) { return &sv, nil } -var Methods = builtin4.MethodsPaych +var Methods = builtin5.MethodsPaych func Message(version actors.Version, from address.Address) MessageBuilder { switch version { @@ -172,6 +187,9 @@ func Message(version actors.Version, from address.Address) MessageBuilder { case actors.Version4: return message4{from} + case actors.Version5: + return message5{from} + default: panic(fmt.Sprintf("unsupported actors version: %d", version)) } diff --git a/chain/actors/builtin/paych/temp b/chain/actors/builtin/paych/temp deleted file mode 100644 index e69de29bb..000000000 diff --git a/chain/actors/builtin/paych/v5.go b/chain/actors/builtin/paych/v5.go new file mode 100644 index 000000000..b331a1500 --- /dev/null +++ b/chain/actors/builtin/paych/v5.go @@ -0,0 +1,114 @@ +package paych + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + paych5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/paych" + adt5 "github.com/filecoin-project/specs-actors/v5/actors/util/adt" +) + +var _ State = (*state5)(nil) + +func load5(store adt.Store, root cid.Cid) (State, error) { + out := state5{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make5(store adt.Store) (State, error) { + out := state5{store: store} + out.State = paych5.State{} + return &out, nil +} + +type state5 struct { + paych5.State + store adt.Store + lsAmt *adt5.Array +} + +// Channel owner, who has funded the actor +func (s *state5) From() (address.Address, error) { + return s.State.From, nil +} + +// Recipient of payouts from channel +func (s *state5) To() (address.Address, 
error) { + return s.State.To, nil +} + +// Height at which the channel can be `Collected` +func (s *state5) SettlingAt() (abi.ChainEpoch, error) { + return s.State.SettlingAt, nil +} + +// Amount successfully redeemed through the payment channel, paid out on `Collect()` +func (s *state5) ToSend() (abi.TokenAmount, error) { + return s.State.ToSend, nil +} + +func (s *state5) getOrLoadLsAmt() (*adt5.Array, error) { + if s.lsAmt != nil { + return s.lsAmt, nil + } + + // Get the lane state from the chain + lsamt, err := adt5.AsArray(s.store, s.State.LaneStates, paych5.LaneStatesAmtBitwidth) + if err != nil { + return nil, err + } + + s.lsAmt = lsamt + return lsamt, nil +} + +// Get total number of lanes +func (s *state5) LaneCount() (uint64, error) { + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return 0, err + } + return lsamt.Length(), nil +} + +func (s *state5) GetState() interface{} { + return &s.State +} + +// Iterate lane states +func (s *state5) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error { + // Get the lane state from the chain + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return err + } + + // Note: we use a map instead of an array to store laneStates because the + // client sets the lane ID (the index) and potentially they could use a + // very large index. + var ls paych5.LaneState + return lsamt.ForEach(&ls, func(i int64) error { + return cb(uint64(i), &laneState5{ls}) + }) +} + +type laneState5 struct { + paych5.LaneState +} + +func (ls *laneState5) Redeemed() (big.Int, error) { + return ls.LaneState.Redeemed, nil +} + +func (ls *laneState5) Nonce() (uint64, error) { + return ls.LaneState.Nonce, nil +} diff --git a/chain/actors/builtin/power/actor.go.template b/chain/actors/builtin/power/actor.go.template index 7ff3d0387..fe11fc160 100644 --- a/chain/actors/builtin/power/actor.go.template +++ b/chain/actors/builtin/power/actor.go.template @@ -101,7 +101,7 @@ type Claim struct { func AddClaims(a Claim, b Claim) Claim { return Claim{ - RawBytePower: big.Add(a.RawBytePower, b.RawBytePower), + RawBytePower: big.Add(a.RawBytePower, b.RawBytePower), QualityAdjPower: big.Add(a.QualityAdjPower, b.QualityAdjPower), } } diff --git a/chain/actors/builtin/power/power.go b/chain/actors/builtin/power/power.go index 69ed6cf89..5b4aa1b04 100644 --- a/chain/actors/builtin/power/power.go +++ b/chain/actors/builtin/power/power.go @@ -22,6 +22,8 @@ import ( builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" ) func init() { @@ -41,11 +43,15 @@ func init() { builtin.RegisterActorState(builtin4.StoragePowerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load4(store, root) }) + + builtin.RegisterActorState(builtin5.StoragePowerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load5(store, root) + }) } var ( - Address = builtin4.StoragePowerActorAddr - Methods = builtin4.MethodsPower + Address = builtin5.StoragePowerActorAddr + Methods = builtin5.MethodsPower ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -63,6 +69,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case builtin4.StoragePowerActorCodeID: return load4(store, act.Head) + case builtin5.StoragePowerActorCodeID: + return load5(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } @@ -82,6 +91,9 @@ func 
MakeState(store adt.Store, av actors.Version) (State, error) { case actors.Version4: return make4(store) + case actors.Version5: + return make5(store) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -101,6 +113,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version4: return builtin4.StoragePowerActorCodeID, nil + case actors.Version5: + return builtin5.StoragePowerActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) diff --git a/chain/actors/builtin/power/state.go.template b/chain/actors/builtin/power/state.go.template index d0abba3fa..fcdc5c350 100644 --- a/chain/actors/builtin/power/state.go.template +++ b/chain/actors/builtin/power/state.go.template @@ -66,7 +66,7 @@ func (s *state{{.v}}) TotalLocked() (abi.TokenAmount, error) { func (s *state{{.v}}) TotalPower() (Claim, error) { return Claim{ - RawBytePower: s.TotalRawBytePower, + RawBytePower: s.TotalRawBytePower, QualityAdjPower: s.TotalQualityAdjPower, }, nil } @@ -74,7 +74,7 @@ func (s *state{{.v}}) TotalPower() (Claim, error) { // Committed power to the network. Includes miners below the minimum threshold. func (s *state{{.v}}) TotalCommitted() (Claim, error) { return Claim{ - RawBytePower: s.TotalBytesCommitted, + RawBytePower: s.TotalBytesCommitted, QualityAdjPower: s.TotalQABytesCommitted, }, nil } @@ -90,7 +90,7 @@ func (s *state{{.v}}) MinerPower(addr address.Address) (Claim, bool, error) { return Claim{}, false, err } return Claim{ - RawBytePower: claim.RawBytePower, + RawBytePower: claim.RawBytePower, QualityAdjPower: claim.QualityAdjPower, }, ok, nil } @@ -142,7 +142,7 @@ func (s *state{{.v}}) ForEachClaim(cb func(miner address.Address, claim Claim) e return err } return cb(a, Claim{ - RawBytePower: claim.RawBytePower, + RawBytePower: claim.RawBytePower, QualityAdjPower: claim.QualityAdjPower, }) }) @@ -195,7 +195,7 @@ func (s *state{{.v}}) decodeClaim(val *cbg.Deferred) (Claim, error) { func fromV{{.v}}Claim(v{{.v}} power{{.v}}.Claim) Claim { return Claim{ - RawBytePower: v{{.v}}.RawBytePower, + RawBytePower: v{{.v}}.RawBytePower, QualityAdjPower: v{{.v}}.QualityAdjPower, } } diff --git a/chain/actors/builtin/power/temp b/chain/actors/builtin/power/temp deleted file mode 100644 index e69de29bb..000000000 diff --git a/chain/actors/builtin/power/v5.go b/chain/actors/builtin/power/v5.go new file mode 100644 index 000000000..84b23a577 --- /dev/null +++ b/chain/actors/builtin/power/v5.go @@ -0,0 +1,187 @@ +package power + +import ( + "bytes" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + + power5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/power" + adt5 "github.com/filecoin-project/specs-actors/v5/actors/util/adt" +) + +var _ State = (*state5)(nil) + +func load5(store adt.Store, root cid.Cid) (State, error) { + out := state5{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make5(store adt.Store) (State, error) { + out := state5{store: store} + + s, err := power5.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state5 struct { + power5.State + store adt.Store +} + +func (s *state5) 
TotalLocked() (abi.TokenAmount, error) { + return s.TotalPledgeCollateral, nil +} + +func (s *state5) TotalPower() (Claim, error) { + return Claim{ + RawBytePower: s.TotalRawBytePower, + QualityAdjPower: s.TotalQualityAdjPower, + }, nil +} + +// Committed power to the network. Includes miners below the minimum threshold. +func (s *state5) TotalCommitted() (Claim, error) { + return Claim{ + RawBytePower: s.TotalBytesCommitted, + QualityAdjPower: s.TotalQABytesCommitted, + }, nil +} + +func (s *state5) MinerPower(addr address.Address) (Claim, bool, error) { + claims, err := s.claims() + if err != nil { + return Claim{}, false, err + } + var claim power5.Claim + ok, err := claims.Get(abi.AddrKey(addr), &claim) + if err != nil { + return Claim{}, false, err + } + return Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }, ok, nil +} + +func (s *state5) MinerNominalPowerMeetsConsensusMinimum(a address.Address) (bool, error) { + return s.State.MinerNominalPowerMeetsConsensusMinimum(s.store, a) +} + +func (s *state5) TotalPowerSmoothed() (builtin.FilterEstimate, error) { + return builtin.FromV5FilterEstimate(s.State.ThisEpochQAPowerSmoothed), nil +} + +func (s *state5) MinerCounts() (uint64, uint64, error) { + return uint64(s.State.MinerAboveMinPowerCount), uint64(s.State.MinerCount), nil +} + +func (s *state5) ListAllMiners() ([]address.Address, error) { + claims, err := s.claims() + if err != nil { + return nil, err + } + + var miners []address.Address + err = claims.ForEach(nil, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + miners = append(miners, a) + return nil + }) + if err != nil { + return nil, err + } + + return miners, nil +} + +func (s *state5) ForEachClaim(cb func(miner address.Address, claim Claim) error) error { + claims, err := s.claims() + if err != nil { + return err + } + + var claim power5.Claim + return claims.ForEach(&claim, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + return cb(a, Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }) + }) +} + +func (s *state5) ClaimsChanged(other State) (bool, error) { + other5, ok := other.(*state5) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Claims.Equals(other5.State.Claims), nil +} + +func (s *state5) SetTotalQualityAdjPower(p abi.StoragePower) error { + s.State.TotalQualityAdjPower = p + return nil +} + +func (s *state5) SetTotalRawBytePower(p abi.StoragePower) error { + s.State.TotalRawBytePower = p + return nil +} + +func (s *state5) SetThisEpochQualityAdjPower(p abi.StoragePower) error { + s.State.ThisEpochQualityAdjPower = p + return nil +} + +func (s *state5) SetThisEpochRawBytePower(p abi.StoragePower) error { + s.State.ThisEpochRawBytePower = p + return nil +} + +func (s *state5) GetState() interface{} { + return &s.State +} + +func (s *state5) claims() (adt.Map, error) { + return adt5.AsMap(s.store, s.Claims, builtin5.DefaultHamtBitwidth) +} + +func (s *state5) decodeClaim(val *cbg.Deferred) (Claim, error) { + var ci power5.Claim + if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Claim{}, err + } + return fromV5Claim(ci), nil +} + +func fromV5Claim(v5 power5.Claim) Claim { + return Claim{ + RawBytePower: v5.RawBytePower, + QualityAdjPower: v5.QualityAdjPower, + } +} diff --git a/chain/actors/builtin/reward/reward.go b/chain/actors/builtin/reward/reward.go index 
c325cc7b6..ebec85517 100644 --- a/chain/actors/builtin/reward/reward.go +++ b/chain/actors/builtin/reward/reward.go @@ -17,6 +17,8 @@ import ( builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/types" @@ -39,11 +41,15 @@ func init() { builtin.RegisterActorState(builtin4.RewardActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load4(store, root) }) + + builtin.RegisterActorState(builtin5.RewardActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load5(store, root) + }) } var ( - Address = builtin4.RewardActorAddr - Methods = builtin4.MethodsReward + Address = builtin5.RewardActorAddr + Methods = builtin5.MethodsReward ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -61,6 +67,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case builtin4.RewardActorCodeID: return load4(store, act.Head) + case builtin5.RewardActorCodeID: + return load5(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } @@ -80,6 +89,9 @@ func MakeState(store adt.Store, av actors.Version, currRealizedPower abi.Storage case actors.Version4: return make4(store, currRealizedPower) + case actors.Version5: + return make5(store, currRealizedPower) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -99,6 +111,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version4: return builtin4.RewardActorCodeID, nil + case actors.Version5: + return builtin5.RewardActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) diff --git a/chain/actors/builtin/reward/state.go.template b/chain/actors/builtin/reward/state.go.template index 67bfd5c85..2bc271cbb 100644 --- a/chain/actors/builtin/reward/state.go.template +++ b/chain/actors/builtin/reward/state.go.template @@ -110,4 +110,4 @@ func (s *state{{.v}}) PreCommitDepositForPower(networkQAPower builtin.FilterEsti func (s *state{{.v}}) GetState() interface{} { return &s.State -} \ No newline at end of file +} diff --git a/chain/actors/builtin/reward/temp b/chain/actors/builtin/reward/temp deleted file mode 100644 index e69de29bb..000000000 diff --git a/chain/actors/builtin/reward/v5.go b/chain/actors/builtin/reward/v5.go new file mode 100644 index 000000000..7200f7d11 --- /dev/null +++ b/chain/actors/builtin/reward/v5.go @@ -0,0 +1,98 @@ +package reward + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" + + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + reward5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/reward" + smoothing5 "github.com/filecoin-project/specs-actors/v5/actors/util/smoothing" +) + +var _ State = (*state5)(nil) + +func load5(store adt.Store, root cid.Cid) (State, error) { + out := state5{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make5(store adt.Store, currRealizedPower abi.StoragePower) (State, error) { + out := state5{store: store} + out.State = *reward5.ConstructState(currRealizedPower) + return &out, nil +} + +type state5 struct { + reward5.State + store 
adt.Store +} + +func (s *state5) ThisEpochReward() (abi.TokenAmount, error) { + return s.State.ThisEpochReward, nil +} + +func (s *state5) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) { + + return builtin.FilterEstimate{ + PositionEstimate: s.State.ThisEpochRewardSmoothed.PositionEstimate, + VelocityEstimate: s.State.ThisEpochRewardSmoothed.VelocityEstimate, + }, nil + +} + +func (s *state5) ThisEpochBaselinePower() (abi.StoragePower, error) { + return s.State.ThisEpochBaselinePower, nil +} + +func (s *state5) TotalStoragePowerReward() (abi.TokenAmount, error) { + return s.State.TotalStoragePowerReward, nil +} + +func (s *state5) EffectiveBaselinePower() (abi.StoragePower, error) { + return s.State.EffectiveBaselinePower, nil +} + +func (s *state5) EffectiveNetworkTime() (abi.ChainEpoch, error) { + return s.State.EffectiveNetworkTime, nil +} + +func (s *state5) CumsumBaseline() (reward5.Spacetime, error) { + return s.State.CumsumBaseline, nil +} + +func (s *state5) CumsumRealized() (reward5.Spacetime, error) { + return s.State.CumsumRealized, nil +} + +func (s *state5) InitialPledgeForPower(qaPower abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) { + return miner5.InitialPledgeForPower( + qaPower, + s.State.ThisEpochBaselinePower, + s.State.ThisEpochRewardSmoothed, + smoothing5.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + circSupply, + ), nil +} + +func (s *state5) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, sectorWeight abi.StoragePower) (abi.TokenAmount, error) { + return miner5.PreCommitDepositForPower(s.State.ThisEpochRewardSmoothed, + smoothing5.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + sectorWeight), nil +} + +func (s *state5) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/system/system.go b/chain/actors/builtin/system/system.go index b4ced850f..289fb4d5d 100644 --- a/chain/actors/builtin/system/system.go +++ b/chain/actors/builtin/system/system.go @@ -13,10 +13,12 @@ import ( builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" ) var ( - Address = builtin4.SystemActorAddr + Address = builtin5.SystemActorAddr ) func MakeState(store adt.Store, av actors.Version) (State, error) { @@ -34,6 +36,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) { case actors.Version4: return make4(store) + case actors.Version5: + return make5(store) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -53,6 +58,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version4: return builtin4.SystemActorCodeID, nil + case actors.Version5: + return builtin5.SystemActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) diff --git a/chain/actors/builtin/system/temp b/chain/actors/builtin/system/temp deleted file mode 100644 index e69de29bb..000000000 diff --git a/chain/actors/builtin/system/v5.go b/chain/actors/builtin/system/v5.go new file mode 100644 index 000000000..77d2a8478 --- /dev/null +++ b/chain/actors/builtin/system/v5.go @@ -0,0 +1,35 @@ +package system + +import ( + "github.com/ipfs/go-cid" + + 
"github.com/filecoin-project/lotus/chain/actors/adt" + + system5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/system" +) + +var _ State = (*state5)(nil) + +func load5(store adt.Store, root cid.Cid) (State, error) { + out := state5{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make5(store adt.Store) (State, error) { + out := state5{store: store} + out.State = system5.State{} + return &out, nil +} + +type state5 struct { + system5.State + store adt.Store +} + +func (s *state5) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/temp b/chain/actors/builtin/temp deleted file mode 100644 index e69de29bb..000000000 diff --git a/chain/actors/builtin/verifreg/state.go.template b/chain/actors/builtin/verifreg/state.go.template index 96bebe25f..b59cfb628 100644 --- a/chain/actors/builtin/verifreg/state.go.template +++ b/chain/actors/builtin/verifreg/state.go.template @@ -9,7 +9,7 @@ import ( "github.com/filecoin-project/lotus/chain/actors/adt" {{if (ge .v 3)}} builtin{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin" -{{end}} verifreg{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/verifreg" +{{end}} verifreg{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/verifreg" adt{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/util/adt" ) diff --git a/chain/actors/builtin/verifreg/temp b/chain/actors/builtin/verifreg/temp deleted file mode 100644 index e69de29bb..000000000 diff --git a/chain/actors/builtin/verifreg/v5.go b/chain/actors/builtin/verifreg/v5.go new file mode 100644 index 000000000..6fefd7115 --- /dev/null +++ b/chain/actors/builtin/verifreg/v5.go @@ -0,0 +1,75 @@ +package verifreg + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + verifreg5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/verifreg" + adt5 "github.com/filecoin-project/specs-actors/v5/actors/util/adt" +) + +var _ State = (*state5)(nil) + +func load5(store adt.Store, root cid.Cid) (State, error) { + out := state5{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make5(store adt.Store, rootKeyAddress address.Address) (State, error) { + out := state5{store: store} + + s, err := verifreg5.ConstructState(store, rootKeyAddress) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state5 struct { + verifreg5.State + store adt.Store +} + +func (s *state5) RootKey() (address.Address, error) { + return s.State.RootKey, nil +} + +func (s *state5) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) { + return getDataCap(s.store, actors.Version5, s.verifiedClients, addr) +} + +func (s *state5) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) { + return getDataCap(s.store, actors.Version5, s.verifiers, addr) +} + +func (s *state5) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { + return forEachCap(s.store, actors.Version5, s.verifiers, cb) +} + +func (s *state5) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error { + return 
forEachCap(s.store, actors.Version5, s.verifiedClients, cb) +} + +func (s *state5) verifiedClients() (adt.Map, error) { + return adt5.AsMap(s.store, s.VerifiedClients, builtin5.DefaultHamtBitwidth) +} + +func (s *state5) verifiers() (adt.Map, error) { + return adt5.AsMap(s.store, s.Verifiers, builtin5.DefaultHamtBitwidth) +} + +func (s *state5) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/verifreg/verifreg.go b/chain/actors/builtin/verifreg/verifreg.go index 618907554..88104ad69 100644 --- a/chain/actors/builtin/verifreg/verifreg.go +++ b/chain/actors/builtin/verifreg/verifreg.go @@ -17,6 +17,8 @@ import ( builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" @@ -41,11 +43,15 @@ func init() { return load4(store, root) }) + builtin.RegisterActorState(builtin5.VerifiedRegistryActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load5(store, root) + }) + } var ( - Address = builtin4.VerifiedRegistryActorAddr - Methods = builtin4.MethodsVerifiedRegistry + Address = builtin5.VerifiedRegistryActorAddr + Methods = builtin5.MethodsVerifiedRegistry ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -63,6 +69,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case builtin4.VerifiedRegistryActorCodeID: return load4(store, act.Head) + case builtin5.VerifiedRegistryActorCodeID: + return load5(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } @@ -82,6 +91,9 @@ func MakeState(store adt.Store, av actors.Version, rootKeyAddress address.Addres case actors.Version4: return make4(store, rootKeyAddress) + case actors.Version5: + return make5(store, rootKeyAddress) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -101,6 +113,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version4: return builtin4.VerifiedRegistryActorCodeID, nil + case actors.Version5: + return builtin5.VerifiedRegistryActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) diff --git a/chain/actors/policy/policy.go b/chain/actors/policy/policy.go index 164f19a76..bb35025ec 100644 --- a/chain/actors/policy/policy.go +++ b/chain/actors/policy/policy.go @@ -27,14 +27,19 @@ import ( miner4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/miner" verifreg4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/verifreg" - paych4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/paych" + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + market5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/market" + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + verifreg5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/verifreg" + + paych5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/paych" ) const ( - ChainFinality = miner4.ChainFinality + ChainFinality = miner5.ChainFinality SealRandomnessLookback = ChainFinality - PaychSettleDelay = paych4.SettleDelay - MaxPreCommitRandomnessLookback = builtin4.EpochsInDay + SealRandomnessLookback + PaychSettleDelay = paych5.SettleDelay + MaxPreCommitRandomnessLookback = builtin5.EpochsInDay + SealRandomnessLookback ) // SetSupportedProofTypes sets supported proof 
types, across all actor versions. @@ -55,6 +60,8 @@ func SetSupportedProofTypes(types ...abi.RegisteredSealProof) { miner4.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types)*2) miner4.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + miner5.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + AddSupportedProofTypes(types...) } @@ -84,6 +91,15 @@ func AddSupportedProofTypes(types ...abi.RegisteredSealProof) { miner4.PreCommitSealProofTypesV7[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} miner4.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} + miner5.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} + wpp, err := t.RegisteredWindowPoStProof() + if err != nil { + // Fine to panic, this is a test-only method + panic(err) + } + + miner5.WindowPoStProofTypes[wpp] = struct{}{} + } } @@ -100,11 +116,13 @@ func SetPreCommitChallengeDelay(delay abi.ChainEpoch) { miner4.PreCommitChallengeDelay = delay + miner5.PreCommitChallengeDelay = delay + } // TODO: this function shouldn't really exist. Instead, the API should expose the precommit delay. func GetPreCommitChallengeDelay() abi.ChainEpoch { - return miner4.PreCommitChallengeDelay + return miner5.PreCommitChallengeDelay } // SetConsensusMinerMinPower sets the minimum power of an individual miner must @@ -126,6 +144,10 @@ func SetConsensusMinerMinPower(p abi.StoragePower) { policy.ConsensusMinerMinPower = p } + for _, policy := range builtin5.PoStProofPolicies { + policy.ConsensusMinerMinPower = p + } + } // SetMinVerifiedDealSize sets the minimum size of a verified deal. This should @@ -140,6 +162,8 @@ func SetMinVerifiedDealSize(size abi.StoragePower) { verifreg4.MinVerifiedDealSize = size + verifreg5.MinVerifiedDealSize = size + } func GetMaxProveCommitDuration(ver actors.Version, t abi.RegisteredSealProof) abi.ChainEpoch { @@ -161,6 +185,10 @@ func GetMaxProveCommitDuration(ver actors.Version, t abi.RegisteredSealProof) ab return miner4.MaxProveCommitDuration[t] + case actors.Version5: + + return miner5.MaxProveCommitDuration[t] + default: panic("unsupported actors version") } @@ -189,13 +217,17 @@ func DealProviderCollateralBounds( return market4.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) + case actors.Version5: + + return market5.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) + default: panic("unsupported actors version") } } func DealDurationBounds(pieceSize abi.PaddedPieceSize) (min, max abi.ChainEpoch) { - return market4.DealDurationBounds(pieceSize) + return market5.DealDurationBounds(pieceSize) } // Sets the challenge window and scales the proving period to match (such that @@ -222,6 +254,13 @@ func SetWPoStChallengeWindow(period abi.ChainEpoch) { // scale it if we're scaling the challenge period. miner4.WPoStDisputeWindow = period * 30 + miner5.WPoStChallengeWindow = period + miner5.WPoStProvingPeriod = period * abi.ChainEpoch(miner5.WPoStPeriodDeadlines) + + // by default, this is 2x finality which is 30 periods. + // scale it if we're scaling the challenge period. 
+ miner5.WPoStDisputeWindow = period * 30 + } func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch { @@ -234,22 +273,22 @@ func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch { } func GetMaxSectorExpirationExtension() abi.ChainEpoch { - return miner4.MaxSectorExpirationExtension + return miner5.MaxSectorExpirationExtension } // TODO: we'll probably need to abstract over this better in the future. func GetMaxPoStPartitions(p abi.RegisteredPoStProof) (int, error) { - sectorsPerPart, err := builtin4.PoStProofWindowPoStPartitionSectors(p) + sectorsPerPart, err := builtin5.PoStProofWindowPoStPartitionSectors(p) if err != nil { return 0, err } - return int(miner4.AddressedSectorsMax / sectorsPerPart), nil + return int(miner5.AddressedSectorsMax / sectorsPerPart), nil } func GetDefaultSectorSize() abi.SectorSize { // supported sector sizes are the same across versions. - szs := make([]abi.SectorSize, 0, len(miner4.PreCommitSealProofTypesV8)) - for spt := range miner4.PreCommitSealProofTypesV8 { + szs := make([]abi.SectorSize, 0, len(miner5.PreCommitSealProofTypesV8)) + for spt := range miner5.PreCommitSealProofTypesV8 { ss, err := spt.SectorSize() if err != nil { panic(err) @@ -265,12 +304,16 @@ func GetDefaultSectorSize() abi.SectorSize { return szs[0] } +func GetDefaultAggregationProof() abi.RegisteredAggregationProof { + return abi.RegisteredAggregationProof_SnarkPackV1 +} + func GetSectorMaxLifetime(proof abi.RegisteredSealProof, nwVer network.Version) abi.ChainEpoch { if nwVer <= network.Version10 { return builtin4.SealProofPoliciesV0[proof].SectorMaxLifetime } - return builtin4.SealProofPoliciesV11[proof].SectorMaxLifetime + return builtin5.SealProofPoliciesV11[proof].SectorMaxLifetime } func GetAddressedSectorsMax(nwVer network.Version) int { @@ -288,6 +331,9 @@ func GetAddressedSectorsMax(nwVer network.Version) int { case actors.Version4: return miner4.AddressedSectorsMax + case actors.Version5: + return miner5.AddressedSectorsMax + default: panic("unsupported network version") } @@ -313,6 +359,10 @@ func GetDeclarationsMax(nwVer network.Version) int { return miner4.DeclarationsMax + case actors.Version5: + + return miner5.DeclarationsMax + default: panic("unsupported network version") } diff --git a/chain/actors/policy/policy.go.template b/chain/actors/policy/policy.go.template index a1a47852b..6b8f80bd0 100644 --- a/chain/actors/policy/policy.go.template +++ b/chain/actors/policy/policy.go.template @@ -19,9 +19,9 @@ import ( ) const ( - ChainFinality = miner{{.latestVersion}}.ChainFinality - SealRandomnessLookback = ChainFinality - PaychSettleDelay = paych{{.latestVersion}}.SettleDelay + ChainFinality = miner{{.latestVersion}}.ChainFinality + SealRandomnessLookback = ChainFinality + PaychSettleDelay = paych{{.latestVersion}}.SettleDelay MaxPreCommitRandomnessLookback = builtin{{.latestVersion}}.EpochsInDay + SealRandomnessLookback ) @@ -31,10 +31,12 @@ func SetSupportedProofTypes(types ...abi.RegisteredSealProof) { {{range .versions}} {{if (eq . 0)}} miner{{.}}.SupportedProofTypes = make(map[abi.RegisteredSealProof]struct{}, len(types)) - {{else}} + {{else if (le . 
4)}} miner{{.}}.PreCommitSealProofTypesV0 = make(map[abi.RegisteredSealProof]struct{}, len(types)) miner{{.}}.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types)*2) miner{{.}}.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + {{else}} + miner{{.}}.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types)) {{end}} {{end}} @@ -51,15 +53,24 @@ func AddSupportedProofTypes(types ...abi.RegisteredSealProof) { // Set for all miner versions. {{range .versions}} - {{if (eq . 0)}} - miner{{.}}.SupportedProofTypes[t] = struct{}{} - {{else}} - miner{{.}}.PreCommitSealProofTypesV0[t] = struct{}{} - miner{{.}}.PreCommitSealProofTypesV7[t] = struct{}{} - miner{{.}}.PreCommitSealProofTypesV7[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} - miner{{.}}.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} - {{end}} - {{end}} + {{if (eq . 0)}} + miner{{.}}.SupportedProofTypes[t] = struct{}{} + {{else if (le . 4)}} + miner{{.}}.PreCommitSealProofTypesV0[t] = struct{}{} + miner{{.}}.PreCommitSealProofTypesV7[t] = struct{}{} + miner{{.}}.PreCommitSealProofTypesV7[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} + miner{{.}}.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} + {{else}} + miner{{.}}.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} + wpp, err := t.RegisteredWindowPoStProof() + if err != nil { + // Fine to panic, this is a test-only method + panic(err) + } + + miner{{.}}.WindowPoStProofTypes[wpp] = struct{}{} + {{end}} + {{end}} } } @@ -197,9 +208,13 @@ func GetDefaultSectorSize() abi.SectorSize { return szs[0] } +func GetDefaultAggregationProof() abi.RegisteredAggregationProof { + return abi.RegisteredAggregationProof_SnarkPackV1 +} + func GetSectorMaxLifetime(proof abi.RegisteredSealProof, nwVer network.Version) abi.ChainEpoch { if nwVer <= network.Version10 { - return builtin{{.latestVersion}}.SealProofPoliciesV0[proof].SectorMaxLifetime + return builtin4.SealProofPoliciesV0[proof].SectorMaxLifetime } return builtin{{.latestVersion}}.SealProofPoliciesV11[proof].SectorMaxLifetime diff --git a/chain/actors/policy/temp b/chain/actors/policy/temp deleted file mode 100644 index e69de29bb..000000000 diff --git a/chain/actors/temp b/chain/actors/temp deleted file mode 100644 index e69de29bb..000000000 diff --git a/chain/actors/version.go b/chain/actors/version.go index a8b4c62b2..9710e62fa 100644 --- a/chain/actors/version.go +++ b/chain/actors/version.go @@ -8,15 +8,16 @@ import ( type Version int -var LatestVersion = 4 +var LatestVersion = 5 -var Versions = []int{0, 2, 3, LatestVersion} +var Versions = []int{0, 2, 3, 4, LatestVersion} const ( Version0 Version = 0 Version2 Version = 2 Version3 Version = 3 Version4 Version = 4 + Version5 Version = 5 ) // Converts a network version into an actors adt version. 
@@ -30,6 +31,8 @@ func VersionForNetwork(version network.Version) Version { return Version3 case network.Version12: return Version4 + case network.Version13: + return Version5 default: panic(fmt.Sprintf("unsupported network version %d", version)) } diff --git a/chain/gen/gen.go b/chain/gen/gen.go index 5c33ac4d7..424ee6edc 100644 --- a/chain/gen/gen.go +++ b/chain/gen/gen.go @@ -26,7 +26,7 @@ import ( "go.opencensus.io/trace" "golang.org/x/xerrors" - proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/blockstore" @@ -52,7 +52,7 @@ const msgsPerBlock = 20 //nolint:deadcode,varcheck var log = logging.Logger("gen") -var ValidWpostForTesting = []proof2.PoStProof{{ +var ValidWpostForTesting = []proof5.PoStProof{{ ProofBytes: []byte("valid proof"), }} @@ -76,9 +76,10 @@ type ChainGen struct { w *wallet.LocalWallet - eppProvs map[address.Address]WinningPoStProver - Miners []address.Address - receivers []address.Address + eppProvs map[address.Address]WinningPoStProver + Miners []address.Address + receivers []address.Address + // a SecP address banker address.Address bankerNonce uint64 @@ -111,7 +112,7 @@ var DefaultRemainderAccountActor = genesis.Actor{ Meta: remAccMeta.ActorMeta(), } -func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) { +func NewGeneratorWithSectorsAndUpgradeSchedule(numSectors int, us stmgr.UpgradeSchedule) (*ChainGen, error) { j := journal.NilJournal() // TODO: we really shouldn't modify a global variable here. policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1) @@ -246,7 +247,10 @@ func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) { mgen[genesis2.MinerAddress(uint64(i))] = &wppProvider{} } - sm := stmgr.NewStateManager(cs) + sm, err := stmgr.NewStateManagerWithUpgradeSchedule(cs, us) + if err != nil { + return nil, xerrors.Errorf("initing stmgr: %w", err) + } miners := []address.Address{maddr1, maddr2} @@ -284,6 +288,14 @@ func NewGenerator() (*ChainGen, error) { return NewGeneratorWithSectors(1) } +func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) { + return NewGeneratorWithSectorsAndUpgradeSchedule(numSectors, stmgr.DefaultUpgradeSchedule()) +} + +func NewGeneratorWithUpgradeSchedule(us stmgr.UpgradeSchedule) (*ChainGen, error) { + return NewGeneratorWithSectorsAndUpgradeSchedule(1, us) +} + func (cg *ChainGen) StateManager() *stmgr.StateManager { return cg.sm } @@ -386,7 +398,7 @@ type MinedTipSet struct { } func (cg *ChainGen) NextTipSet() (*MinedTipSet, error) { - mts, err := cg.NextTipSetFromMiners(cg.CurTipset.TipSet(), cg.Miners) + mts, err := cg.NextTipSetFromMiners(cg.CurTipset.TipSet(), cg.Miners, 0) if err != nil { return nil, err } @@ -399,7 +411,7 @@ func (cg *ChainGen) SetWinningPoStProver(m address.Address, wpp WinningPoStProve cg.eppProvs[m] = wpp } -func (cg *ChainGen) NextTipSetFromMiners(base *types.TipSet, miners []address.Address) (*MinedTipSet, error) { +func (cg *ChainGen) NextTipSetFromMiners(base *types.TipSet, miners []address.Address, nulls abi.ChainEpoch) (*MinedTipSet, error) { ms, err := cg.GetMessages(cg) if err != nil { return nil, xerrors.Errorf("get random messages: %w", err) @@ -410,21 +422,23 @@ func (cg *ChainGen) NextTipSetFromMiners(base *types.TipSet, miners []address.Ad msgs[i] = ms } - fts, err := cg.NextTipSetFromMinersWithMessages(base, miners, msgs) + fts, err := 
cg.NextTipSetFromMinersWithMessagesAndNulls(base, miners, msgs, nulls) if err != nil { return nil, err } + cg.CurTipset = fts + return &MinedTipSet{ TipSet: fts, Messages: ms, }, nil } -func (cg *ChainGen) NextTipSetFromMinersWithMessages(base *types.TipSet, miners []address.Address, msgs [][]*types.SignedMessage) (*store.FullTipSet, error) { +func (cg *ChainGen) NextTipSetFromMinersWithMessagesAndNulls(base *types.TipSet, miners []address.Address, msgs [][]*types.SignedMessage, nulls abi.ChainEpoch) (*store.FullTipSet, error) { var blks []*types.FullBlock - for round := base.Height() + 1; len(blks) == 0; round++ { + for round := base.Height() + nulls + 1; len(blks) == 0; round++ { for mi, m := range miners { bvals, et, ticket, err := cg.nextBlockProof(context.TODO(), base, m, round) if err != nil { @@ -457,12 +471,14 @@ func (cg *ChainGen) NextTipSetFromMinersWithMessages(base *types.TipSet, miners return nil, err } + cg.CurTipset = fts + return fts, nil } func (cg *ChainGen) makeBlock(parents *types.TipSet, m address.Address, vrfticket *types.Ticket, eticket *types.ElectionProof, bvals []types.BeaconEntry, height abi.ChainEpoch, - wpost []proof2.PoStProof, msgs []*types.SignedMessage) (*types.FullBlock, error) { + wpost []proof5.PoStProof, msgs []*types.SignedMessage) (*types.FullBlock, error) { var ts uint64 if cg.Timestamper != nil { @@ -576,7 +592,11 @@ func (mca mca) ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipS return nil, xerrors.Errorf("loading tipset key: %w", err) } - return mca.sm.ChainStore().GetChainRandomness(ctx, pts.Cids(), personalization, randEpoch, entropy) + if randEpoch > build.UpgradeHyperdriveHeight { + return mca.sm.ChainStore().GetChainRandomnessLookingForward(ctx, pts.Cids(), personalization, randEpoch, entropy) + } + + return mca.sm.ChainStore().GetChainRandomnessLookingBack(ctx, pts.Cids(), personalization, randEpoch, entropy) } func (mca mca) ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { @@ -585,7 +605,11 @@ func (mca mca) ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSe return nil, xerrors.Errorf("loading tipset key: %w", err) } - return mca.sm.ChainStore().GetBeaconRandomness(ctx, pts.Cids(), personalization, randEpoch, entropy) + if randEpoch > build.UpgradeHyperdriveHeight { + return mca.sm.ChainStore().GetBeaconRandomnessLookingForward(ctx, pts.Cids(), personalization, randEpoch, entropy) + } + + return mca.sm.ChainStore().GetBeaconRandomnessLookingBack(ctx, pts.Cids(), personalization, randEpoch, entropy) } func (mca mca) MinerGetBaseInfo(ctx context.Context, maddr address.Address, epoch abi.ChainEpoch, tsk types.TipSetKey) (*api.MiningBaseInfo, error) { @@ -600,7 +624,7 @@ func (mca mca) WalletSign(ctx context.Context, a address.Address, v []byte) (*cr type WinningPoStProver interface { GenerateCandidates(context.Context, abi.PoStRandomness, uint64) ([]uint64, error) - ComputeProof(context.Context, []proof2.SectorInfo, abi.PoStRandomness) ([]proof2.PoStProof, error) + ComputeProof(context.Context, []proof5.SectorInfo, abi.PoStRandomness) ([]proof5.PoStProof, error) } type wppProvider struct{} @@ -609,7 +633,7 @@ func (wpp *wppProvider) GenerateCandidates(ctx context.Context, _ abi.PoStRandom return []uint64{0}, nil } -func (wpp *wppProvider) ComputeProof(context.Context, []proof2.SectorInfo, abi.PoStRandomness) ([]proof2.PoStProof, error) { +func (wpp *wppProvider) 
ComputeProof(context.Context, []proof5.SectorInfo, abi.PoStRandomness) ([]proof5.PoStProof, error) { return ValidWpostForTesting, nil } @@ -676,15 +700,19 @@ type genFakeVerifier struct{} var _ ffiwrapper.Verifier = (*genFakeVerifier)(nil) -func (m genFakeVerifier) VerifySeal(svi proof2.SealVerifyInfo) (bool, error) { +func (m genFakeVerifier) VerifySeal(svi proof5.SealVerifyInfo) (bool, error) { return true, nil } -func (m genFakeVerifier) VerifyWinningPoSt(ctx context.Context, info proof2.WinningPoStVerifyInfo) (bool, error) { +func (m genFakeVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) { panic("not supported") } -func (m genFakeVerifier) VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStVerifyInfo) (bool, error) { +func (m genFakeVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) { + panic("not supported") +} + +func (m genFakeVerifier) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) { panic("not supported") } diff --git a/chain/gen/genesis/miners.go b/chain/gen/genesis/miners.go index 17349b270..e6f17d677 100644 --- a/chain/gen/genesis/miners.go +++ b/chain/gen/genesis/miners.go @@ -43,7 +43,7 @@ import ( miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" power0 "github.com/filecoin-project/specs-actors/actors/builtin/power" reward0 "github.com/filecoin-project/specs-actors/actors/builtin/reward" - runtime2 "github.com/filecoin-project/specs-actors/v2/actors/runtime" + runtime5 "github.com/filecoin-project/specs-actors/v5/actors/runtime" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/store" @@ -62,7 +62,7 @@ func MinerAddress(genesisIndex uint64) address.Address { } type fakedSigSyscalls struct { - runtime2.Syscalls + runtime5.Syscalls } func (fss *fakedSigSyscalls) VerifySignature(signature crypto.Signature, signer address.Address, plaintext []byte) error { @@ -70,7 +70,7 @@ func (fss *fakedSigSyscalls) VerifySignature(signature crypto.Signature, signer } func mkFakedSigSyscalls(base vm.SyscallBuilder) vm.SyscallBuilder { - return func(ctx context.Context, rt *vm.Runtime) runtime2.Syscalls { + return func(ctx context.Context, rt *vm.Runtime) runtime5.Syscalls { return &fakedSigSyscalls{ base(ctx, rt), } @@ -488,13 +488,25 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid // TODO: copied from actors test harness, deduplicate or remove from here type fakeRand struct{} -func (fr *fakeRand) GetChainRandomness(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { +func (fr *fakeRand) GetChainRandomnessLookingForward(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { out := make([]byte, 32) _, _ = rand.New(rand.NewSource(int64(randEpoch * 1000))).Read(out) //nolint return out, nil } -func (fr *fakeRand) GetBeaconRandomness(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { +func (fr *fakeRand) GetChainRandomnessLookingBack(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { + out := make([]byte, 32) + _, _ = rand.New(rand.NewSource(int64(randEpoch * 1000))).Read(out) //nolint + return out, nil +} + +func (fr *fakeRand) GetBeaconRandomnessLookingForward(ctx 
context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { + out := make([]byte, 32) + _, _ = rand.New(rand.NewSource(int64(randEpoch))).Read(out) //nolint + return out, nil +} + +func (fr *fakeRand) GetBeaconRandomnessLookingBack(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { out := make([]byte, 32) _, _ = rand.New(rand.NewSource(int64(randEpoch))).Read(out) //nolint return out, nil diff --git a/chain/messagepool/messagepool.go b/chain/messagepool/messagepool.go index 93dce1df0..0180d1abf 100644 --- a/chain/messagepool/messagepool.go +++ b/chain/messagepool/messagepool.go @@ -126,10 +126,14 @@ type MessagePool struct { republished map[cid.Cid]struct{} + // do NOT access this map directly, use isLocal, setLocal, and forEachLocal respectively localAddrs map[address.Address]struct{} + // do NOT access this map directly, use getPendingMset, setPendingMset, deletePendingMset, forEachPending, and clearPending respectively pending map[address.Address]*msgSet + keyCache map[address.Address]address.Address + curTsLk sync.Mutex // DO NOT LOCK INSIDE lk curTs *types.TipSet @@ -329,6 +333,20 @@ func (ms *msgSet) getRequiredFunds(nonce uint64) types.BigInt { return types.BigInt{Int: requiredFunds} } +func (ms *msgSet) toSlice() []*types.SignedMessage { + set := make([]*types.SignedMessage, 0, len(ms.msgs)) + + for _, m := range ms.msgs { + set = append(set, m) + } + + sort.Slice(set, func(i, j int) bool { + return set[i].Message.Nonce < set[j].Message.Nonce + }) + + return set +} + func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName, j journal.Journal) (*MessagePool, error) { cache, _ := lru.New2Q(build.BlsSignatureCacheSize) verifcache, _ := lru.New2Q(build.VerifSigCacheSize) @@ -350,6 +368,7 @@ func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName, j journ repubTrigger: make(chan struct{}, 1), localAddrs: make(map[address.Address]struct{}), pending: make(map[address.Address]*msgSet), + keyCache: make(map[address.Address]address.Address), minGasPrice: types.NewInt(0), pruneTrigger: make(chan struct{}, 1), pruneCooldown: make(chan struct{}, 1), @@ -371,9 +390,11 @@ func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName, j journ // enable initial prunes mp.pruneCooldown <- struct{}{} + ctx, cancel := context.WithCancel(context.TODO()) + // load the current tipset and subscribe to head changes _before_ loading local messages mp.curTs = api.SubscribeHeadChanges(func(rev, app []*types.TipSet) error { - err := mp.HeadChange(rev, app) + err := mp.HeadChange(ctx, rev, app) if err != nil { log.Errorf("mpool head notif handler error: %+v", err) } @@ -384,7 +405,8 @@ func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName, j journ mp.lk.Lock() go func() { - err := mp.loadLocal() + defer cancel() + err := mp.loadLocal(ctx) mp.lk.Unlock() mp.curTsLk.Unlock() @@ -395,12 +417,106 @@ func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName, j journ log.Info("mpool ready") - mp.runLoop() + mp.runLoop(ctx) }() return mp, nil } +func (mp *MessagePool) resolveToKey(ctx context.Context, addr address.Address) (address.Address, error) { + // check the cache + a, f := mp.keyCache[addr] + if f { + return a, nil + } + + // resolve the address + ka, err := mp.api.StateAccountKeyAtFinality(ctx, addr, mp.curTs) + if err != nil { + return address.Undef, err + } + + // place both entries in the cache (may 
both be key addresses, which is fine) + mp.keyCache[addr] = ka + mp.keyCache[ka] = ka + + return ka, nil +} + +func (mp *MessagePool) getPendingMset(ctx context.Context, addr address.Address) (*msgSet, bool, error) { + ra, err := mp.resolveToKey(ctx, addr) + if err != nil { + return nil, false, err + } + + ms, f := mp.pending[ra] + + return ms, f, nil +} + +func (mp *MessagePool) setPendingMset(ctx context.Context, addr address.Address, ms *msgSet) error { + ra, err := mp.resolveToKey(ctx, addr) + if err != nil { + return err + } + + mp.pending[ra] = ms + + return nil +} + +// This method isn't strictly necessary, since it doesn't resolve any addresses, but it's safer to have +func (mp *MessagePool) forEachPending(f func(address.Address, *msgSet)) { + for la, ms := range mp.pending { + f(la, ms) + } +} + +func (mp *MessagePool) deletePendingMset(ctx context.Context, addr address.Address) error { + ra, err := mp.resolveToKey(ctx, addr) + if err != nil { + return err + } + + delete(mp.pending, ra) + + return nil +} + +// This method isn't strictly necessary, since it doesn't resolve any addresses, but it's safer to have +func (mp *MessagePool) clearPending() { + mp.pending = make(map[address.Address]*msgSet) +} + +func (mp *MessagePool) isLocal(ctx context.Context, addr address.Address) (bool, error) { + ra, err := mp.resolveToKey(ctx, addr) + if err != nil { + return false, err + } + + _, f := mp.localAddrs[ra] + + return f, nil +} + +func (mp *MessagePool) setLocal(ctx context.Context, addr address.Address) error { + ra, err := mp.resolveToKey(ctx, addr) + if err != nil { + return err + } + + mp.localAddrs[ra] = struct{}{} + + return nil +} + +// This method isn't strictly necessary, since it doesn't resolve any addresses, but it's safer to have +func (mp *MessagePool) forEachLocal(ctx context.Context, f func(context.Context, address.Address)) { + for la := range mp.localAddrs { + f(ctx, la) + } +} + func (mp *MessagePool) Close() error { close(mp.closer) return nil @@ -418,15 +534,15 @@ func (mp *MessagePool) Prune() { mp.pruneTrigger <- struct{}{} } -func (mp *MessagePool) runLoop() { +func (mp *MessagePool) runLoop(ctx context.Context) { for { select { case <-mp.repubTk.C: - if err := mp.republishPendingMessages(); err != nil { + if err := mp.republishPendingMessages(ctx); err != nil { log.Errorf("error while republishing messages: %s", err) } case <-mp.repubTrigger: - if err := mp.republishPendingMessages(); err != nil { + if err := mp.republishPendingMessages(ctx); err != nil { log.Errorf("error while republishing messages: %s", err) } @@ -442,8 +558,10 @@ func (mp *MessagePool) runLoop() { } } -func (mp *MessagePool) addLocal(m *types.SignedMessage) error { - mp.localAddrs[m.Message.From] = struct{}{} +func (mp *MessagePool) addLocal(ctx context.Context, m *types.SignedMessage) error { + if err := mp.setLocal(ctx, m.Message.From); err != nil { + return err + } msgb, err := m.Serialize() if err != nil { @@ -475,7 +593,7 @@ func (mp *MessagePool) verifyMsgBeforeAdd(m *types.SignedMessage, curTs *types.T return false, xerrors.Errorf("message will not be included in a block: %w", err) } - // this checks if the GasFeeCap is suffisciently high for inclusion in the next 20 blocks + // this checks if the GasFeeCap is sufficiently high for inclusion in the next 20 blocks // if the GasFeeCap is too low, we soft reject the message (Ignore in pubsub) and rely // on republish to push it through later, if the baseFee has fallen. 
// this is a defensive check that stops minimum baseFee spam attacks from overloading validation @@ -510,7 +628,7 @@ func (mp *MessagePool) verifyMsgBeforeAdd(m *types.SignedMessage, curTs *types.T return publish, nil } -func (mp *MessagePool) Push(m *types.SignedMessage) (cid.Cid, error) { +func (mp *MessagePool) Push(ctx context.Context, m *types.SignedMessage) (cid.Cid, error) { err := mp.checkMessage(m) if err != nil { return cid.Undef, err @@ -523,7 +641,7 @@ func (mp *MessagePool) Push(m *types.SignedMessage) (cid.Cid, error) { }() mp.curTsLk.Lock() - publish, err := mp.addTs(m, mp.curTs, true, false) + publish, err := mp.addTs(ctx, m, mp.curTs, true, false) if err != nil { mp.curTsLk.Unlock() return cid.Undef, err @@ -576,7 +694,7 @@ func (mp *MessagePool) checkMessage(m *types.SignedMessage) error { return nil } -func (mp *MessagePool) Add(m *types.SignedMessage) error { +func (mp *MessagePool) Add(ctx context.Context, m *types.SignedMessage) error { err := mp.checkMessage(m) if err != nil { return err @@ -591,7 +709,7 @@ func (mp *MessagePool) Add(m *types.SignedMessage) error { mp.curTsLk.Lock() defer mp.curTsLk.Unlock() - _, err = mp.addTs(m, mp.curTs, false, false) + _, err = mp.addTs(ctx, m, mp.curTs, false, false) return err } @@ -631,7 +749,7 @@ func (mp *MessagePool) VerifyMsgSig(m *types.SignedMessage) error { return nil } -func (mp *MessagePool) checkBalance(m *types.SignedMessage, curTs *types.TipSet) error { +func (mp *MessagePool) checkBalance(ctx context.Context, m *types.SignedMessage, curTs *types.TipSet) error { balance, err := mp.getStateBalance(m.Message.From, curTs) if err != nil { return xerrors.Errorf("failed to check sender balance: %s: %w", err, ErrSoftValidationFailure) @@ -645,7 +763,12 @@ func (mp *MessagePool) checkBalance(m *types.SignedMessage, curTs *types.TipSet) // add Value for soft failure check //requiredFunds = types.BigAdd(requiredFunds, m.Message.Value) - mset, ok := mp.pending[m.Message.From] + mset, ok, err := mp.getPendingMset(ctx, m.Message.From) + if err != nil { + log.Debugf("mpoolcheckbalance failed to get pending mset: %s", err) + return err + } + if ok { requiredFunds = types.BigAdd(requiredFunds, mset.getRequiredFunds(m.Message.Nonce)) } @@ -659,7 +782,7 @@ func (mp *MessagePool) checkBalance(m *types.SignedMessage, curTs *types.TipSet) return nil } -func (mp *MessagePool) addTs(m *types.SignedMessage, curTs *types.TipSet, local, untrusted bool) (bool, error) { +func (mp *MessagePool) addTs(ctx context.Context, m *types.SignedMessage, curTs *types.TipSet, local, untrusted bool) (bool, error) { snonce, err := mp.getStateNonce(m.Message.From, curTs) if err != nil { return false, xerrors.Errorf("failed to look up actor state nonce: %s: %w", err, ErrSoftValidationFailure) @@ -677,17 +800,17 @@ func (mp *MessagePool) addTs(m *types.SignedMessage, curTs *types.TipSet, local, return false, err } - if err := mp.checkBalance(m, curTs); err != nil { + if err := mp.checkBalance(ctx, m, curTs); err != nil { return false, err } - err = mp.addLocked(m, !local, untrusted) + err = mp.addLocked(ctx, m, !local, untrusted) if err != nil { return false, err } if local { - err = mp.addLocal(m) + err = mp.addLocal(ctx, m) if err != nil { return false, xerrors.Errorf("error persisting local message: %w", err) } @@ -696,7 +819,7 @@ func (mp *MessagePool) addTs(m *types.SignedMessage, curTs *types.TipSet, local, return publish, nil } -func (mp *MessagePool) addLoaded(m *types.SignedMessage) error { +func (mp *MessagePool) addLoaded(ctx context.Context, m 
*types.SignedMessage) error { err := mp.checkMessage(m) if err != nil { return err @@ -722,21 +845,21 @@ func (mp *MessagePool) addLoaded(m *types.SignedMessage) error { return err } - if err := mp.checkBalance(m, curTs); err != nil { + if err := mp.checkBalance(ctx, m, curTs); err != nil { return err } - return mp.addLocked(m, false, false) + return mp.addLocked(ctx, m, false, false) } -func (mp *MessagePool) addSkipChecks(m *types.SignedMessage) error { +func (mp *MessagePool) addSkipChecks(ctx context.Context, m *types.SignedMessage) error { mp.lk.Lock() defer mp.lk.Unlock() - return mp.addLocked(m, false, false) + return mp.addLocked(ctx, m, false, false) } -func (mp *MessagePool) addLocked(m *types.SignedMessage, strict, untrusted bool) error { +func (mp *MessagePool) addLocked(ctx context.Context, m *types.SignedMessage, strict, untrusted bool) error { log.Debugf("mpooladd: %s %d", m.Message.From, m.Message.Nonce) if m.Signature.Type == crypto.SigTypeBLS { mp.blsSigCache.Add(m.Cid(), m.Signature) @@ -752,7 +875,13 @@ func (mp *MessagePool) addLocked(m *types.SignedMessage, strict, untrusted bool) return err } - mset, ok := mp.pending[m.Message.From] + // Note: If performance becomes an issue, making this getOrCreatePendingMset will save some work + mset, ok, err := mp.getPendingMset(ctx, m.Message.From) + if err != nil { + log.Debug(err) + return err + } + if !ok { nonce, err := mp.getStateNonce(m.Message.From, mp.curTs) if err != nil { @@ -760,7 +889,9 @@ func (mp *MessagePool) addLocked(m *types.SignedMessage, strict, untrusted bool) } mset = newMsgSet(nonce) - mp.pending[m.Message.From] = mset + if err = mp.setPendingMset(ctx, m.Message.From, mset); err != nil { + return xerrors.Errorf("failed to set pending mset: %w", err) + } } incr, err := mset.add(m, mp, strict, untrusted) @@ -795,14 +926,14 @@ func (mp *MessagePool) addLocked(m *types.SignedMessage, strict, untrusted bool) return nil } -func (mp *MessagePool) GetNonce(_ context.Context, addr address.Address, _ types.TipSetKey) (uint64, error) { +func (mp *MessagePool) GetNonce(ctx context.Context, addr address.Address, _ types.TipSetKey) (uint64, error) { mp.curTsLk.Lock() defer mp.curTsLk.Unlock() mp.lk.Lock() defer mp.lk.Unlock() - return mp.getNonceLocked(addr, mp.curTs) + return mp.getNonceLocked(ctx, addr, mp.curTs) } // GetActor should not be used. 
It is only here to satisfy interface mess caused by lite node handling @@ -812,13 +943,18 @@ func (mp *MessagePool) GetActor(_ context.Context, addr address.Address, _ types return mp.api.GetActorAfter(addr, mp.curTs) } -func (mp *MessagePool) getNonceLocked(addr address.Address, curTs *types.TipSet) (uint64, error) { +func (mp *MessagePool) getNonceLocked(ctx context.Context, addr address.Address, curTs *types.TipSet) (uint64, error) { stateNonce, err := mp.getStateNonce(addr, curTs) // sanity check if err != nil { return 0, err } - mset, ok := mp.pending[addr] + mset, ok, err := mp.getPendingMset(ctx, addr) + if err != nil { + log.Debugf("mpoolgetnonce failed to get mset: %s", err) + return 0, err + } + if ok { if stateNonce > mset.nextNonce { log.Errorf("state nonce was larger than mset.nextNonce (%d > %d)", stateNonce, mset.nextNonce) @@ -855,7 +991,7 @@ func (mp *MessagePool) getStateBalance(addr address.Address, ts *types.TipSet) ( // - strict checks are enabled // - extra strict add checks are used when adding the messages to the msgSet // that means: no nonce gaps, at most 10 pending messages for the actor -func (mp *MessagePool) PushUntrusted(m *types.SignedMessage) (cid.Cid, error) { +func (mp *MessagePool) PushUntrusted(ctx context.Context, m *types.SignedMessage) (cid.Cid, error) { err := mp.checkMessage(m) if err != nil { return cid.Undef, err @@ -868,7 +1004,7 @@ func (mp *MessagePool) PushUntrusted(m *types.SignedMessage) (cid.Cid, error) { }() mp.curTsLk.Lock() - publish, err := mp.addTs(m, mp.curTs, true, true) + publish, err := mp.addTs(ctx, m, mp.curTs, true, true) if err != nil { mp.curTsLk.Unlock() return cid.Undef, err @@ -890,15 +1026,20 @@ func (mp *MessagePool) PushUntrusted(m *types.SignedMessage) (cid.Cid, error) { return m.Cid(), nil } -func (mp *MessagePool) Remove(from address.Address, nonce uint64, applied bool) { +func (mp *MessagePool) Remove(ctx context.Context, from address.Address, nonce uint64, applied bool) { mp.lk.Lock() defer mp.lk.Unlock() - mp.remove(from, nonce, applied) + mp.remove(ctx, from, nonce, applied) } -func (mp *MessagePool) remove(from address.Address, nonce uint64, applied bool) { - mset, ok := mp.pending[from] +func (mp *MessagePool) remove(ctx context.Context, from address.Address, nonce uint64, applied bool) { + mset, ok, err := mp.getPendingMset(ctx, from) + if err != nil { + log.Debugf("mpoolremove failed to get mset: %s", err) + return + } + if !ok { return } @@ -923,58 +1064,57 @@ func (mp *MessagePool) remove(from address.Address, nonce uint64, applied bool) mset.rm(nonce, applied) if len(mset.msgs) == 0 { - delete(mp.pending, from) + if err = mp.deletePendingMset(ctx, from); err != nil { + log.Debugf("mpoolremove failed to delete mset: %s", err) + return + } } } -func (mp *MessagePool) Pending() ([]*types.SignedMessage, *types.TipSet) { +func (mp *MessagePool) Pending(ctx context.Context) ([]*types.SignedMessage, *types.TipSet) { mp.curTsLk.Lock() defer mp.curTsLk.Unlock() mp.lk.Lock() defer mp.lk.Unlock() - return mp.allPending() + return mp.allPending(ctx) } -func (mp *MessagePool) allPending() ([]*types.SignedMessage, *types.TipSet) { +func (mp *MessagePool) allPending(ctx context.Context) ([]*types.SignedMessage, *types.TipSet) { out := make([]*types.SignedMessage, 0) - for a := range mp.pending { - out = append(out, mp.pendingFor(a)...) - } + + mp.forEachPending(func(a address.Address, mset *msgSet) { + out = append(out, mset.toSlice()...) 
+ }) return out, mp.curTs } -func (mp *MessagePool) PendingFor(a address.Address) ([]*types.SignedMessage, *types.TipSet) { +func (mp *MessagePool) PendingFor(ctx context.Context, a address.Address) ([]*types.SignedMessage, *types.TipSet) { mp.curTsLk.Lock() defer mp.curTsLk.Unlock() mp.lk.Lock() defer mp.lk.Unlock() - return mp.pendingFor(a), mp.curTs + return mp.pendingFor(ctx, a), mp.curTs } -func (mp *MessagePool) pendingFor(a address.Address) []*types.SignedMessage { - mset := mp.pending[a] - if mset == nil || len(mset.msgs) == 0 { +func (mp *MessagePool) pendingFor(ctx context.Context, a address.Address) []*types.SignedMessage { + mset, ok, err := mp.getPendingMset(ctx, a) + if err != nil { + log.Debugf("mpoolpendingfor failed to get mset: %s", err) return nil } - set := make([]*types.SignedMessage, 0, len(mset.msgs)) - - for _, m := range mset.msgs { - set = append(set, m) + if mset == nil || !ok || len(mset.msgs) == 0 { + return nil } - sort.Slice(set, func(i, j int) bool { - return set[i].Message.Nonce < set[j].Message.Nonce - }) - - return set + return mset.toSlice() } -func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet) error { +func (mp *MessagePool) HeadChange(ctx context.Context, revert []*types.TipSet, apply []*types.TipSet) error { mp.curTsLk.Lock() defer mp.curTsLk.Unlock() @@ -991,7 +1131,7 @@ func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet) rm := func(from address.Address, nonce uint64) { s, ok := rmsgs[from] if !ok { - mp.Remove(from, nonce, true) + mp.Remove(ctx, from, nonce, true) return } @@ -1000,7 +1140,7 @@ func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet) return } - mp.Remove(from, nonce, true) + mp.Remove(ctx, from, nonce, true) } maybeRepub := func(cid cid.Cid) { @@ -1071,7 +1211,7 @@ func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet) for _, s := range rmsgs { for _, msg := range s { - if err := mp.addSkipChecks(msg); err != nil { + if err := mp.addSkipChecks(ctx, msg); err != nil { log.Errorf("Failed to readd message from reorg to mpool: %s", err) } } @@ -1079,7 +1219,7 @@ func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet) if len(revert) > 0 && futureDebug { mp.lk.Lock() - msgs, ts := mp.allPending() + msgs, ts := mp.allPending(ctx) mp.lk.Unlock() buckets := map[address.Address]*statBucket{} @@ -1286,7 +1426,7 @@ func (mp *MessagePool) Updates(ctx context.Context) (<-chan api.MpoolUpdate, err return out, nil } -func (mp *MessagePool) loadLocal() error { +func (mp *MessagePool) loadLocal(ctx context.Context) error { res, err := mp.localMsgs.Query(query.Query{}) if err != nil { return xerrors.Errorf("query local messages: %w", err) @@ -1302,7 +1442,7 @@ func (mp *MessagePool) loadLocal() error { return xerrors.Errorf("unmarshaling local message: %w", err) } - if err := mp.addLoaded(&sm); err != nil { + if err := mp.addLoaded(ctx, &sm); err != nil { if xerrors.Is(err, ErrNonceTooLow) { continue // todo: drop the message from local cache (if above certain confidence threshold) } @@ -1310,47 +1450,61 @@ func (mp *MessagePool) loadLocal() error { log.Errorf("adding local message: %+v", err) } - mp.localAddrs[sm.Message.From] = struct{}{} + if err = mp.setLocal(ctx, sm.Message.From); err != nil { + log.Debugf("mpoolloadLocal errored: %s", err) + return err + } } return nil } -func (mp *MessagePool) Clear(local bool) { +func (mp *MessagePool) Clear(ctx context.Context, local bool) { mp.lk.Lock() defer mp.lk.Unlock() // 
remove everything if local is true, including removing local messages from // the datastore if local { - for a := range mp.localAddrs { - mset, ok := mp.pending[a] - if !ok { - continue + mp.forEachLocal(ctx, func(ctx context.Context, la address.Address) { + mset, ok, err := mp.getPendingMset(ctx, la) + if err != nil { + log.Warnf("errored while getting pending mset: %w", err) + return } - for _, m := range mset.msgs { - err := mp.localMsgs.Delete(datastore.NewKey(string(m.Cid().Bytes()))) - if err != nil { - log.Warnf("error deleting local message: %s", err) + if ok { + for _, m := range mset.msgs { + err := mp.localMsgs.Delete(datastore.NewKey(string(m.Cid().Bytes()))) + if err != nil { + log.Warnf("error deleting local message: %s", err) + } } } - } + }) - mp.pending = make(map[address.Address]*msgSet) + mp.clearPending() mp.republished = nil return } - // remove everything except the local messages - for a := range mp.pending { - _, isLocal := mp.localAddrs[a] - if isLocal { - continue + mp.forEachPending(func(a address.Address, ms *msgSet) { + isLocal, err := mp.isLocal(ctx, a) + if err != nil { + log.Warnf("errored while determining isLocal: %w", err) + return } - delete(mp.pending, a) - } + + if isLocal { + return + } + + if err = mp.deletePendingMset(ctx, a); err != nil { + log.Warnf("errored while deleting mset: %w", err) + return + } + }) } func getBaseFeeLowerBound(baseFee, factor types.BigInt) types.BigInt { diff --git a/chain/messagepool/messagepool_test.go b/chain/messagepool/messagepool_test.go index 925ee438c..b48685069 100644 --- a/chain/messagepool/messagepool_test.go +++ b/chain/messagepool/messagepool_test.go @@ -153,7 +153,7 @@ func (tma *testMpoolAPI) GetActorAfter(addr address.Address, ts *types.TipSet) ( }, nil } -func (tma *testMpoolAPI) StateAccountKey(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) { +func (tma *testMpoolAPI) StateAccountKeyAtFinality(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) { if addr.Protocol() != address.BLS && addr.Protocol() != address.SECP256K1 { return address.Undef, fmt.Errorf("given address was not a key addr") } @@ -202,7 +202,7 @@ func (tma *testMpoolAPI) ChainComputeBaseFee(ctx context.Context, ts *types.TipS func assertNonce(t *testing.T, mp *MessagePool, addr address.Address, val uint64) { t.Helper() - n, err := mp.GetNonce(context.Background(), addr, types.EmptyTSK) + n, err := mp.GetNonce(context.TODO(), addr, types.EmptyTSK) if err != nil { t.Fatal(err) } @@ -214,7 +214,7 @@ func assertNonce(t *testing.T, mp *MessagePool, addr address.Address, val uint64 func mustAdd(t *testing.T, mp *MessagePool, msg *types.SignedMessage) { t.Helper() - if err := mp.Add(msg); err != nil { + if err := mp.Add(context.TODO(), msg); err != nil { t.Fatal(err) } } @@ -296,9 +296,9 @@ func TestMessagePoolMessagesInEachBlock(t *testing.T) { tma.applyBlock(t, a) tsa := mock.TipSet(a) - _, _ = mp.Pending() + _, _ = mp.Pending(context.TODO()) - selm, _ := mp.SelectMessages(tsa, 1) + selm, _ := mp.SelectMessages(context.Background(), tsa, 1) if len(selm) == 0 { t.Fatal("should have returned the rest of the messages") } @@ -358,7 +358,7 @@ func TestRevertMessages(t *testing.T) { assertNonce(t, mp, sender, 4) - p, _ := mp.Pending() + p, _ := mp.Pending(context.TODO()) fmt.Printf("%+v\n", p) if len(p) != 3 { t.Fatal("expected three messages in mempool") @@ -399,14 +399,14 @@ func TestPruningSimple(t *testing.T) { for i := 0; i < 5; i++ { smsg := mock.MkMessage(sender, target, 
uint64(i), w) - if err := mp.Add(smsg); err != nil { + if err := mp.Add(context.TODO(), smsg); err != nil { t.Fatal(err) } } for i := 10; i < 50; i++ { smsg := mock.MkMessage(sender, target, uint64(i), w) - if err := mp.Add(smsg); err != nil { + if err := mp.Add(context.TODO(), smsg); err != nil { t.Fatal(err) } } @@ -416,7 +416,7 @@ func TestPruningSimple(t *testing.T) { mp.Prune() - msgs, _ := mp.Pending() + msgs, _ := mp.Pending(context.TODO()) if len(msgs) != 5 { t.Fatal("expected only 5 messages in pool, got: ", len(msgs)) } @@ -458,7 +458,7 @@ func TestLoadLocal(t *testing.T) { msgs := make(map[cid.Cid]struct{}) for i := 0; i < 10; i++ { m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) - cid, err := mp.Push(m) + cid, err := mp.Push(context.TODO(), m) if err != nil { t.Fatal(err) } @@ -474,7 +474,7 @@ func TestLoadLocal(t *testing.T) { t.Fatal(err) } - pmsgs, _ := mp.Pending() + pmsgs, _ := mp.Pending(context.TODO()) if len(msgs) != len(pmsgs) { t.Fatalf("expected %d messages, but got %d", len(msgs), len(pmsgs)) } @@ -529,7 +529,7 @@ func TestClearAll(t *testing.T) { gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] for i := 0; i < 10; i++ { m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) - _, err := mp.Push(m) + _, err := mp.Push(context.TODO(), m) if err != nil { t.Fatal(err) } @@ -540,9 +540,9 @@ func TestClearAll(t *testing.T) { mustAdd(t, mp, m) } - mp.Clear(true) + mp.Clear(context.Background(), true) - pending, _ := mp.Pending() + pending, _ := mp.Pending(context.TODO()) if len(pending) > 0 { t.Fatalf("cleared the mpool, but got %d pending messages", len(pending)) } @@ -584,7 +584,7 @@ func TestClearNonLocal(t *testing.T) { gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] for i := 0; i < 10; i++ { m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) - _, err := mp.Push(m) + _, err := mp.Push(context.TODO(), m) if err != nil { t.Fatal(err) } @@ -595,9 +595,9 @@ func TestClearNonLocal(t *testing.T) { mustAdd(t, mp, m) } - mp.Clear(false) + mp.Clear(context.Background(), false) - pending, _ := mp.Pending() + pending, _ := mp.Pending(context.TODO()) if len(pending) != 10 { t.Fatalf("expected 10 pending messages, but got %d instead", len(pending)) } @@ -654,7 +654,7 @@ func TestUpdates(t *testing.T) { for i := 0; i < 10; i++ { m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) - _, err := mp.Push(m) + _, err := mp.Push(context.TODO(), m) if err != nil { t.Fatal(err) } diff --git a/chain/messagepool/provider.go b/chain/messagepool/provider.go index 565691004..0f904c52c 100644 --- a/chain/messagepool/provider.go +++ b/chain/messagepool/provider.go @@ -26,7 +26,7 @@ type Provider interface { PutMessage(m types.ChainMsg) (cid.Cid, error) PubSubPublish(string, []byte) error GetActorAfter(address.Address, *types.TipSet) (*types.Actor, error) - StateAccountKey(context.Context, address.Address, *types.TipSet) (address.Address, error) + StateAccountKeyAtFinality(context.Context, address.Address, *types.TipSet) (address.Address, error) MessagesForBlock(*types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) MessagesForTipset(*types.TipSet) ([]types.ChainMsg, error) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) @@ -41,6 +41,8 @@ type mpoolProvider struct { lite messagesigner.MpoolNonceAPI } +var _ Provider = (*mpoolProvider)(nil) + func NewProvider(sm *stmgr.StateManager, ps *pubsub.PubSub) Provider { return 
&mpoolProvider{sm: sm, ps: ps} } @@ -97,8 +99,8 @@ func (mpp *mpoolProvider) GetActorAfter(addr address.Address, ts *types.TipSet) return st.GetActor(addr) } -func (mpp *mpoolProvider) StateAccountKey(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) { - return mpp.sm.ResolveToKeyAddress(ctx, addr, ts) +func (mpp *mpoolProvider) StateAccountKeyAtFinality(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) { + return mpp.sm.ResolveToKeyAddressAtFinality(ctx, addr, ts) } func (mpp *mpoolProvider) MessagesForBlock(h *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) { diff --git a/chain/messagepool/pruning.go b/chain/messagepool/pruning.go index dc1c69417..c10239b8e 100644 --- a/chain/messagepool/pruning.go +++ b/chain/messagepool/pruning.go @@ -57,13 +57,18 @@ func (mp *MessagePool) pruneMessages(ctx context.Context, ts *types.TipSet) erro mpCfg := mp.getConfig() // we never prune priority addresses for _, actor := range mpCfg.PriorityAddrs { - protected[actor] = struct{}{} + pk, err := mp.resolveToKey(ctx, actor) + if err != nil { + log.Debugf("pruneMessages failed to resolve priority address: %s", err) + } + + protected[pk] = struct{}{} } // we also never prune locally published messages - for actor := range mp.localAddrs { + mp.forEachLocal(ctx, func(ctx context.Context, actor address.Address) { protected[actor] = struct{}{} - } + }) // Collect all messages to track which ones to remove and create chains for block inclusion pruneMsgs := make(map[cid.Cid]*types.SignedMessage, mp.currentSize) @@ -108,7 +113,7 @@ keepLoop: // and remove all messages that are still in pruneMsgs after processing the chains log.Infof("Pruning %d messages", len(pruneMsgs)) for _, m := range pruneMsgs { - mp.remove(m.Message.From, m.Message.Nonce, false) + mp.remove(ctx, m.Message.From, m.Message.Nonce, false) } return nil diff --git a/chain/messagepool/repub.go b/chain/messagepool/repub.go index 5fa68aa53..4323bdee1 100644 --- a/chain/messagepool/repub.go +++ b/chain/messagepool/repub.go @@ -18,7 +18,7 @@ const repubMsgLimit = 30 var RepublishBatchDelay = 100 * time.Millisecond -func (mp *MessagePool) republishPendingMessages() error { +func (mp *MessagePool) republishPendingMessages(ctx context.Context) error { mp.curTsLk.Lock() ts := mp.curTs @@ -32,13 +32,18 @@ func (mp *MessagePool) republishPendingMessages() error { pending := make(map[address.Address]map[uint64]*types.SignedMessage) mp.lk.Lock() mp.republished = nil // clear this to avoid races triggering an early republish - for actor := range mp.localAddrs { - mset, ok := mp.pending[actor] + mp.forEachLocal(ctx, func(ctx context.Context, actor address.Address) { + mset, ok, err := mp.getPendingMset(ctx, actor) + if err != nil { + log.Debugf("failed to get mset: %w", err) + return + } + if !ok { - continue + return } if len(mset.msgs) == 0 { - continue + return } // we need to copy this while holding the lock to avoid races with concurrent modification pend := make(map[uint64]*types.SignedMessage, len(mset.msgs)) @@ -46,7 +51,8 @@ func (mp *MessagePool) republishPendingMessages() error { pend[nonce] = m } pending[actor] = pend - } + }) + mp.lk.Unlock() mp.curTsLk.Unlock() diff --git a/chain/messagepool/repub_test.go b/chain/messagepool/repub_test.go index 8da64f974..580231f7a 100644 --- a/chain/messagepool/repub_test.go +++ b/chain/messagepool/repub_test.go @@ -56,7 +56,7 @@ func TestRepubMessages(t *testing.T) { for i := 0; i < 10; i++ { m := makeTestMessage(w1, a1, 
a2, uint64(i), gasLimit, uint64(i+1)) - _, err := mp.Push(m) + _, err := mp.Push(context.TODO(), m) if err != nil { t.Fatal(err) } diff --git a/chain/messagepool/selection.go b/chain/messagepool/selection.go index 05acc5667..611ab8e5f 100644 --- a/chain/messagepool/selection.go +++ b/chain/messagepool/selection.go @@ -38,7 +38,7 @@ type msgChain struct { prev *msgChain } -func (mp *MessagePool) SelectMessages(ts *types.TipSet, tq float64) (msgs []*types.SignedMessage, err error) { +func (mp *MessagePool) SelectMessages(ctx context.Context, ts *types.TipSet, tq float64) (msgs []*types.SignedMessage, err error) { mp.curTsLk.Lock() defer mp.curTsLk.Unlock() @@ -49,9 +49,9 @@ func (mp *MessagePool) SelectMessages(ts *types.TipSet, tq float64) (msgs []*typ // than any other block, then we don't bother with optimal selection because the // first block will always have higher effective performance if tq > 0.84 { - msgs, err = mp.selectMessagesGreedy(mp.curTs, ts) + msgs, err = mp.selectMessagesGreedy(ctx, mp.curTs, ts) } else { - msgs, err = mp.selectMessagesOptimal(mp.curTs, ts, tq) + msgs, err = mp.selectMessagesOptimal(ctx, mp.curTs, ts, tq) } if err != nil { @@ -65,7 +65,7 @@ func (mp *MessagePool) SelectMessages(ts *types.TipSet, tq float64) (msgs []*typ return msgs, nil } -func (mp *MessagePool) selectMessagesOptimal(curTs, ts *types.TipSet, tq float64) ([]*types.SignedMessage, error) { +func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *types.TipSet, tq float64) ([]*types.SignedMessage, error) { start := time.Now() baseFee, err := mp.api.ChainComputeBaseFee(context.TODO(), ts) @@ -91,7 +91,7 @@ func (mp *MessagePool) selectMessagesOptimal(curTs, ts *types.TipSet, tq float64 // 0b. Select all priority messages that fit in the block minGas := int64(gasguess.MinGas) - result, gasLimit := mp.selectPriorityMessages(pending, baseFee, ts) + result, gasLimit := mp.selectPriorityMessages(ctx, pending, baseFee, ts) // have we filled the block? if gasLimit < minGas { @@ -389,7 +389,7 @@ tailLoop: return result, nil } -func (mp *MessagePool) selectMessagesGreedy(curTs, ts *types.TipSet) ([]*types.SignedMessage, error) { +func (mp *MessagePool) selectMessagesGreedy(ctx context.Context, curTs, ts *types.TipSet) ([]*types.SignedMessage, error) { start := time.Now() baseFee, err := mp.api.ChainComputeBaseFee(context.TODO(), ts) @@ -415,7 +415,7 @@ func (mp *MessagePool) selectMessagesGreedy(curTs, ts *types.TipSet) ([]*types.S // 0b. Select all priority messages that fit in the block minGas := int64(gasguess.MinGas) - result, gasLimit := mp.selectPriorityMessages(pending, baseFee, ts) + result, gasLimit := mp.selectPriorityMessages(ctx, pending, baseFee, ts) // have we filled the block? 
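pruneMessages above and the selectPriorityMessages hunk below both call mp.resolveToKey, which this change adds elsewhere and which does not appear in this excerpt. A minimal sketch of the idea, assuming a keyCache map field on MessagePool and using the StateAccountKeyAtFinality provider method introduced above:

func (mp *MessagePool) resolveToKey(ctx context.Context, addr address.Address) (address.Address, error) {
	// cheap memoization; ID-to-key resolutions taken at finality are reorg-stable
	if ka, ok := mp.keyCache[addr]; ok {
		return ka, nil
	}

	ka, err := mp.api.StateAccountKeyAtFinality(ctx, addr, mp.curTs)
	if err != nil {
		return address.Undef, err
	}

	// cache both spellings; a key address resolves to itself
	mp.keyCache[addr] = ka
	mp.keyCache[ka] = ka
	return ka, nil
}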
if gasLimit < minGas { @@ -525,7 +525,7 @@ tailLoop: return result, nil } -func (mp *MessagePool) selectPriorityMessages(pending map[address.Address]map[uint64]*types.SignedMessage, baseFee types.BigInt, ts *types.TipSet) ([]*types.SignedMessage, int64) { +func (mp *MessagePool) selectPriorityMessages(ctx context.Context, pending map[address.Address]map[uint64]*types.SignedMessage, baseFee types.BigInt, ts *types.TipSet) ([]*types.SignedMessage, int64) { start := time.Now() defer func() { if dt := time.Since(start); dt > time.Millisecond { @@ -541,10 +541,16 @@ func (mp *MessagePool) selectPriorityMessages(pending map[address.Address]map[ui var chains []*msgChain priority := mpCfg.PriorityAddrs for _, actor := range priority { - mset, ok := pending[actor] + pk, err := mp.resolveToKey(ctx, actor) + if err != nil { + log.Debugf("mpooladdlocal failed to resolve sender: %s", err) + return nil, gasLimit + } + + mset, ok := pending[pk] if ok { // remove actor from pending set as we are already processed these messages - delete(pending, actor) + delete(pending, pk) // create chains for the priority actor next := mp.createMessageChains(actor, mset, baseFee, ts) chains = append(chains, next...) @@ -646,8 +652,7 @@ func (mp *MessagePool) getPendingMessages(curTs, ts *types.TipSet) (map[address. inSync = true } - // first add our current pending messages - for a, mset := range mp.pending { + mp.forEachPending(func(a address.Address, mset *msgSet) { if inSync { // no need to copy the map result[a] = mset.msgs @@ -660,7 +665,7 @@ func (mp *MessagePool) getPendingMessages(curTs, ts *types.TipSet) (map[address. result[a] = msetCopy } - } + }) // we are in sync, that's the happy path if inSync { diff --git a/chain/messagepool/selection_test.go b/chain/messagepool/selection_test.go index e32d897c4..463473229 100644 --- a/chain/messagepool/selection_test.go +++ b/chain/messagepool/selection_test.go @@ -427,7 +427,7 @@ func TestBasicMessageSelection(t *testing.T) { mustAdd(t, mp, m) } - msgs, err := mp.SelectMessages(ts, 1.0) + msgs, err := mp.SelectMessages(context.Background(), ts, 1.0) if err != nil { t.Fatal(err) } @@ -464,7 +464,7 @@ func TestBasicMessageSelection(t *testing.T) { tma.applyBlock(t, block2) // we should have no pending messages in the mpool - pend, _ := mp.Pending() + pend, _ := mp.Pending(context.TODO()) if len(pend) != 0 { t.Fatalf("expected no pending messages, but got %d", len(pend)) } @@ -495,7 +495,7 @@ func TestBasicMessageSelection(t *testing.T) { tma.setStateNonce(a1, 10) tma.setStateNonce(a2, 10) - msgs, err = mp.SelectMessages(ts3, 1.0) + msgs, err = mp.SelectMessages(context.Background(), ts3, 1.0) if err != nil { t.Fatal(err) } @@ -569,7 +569,7 @@ func TestMessageSelectionTrimming(t *testing.T) { mustAdd(t, mp, m) } - msgs, err := mp.SelectMessages(ts, 1.0) + msgs, err := mp.SelectMessages(context.Background(), ts, 1.0) if err != nil { t.Fatal(err) } @@ -633,7 +633,7 @@ func TestPriorityMessageSelection(t *testing.T) { mustAdd(t, mp, m) } - msgs, err := mp.SelectMessages(ts, 1.0) + msgs, err := mp.SelectMessages(context.Background(), ts, 1.0) if err != nil { t.Fatal(err) } @@ -712,7 +712,7 @@ func TestPriorityMessageSelection2(t *testing.T) { mustAdd(t, mp, m) } - msgs, err := mp.SelectMessages(ts, 1.0) + msgs, err := mp.SelectMessages(context.Background(), ts, 1.0) if err != nil { t.Fatal(err) } @@ -782,7 +782,7 @@ func TestPriorityMessageSelection3(t *testing.T) { } // test greedy selection - msgs, err := mp.SelectMessages(ts, 1.0) + msgs, err := 
mp.SelectMessages(context.Background(), ts, 1.0) if err != nil { t.Fatal(err) } @@ -805,7 +805,7 @@ func TestPriorityMessageSelection3(t *testing.T) { } // test optimal selection - msgs, err = mp.SelectMessages(ts, 0.1) + msgs, err = mp.SelectMessages(context.Background(), ts, 0.1) if err != nil { t.Fatal(err) } @@ -872,7 +872,7 @@ func TestOptimalMessageSelection1(t *testing.T) { mustAdd(t, mp, m) } - msgs, err := mp.SelectMessages(ts, 0.25) + msgs, err := mp.SelectMessages(context.Background(), ts, 0.25) if err != nil { t.Fatal(err) } @@ -941,7 +941,7 @@ func TestOptimalMessageSelection2(t *testing.T) { mustAdd(t, mp, m) } - msgs, err := mp.SelectMessages(ts, 0.1) + msgs, err := mp.SelectMessages(context.Background(), ts, 0.1) if err != nil { t.Fatal(err) } @@ -1020,7 +1020,7 @@ func TestOptimalMessageSelection3(t *testing.T) { } } - msgs, err := mp.SelectMessages(ts, 0.1) + msgs, err := mp.SelectMessages(context.Background(), ts, 0.1) if err != nil { t.Fatal(err) } @@ -1108,7 +1108,7 @@ func testCompetitiveMessageSelection(t *testing.T, rng *rand.Rand, getPremium fu logging.SetLogLevel("messagepool", "error") // 1. greedy selection - greedyMsgs, err := mp.selectMessagesGreedy(ts, ts) + greedyMsgs, err := mp.selectMessagesGreedy(context.Background(), ts, ts) if err != nil { t.Fatal(err) } @@ -1137,7 +1137,7 @@ func testCompetitiveMessageSelection(t *testing.T, rng *rand.Rand, getPremium fu var bestMsgs []*types.SignedMessage for j := 0; j < nMiners; j++ { tq := rng.Float64() - msgs, err := mp.SelectMessages(ts, tq) + msgs, err := mp.SelectMessages(context.Background(), ts, tq) if err != nil { t.Fatal(err) } @@ -1396,7 +1396,7 @@ readLoop: minGasLimit := int64(0.9 * float64(build.BlockGasLimit)) // greedy first - selected, err := mp.SelectMessages(ts, 1.0) + selected, err := mp.SelectMessages(context.Background(), ts, 1.0) if err != nil { t.Fatal(err) } @@ -1410,7 +1410,7 @@ readLoop: } // high quality ticket - selected, err = mp.SelectMessages(ts, .8) + selected, err = mp.SelectMessages(context.Background(), ts, .8) if err != nil { t.Fatal(err) } @@ -1424,7 +1424,7 @@ readLoop: } // mid quality ticket - selected, err = mp.SelectMessages(ts, .4) + selected, err = mp.SelectMessages(context.Background(), ts, .4) if err != nil { t.Fatal(err) } @@ -1438,7 +1438,7 @@ readLoop: } // low quality ticket - selected, err = mp.SelectMessages(ts, .1) + selected, err = mp.SelectMessages(context.Background(), ts, .1) if err != nil { t.Fatal(err) } @@ -1452,7 +1452,7 @@ readLoop: } // very low quality ticket - selected, err = mp.SelectMessages(ts, .01) + selected, err = mp.SelectMessages(context.Background(), ts, .01) if err != nil { t.Fatal(err) } diff --git a/chain/messagesigner/messagesigner_test.go b/chain/messagesigner/messagesigner_test.go index 90d16b7ff..20d9af38b 100644 --- a/chain/messagesigner/messagesigner_test.go +++ b/chain/messagesigner/messagesigner_test.go @@ -24,6 +24,8 @@ type mockMpool struct { nonces map[address.Address]uint64 } +var _ MpoolNonceAPI = (*mockMpool)(nil) + func newMockMpool() *mockMpool { return &mockMpool{nonces: make(map[address.Address]uint64)} } diff --git a/chain/state/statetree.go b/chain/state/statetree.go index a31ec2396..40955c48b 100644 --- a/chain/state/statetree.go +++ b/chain/state/statetree.go @@ -24,6 +24,7 @@ import ( states2 "github.com/filecoin-project/specs-actors/v2/actors/states" states3 "github.com/filecoin-project/specs-actors/v3/actors/states" states4 "github.com/filecoin-project/specs-actors/v4/actors/states" + states5 
"github.com/filecoin-project/specs-actors/v5/actors/states" ) var log = logging.Logger("statetree") @@ -151,6 +152,8 @@ func VersionForNetwork(ver network.Version) (types.StateTreeVersion, error) { return types.StateTreeVersion2, nil case network.Version12: return types.StateTreeVersion3, nil + case network.Version13: + return types.StateTreeVersion4, nil default: panic(fmt.Sprintf("unsupported network version %d", ver)) } @@ -161,7 +164,7 @@ func NewStateTree(cst cbor.IpldStore, ver types.StateTreeVersion) (*StateTree, e switch ver { case types.StateTreeVersion0: // info is undefined - case types.StateTreeVersion1, types.StateTreeVersion2, types.StateTreeVersion3: + case types.StateTreeVersion1, types.StateTreeVersion2, types.StateTreeVersion3, types.StateTreeVersion4: var err error info, err = cst.Put(context.TODO(), new(types.StateInfo0)) if err != nil { @@ -198,6 +201,12 @@ func NewStateTree(cst cbor.IpldStore, ver types.StateTreeVersion) (*StateTree, e return nil, xerrors.Errorf("failed to create state tree: %w", err) } hamt = tree.Map + case types.StateTreeVersion4: + tree, err := states5.NewTree(store) + if err != nil { + return nil, xerrors.Errorf("failed to create state tree: %w", err) + } + hamt = tree.Map default: return nil, xerrors.Errorf("unsupported state tree version: %d", ver) } @@ -253,6 +262,12 @@ func LoadStateTree(cst cbor.IpldStore, c cid.Cid) (*StateTree, error) { if tree != nil { hamt = tree.Map } + case types.StateTreeVersion4: + var tree *states5.Tree + tree, err = states5.LoadTree(store, root.Actors) + if tree != nil { + hamt = tree.Map + } default: return nil, xerrors.Errorf("unsupported state tree version: %d", root.Version) } diff --git a/chain/stmgr/call.go b/chain/stmgr/call.go index 961bebd9c..cfbf60a95 100644 --- a/chain/stmgr/call.go +++ b/chain/stmgr/call.go @@ -155,11 +155,6 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri return nil, xerrors.Errorf("computing tipset state: %w", err) } - state, err = sm.handleStateForks(ctx, state, ts.Height(), nil, ts) - if err != nil { - return nil, fmt.Errorf("failed to handle fork: %w", err) - } - r := store.NewChainRand(sm.cs, ts.Cids()) if span.IsRecordingEvents() { @@ -172,7 +167,7 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri vmopt := &vm.VMOpts{ StateBase: state, - Epoch: ts.Height() + 1, + Epoch: ts.Height(), Rand: r, Bstore: sm.cs.StateBlockstore(), Syscalls: sm.cs.VMSys(), diff --git a/chain/stmgr/forks.go b/chain/stmgr/forks.go index a7b56f679..ee5a26dea 100644 --- a/chain/stmgr/forks.go +++ b/chain/stmgr/forks.go @@ -9,6 +9,8 @@ import ( "sync" "time" + "github.com/filecoin-project/specs-actors/v5/actors/migration/nv13" + "github.com/filecoin-project/go-state-types/rt" "github.com/filecoin-project/go-address" @@ -143,7 +145,7 @@ func DefaultUpgradeSchedule() UpgradeSchedule { Network: network.Version3, Migration: UpgradeRefuel, }, { - Height: build.UpgradeActorsV2Height, + Height: build.UpgradeAssemblyHeight, Network: network.Version4, Expensive: true, Migration: UpgradeActorsV2, @@ -172,7 +174,7 @@ func DefaultUpgradeSchedule() UpgradeSchedule { Network: network.Version9, Migration: nil, }, { - Height: build.UpgradeActorsV3Height, + Height: build.UpgradeTrustHeight, Network: network.Version10, Migration: UpgradeActorsV3, PreMigrations: []PreMigration{{ @@ -192,7 +194,7 @@ func DefaultUpgradeSchedule() UpgradeSchedule { Network: network.Version11, Migration: nil, }, { - Height: build.UpgradeActorsV4Height, + Height: 
build.UpgradeTurboHeight, Network: network.Version12, Migration: UpgradeActorsV4, PreMigrations: []PreMigration{{ @@ -207,7 +209,22 @@ func DefaultUpgradeSchedule() UpgradeSchedule { StopWithin: 5, }}, Expensive: true, - }} + }, { + Height: build.UpgradeHyperdriveHeight, + Network: network.Version13, + Migration: UpgradeActorsV5, + PreMigrations: []PreMigration{{ + PreMigration: PreUpgradeActorsV5, + StartWithin: 120, + DontStartWithin: 60, + StopWithin: 35, + }, { + PreMigration: PreUpgradeActorsV5, + StartWithin: 30, + DontStartWithin: 15, + StopWithin: 5, + }}, + Expensive: true}} for _, u := range updates { if u.Height < 0 { @@ -1053,7 +1070,7 @@ func upgradeActorsV3Common( // Perform the migration newHamtRoot, err := nv10.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache) if err != nil { - return cid.Undef, xerrors.Errorf("upgrading to actors v2: %w", err) + return cid.Undef, xerrors.Errorf("upgrading to actors v3: %w", err) } // Persist the result. @@ -1139,7 +1156,7 @@ func upgradeActorsV4Common( // Perform the migration newHamtRoot, err := nv12.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache) if err != nil { - return cid.Undef, xerrors.Errorf("upgrading to actors v2: %w", err) + return cid.Undef, xerrors.Errorf("upgrading to actors v4: %w", err) } // Persist the result. @@ -1166,6 +1183,92 @@ func upgradeActorsV4Common( return newRoot, nil } +func UpgradeActorsV5(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + // Use all the CPUs except 3. + workerCount := runtime.NumCPU() - 3 + if workerCount <= 0 { + workerCount = 1 + } + + config := nv13.Config{ + MaxWorkers: uint(workerCount), + JobQueueSize: 1000, + ResultQueueSize: 100, + ProgressLogPeriod: 10 * time.Second, + } + + newRoot, err := upgradeActorsV5Common(ctx, sm, cache, root, epoch, ts, config) + if err != nil { + return cid.Undef, xerrors.Errorf("migrating actors v5 state: %w", err) + } + + return newRoot, nil +} + +func PreUpgradeActorsV5(ctx context.Context, sm *StateManager, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error { + // Use half the CPUs for pre-migration, but leave at least 3. + workerCount := runtime.NumCPU() + if workerCount <= 4 { + workerCount = 1 + } else { + workerCount /= 2 + } + config := nv13.Config{MaxWorkers: uint(workerCount)} + _, err := upgradeActorsV5Common(ctx, sm, cache, root, epoch, ts, config) + return err +} + +func upgradeActorsV5Common( + ctx context.Context, sm *StateManager, cache MigrationCache, + root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet, + config nv13.Config, +) (cid.Cid, error) { + buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync()) + store := store.ActorStore(ctx, buf) + + // Load the state root. + var stateRoot types.StateRoot + if err := store.Get(ctx, root, &stateRoot); err != nil { + return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err) + } + + if stateRoot.Version != types.StateTreeVersion3 { + return cid.Undef, xerrors.Errorf( + "expected state root version 3 for actors v5 upgrade, got %d", + stateRoot.Version, + ) + } + + // Perform the migration + newHamtRoot, err := nv13.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache) + if err != nil { + return cid.Undef, xerrors.Errorf("upgrading to actors v5: %w", err) + } + + // Persist the result. 
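+	// The migrated root is written below as a StateTreeVersion4 state root: the
+	// VersionForNetwork mapping added in chain/state/statetree.go pairs network
+	// Version13 (actors v5) with state tree version 4, whereas the input root
+	// checked above must still be version 3.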
+ newRoot, err := store.Put(ctx, &types.StateRoot{ + Version: types.StateTreeVersion4, + Actors: newHamtRoot, + Info: stateRoot.Info, + }) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err) + } + + // Persist the new tree. + + { + from := buf + to := buf.Read() + + if err := vm.Copy(ctx, from, to, newRoot); err != nil { + return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err) + } + } + + return newRoot, nil +} + func setNetworkName(ctx context.Context, store adt.Store, tree *state.StateTree, name string) error { ia, err := tree.GetActor(builtin0.InitActorAddr) if err != nil { diff --git a/chain/stmgr/stmgr.go b/chain/stmgr/stmgr.go index afc98a32a..6690a4ad3 100644 --- a/chain/stmgr/stmgr.go +++ b/chain/stmgr/stmgr.go @@ -7,6 +7,8 @@ import ( "sync" "sync/atomic" + "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/ipfs/go-cid" cbor "github.com/ipfs/go-ipld-cbor" logging "github.com/ipfs/go-log/v2" @@ -90,6 +92,7 @@ type StateManager struct { expensiveUpgrades map[abi.ChainEpoch]struct{} stCache map[string][]cid.Cid + tCache treeCache compWait map[string]chan struct{} stlk sync.Mutex genesisMsigLk sync.Mutex @@ -102,6 +105,12 @@ type StateManager struct { genesisMarketFunds abi.TokenAmount } +// Caches a single state tree +type treeCache struct { + root cid.Cid + tree *state.StateTree +} + func NewStateManager(cs *store.ChainStore) *StateManager { sm, err := NewStateManagerWithUpgradeSchedule(cs, DefaultUpgradeSchedule()) if err != nil { @@ -154,7 +163,11 @@ func NewStateManagerWithUpgradeSchedule(cs *store.ChainStore, us UpgradeSchedule newVM: vm.NewVM, cs: cs, stCache: make(map[string][]cid.Cid), - compWait: make(map[string]chan struct{}), + tCache: treeCache{ + root: cid.Undef, + tree: nil, + }, + compWait: make(map[string]chan struct{}), }, nil } @@ -563,6 +576,52 @@ func (sm *StateManager) ResolveToKeyAddress(ctx context.Context, addr address.Ad return vm.ResolveToKeyAddr(tree, cst, addr) } +// ResolveToKeyAddressAtFinality is similar to stmgr.ResolveToKeyAddress but fails if the ID address being resolved isn't reorg-stable yet. +// It should not be used for consensus-critical subsystems. 
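+// The lookup runs against the state of the tipset policy.ChainFinality epochs behind
+// the supplied tipset (or behind the current head when ts is nil), and the loaded
+// lookback state tree is memoized in the StateManager's single-entry treeCache so
+// repeated resolutions against the same lookback root do not re-load the tree.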
+func (sm *StateManager) ResolveToKeyAddressAtFinality(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) { + switch addr.Protocol() { + case address.BLS, address.SECP256K1: + return addr, nil + case address.Actor: + return address.Undef, xerrors.New("cannot resolve actor address to key address") + default: + } + + if ts == nil { + ts = sm.cs.GetHeaviestTipSet() + } + + var err error + if ts.Height() > policy.ChainFinality { + ts, err = sm.ChainStore().GetTipsetByHeight(ctx, ts.Height()-policy.ChainFinality, ts, true) + if err != nil { + return address.Undef, xerrors.Errorf("failed to load lookback tipset: %w", err) + } + } + + cst := cbor.NewCborStore(sm.cs.StateBlockstore()) + tree := sm.tCache.tree + + if tree == nil || sm.tCache.root != ts.ParentState() { + tree, err = state.LoadStateTree(cst, ts.ParentState()) + if err != nil { + return address.Undef, xerrors.Errorf("failed to load parent state tree: %w", err) + } + + sm.tCache = treeCache{ + root: ts.ParentState(), + tree: tree, + } + } + + resolved, err := vm.ResolveToKeyAddr(tree, cst, addr) + if err == nil { + return resolved, nil + } + + return address.Undef, xerrors.New("ID address not found in lookback state") +} + func (sm *StateManager) GetBlsPublicKey(ctx context.Context, addr address.Address, ts *types.TipSet) (pubk []byte, err error) { kaddr, err := sm.ResolveToKeyAddress(ctx, addr, ts) if err != nil { @@ -1141,8 +1200,8 @@ func (sm *StateManager) GetFilVested(ctx context.Context, height abi.ChainEpoch, } } - // After UpgradeActorsV2Height these funds are accounted for in GetFilReserveDisbursed - if height <= build.UpgradeActorsV2Height { + // After UpgradeAssemblyHeight these funds are accounted for in GetFilReserveDisbursed + if height <= build.UpgradeAssemblyHeight { // continue to use preIgnitionGenInfos, nothing changed at the Ignition epoch vf = big.Add(vf, sm.genesisPledge) // continue to use preIgnitionGenInfos, nothing changed at the Ignition epoch @@ -1265,7 +1324,7 @@ func (sm *StateManager) GetVMCirculatingSupplyDetailed(ctx context.Context, heig } filReserveDisbursed := big.Zero() - if height > build.UpgradeActorsV2Height { + if height > build.UpgradeAssemblyHeight { filReserveDisbursed, err = GetFilReserveDisbursed(ctx, st) if err != nil { return api.CirculatingSupply{}, xerrors.Errorf("failed to calculate filReserveDisbursed: %w", err) diff --git a/chain/stmgr/utils.go b/chain/stmgr/utils.go index 11c765333..f73554d3b 100644 --- a/chain/stmgr/utils.go +++ b/chain/stmgr/utils.go @@ -9,6 +9,8 @@ import ( "runtime" "strings" + exported5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/exported" + "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/network" @@ -549,6 +551,7 @@ func init() { actors = append(actors, exported2.BuiltinActors()...) actors = append(actors, exported3.BuiltinActors()...) actors = append(actors, exported4.BuiltinActors()...) + actors = append(actors, exported5.BuiltinActors()...) for _, actor := range actors { exports := actor.Exports() diff --git a/chain/store/checkpoint_test.go b/chain/store/checkpoint_test.go index 320b76797..81bbab6ea 100644 --- a/chain/store/checkpoint_test.go +++ b/chain/store/checkpoint_test.go @@ -18,7 +18,7 @@ func TestChainCheckpoint(t *testing.T) { // Let the first miner mine some blocks. 
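The checkpoint and sync tests below pick up a new trailing argument on the chain generator's NextTipSetFromMiners: the number of null rounds to leave before the next tipset is mined, with 0 preserving the old behaviour. The generator-side change is outside this excerpt; judging from the call sites (mineOnBlock threads a nulls abi.ChainEpoch through, and TestDrandNull passes 2), a call that skips two rounds looks roughly like:

	// mine the next tipset two epochs later, leaving two null rounds in between
	ts, err := cg.NextTipSetFromMiners(last, cg.Miners[:1], 2)
	require.NoError(t, err)
	last = ts.TipSet.TipSet()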
last := cg.CurTipset.TipSet() for i := 0; i < 4; i++ { - ts, err := cg.NextTipSetFromMiners(last, cg.Miners[:1]) + ts, err := cg.NextTipSetFromMiners(last, cg.Miners[:1], 0) require.NoError(t, err) last = ts.TipSet.TipSet() @@ -57,7 +57,7 @@ func TestChainCheckpoint(t *testing.T) { // Let the second miner miner mine a fork last = checkpointParents for i := 0; i < 4; i++ { - ts, err := cg.NextTipSetFromMiners(last, cg.Miners[1:]) + ts, err := cg.NextTipSetFromMiners(last, cg.Miners[1:], 0) require.NoError(t, err) last = ts.TipSet.TipSet() diff --git a/chain/store/index.go b/chain/store/index.go index a9da994af..324fb7a63 100644 --- a/chain/store/index.go +++ b/chain/store/index.go @@ -107,6 +107,9 @@ func (ci *ChainIndex) fillCache(tsk types.TipSetKey) (*lbEntry, error) { } rheight -= ci.skipLength + if rheight < 0 { + rheight = 0 + } var skipTarget *types.TipSet if parent.Height() < rheight { diff --git a/chain/store/store.go b/chain/store/store.go index f8f1b0c49..71fa0397a 100644 --- a/chain/store/store.go +++ b/chain/store/store.go @@ -12,6 +12,8 @@ import ( "strings" "sync" + "github.com/filecoin-project/lotus/chain/state" + "golang.org/x/sync/errgroup" "github.com/filecoin-project/go-state-types/crypto" @@ -1129,17 +1131,33 @@ type BlockMessages struct { func (cs *ChainStore) BlockMsgsForTipset(ts *types.TipSet) ([]BlockMessages, error) { applied := make(map[address.Address]uint64) + cst := cbor.NewCborStore(cs.stateBlockstore) + st, err := state.LoadStateTree(cst, ts.Blocks()[0].ParentStateRoot) + if err != nil { + return nil, xerrors.Errorf("failed to load state tree") + } + selectMsg := func(m *types.Message) (bool, error) { - // The first match for a sender is guaranteed to have correct nonce -- the block isn't valid otherwise - if _, ok := applied[m.From]; !ok { - applied[m.From] = m.Nonce + var sender address.Address + if ts.Height() >= build.UpgradeHyperdriveHeight { + sender, err = st.LookupID(m.From) + if err != nil { + return false, err + } + } else { + sender = m.From } - if applied[m.From] != m.Nonce { + // The first match for a sender is guaranteed to have correct nonce -- the block isn't valid otherwise + if _, ok := applied[sender]; !ok { + applied[sender] = m.Nonce + } + + if applied[sender] != m.Nonce { return false, nil } - applied[m.From]++ + applied[sender]++ return true, nil } @@ -1404,7 +1422,15 @@ func DrawRandomness(rbase []byte, pers crypto.DomainSeparationTag, round abi.Cha return h.Sum(nil), nil } -func (cs *ChainStore) GetBeaconRandomness(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { +func (cs *ChainStore) GetBeaconRandomnessLookingBack(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return cs.GetBeaconRandomness(ctx, blks, pers, round, entropy, true) +} + +func (cs *ChainStore) GetBeaconRandomnessLookingForward(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return cs.GetBeaconRandomness(ctx, blks, pers, round, entropy, false) +} + +func (cs *ChainStore) GetBeaconRandomness(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte, lookback bool) ([]byte, error) { _, span := trace.StartSpan(ctx, "store.GetBeaconRandomness") defer span.End() span.AddAttributes(trace.Int64Attribute("round", int64(round))) @@ -1423,7 +1449,7 @@ func (cs *ChainStore) GetBeaconRandomness(ctx 
context.Context, blks []cid.Cid, p searchHeight = 0 } - randTs, err := cs.GetTipsetByHeight(ctx, searchHeight, ts, true) + randTs, err := cs.GetTipsetByHeight(ctx, searchHeight, ts, lookback) if err != nil { return nil, err } @@ -1438,7 +1464,15 @@ func (cs *ChainStore) GetBeaconRandomness(ctx context.Context, blks []cid.Cid, p return DrawRandomness(be.Data, pers, round, entropy) } -func (cs *ChainStore) GetChainRandomness(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { +func (cs *ChainStore) GetChainRandomnessLookingBack(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return cs.GetChainRandomness(ctx, blks, pers, round, entropy, true) +} + +func (cs *ChainStore) GetChainRandomnessLookingForward(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return cs.GetChainRandomness(ctx, blks, pers, round, entropy, false) +} + +func (cs *ChainStore) GetChainRandomness(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte, lookback bool) ([]byte, error) { _, span := trace.StartSpan(ctx, "store.GetChainRandomness") defer span.End() span.AddAttributes(trace.Int64Attribute("round", int64(round))) @@ -1457,7 +1491,7 @@ func (cs *ChainStore) GetChainRandomness(ctx context.Context, blks []cid.Cid, pe searchHeight = 0 } - randTs, err := cs.GetTipsetByHeight(ctx, searchHeight, ts, true) + randTs, err := cs.GetTipsetByHeight(ctx, searchHeight, ts, lookback) if err != nil { return nil, err } @@ -1732,12 +1766,20 @@ func NewChainRand(cs *ChainStore, blks []cid.Cid) vm.Rand { } } -func (cr *chainRand) GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return cr.cs.GetChainRandomness(ctx, cr.blks, pers, round, entropy) +func (cr *chainRand) GetChainRandomnessLookingBack(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return cr.cs.GetChainRandomnessLookingBack(ctx, cr.blks, pers, round, entropy) } -func (cr *chainRand) GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return cr.cs.GetBeaconRandomness(ctx, cr.blks, pers, round, entropy) +func (cr *chainRand) GetChainRandomnessLookingForward(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return cr.cs.GetChainRandomnessLookingForward(ctx, cr.blks, pers, round, entropy) +} + +func (cr *chainRand) GetBeaconRandomnessLookingBack(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return cr.cs.GetBeaconRandomnessLookingBack(ctx, cr.blks, pers, round, entropy) +} + +func (cr *chainRand) GetBeaconRandomnessLookingForward(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return cr.cs.GetBeaconRandomnessLookingForward(ctx, cr.blks, pers, round, entropy) } func (cs *ChainStore) GetTipSetFromKey(tsk types.TipSetKey) (*types.TipSet, error) { diff --git a/chain/store/store_test.go b/chain/store/store_test.go index 51e2e08d0..62a0430e3 100644 --- a/chain/store/store_test.go +++ b/chain/store/store_test.go @@ -76,7 +76,7 @@ func BenchmarkGetRandomness(b *testing.B) { b.ResetTimer() for i := 
0; i < b.N; i++ { - _, err := cs.GetChainRandomness(context.TODO(), last.Cids(), crypto.DomainSeparationTag_SealRandomness, 500, nil) + _, err := cs.GetChainRandomnessLookingBack(context.TODO(), last.Cids(), crypto.DomainSeparationTag_SealRandomness, 500, nil) if err != nil { b.Fatal(err) } diff --git a/chain/sub/incoming.go b/chain/sub/incoming.go index 55f8232bb..7744fe1b9 100644 --- a/chain/sub/incoming.go +++ b/chain/sub/incoming.go @@ -516,7 +516,7 @@ func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubs return pubsub.ValidationReject } - if err := mv.mpool.Add(m); err != nil { + if err := mv.mpool.Add(ctx, m); err != nil { log.Debugf("failed to add message from network to message pool (From: %s, To: %s, Nonce: %d, Value: %s): %s", m.Message.From, m.Message.To, m.Message.Nonce, types.FIL(m.Message.Value), err) ctx, _ = tag.New( ctx, diff --git a/chain/sync.go b/chain/sync.go index 6f594024d..167856927 100644 --- a/chain/sync.go +++ b/chain/sync.go @@ -1074,9 +1074,19 @@ func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock // Phase 2: (Partial) semantic validation: // the sender exists and is an account actor, and the nonces make sense - if _, ok := nonces[m.From]; !ok { + var sender address.Address + if syncer.sm.GetNtwkVersion(ctx, b.Header.Height) >= network.Version13 { + sender, err = st.LookupID(m.From) + if err != nil { + return err + } + } else { + sender = m.From + } + + if _, ok := nonces[sender]; !ok { // `GetActor` does not validate that this is an account actor. - act, err := st.GetActor(m.From) + act, err := st.GetActor(sender) if err != nil { return xerrors.Errorf("failed to get actor: %w", err) } @@ -1084,13 +1094,13 @@ func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock if !builtin.IsAccountActor(act.Code) { return xerrors.New("Sender must be an account actor") } - nonces[m.From] = act.Nonce + nonces[sender] = act.Nonce } - if nonces[m.From] != m.Nonce { - return xerrors.Errorf("wrong nonce (exp: %d, got: %d)", nonces[m.From], m.Nonce) + if nonces[sender] != m.Nonce { + return xerrors.Errorf("wrong nonce (exp: %d, got: %d)", nonces[sender], m.Nonce) } - nonces[m.From]++ + nonces[sender]++ return nil } diff --git a/chain/sync_test.go b/chain/sync_test.go index 3176d9ec3..2289d6350 100644 --- a/chain/sync_test.go +++ b/chain/sync_test.go @@ -7,6 +7,11 @@ import ( "testing" "time" + "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" @@ -101,7 +106,8 @@ func prepSyncTest(t testing.TB, h int) *syncTestUtil { g: g, } - tu.addSourceNode(h) + tu.addSourceNode(stmgr.DefaultUpgradeSchedule(), h) + //tu.checkHeight("source", source, h) // separate logs @@ -110,6 +116,53 @@ func prepSyncTest(t testing.TB, h int) *syncTestUtil { return tu } +func prepSyncTestWithV5Height(t testing.TB, h int, v5height abi.ChainEpoch) *syncTestUtil { + logging.SetLogLevel("*", "INFO") + + us := stmgr.UpgradeSchedule{{ + // prepare for upgrade. 
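+		// The schedule below compresses the mainnet upgrade path so a short test chain
+		// still crosses every relevant state migration: actors v2, v3 and v4 land at
+		// epochs 1, 2 and 3, and the actors v5 (network Version13) migration lands at
+		// the caller-supplied v5height (4 in the nonce tests below, 50 in TestDrandNull).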
+ Network: network.Version9, + Height: 1, + Migration: stmgr.UpgradeActorsV2, + }, { + Network: network.Version10, + Height: 2, + Migration: stmgr.UpgradeActorsV3, + }, { + Network: network.Version12, + Height: 3, + Migration: stmgr.UpgradeActorsV4, + }, { + Network: network.Version13, + Height: v5height, + Migration: stmgr.UpgradeActorsV5, + }} + + g, err := gen.NewGeneratorWithUpgradeSchedule(us) + + if err != nil { + t.Fatalf("%+v", err) + } + + ctx, cancel := context.WithCancel(context.Background()) + + tu := &syncTestUtil{ + t: t, + ctx: ctx, + cancel: cancel, + + mn: mocknet.New(ctx), + g: g, + } + + tu.addSourceNode(us, h) + //tu.checkHeight("source", source, h) + + // separate logs + fmt.Println("\x1b[31m///////////////////////////////////////////////////\x1b[39b") + return tu +} + func (tu *syncTestUtil) Shutdown() { tu.cancel() } @@ -174,7 +227,7 @@ func (tu *syncTestUtil) pushTsExpectErr(to int, fts *store.FullTipSet, experr bo } } -func (tu *syncTestUtil) mineOnBlock(blk *store.FullTipSet, to int, miners []int, wait, fail bool, msgs [][]*types.SignedMessage) *store.FullTipSet { +func (tu *syncTestUtil) mineOnBlock(blk *store.FullTipSet, to int, miners []int, wait, fail bool, msgs [][]*types.SignedMessage, nulls abi.ChainEpoch) *store.FullTipSet { if miners == nil { for i := range tu.g.Miners { miners = append(miners, i) @@ -191,10 +244,10 @@ func (tu *syncTestUtil) mineOnBlock(blk *store.FullTipSet, to int, miners []int, var nts *store.FullTipSet var err error if msgs != nil { - nts, err = tu.g.NextTipSetFromMinersWithMessages(blk.TipSet(), maddrs, msgs) + nts, err = tu.g.NextTipSetFromMinersWithMessagesAndNulls(blk.TipSet(), maddrs, msgs, 0) require.NoError(tu.t, err) } else { - mt, err := tu.g.NextTipSetFromMiners(blk.TipSet(), maddrs) + mt, err := tu.g.NextTipSetFromMiners(blk.TipSet(), maddrs, nulls) require.NoError(tu.t, err) nts = mt.TipSet } @@ -209,11 +262,11 @@ func (tu *syncTestUtil) mineOnBlock(blk *store.FullTipSet, to int, miners []int, } func (tu *syncTestUtil) mineNewBlock(src int, miners []int) { - mts := tu.mineOnBlock(tu.g.CurTipset, src, miners, true, false, nil) + mts := tu.mineOnBlock(tu.g.CurTipset, src, miners, true, false, nil, 0) tu.g.CurTipset = mts } -func (tu *syncTestUtil) addSourceNode(gen int) { +func (tu *syncTestUtil) addSourceNode(us stmgr.UpgradeSchedule, gen int) { if tu.genesis != nil { tu.t.Fatal("source node already exists") } @@ -229,6 +282,7 @@ func (tu *syncTestUtil) addSourceNode(gen int) { node.Test(), node.Override(new(modules.Genesis), modules.LoadGenesis(genesis)), + node.Override(new(stmgr.UpgradeSchedule), us), ) require.NoError(tu.t, err) tu.t.Cleanup(func() { _ = stop(context.Background()) }) @@ -445,7 +499,7 @@ func TestSyncBadTimestamp(t *testing.T) { fmt.Println("BASE: ", base.Cids()) tu.printHeads() - a1 := tu.mineOnBlock(base, 0, nil, false, true, nil) + a1 := tu.mineOnBlock(base, 0, nil, false, true, nil, 0) tu.g.Timestamper = nil require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet())) @@ -454,7 +508,7 @@ func TestSyncBadTimestamp(t *testing.T) { fmt.Println("After mine bad block!") tu.printHeads() - a2 := tu.mineOnBlock(base, 0, nil, true, false, nil) + a2 := tu.mineOnBlock(base, 0, nil, true, false, nil, 0) tu.waitUntilSync(0, client) @@ -498,7 +552,7 @@ func TestSyncBadWinningPoSt(t *testing.T) { tu.g.SetWinningPoStProver(tu.g.Miners[1], &badWpp{}) // now ensure that new blocks are not accepted - tu.mineOnBlock(base, client, nil, false, true, nil) + tu.mineOnBlock(base, client, nil, false, true, nil, 0) } func (tu 
*syncTestUtil) loadChainToNode(to int) { @@ -543,16 +597,16 @@ func TestSyncFork(t *testing.T) { fmt.Println("Mining base: ", base.TipSet().Cids(), base.TipSet().Height()) // The two nodes fork at this point into 'a' and 'b' - a1 := tu.mineOnBlock(base, p1, []int{0}, true, false, nil) - a := tu.mineOnBlock(a1, p1, []int{0}, true, false, nil) - a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil) + a1 := tu.mineOnBlock(base, p1, []int{0}, true, false, nil, 0) + a := tu.mineOnBlock(a1, p1, []int{0}, true, false, nil, 0) + a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil, 0) require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet())) // chain B will now be heaviest - b := tu.mineOnBlock(base, p2, []int{1}, true, false, nil) - b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil) - b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil) - b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil) + b := tu.mineOnBlock(base, p2, []int{1}, true, false, nil, 0) + b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0) + b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0) + b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0) fmt.Println("A: ", a.Cids(), a.TipSet().Height()) fmt.Println("B: ", b.Cids(), b.TipSet().Height()) @@ -614,13 +668,13 @@ func TestDuplicateNonce(t *testing.T) { msgs[k] = []*types.SignedMessage{makeMsg(tu.g.Miners[k])} } - ts1 := tu.mineOnBlock(base, 0, []int{0, 1}, true, false, msgs) + ts1 := tu.mineOnBlock(base, 0, []int{0, 1}, true, false, msgs, 0) tu.waitUntilSyncTarget(0, ts1.TipSet()) // mine another tipset - ts2 := tu.mineOnBlock(ts1, 0, []int{0, 1}, true, false, make([][]*types.SignedMessage, 2)) + ts2 := tu.mineOnBlock(ts1, 0, []int{0, 1}, true, false, make([][]*types.SignedMessage, 2), 0) tu.waitUntilSyncTarget(0, ts2.TipSet()) var includedMsg cid.Cid @@ -671,11 +725,14 @@ func TestBadNonce(t *testing.T) { base := tu.g.CurTipset + // Get the banker from computed tipset state, not the parent. + st, _, err := tu.g.StateManager().TipSetState(context.TODO(), base.TipSet()) + require.NoError(t, err) + ba, err := tu.g.StateManager().LoadActorRaw(context.TODO(), tu.g.Banker(), st) + require.NoError(t, err) + // Produce a message from the banker with a bad nonce makeBadMsg := func() *types.SignedMessage { - - ba, err := tu.nds[0].StateGetActor(context.TODO(), tu.g.Banker(), base.TipSet().Key()) - require.NoError(t, err) msg := types.Message{ To: tu.g.Banker(), From: tu.g.Banker(), @@ -703,7 +760,115 @@ func TestBadNonce(t *testing.T) { msgs := make([][]*types.SignedMessage, 1) msgs[0] = []*types.SignedMessage{makeBadMsg()} - tu.mineOnBlock(base, 0, []int{0}, true, true, msgs) + tu.mineOnBlock(base, 0, []int{0}, true, true, msgs, 0) +} + +// This test introduces a block that has 2 messages, with the same sender, and same nonce. +// One of the messages uses the sender's robust address, the other uses the ID address. +// Such a block is invalid and should not sync. +func TestMismatchedNoncesRobustID(t *testing.T) { + v5h := abi.ChainEpoch(4) + tu := prepSyncTestWithV5Height(t, int(v5h+5), v5h) + + base := tu.g.CurTipset + + // Get the banker from computed tipset state, not the parent. 
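+	// Why this test and TestMatchedNoncesRobustID below resolve the banker's ID
+	// address via StateLookupID: from network Version13 on, checkBlockMessages in
+	// chain/sync.go and BlockMsgsForTipset in chain/store key nonce tracking by the
+	// sender resolved via st.LookupID, so a robust-address message and an ID-address
+	// message from the same account draw from a single nonce sequence. Two equal
+	// nonces make the block invalid (mineOnBlock is expected to fail here), while
+	// nonces N and N+1 in the next test keep it valid.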
+ st, _, err := tu.g.StateManager().TipSetState(context.TODO(), base.TipSet()) + require.NoError(t, err) + ba, err := tu.g.StateManager().LoadActorRaw(context.TODO(), tu.g.Banker(), st) + require.NoError(t, err) + + // Produce a message from the banker + makeMsg := func(id bool) *types.SignedMessage { + sender := tu.g.Banker() + if id { + s, err := tu.nds[0].StateLookupID(context.TODO(), sender, base.TipSet().Key()) + require.NoError(t, err) + sender = s + } + + msg := types.Message{ + To: tu.g.Banker(), + From: sender, + + Nonce: ba.Nonce, + + Value: types.NewInt(1), + + Method: 0, + + GasLimit: 100_000_000, + GasFeeCap: types.NewInt(0), + GasPremium: types.NewInt(0), + } + + sig, err := tu.g.Wallet().WalletSign(context.TODO(), tu.g.Banker(), msg.Cid().Bytes(), api.MsgMeta{}) + require.NoError(t, err) + + return &types.SignedMessage{ + Message: msg, + Signature: *sig, + } + } + + msgs := make([][]*types.SignedMessage, 1) + msgs[0] = []*types.SignedMessage{makeMsg(false), makeMsg(true)} + + tu.mineOnBlock(base, 0, []int{0}, true, true, msgs, 0) +} + +// This test introduces a block that has 2 messages, with the same sender, and nonces N and N+1 (so both can be included in a block) +// One of the messages uses the sender's robust address, the other uses the ID address. +// Such a block is valid and should sync. +func TestMatchedNoncesRobustID(t *testing.T) { + v5h := abi.ChainEpoch(4) + tu := prepSyncTestWithV5Height(t, int(v5h+5), v5h) + + base := tu.g.CurTipset + + // Get the banker from computed tipset state, not the parent. + st, _, err := tu.g.StateManager().TipSetState(context.TODO(), base.TipSet()) + require.NoError(t, err) + ba, err := tu.g.StateManager().LoadActorRaw(context.TODO(), tu.g.Banker(), st) + require.NoError(t, err) + + // Produce a message from the banker with specified nonce + makeMsg := func(n uint64, id bool) *types.SignedMessage { + sender := tu.g.Banker() + if id { + s, err := tu.nds[0].StateLookupID(context.TODO(), sender, base.TipSet().Key()) + require.NoError(t, err) + sender = s + } + + msg := types.Message{ + To: tu.g.Banker(), + From: sender, + + Nonce: n, + + Value: types.NewInt(1), + + Method: 0, + + GasLimit: 100_000_000, + GasFeeCap: types.NewInt(0), + GasPremium: types.NewInt(0), + } + + sig, err := tu.g.Wallet().WalletSign(context.TODO(), tu.g.Banker(), msg.Cid().Bytes(), api.MsgMeta{}) + require.NoError(t, err) + + return &types.SignedMessage{ + Message: msg, + Signature: *sig, + } + } + + msgs := make([][]*types.SignedMessage, 1) + msgs[0] = []*types.SignedMessage{makeMsg(ba.Nonce, false), makeMsg(ba.Nonce+1, true)} + + tu.mineOnBlock(base, 0, []int{0}, true, false, msgs, 0) } func BenchmarkSyncBasic(b *testing.B) { @@ -768,19 +933,19 @@ func TestSyncCheckpointHead(t *testing.T) { fmt.Println("Mining base: ", base.TipSet().Cids(), base.TipSet().Height()) // The two nodes fork at this point into 'a' and 'b' - a1 := tu.mineOnBlock(base, p1, []int{0}, true, false, nil) - a := tu.mineOnBlock(a1, p1, []int{0}, true, false, nil) - a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil) + a1 := tu.mineOnBlock(base, p1, []int{0}, true, false, nil, 0) + a := tu.mineOnBlock(a1, p1, []int{0}, true, false, nil, 0) + a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil, 0) tu.waitUntilSyncTarget(p1, a.TipSet()) tu.checkpointTs(p1, a.TipSet().Key()) require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet())) // chain B will now be heaviest - b := tu.mineOnBlock(base, p2, []int{1}, true, false, nil) - b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil) - b = 
tu.mineOnBlock(b, p2, []int{1}, true, false, nil) - b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil) + b := tu.mineOnBlock(base, p2, []int{1}, true, false, nil, 0) + b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0) + b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0) + b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0) fmt.Println("A: ", a.Cids(), a.TipSet().Height()) fmt.Println("B: ", b.Cids(), b.TipSet().Height()) @@ -815,19 +980,19 @@ func TestSyncCheckpointEarlierThanHead(t *testing.T) { fmt.Println("Mining base: ", base.TipSet().Cids(), base.TipSet().Height()) // The two nodes fork at this point into 'a' and 'b' - a1 := tu.mineOnBlock(base, p1, []int{0}, true, false, nil) - a := tu.mineOnBlock(a1, p1, []int{0}, true, false, nil) - a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil) + a1 := tu.mineOnBlock(base, p1, []int{0}, true, false, nil, 0) + a := tu.mineOnBlock(a1, p1, []int{0}, true, false, nil, 0) + a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil, 0) tu.waitUntilSyncTarget(p1, a.TipSet()) tu.checkpointTs(p1, a1.TipSet().Key()) require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet())) // chain B will now be heaviest - b := tu.mineOnBlock(base, p2, []int{1}, true, false, nil) - b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil) - b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil) - b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil) + b := tu.mineOnBlock(base, p2, []int{1}, true, false, nil, 0) + b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0) + b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0) + b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0) fmt.Println("A: ", a.Cids(), a.TipSet().Height()) fmt.Println("B: ", b.Cids(), b.TipSet().Height()) @@ -846,3 +1011,58 @@ func TestSyncCheckpointEarlierThanHead(t *testing.T) { p1Head = tu.getHead(p1) require.True(tu.t, p1Head.Equals(b.TipSet())) } + +func TestDrandNull(t *testing.T) { + H := 10 + v5h := abi.ChainEpoch(50) + ov5h := build.UpgradeHyperdriveHeight + build.UpgradeHyperdriveHeight = v5h + tu := prepSyncTestWithV5Height(t, H, v5h) + + entropy := []byte{0, 2, 3, 4} + // arbitrarily chosen + pers := crypto.DomainSeparationTag_WinningPoStChallengeSeed + + beforeNull := tu.g.CurTipset + afterNull := tu.mineOnBlock(beforeNull, 0, nil, false, false, nil, 2) + nullHeight := beforeNull.TipSet().Height() + 1 + if afterNull.TipSet().Height() == nullHeight { + t.Fatal("didn't inject nulls as expected") + } + + rand, err := tu.nds[0].ChainGetRandomnessFromBeacon(tu.ctx, afterNull.TipSet().Key(), pers, nullHeight, entropy) + require.NoError(t, err) + + // calculate the expected randomness based on the beacon BEFORE the null + expectedBE := beforeNull.Blocks[0].Header.BeaconEntries + expectedRand, err := store.DrawRandomness(expectedBE[len(expectedBE)-1].Data, pers, nullHeight, entropy) + require.NoError(t, err) + + require.Equal(t, []byte(rand), expectedRand) + + // zoom zoom to past the v5 upgrade by injecting many many nulls + postUpgrade := tu.mineOnBlock(afterNull, 0, nil, false, false, nil, v5h) + nv, err := tu.nds[0].StateNetworkVersion(tu.ctx, types.EmptyTSK) + require.NoError(t, err) + if nv != network.Version13 { + t.Fatal("expect to be v13 by now") + } + + afterNull = tu.mineOnBlock(postUpgrade, 0, nil, false, false, nil, 2) + nullHeight = postUpgrade.TipSet().Height() + 1 + if afterNull.TipSet().Height() == nullHeight { + t.Fatal("didn't inject nulls as expected") + } + + rand, err = tu.nds[0].ChainGetRandomnessFromBeacon(tu.ctx, afterNull.TipSet().Key(), pers, 
nullHeight, entropy) + require.NoError(t, err) + + // calculate the expected randomness based on the beacon AFTER the null + expectedBE = afterNull.Blocks[0].Header.BeaconEntries + expectedRand, err = store.DrawRandomness(expectedBE[len(expectedBE)-1].Data, pers, nullHeight, entropy) + require.NoError(t, err) + + require.Equal(t, []byte(rand), expectedRand) + build.UpgradeHyperdriveHeight = ov5h + +} diff --git a/chain/types/state.go b/chain/types/state.go index b561aab71..c8f8f1cd9 100644 --- a/chain/types/state.go +++ b/chain/types/state.go @@ -13,8 +13,10 @@ const ( StateTreeVersion1 // StateTreeVersion2 corresponds to actors v3. StateTreeVersion2 - // StateTreeVersion3 corresponds to actors >= v4. + // StateTreeVersion3 corresponds to actors v4. StateTreeVersion3 + // StateTreeVersion4 corresponds to actors v5. + StateTreeVersion4 ) type StateRoot struct { diff --git a/chain/vm/gas.go b/chain/vm/gas.go index eef431aef..c860ce9a0 100644 --- a/chain/vm/gas.go +++ b/chain/vm/gas.go @@ -9,8 +9,8 @@ import ( addr "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/crypto" - vmr2 "github.com/filecoin-project/specs-actors/v2/actors/runtime" - proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + vmr5 "github.com/filecoin-project/specs-actors/v5/actors/runtime" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" "github.com/ipfs/go-cid" ) @@ -74,8 +74,9 @@ type Pricelist interface { OnVerifySignature(sigType crypto.SigType, planTextSize int) (GasCharge, error) OnHashing(dataSize int) GasCharge OnComputeUnsealedSectorCid(proofType abi.RegisteredSealProof, pieces []abi.PieceInfo) GasCharge - OnVerifySeal(info proof2.SealVerifyInfo) GasCharge - OnVerifyPost(info proof2.WindowPoStVerifyInfo) GasCharge + OnVerifySeal(info proof5.SealVerifyInfo) GasCharge + OnVerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) GasCharge + OnVerifyPost(info proof5.WindowPoStVerifyInfo) GasCharge OnVerifyConsensusFault() GasCharge } @@ -111,6 +112,7 @@ var prices = map[abi.ChainEpoch]Pricelist{ hashingBase: 31355, computeUnsealedSectorCidBase: 98647, verifySealBase: 2000, // TODO gas , it VerifySeal syscall is not used + verifyAggregateSealBase: 0, verifyPostLookup: map[abi.RegisteredPoStProof]scalingCost{ abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: { flat: 123861062, @@ -158,7 +160,35 @@ var prices = map[abi.ChainEpoch]Pricelist{ hashingBase: 31355, computeUnsealedSectorCidBase: 98647, - verifySealBase: 2000, // TODO gas , it VerifySeal syscall is not used + verifySealBase: 2000, // TODO gas, it VerifySeal syscall is not used + + verifyAggregateSealPer: map[abi.RegisteredSealProof]int64{ + abi.RegisteredSealProof_StackedDrg32GiBV1_1: 449900, + abi.RegisteredSealProof_StackedDrg64GiBV1_1: 359272, + }, + verifyAggregateSealSteps: map[abi.RegisteredSealProof]stepCost{ + abi.RegisteredSealProof_StackedDrg32GiBV1_1: { + {4, 103994170}, + {7, 112356810}, + {13, 122912610}, + {26, 137559930}, + {52, 162039100}, + {103, 210960780}, + {205, 318351180}, + {410, 528274980}, + }, + abi.RegisteredSealProof_StackedDrg64GiBV1_1: { + {4, 102581240}, + {7, 110803030}, + {13, 120803700}, + {26, 134642130}, + {52, 157357890}, + {103, 203017690}, + {205, 304253590}, + {410, 509880640}, + }, + }, + verifyPostLookup: map[abi.RegisteredPoStProof]scalingCost{ abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: { flat: 117680921, @@ -198,7 +228,7 @@ func PricelistByEpoch(epoch abi.ChainEpoch) 
Pricelist { } type pricedSyscalls struct { - under vmr2.Syscalls + under vmr5.Syscalls pl Pricelist chargeGas func(GasCharge) } @@ -232,7 +262,7 @@ func (ps pricedSyscalls) ComputeUnsealedSectorCID(reg abi.RegisteredSealProof, p } // Verifies a sector seal proof. -func (ps pricedSyscalls) VerifySeal(vi proof2.SealVerifyInfo) error { +func (ps pricedSyscalls) VerifySeal(vi proof5.SealVerifyInfo) error { ps.chargeGas(ps.pl.OnVerifySeal(vi)) defer ps.chargeGas(gasOnActorExec) @@ -240,7 +270,7 @@ func (ps pricedSyscalls) VerifySeal(vi proof2.SealVerifyInfo) error { } // Verifies a proof of spacetime. -func (ps pricedSyscalls) VerifyPoSt(vi proof2.WindowPoStVerifyInfo) error { +func (ps pricedSyscalls) VerifyPoSt(vi proof5.WindowPoStVerifyInfo) error { ps.chargeGas(ps.pl.OnVerifyPost(vi)) defer ps.chargeGas(gasOnActorExec) @@ -257,14 +287,14 @@ func (ps pricedSyscalls) VerifyPoSt(vi proof2.WindowPoStVerifyInfo) error { // the "parent grinding fault", in which case it must be the sibling of h1 (same parent tipset) and one of the // blocks in the parent of h2 (i.e. h2's grandparent). // Returns nil and an error if the headers don't prove a fault. -func (ps pricedSyscalls) VerifyConsensusFault(h1 []byte, h2 []byte, extra []byte) (*vmr2.ConsensusFault, error) { +func (ps pricedSyscalls) VerifyConsensusFault(h1 []byte, h2 []byte, extra []byte) (*vmr5.ConsensusFault, error) { ps.chargeGas(ps.pl.OnVerifyConsensusFault()) defer ps.chargeGas(gasOnActorExec) return ps.under.VerifyConsensusFault(h1, h2, extra) } -func (ps pricedSyscalls) BatchVerifySeals(inp map[address.Address][]proof2.SealVerifyInfo) (map[address.Address][]bool, error) { +func (ps pricedSyscalls) BatchVerifySeals(inp map[address.Address][]proof5.SealVerifyInfo) (map[address.Address][]bool, error) { count := int64(0) for _, svis := range inp { count += int64(len(svis)) @@ -277,3 +307,10 @@ func (ps pricedSyscalls) BatchVerifySeals(inp map[address.Address][]proof2.SealV return ps.under.BatchVerifySeals(inp) } + +func (ps pricedSyscalls) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) error { + ps.chargeGas(ps.pl.OnVerifyAggregateSeals(aggregate)) + defer ps.chargeGas(gasOnActorExec) + + return ps.under.VerifyAggregateSeals(aggregate) +} diff --git a/chain/vm/gas_v0.go b/chain/vm/gas_v0.go index 7c864b7f9..13c5fdd86 100644 --- a/chain/vm/gas_v0.go +++ b/chain/vm/gas_v0.go @@ -4,6 +4,7 @@ import ( "fmt" proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" @@ -17,6 +18,28 @@ type scalingCost struct { scale int64 } +type stepCost []step + +type step struct { + start int64 + cost int64 +} + +func (sc stepCost) Lookup(x int64) int64 { + i := 0 + for ; i < len(sc); i++ { + if sc[i].start > x { + break + } + } + i-- // look at previous item + if i < 0 { + return 0 + } + + return sc[i].cost +} + type pricelistV0 struct { computeGasMulti int64 storageGasMulti int64 @@ -91,9 +114,13 @@ type pricelistV0 struct { computeUnsealedSectorCidBase int64 verifySealBase int64 - verifyPostLookup map[abi.RegisteredPoStProof]scalingCost - verifyPostDiscount bool - verifyConsensusFault int64 + verifyAggregateSealBase int64 + verifyAggregateSealPer map[abi.RegisteredSealProof]int64 + verifyAggregateSealSteps map[abi.RegisteredSealProof]stepCost + + verifyPostLookup map[abi.RegisteredPoStProof]scalingCost + verifyPostDiscount bool + 
verifyConsensusFault int64 } var _ Pricelist = (*pricelistV0)(nil) @@ -185,6 +212,22 @@ func (pl *pricelistV0) OnVerifySeal(info proof2.SealVerifyInfo) GasCharge { return newGasCharge("OnVerifySeal", pl.verifySealBase, 0) } +// OnVerifyAggregateSeals +func (pl *pricelistV0) OnVerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) GasCharge { + proofType := aggregate.SealProof + perProof, ok := pl.verifyAggregateSealPer[proofType] + if !ok { + perProof = pl.verifyAggregateSealPer[abi.RegisteredSealProof_StackedDrg32GiBV1_1] + } + + step, ok := pl.verifyAggregateSealSteps[proofType] + if !ok { + step = pl.verifyAggregateSealSteps[abi.RegisteredSealProof_StackedDrg32GiBV1_1] + } + num := int64(len(aggregate.Infos)) + return newGasCharge("OnVerifyAggregateSeals", perProof*num+step.Lookup(num), 0) +} + // OnVerifyPost func (pl *pricelistV0) OnVerifyPost(info proof2.WindowPoStVerifyInfo) GasCharge { sectorSize := "unknown" diff --git a/chain/vm/gas_v0_test.go b/chain/vm/gas_v0_test.go new file mode 100644 index 000000000..447e4f70c --- /dev/null +++ b/chain/vm/gas_v0_test.go @@ -0,0 +1,32 @@ +package vm + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestStepGasCost(t *testing.T) { + s := stepCost{ + {4, 103994170}, + {7, 112356810}, + {13, 122912610}, + {26, 137559930}, + {52, 162039100}, + {103, 210960780}, + {205, 318351180}, + {410, 528274980}, + } + + assert.EqualValues(t, 0, s.Lookup(0)) + assert.EqualValues(t, 0, s.Lookup(3)) + assert.EqualValues(t, 103994170, s.Lookup(4)) + assert.EqualValues(t, 103994170, s.Lookup(6)) + assert.EqualValues(t, 112356810, s.Lookup(7)) + assert.EqualValues(t, 210960780, s.Lookup(103)) + assert.EqualValues(t, 210960780, s.Lookup(204)) + assert.EqualValues(t, 318351180, s.Lookup(205)) + assert.EqualValues(t, 318351180, s.Lookup(409)) + assert.EqualValues(t, 528274980, s.Lookup(410)) + assert.EqualValues(t, 528274980, s.Lookup(10000000000)) +} diff --git a/chain/vm/invoker.go b/chain/vm/invoker.go index 8e0e6edd6..e4b154031 100644 --- a/chain/vm/invoker.go +++ b/chain/vm/invoker.go @@ -16,9 +16,10 @@ import ( exported0 "github.com/filecoin-project/specs-actors/actors/builtin/exported" exported2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/exported" - vmr "github.com/filecoin-project/specs-actors/v2/actors/runtime" exported3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/exported" exported4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/exported" + exported5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/exported" + vmr "github.com/filecoin-project/specs-actors/v5/actors/runtime" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/exitcode" @@ -66,6 +67,7 @@ func NewActorRegistry() *ActorRegistry { inv.Register(ActorsVersionPredicate(actors.Version2), exported2.BuiltinActors()...) inv.Register(ActorsVersionPredicate(actors.Version3), exported3.BuiltinActors()...) inv.Register(ActorsVersionPredicate(actors.Version4), exported4.BuiltinActors()...) + inv.Register(ActorsVersionPredicate(actors.Version5), exported5.BuiltinActors()...) 
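+	// Each exported actor bundle is registered behind ActorsVersionPredicate,
+	// so a message executing at a given epoch dispatches into the specs-actors
+	// release matching that epoch's actors version; v5, added above, is the
+	// bundle introduced for the Hyperdrive (network v13) upgrade.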
return inv } @@ -153,7 +155,7 @@ func (*ActorRegistry) transform(instance invokee) (nativeCode, error) { "vmr.Runtime, ") } if !runtimeType.Implements(t.In(0)) { - return nil, newErr("first arguemnt should be vmr.Runtime") + return nil, newErr("first argument should be vmr.Runtime") } if t.In(1).Kind() != reflect.Ptr { return nil, newErr("second argument should be of kind reflect.Ptr") diff --git a/chain/vm/mkactor.go b/chain/vm/mkactor.go index 11de7362b..669c1450f 100644 --- a/chain/vm/mkactor.go +++ b/chain/vm/mkactor.go @@ -18,6 +18,7 @@ import ( builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" "github.com/filecoin-project/go-address" "github.com/filecoin-project/lotus/chain/actors/aerrors" @@ -105,6 +106,8 @@ func newAccountActor(ver actors.Version) *types.Actor { code = builtin3.AccountActorCodeID case actors.Version4: code = builtin4.AccountActorCodeID + case actors.Version5: + code = builtin5.AccountActorCodeID default: panic("unsupported actors version") } diff --git a/chain/vm/runtime.go b/chain/vm/runtime.go index cdb1720de..7c40fed62 100644 --- a/chain/vm/runtime.go +++ b/chain/vm/runtime.go @@ -16,7 +16,7 @@ import ( "github.com/filecoin-project/go-state-types/network" rtt "github.com/filecoin-project/go-state-types/rt" rt0 "github.com/filecoin-project/specs-actors/actors/runtime" - rt2 "github.com/filecoin-project/specs-actors/v2/actors/runtime" + rt5 "github.com/filecoin-project/specs-actors/v5/actors/runtime" "github.com/ipfs/go-cid" ipldcbor "github.com/ipfs/go-ipld-cbor" "go.opencensus.io/trace" @@ -54,8 +54,8 @@ func (m *Message) ValueReceived() abi.TokenAmount { var EnableGasTracing = false type Runtime struct { - rt2.Message - rt2.Syscalls + rt5.Message + rt5.Syscalls ctx context.Context @@ -81,6 +81,10 @@ type Runtime struct { lastGasCharge *types.GasTrace } +func (rt *Runtime) BaseFee() abi.TokenAmount { + return rt.vm.baseFee +} + func (rt *Runtime) NetworkVersion() network.Version { return rt.vm.GetNtwkVersion(rt.ctx, rt.CurrEpoch()) } @@ -136,7 +140,7 @@ func (rt *Runtime) StorePut(x cbor.Marshaler) cid.Cid { } var _ rt0.Runtime = (*Runtime)(nil) -var _ rt2.Runtime = (*Runtime)(nil) +var _ rt5.Runtime = (*Runtime)(nil) func (rt *Runtime) shimCall(f func() interface{}) (rval []byte, aerr aerrors.ActorError) { defer func() { @@ -208,17 +212,31 @@ func (rt *Runtime) GetActorCodeCID(addr address.Address) (ret cid.Cid, ok bool) } func (rt *Runtime) GetRandomnessFromTickets(personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) abi.Randomness { - res, err := rt.vm.rand.GetChainRandomness(rt.ctx, personalization, randEpoch, entropy) + var err error + var res []byte + if randEpoch > build.UpgradeHyperdriveHeight { + res, err = rt.vm.rand.GetChainRandomnessLookingForward(rt.ctx, personalization, randEpoch, entropy) + } else { + res, err = rt.vm.rand.GetChainRandomnessLookingBack(rt.ctx, personalization, randEpoch, entropy) + } + if err != nil { - panic(aerrors.Fatalf("could not get randomness: %s", err)) + panic(aerrors.Fatalf("could not get ticket randomness: %s", err)) } return res } func (rt *Runtime) GetRandomnessFromBeacon(personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) abi.Randomness { - res, err := rt.vm.rand.GetBeaconRandomness(rt.ctx, personalization, randEpoch, 
entropy) + var err error + var res []byte + if rt.vm.GetNtwkVersion(rt.ctx, randEpoch) >= network.Version13 { + res, err = rt.vm.rand.GetBeaconRandomnessLookingForward(rt.ctx, personalization, randEpoch, entropy) + } else { + res, err = rt.vm.rand.GetBeaconRandomnessLookingBack(rt.ctx, personalization, randEpoch, entropy) + } + if err != nil { - panic(aerrors.Fatalf("could not get randomness: %s", err)) + panic(aerrors.Fatalf("could not get beacon randomness: %s", err)) } return res } diff --git a/chain/vm/syscalls.go b/chain/vm/syscalls.go index 0bcfe10a7..bb93fce8d 100644 --- a/chain/vm/syscalls.go +++ b/chain/vm/syscalls.go @@ -26,8 +26,8 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/lib/sigs" - runtime2 "github.com/filecoin-project/specs-actors/v2/actors/runtime" - proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + runtime5 "github.com/filecoin-project/specs-actors/v5/actors/runtime" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" ) func init() { @@ -36,10 +36,10 @@ func init() { // Actual type is defined in chain/types/vmcontext.go because the VMContext interface is there -type SyscallBuilder func(ctx context.Context, rt *Runtime) runtime2.Syscalls +type SyscallBuilder func(ctx context.Context, rt *Runtime) runtime5.Syscalls func Syscalls(verifier ffiwrapper.Verifier) SyscallBuilder { - return func(ctx context.Context, rt *Runtime) runtime2.Syscalls { + return func(ctx context.Context, rt *Runtime) runtime5.Syscalls { return &syscallShim{ ctx: ctx, @@ -90,7 +90,7 @@ func (ss *syscallShim) HashBlake2b(data []byte) [32]byte { // Checks validity of the submitted consensus fault with the two block headers needed to prove the fault // and an optional extra one to check common ancestry (as needed). // Note that the blocks are ordered: the method requires a.Epoch() <= b.Epoch(). -func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime2.ConsensusFault, error) { +func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime5.ConsensusFault, error) { // Note that block syntax is not validated. Any validly signed block will be accepted pursuant to the below conditions. // Whether or not it could ever have been accepted in a chain is not checked/does not matter here. // for that reason when checking block parent relationships, rather than instantiating a Tipset to do so @@ -133,14 +133,14 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime2.Conse } // (2) check for the consensus faults themselves - var consensusFault *runtime2.ConsensusFault + var consensusFault *runtime5.ConsensusFault // (a) double-fork mining fault if blockA.Height == blockB.Height { - consensusFault = &runtime2.ConsensusFault{ + consensusFault = &runtime5.ConsensusFault{ Target: blockA.Miner, Epoch: blockB.Height, - Type: runtime2.ConsensusFaultDoubleForkMining, + Type: runtime5.ConsensusFaultDoubleForkMining, } } @@ -148,10 +148,10 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime2.Conse // strictly speaking no need to compare heights based on double fork mining check above, // but at same height this would be a different fault. 
if types.CidArrsEqual(blockA.Parents, blockB.Parents) && blockA.Height != blockB.Height { - consensusFault = &runtime2.ConsensusFault{ + consensusFault = &runtime5.ConsensusFault{ Target: blockA.Miner, Epoch: blockB.Height, - Type: runtime2.ConsensusFaultTimeOffsetMining, + Type: runtime5.ConsensusFaultTimeOffsetMining, } } @@ -171,10 +171,10 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime2.Conse if types.CidArrsEqual(blockA.Parents, blockC.Parents) && blockA.Height == blockC.Height && types.CidArrsContains(blockB.Parents, blockC.Cid()) && !types.CidArrsContains(blockB.Parents, blockA.Cid()) { - consensusFault = &runtime2.ConsensusFault{ + consensusFault = &runtime5.ConsensusFault{ Target: blockA.Miner, Epoch: blockB.Height, - Type: runtime2.ConsensusFaultParentGrinding, + Type: runtime5.ConsensusFaultParentGrinding, } } } @@ -243,7 +243,7 @@ func (ss *syscallShim) workerKeyAtLookback(height abi.ChainEpoch) (address.Addre return ResolveToKeyAddr(ss.cstate, ss.cst, info.Worker) } -func (ss *syscallShim) VerifyPoSt(proof proof2.WindowPoStVerifyInfo) error { +func (ss *syscallShim) VerifyPoSt(proof proof5.WindowPoStVerifyInfo) error { ok, err := ss.verifier.VerifyWindowPoSt(context.TODO(), proof) if err != nil { return err @@ -254,7 +254,7 @@ func (ss *syscallShim) VerifyPoSt(proof proof2.WindowPoStVerifyInfo) error { return nil } -func (ss *syscallShim) VerifySeal(info proof2.SealVerifyInfo) error { +func (ss *syscallShim) VerifySeal(info proof5.SealVerifyInfo) error { //_, span := trace.StartSpan(ctx, "ValidatePoRep") //defer span.End() @@ -281,6 +281,18 @@ func (ss *syscallShim) VerifySeal(info proof2.SealVerifyInfo) error { return nil } +func (ss *syscallShim) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) error { + ok, err := ss.verifier.VerifyAggregateSeals(aggregate) + if err != nil { + return xerrors.Errorf("failed to verify aggregated PoRep: %w", err) + } + if !ok { + return fmt.Errorf("invalid aggregate proof") + } + + return nil +} + func (ss *syscallShim) VerifySignature(sig crypto.Signature, addr address.Address, input []byte) error { // TODO: in genesis setup, we are currently faking signatures @@ -294,7 +306,7 @@ func (ss *syscallShim) VerifySignature(sig crypto.Signature, addr address.Addres var BatchSealVerifyParallelism = goruntime.NumCPU() -func (ss *syscallShim) BatchVerifySeals(inp map[address.Address][]proof2.SealVerifyInfo) (map[address.Address][]bool, error) { +func (ss *syscallShim) BatchVerifySeals(inp map[address.Address][]proof5.SealVerifyInfo) (map[address.Address][]bool, error) { out := make(map[address.Address][]bool) sema := make(chan struct{}, BatchSealVerifyParallelism) @@ -306,7 +318,7 @@ func (ss *syscallShim) BatchVerifySeals(inp map[address.Address][]proof2.SealVer for i, s := range seals { wg.Add(1) - go func(ma address.Address, ix int, svi proof2.SealVerifyInfo, res []bool) { + go func(ma address.Address, ix int, svi proof5.SealVerifyInfo, res []bool) { defer wg.Done() sema <- struct{}{} diff --git a/chain/vm/vm.go b/chain/vm/vm.go index f488c7864..c5bfffc7f 100644 --- a/chain/vm/vm.go +++ b/chain/vm/vm.go @@ -255,8 +255,10 @@ func NewVM(ctx context.Context, opts *VMOpts) (*VM, error) { } type Rand interface { - GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) - GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) + GetChainRandomnessLookingBack(ctx 
context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) + GetChainRandomnessLookingForward(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) + GetBeaconRandomnessLookingBack(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) + GetBeaconRandomnessLookingForward(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) } type ApplyRet struct { @@ -566,7 +568,7 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, gasUsed = 0 } - burn, err := vm.ShouldBurn(st, msg, errcode) + burn, err := vm.ShouldBurn(ctx, st, msg, errcode) if err != nil { return nil, xerrors.Errorf("deciding whether should burn failed: %w", err) } @@ -609,26 +611,31 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, }, nil } -func (vm *VM) ShouldBurn(st *state.StateTree, msg *types.Message, errcode exitcode.ExitCode) (bool, error) { - // Check to see if we should burn funds. We avoid burning on successful - // window post. This won't catch _indirect_ window post calls, but this - // is the best we can get for now. - if vm.blockHeight > build.UpgradeClausHeight && errcode == exitcode.Ok && msg.Method == miner.Methods.SubmitWindowedPoSt { - // Ok, we've checked the _method_, but we still need to check - // the target actor. It would be nice if we could just look at - // the trace, but I'm not sure if that's safe? - if toActor, err := st.GetActor(msg.To); err != nil { - // If the actor wasn't found, we probably deleted it or something. Move on. - if !xerrors.Is(err, types.ErrActorNotFound) { - // Otherwise, this should never fail and something is very wrong. - return false, xerrors.Errorf("failed to lookup target actor: %w", err) +func (vm *VM) ShouldBurn(ctx context.Context, st *state.StateTree, msg *types.Message, errcode exitcode.ExitCode) (bool, error) { + if vm.ntwkVersion(ctx, vm.blockHeight) <= network.Version12 { + // Check to see if we should burn funds. We avoid burning on successful + // window post. This won't catch _indirect_ window post calls, but this + // is the best we can get for now. + if vm.blockHeight > build.UpgradeClausHeight && errcode == exitcode.Ok && msg.Method == miner.Methods.SubmitWindowedPoSt { + // Ok, we've checked the _method_, but we still need to check + // the target actor. It would be nice if we could just look at + // the trace, but I'm not sure if that's safe? + if toActor, err := st.GetActor(msg.To); err != nil { + // If the actor wasn't found, we probably deleted it or something. Move on. + if !xerrors.Is(err, types.ErrActorNotFound) { + // Otherwise, this should never fail and something is very wrong. + return false, xerrors.Errorf("failed to lookup target actor: %w", err) + } + } else if builtin.IsStorageMinerActor(toActor.Code) { + // Ok, this is a storage miner and we've processed a window post. Remove the burn. + return false, nil } - } else if builtin.IsStorageMinerActor(toActor.Code) { - // Ok, this is a storage miner and we've processed a window post. Remove the burn. 
- return false, nil } + + return true, nil } + // Any "don't burn" rules from Network v13 onwards go here, for now we always return true return true, nil } diff --git a/cli/cmd.go b/cli/cmd.go index acc09465b..630aae1bc 100644 --- a/cli/cmd.go +++ b/cli/cmd.go @@ -71,7 +71,7 @@ var Commands = []*cli.Command{ WithCategory("basic", walletCmd), WithCategory("basic", clientCmd), WithCategory("basic", multisigCmd), - WithCategory("basic", verifRegCmd), + WithCategory("basic", filplusCmd), WithCategory("basic", paychCmd), WithCategory("developer", AuthCmd), WithCategory("developer", MpoolCmd), diff --git a/cli/verifreg.go b/cli/filplus.go similarity index 82% rename from cli/verifreg.go rename to cli/filplus.go index 70d03df26..53dc5092b 100644 --- a/cli/verifreg.go +++ b/cli/filplus.go @@ -7,7 +7,6 @@ import ( verifreg4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/verifreg" "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/lotus/api/v0api" "github.com/urfave/cli/v2" "golang.org/x/xerrors" @@ -15,6 +14,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors" @@ -24,26 +24,26 @@ import ( cbor "github.com/ipfs/go-ipld-cbor" ) -var verifRegCmd = &cli.Command{ - Name: "verifreg", - Usage: "Interact with the verified registry actor", +var filplusCmd = &cli.Command{ + Name: "filplus", + Usage: "Interact with the verified registry actor used by Filplus", Flags: []cli.Flag{}, Subcommands: []*cli.Command{ - verifRegVerifyClientCmd, - verifRegListVerifiersCmd, - verifRegListClientsCmd, - verifRegCheckClientCmd, - verifRegCheckVerifierCmd, + filplusVerifyClientCmd, + filplusListNotariesCmd, + filplusListClientsCmd, + filplusCheckClientCmd, + filplusCheckNotaryCmd, }, } -var verifRegVerifyClientCmd = &cli.Command{ - Name: "verify-client", +var filplusVerifyClientCmd = &cli.Command{ + Name: "grant-datacap", Usage: "give allowance to the specified verified client address", Flags: []cli.Flag{ &cli.StringFlag{ Name: "from", - Usage: "specify your verifier address to send the message from", + Usage: "specify your notary address to send the message from", Required: true, }, }, @@ -79,17 +79,17 @@ var verifRegVerifyClientCmd = &cli.Command{ defer closer() ctx := ReqContext(cctx) - found, dcap, err := checkVerifier(ctx, api, fromk) + found, dcap, err := checkNotary(ctx, api, fromk) if err != nil { return err } if !found { - return xerrors.New("sender address must be a verifier") + return xerrors.New("sender address must be a notary") } if dcap.Cmp(allowance.Int) < 0 { - return xerrors.Errorf("cannot allot more allowance than verifier data cap: %s < %s", dcap, allowance) + return xerrors.Errorf("cannot allot more allowance than notary data cap: %s < %s", dcap, allowance) } // TODO: This should be abstracted over actor versions @@ -125,9 +125,9 @@ var verifRegVerifyClientCmd = &cli.Command{ }, } -var verifRegListVerifiersCmd = &cli.Command{ - Name: "list-verifiers", - Usage: "list all verifiers", +var filplusListNotariesCmd = &cli.Command{ + Name: "list-notaries", + Usage: "list all notaries", Action: func(cctx *cli.Context) error { api, closer, err := GetFullNodeAPI(cctx) if err != nil { @@ -155,7 +155,7 @@ var verifRegListVerifiersCmd = &cli.Command{ }, } -var verifRegListClientsCmd = &cli.Command{ +var filplusListClientsCmd = &cli.Command{ 
Name: "list-clients", Usage: "list all verified clients", Action: func(cctx *cli.Context) error { @@ -185,8 +185,8 @@ var verifRegListClientsCmd = &cli.Command{ }, } -var verifRegCheckClientCmd = &cli.Command{ - Name: "check-client", +var filplusCheckClientCmd = &cli.Command{ + Name: "check-client-datacap", Usage: "check verified client remaining bytes", Action: func(cctx *cli.Context) error { if !cctx.Args().Present() { @@ -219,12 +219,12 @@ var verifRegCheckClientCmd = &cli.Command{ }, } -var verifRegCheckVerifierCmd = &cli.Command{ - Name: "check-verifier", - Usage: "check verifiers remaining bytes", +var filplusCheckNotaryCmd = &cli.Command{ + Name: "check-notaries-datacap", + Usage: "check notaries remaining bytes", Action: func(cctx *cli.Context) error { if !cctx.Args().Present() { - return fmt.Errorf("must specify verifier address to check") + return fmt.Errorf("must specify notary address to check") } vaddr, err := address.NewFromString(cctx.Args().First()) @@ -239,7 +239,7 @@ var verifRegCheckVerifierCmd = &cli.Command{ defer closer() ctx := ReqContext(cctx) - found, dcap, err := checkVerifier(ctx, api, vaddr) + found, dcap, err := checkNotary(ctx, api, vaddr) if err != nil { return err } @@ -253,7 +253,7 @@ var verifRegCheckVerifierCmd = &cli.Command{ }, } -func checkVerifier(ctx context.Context, api v0api.FullNode, vaddr address.Address) (bool, abi.StoragePower, error) { +func checkNotary(ctx context.Context, api v0api.FullNode, vaddr address.Address) (bool, abi.StoragePower, error) { vid, err := api.StateLookupID(ctx, vaddr, types.EmptyTSK) if err != nil { return false, big.Zero(), err diff --git a/cli/params.go b/cli/params.go index 8419507b8..1aa6555c5 100644 --- a/cli/params.go +++ b/cli/params.go @@ -23,7 +23,7 @@ var FetchParamCmd = &cli.Command{ } sectorSize := uint64(sectorSizeInt) - err = paramfetch.GetParams(ReqContext(cctx), build.ParametersJSON(), sectorSize) + err = paramfetch.GetParams(ReqContext(cctx), build.ParametersJSON(), build.SrsJSON(), sectorSize) if err != nil { return xerrors.Errorf("fetching proof parameters: %w", err) } diff --git a/cmd/lotus-bench/caching_verifier.go b/cmd/lotus-bench/caching_verifier.go index 5b434c762..f4cc0f837 100644 --- a/cmd/lotus-bench/caching_verifier.go +++ b/cmd/lotus-bench/caching_verifier.go @@ -8,6 +8,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" "github.com/ipfs/go-datastore" "github.com/minio/blake2b-simd" cbg "github.com/whyrusleeping/cbor-gen" @@ -96,4 +97,8 @@ func (cv *cachingVerifier) GenerateWinningPoStSectorChallenge(ctx context.Contex return cv.backend.GenerateWinningPoStSectorChallenge(ctx, proofType, a, rnd, u) } +func (cv cachingVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) { + return cv.backend.VerifyAggregateSeals(aggregate) +} + var _ ffiwrapper.Verifier = (*cachingVerifier)(nil) diff --git a/cmd/lotus-bench/main.go b/cmd/lotus-bench/main.go index 81aa09a75..0b8ec6fe3 100644 --- a/cmd/lotus-bench/main.go +++ b/cmd/lotus-bench/main.go @@ -243,7 +243,7 @@ var sealBenchCmd = &cli.Command{ // Only fetch parameters if actually needed skipc2 := c.Bool("skip-commit2") if !skipc2 { - if err := paramfetch.GetParams(lcli.ReqContext(c), build.ParametersJSON(), uint64(sectorSize)); err != nil { + if err := 
paramfetch.GetParams(lcli.ReqContext(c), build.ParametersJSON(), build.SrsJSON(), uint64(sectorSize)); err != nil { return xerrors.Errorf("getting params: %w", err) } } @@ -738,7 +738,7 @@ var proveCmd = &cli.Command{ return xerrors.Errorf("unmarshalling input file: %w", err) } - if err := paramfetch.GetParams(lcli.ReqContext(c), build.ParametersJSON(), c2in.SectorSize); err != nil { + if err := paramfetch.GetParams(lcli.ReqContext(c), build.ParametersJSON(), build.SrsJSON(), c2in.SectorSize); err != nil { return xerrors.Errorf("getting params: %w", err) } diff --git a/cmd/lotus-seal-worker/main.go b/cmd/lotus-seal-worker/main.go index a206d3371..df00928a5 100644 --- a/cmd/lotus-seal-worker/main.go +++ b/cmd/lotus-seal-worker/main.go @@ -228,7 +228,7 @@ var runCmd = &cli.Command{ } if cctx.Bool("commit") { - if err := paramfetch.GetParams(ctx, build.ParametersJSON(), uint64(ssize)); err != nil { + if err := paramfetch.GetParams(ctx, build.ParametersJSON(), build.SrsJSON(), uint64(ssize)); err != nil { return xerrors.Errorf("get params: %w", err) } } diff --git a/cmd/lotus-shed/cron-count.go b/cmd/lotus-shed/cron-count.go index 79fd6ec42..622f38791 100644 --- a/cmd/lotus-shed/cron-count.go +++ b/cmd/lotus-shed/cron-count.go @@ -60,7 +60,7 @@ func findDeadlineCrons(c *cli.Context) (map[address.Address]struct{}, error) { // All miners have active cron before v4. // v4 upgrade epoch is last epoch running v3 epoch and api.StateReadState reads // parent state, so v4 state isn't read until upgrade epoch + 2 - if ts.Height() <= build.UpgradeActorsV4Height+1 { + if ts.Height() <= build.UpgradeTurboHeight+1 { activeMiners[mAddr] = struct{}{} continue } diff --git a/cmd/lotus-shed/params.go b/cmd/lotus-shed/params.go index 3f7e7b6fb..e45d9489c 100644 --- a/cmd/lotus-shed/params.go +++ b/cmd/lotus-shed/params.go @@ -25,7 +25,7 @@ var fetchParamCmd = &cli.Command{ return err } sectorSize := uint64(sectorSizeInt) - err = paramfetch.GetParams(lcli.ReqContext(cctx), build.ParametersJSON(), sectorSize) + err = paramfetch.GetParams(lcli.ReqContext(cctx), build.ParametersJSON(), build.SrsJSON(), sectorSize) if err != nil { return xerrors.Errorf("fetching proof parameters: %w", err) } diff --git a/cmd/lotus-storage-miner/info.go b/cmd/lotus-storage-miner/info.go index 7650de035..0fe14f1ff 100644 --- a/cmd/lotus-storage-miner/info.go +++ b/cmd/lotus-storage-miner/info.go @@ -291,10 +291,14 @@ var stateList = []stateMeta{ {col: color.FgYellow, state: sealing.PreCommit2}, {col: color.FgYellow, state: sealing.PreCommitting}, {col: color.FgYellow, state: sealing.PreCommitWait}, + {col: color.FgYellow, state: sealing.SubmitPreCommitBatch}, + {col: color.FgYellow, state: sealing.PreCommitBatchWait}, {col: color.FgYellow, state: sealing.WaitSeed}, {col: color.FgYellow, state: sealing.Committing}, {col: color.FgYellow, state: sealing.SubmitCommit}, {col: color.FgYellow, state: sealing.CommitWait}, + {col: color.FgYellow, state: sealing.SubmitCommitAggregate}, + {col: color.FgYellow, state: sealing.CommitAggregateWait}, {col: color.FgYellow, state: sealing.FinalizeSector}, {col: color.FgCyan, state: sealing.Terminating}, diff --git a/cmd/lotus-storage-miner/init.go b/cmd/lotus-storage-miner/init.go index a02520116..76451f418 100644 --- a/cmd/lotus-storage-miner/init.go +++ b/cmd/lotus-storage-miner/init.go @@ -145,7 +145,7 @@ var initCmd = &cli.Command{ log.Info("Checking proof parameters") - if err := paramfetch.GetParams(ctx, build.ParametersJSON(), uint64(ssize)); err != nil { + if err := paramfetch.GetParams(ctx, 
build.ParametersJSON(), build.SrsJSON(), uint64(ssize)); err != nil { return xerrors.Errorf("fetching proof parameters: %w", err) } diff --git a/cmd/lotus-storage-miner/init_restore.go b/cmd/lotus-storage-miner/init_restore.go index eec7b8413..b495e1cd9 100644 --- a/cmd/lotus-storage-miner/init_restore.go +++ b/cmd/lotus-storage-miner/init_restore.go @@ -255,7 +255,7 @@ var initRestoreCmd = &cli.Command{ log.Info("Checking proof parameters") - if err := paramfetch.GetParams(ctx, build.ParametersJSON(), uint64(mi.SectorSize)); err != nil { + if err := paramfetch.GetParams(ctx, build.ParametersJSON(), build.SrsJSON(), uint64(mi.SectorSize)); err != nil { return xerrors.Errorf("fetching proof parameters: %w", err) } diff --git a/cmd/lotus-storage-miner/sectors.go b/cmd/lotus-storage-miner/sectors.go index 3791dbf07..2476c16e8 100644 --- a/cmd/lotus-storage-miner/sectors.go +++ b/cmd/lotus-storage-miner/sectors.go @@ -45,6 +45,7 @@ var sectorsCmd = &cli.Command{ sectorsStartSealCmd, sectorsSealDelayCmd, sectorsCapacityCollateralCmd, + sectorsBatching, }, } @@ -969,6 +970,135 @@ var sectorsUpdateCmd = &cli.Command{ }, } +var sectorsBatching = &cli.Command{ + Name: "batching", + Usage: "manage batch sector operations", + Subcommands: []*cli.Command{ + sectorsBatchingPendingCommit, + sectorsBatchingPendingPreCommit, + }, +} + +var sectorsBatchingPendingCommit = &cli.Command{ + Name: "commit", + Usage: "list sectors waiting in commit batch queue", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "publish-now", + Usage: "send a batch now", + }, + }, + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := lcli.ReqContext(cctx) + + if cctx.Bool("publish-now") { + res, err := api.SectorCommitFlush(ctx) + if err != nil { + return xerrors.Errorf("flush: %w", err) + } + if res == nil { + return xerrors.Errorf("no sectors to publish") + } + + for i, re := range res { + fmt.Printf("Batch %d:\n", i) + if re.Error != "" { + fmt.Printf("\tError: %s\n", re.Error) + } else { + fmt.Printf("\tMessage: %s\n", re.Msg) + } + fmt.Printf("\tSectors:\n") + for _, sector := range re.Sectors { + if e, found := re.FailedSectors[sector]; found { + fmt.Printf("\t\t%d\tERROR %s\n", sector, e) + } else { + fmt.Printf("\t\t%d\tOK\n", sector) + } + } + } + return nil + } + + pending, err := api.SectorCommitPending(ctx) + if err != nil { + return xerrors.Errorf("getting pending deals: %w", err) + } + + if len(pending) > 0 { + for _, sector := range pending { + fmt.Println(sector.Number) + } + return nil + } + + fmt.Println("No sectors queued to be committed") + return nil + }, +} + +var sectorsBatchingPendingPreCommit = &cli.Command{ + Name: "precommit", + Usage: "list sectors waiting in precommit batch queue", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "publish-now", + Usage: "send a batch now", + }, + }, + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := lcli.ReqContext(cctx) + + if cctx.Bool("publish-now") { + res, err := api.SectorPreCommitFlush(ctx) + if err != nil { + return xerrors.Errorf("flush: %w", err) + } + if res == nil { + return xerrors.Errorf("no sectors to publish") + } + + for i, re := range res { + fmt.Printf("Batch %d:\n", i) + if re.Error != "" { + fmt.Printf("\tError: %s\n", re.Error) + } else { + fmt.Printf("\tMessage: %s\n", re.Msg) + } + fmt.Printf("\tSectors:\n") + for _, sector := range re.Sectors { + 
fmt.Printf("\t\t%d\tOK\n", sector) + } + } + return nil + } + + pending, err := api.SectorPreCommitPending(ctx) + if err != nil { + return xerrors.Errorf("getting pending deals: %w", err) + } + + if len(pending) > 0 { + for _, sector := range pending { + fmt.Println(sector.Number) + } + return nil + } + + fmt.Println("No sectors queued to be committed") + return nil + }, +} + func yesno(b bool) string { if b { return color.GreenString("YES") diff --git a/cmd/lotus/daemon.go b/cmd/lotus/daemon.go index 5a59ec816..644892ee2 100644 --- a/cmd/lotus/daemon.go +++ b/cmd/lotus/daemon.go @@ -231,7 +231,7 @@ var DaemonCmd = &cli.Command{ freshRepo := err != repo.ErrRepoExists if !isLite { - if err := paramfetch.GetParams(lcli.ReqContext(cctx), build.ParametersJSON(), 0); err != nil { + if err := paramfetch.GetParams(lcli.ReqContext(cctx), build.ParametersJSON(), build.SrsJSON(), 0); err != nil { return xerrors.Errorf("fetching proof parameters: %w", err) } } diff --git a/cmd/tvx/codenames.go b/cmd/tvx/codenames.go index b9f590914..f8da07e8d 100644 --- a/cmd/tvx/codenames.go +++ b/cmd/tvx/codenames.go @@ -20,7 +20,7 @@ var ProtocolCodenames = []struct { {build.UpgradeSmokeHeight + 1, "smoke"}, {build.UpgradeIgnitionHeight + 1, "ignition"}, {build.UpgradeRefuelHeight + 1, "refuel"}, - {build.UpgradeActorsV2Height + 1, "actorsv2"}, + {build.UpgradeAssemblyHeight + 1, "actorsv2"}, {build.UpgradeTapeHeight + 1, "tape"}, {build.UpgradeLiftoffHeight + 1, "liftoff"}, {build.UpgradeKumquatHeight + 1, "postliftoff"}, diff --git a/cmd/tvx/codenames_test.go b/cmd/tvx/codenames_test.go index bef2e982f..e7136d6cc 100644 --- a/cmd/tvx/codenames_test.go +++ b/cmd/tvx/codenames_test.go @@ -18,7 +18,7 @@ func TestProtocolCodenames(t *testing.T) { t.Fatal("expected breeze codename") } - if height := build.UpgradeActorsV2Height + 1; GetProtocolCodename(abi.ChainEpoch(height)) != "actorsv2" { + if height := build.UpgradeAssemblyHeight + 1; GetProtocolCodename(abi.ChainEpoch(height)) != "actorsv2" { t.Fatal("expected actorsv2 codename") } diff --git a/conformance/rand_fixed.go b/conformance/rand_fixed.go index d356b53d0..f15910e1d 100644 --- a/conformance/rand_fixed.go +++ b/conformance/rand_fixed.go @@ -19,10 +19,18 @@ func NewFixedRand() vm.Rand { return &fixedRand{} } -func (r *fixedRand) GetChainRandomness(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) { +func (r *fixedRand) GetChainRandomnessLookingForward(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) { return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes. } -func (r *fixedRand) GetBeaconRandomness(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) { +func (r *fixedRand) GetChainRandomnessLookingBack(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) { + return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes. +} + +func (r *fixedRand) GetBeaconRandomnessLookingForward(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) { + return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes. +} + +func (r *fixedRand) GetBeaconRandomnessLookingBack(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) { return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes. 
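	// All four randomness methods return the same fixed 32-byte value, so
	// conformance runs stay deterministic regardless of whether the VM asks
	// for the looking-forward or the looking-back variant.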
} diff --git a/conformance/rand_record.go b/conformance/rand_record.go index 165e86e85..906d6b73d 100644 --- a/conformance/rand_record.go +++ b/conformance/rand_record.go @@ -45,8 +45,17 @@ func (r *RecordingRand) loadHead() { r.head = head.Key() } -func (r *RecordingRand) GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { +func (r *RecordingRand) GetChainRandomnessLookingForward(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return r.getChainRandomness(ctx, pers, round, entropy) +} + +func (r *RecordingRand) GetChainRandomnessLookingBack(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return r.getChainRandomness(ctx, pers, round, entropy) +} + +func (r *RecordingRand) getChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { r.once.Do(r.loadHead) + // FullNode's ChainGetRandomnessFromTickets handles whether we should be looking forward or back ret, err := r.api.ChainGetRandomnessFromTickets(ctx, r.head, pers, round, entropy) if err != nil { return ret, err @@ -70,7 +79,15 @@ func (r *RecordingRand) GetChainRandomness(ctx context.Context, pers crypto.Doma return ret, err } -func (r *RecordingRand) GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { +func (r *RecordingRand) GetBeaconRandomnessLookingForward(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return r.getBeaconRandomness(ctx, pers, round, entropy) +} + +func (r *RecordingRand) GetBeaconRandomnessLookingBack(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return r.getBeaconRandomness(ctx, pers, round, entropy) +} + +func (r *RecordingRand) getBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { r.once.Do(r.loadHead) ret, err := r.api.ChainGetRandomnessFromBeacon(ctx, r.head, pers, round, entropy) if err != nil { diff --git a/conformance/rand_replay.go b/conformance/rand_replay.go index 1b73e5a08..faae1d090 100644 --- a/conformance/rand_replay.go +++ b/conformance/rand_replay.go @@ -43,7 +43,15 @@ func (r *ReplayingRand) match(requested schema.RandomnessRule) ([]byte, bool) { return nil, false } -func (r *ReplayingRand) GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { +func (r *ReplayingRand) GetChainRandomnessLookingForward(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return r.getChainRandomness(ctx, pers, round, entropy, false) +} + +func (r *ReplayingRand) GetChainRandomnessLookingBack(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return r.getChainRandomness(ctx, pers, round, entropy, true) +} + +func (r *ReplayingRand) getChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte, lookback bool) ([]byte, error) { rule := schema.RandomnessRule{ Kind: schema.RandomnessChain, DomainSeparationTag: int64(pers), @@ -57,10 +65,23 @@ func (r *ReplayingRand) GetChainRandomness(ctx context.Context, pers crypto.Doma } 
r.reporter.Logf("returning fallback chain randomness: dst=%d, epoch=%d, entropy=%x", pers, round, entropy) - return r.fallback.GetChainRandomness(ctx, pers, round, entropy) + + if lookback { + return r.fallback.GetChainRandomnessLookingBack(ctx, pers, round, entropy) + } + + return r.fallback.GetChainRandomnessLookingForward(ctx, pers, round, entropy) } -func (r *ReplayingRand) GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { +func (r *ReplayingRand) GetBeaconRandomnessLookingForward(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return r.getBeaconRandomness(ctx, pers, round, entropy, false) +} + +func (r *ReplayingRand) GetBeaconRandomnessLookingBack(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return r.getBeaconRandomness(ctx, pers, round, entropy, true) +} + +func (r *ReplayingRand) getBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte, lookback bool) ([]byte, error) { rule := schema.RandomnessRule{ Kind: schema.RandomnessBeacon, DomainSeparationTag: int64(pers), @@ -74,6 +95,10 @@ func (r *ReplayingRand) GetBeaconRandomness(ctx context.Context, pers crypto.Dom } r.reporter.Logf("returning fallback beacon randomness: dst=%d, epoch=%d, entropy=%x", pers, round, entropy) - return r.fallback.GetBeaconRandomness(ctx, pers, round, entropy) + if lookback { + return r.fallback.GetBeaconRandomnessLookingBack(ctx, pers, round, entropy) + } + + return r.fallback.GetBeaconRandomnessLookingForward(ctx, pers, round, entropy) } diff --git a/documentation/en/api-v0-methods-miner.md b/documentation/en/api-v0-methods-miner.md index ea5ca75f8..53d485815 100644 --- a/documentation/en/api-v0-methods-miner.md +++ b/documentation/en/api-v0-methods-miner.md @@ -98,9 +98,13 @@ * [SealingAbort](#SealingAbort) * [SealingSchedDiag](#SealingSchedDiag) * [Sector](#Sector) + * [SectorCommitFlush](#SectorCommitFlush) + * [SectorCommitPending](#SectorCommitPending) * [SectorGetExpectedSealDuration](#SectorGetExpectedSealDuration) * [SectorGetSealDelay](#SectorGetSealDelay) * [SectorMarkForUpgrade](#SectorMarkForUpgrade) + * [SectorPreCommitFlush](#SectorPreCommitFlush) + * [SectorPreCommitPending](#SectorPreCommitPending) * [SectorRemove](#SectorRemove) * [SectorSetExpectedSealDuration](#SectorSetExpectedSealDuration) * [SectorSetSealDelay](#SectorSetSealDelay) @@ -1556,6 +1560,27 @@ Response: `{}` ## Sector +### SectorCommitFlush +SectorCommitFlush immediately sends a Commit message with sectors aggregated for Commit. +Returns null if message wasn't sent + + +Perms: admin + +Inputs: `null` + +Response: `null` + +### SectorCommitPending +SectorCommitPending returns a list of pending Commit sectors to be sent in the next aggregate message + + +Perms: admin + +Inputs: `null` + +Response: `null` + ### SectorGetExpectedSealDuration SectorGetExpectedSealDuration gets the expected time for a sector to seal @@ -1591,6 +1616,27 @@ Inputs: Response: `{}` +### SectorPreCommitFlush +SectorPreCommitFlush immediately sends a PreCommit message with sectors batched for PreCommit. 
+Returns null if message wasn't sent + + +Perms: admin + +Inputs: `null` + +Response: `null` + +### SectorPreCommitPending +SectorPreCommitPending returns a list of pending PreCommit sectors to be sent in the next batch message + + +Perms: admin + +Inputs: `null` + +Response: `null` + ### SectorRemove SectorRemove removes the sector from storage. It doesn't terminate it on-chain, which can be done with SectorTerminate. Removing and not terminating live sectors will cause additional penalties. diff --git a/documentation/en/api-v0-methods.md b/documentation/en/api-v0-methods.md index a8b760f8a..337c7a624 100644 --- a/documentation/en/api-v0-methods.md +++ b/documentation/en/api-v0-methods.md @@ -4551,7 +4551,7 @@ Inputs: ] ``` -Response: `12` +Response: `13` ### StateReadState StateReadState returns the indicated actor's state. diff --git a/documentation/en/api-v1-unstable-methods.md b/documentation/en/api-v1-unstable-methods.md index be326b3e8..3c337332b 100644 --- a/documentation/en/api-v1-unstable-methods.md +++ b/documentation/en/api-v1-unstable-methods.md @@ -4772,7 +4772,7 @@ Inputs: ] ``` -Response: `12` +Response: `13` ### StateReadState StateReadState returns the indicated actor's state. diff --git a/documentation/en/cli-lotus-miner.md b/documentation/en/cli-lotus-miner.md index 1b9b80ee9..b4b245514 100644 --- a/documentation/en/cli-lotus-miner.md +++ b/documentation/en/cli-lotus-miner.md @@ -1310,6 +1310,7 @@ COMMANDS: seal Manually start sealing a sector (filling any unused space with junk) set-seal-delay Set the time, in minutes, that a new sector waits for deals before sealing starts get-cc-collateral Get the collateral required to pledge a committed capacity sector + batching manage batch sector operations help, h Shows a list of commands or help for one command OPTIONS: @@ -1523,6 +1524,53 @@ OPTIONS: ``` +### lotus-miner sectors batching +``` +NAME: + lotus-miner sectors batching - manage batch sector operations + +USAGE: + lotus-miner sectors batching command [command options] [arguments...] + +COMMANDS: + commit list sectors waiting in commit batch queue + precommit list sectors waiting in precommit batch queue + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +#### lotus-miner sectors batching commit +``` +NAME: + lotus-miner sectors batching commit - list sectors waiting in commit batch queue + +USAGE: + lotus-miner sectors batching commit [command options] [arguments...] + +OPTIONS: + --publish-now send a batch now (default: false) + --help, -h show help (default: false) + +``` + +#### lotus-miner sectors batching precommit +``` +NAME: + lotus-miner sectors batching precommit - list sectors waiting in precommit batch queue + +USAGE: + lotus-miner sectors batching precommit [command options] [arguments...] 
+ +OPTIONS: + --publish-now send a batch now (default: false) + --help, -h show help (default: false) + +``` + ## lotus-miner proving ``` NAME: diff --git a/documentation/en/cli-lotus.md b/documentation/en/cli-lotus.md index 8e7e45f51..35ea5e735 100644 --- a/documentation/en/cli-lotus.md +++ b/documentation/en/cli-lotus.md @@ -15,12 +15,12 @@ COMMANDS: version Print version help, h Shows a list of commands or help for one command BASIC: - send Send funds between accounts - wallet Manage wallet - client Make deals, store data, retrieve data - msig Interact with a multisig wallet - verifreg Interact with the verified registry actor - paych Manage payment channels + send Send funds between accounts + wallet Manage wallet + client Make deals, store data, retrieve data + msig Interact with a multisig wallet + filplus Interact with the verified registry actor used by Filplus + paych Manage payment channels DEVELOPER: auth Manage RPC permissions mpool Manage message pool @@ -1035,21 +1035,21 @@ OPTIONS: ``` -## lotus verifreg +## lotus filplus ``` NAME: - lotus verifreg - Interact with the verified registry actor + lotus filplus - Interact with the verified registry actor used by Filplus USAGE: - lotus verifreg command [command options] [arguments...] + lotus filplus command [command options] [arguments...] COMMANDS: - verify-client give allowance to the specified verified client address - list-verifiers list all verifiers - list-clients list all verified clients - check-client check verified client remaining bytes - check-verifier check verifiers remaining bytes - help, h Shows a list of commands or help for one command + grant-datacap give allowance to the specified verified client address + list-notaries list all notaries + list-clients list all verified clients + check-client-datacap check verified client remaining bytes + check-notaries-datacap check notaries remaining bytes + help, h Shows a list of commands or help for one command OPTIONS: --help, -h show help (default: false) @@ -1057,66 +1057,66 @@ OPTIONS: ``` -### lotus verifreg verify-client +### lotus filplus grant-datacap ``` NAME: - lotus verifreg verify-client - give allowance to the specified verified client address + lotus filplus grant-datacap - give allowance to the specified verified client address USAGE: - lotus verifreg verify-client [command options] [arguments...] + lotus filplus grant-datacap [command options] [arguments...] OPTIONS: - --from value specify your verifier address to send the message from + --from value specify your notary address to send the message from --help, -h show help (default: false) ``` -### lotus verifreg list-verifiers +### lotus filplus list-notaries ``` NAME: - lotus verifreg list-verifiers - list all verifiers + lotus filplus list-notaries - list all notaries USAGE: - lotus verifreg list-verifiers [command options] [arguments...] + lotus filplus list-notaries [command options] [arguments...] OPTIONS: --help, -h show help (default: false) ``` -### lotus verifreg list-clients +### lotus filplus list-clients ``` NAME: - lotus verifreg list-clients - list all verified clients + lotus filplus list-clients - list all verified clients USAGE: - lotus verifreg list-clients [command options] [arguments...] + lotus filplus list-clients [command options] [arguments...] 
OPTIONS: --help, -h show help (default: false) ``` -### lotus verifreg check-client +### lotus filplus check-client-datacap ``` NAME: - lotus verifreg check-client - check verified client remaining bytes + lotus filplus check-client-datacap - check verified client remaining bytes USAGE: - lotus verifreg check-client [command options] [arguments...] + lotus filplus check-client-datacap [command options] [arguments...] OPTIONS: --help, -h show help (default: false) ``` -### lotus verifreg check-verifier +### lotus filplus check-notaries-datacap ``` NAME: - lotus verifreg check-verifier - check verifiers remaining bytes + lotus filplus check-notaries-datacap - check notaries remaining bytes USAGE: - lotus verifreg check-verifier [command options] [arguments...] + lotus filplus check-notaries-datacap [command options] [arguments...] OPTIONS: --help, -h show help (default: false) diff --git a/documentation/misc/actors_version_checklist.md b/documentation/misc/actors_version_checklist.md index 308358948..1fae4bd8a 100644 --- a/documentation/misc/actors_version_checklist.md +++ b/documentation/misc/actors_version_checklist.md @@ -10,10 +10,10 @@ - [ ] Register in `chain/vm/invoker.go` - [ ] Register in `chain/vm/mkactor.go` - [ ] Update `chain/types/state.go` -- [ ] Update `chain/state/statetree.go` +- [ ] Update `chain/state/statetree.go` (New / Load) - [ ] Update `chain/stmgr/forks.go` - [ ] Schedule - [ ] Migration -- [ ] Update upgrade schedule in `api/test/test.go` +- [ ] Update upgrade schedule in `api/test/test.go` and `chain/sync_test.go` - [ ] Update `NewestNetworkVersion` in `build/params_shared_vals.go` - [ ] Register in init in `chain/stmgr/utils.go` diff --git a/extern/filecoin-ffi b/extern/filecoin-ffi index dc4e4e8dc..8b97bd823 160000 --- a/extern/filecoin-ffi +++ b/extern/filecoin-ffi @@ -1 +1 @@ -Subproject commit dc4e4e8dc9554dedb6f48304f7f0c6328331f9ec +Subproject commit 8b97bd8230b77bd32f4f27e4766a6d8a03b4e801 diff --git a/extern/sector-storage/ffiwrapper/prover_cgo.go b/extern/sector-storage/ffiwrapper/prover_cgo.go new file mode 100644 index 000000000..3ad73c81c --- /dev/null +++ b/extern/sector-storage/ffiwrapper/prover_cgo.go @@ -0,0 +1,18 @@ +//+build cgo + +package ffiwrapper + +import ( + ffi "github.com/filecoin-project/filecoin-ffi" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" +) + +var ProofProver = proofProver{} + +var _ Prover = ProofProver + +type proofProver struct{} + +func (v proofProver) AggregateSealProofs(aggregateInfo proof5.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) { + return ffi.AggregateSealProofs(aggregateInfo, proofs) +} diff --git a/extern/sector-storage/ffiwrapper/sealer_test.go b/extern/sector-storage/ffiwrapper/sealer_test.go index 39e37f0cc..5d96f187f 100644 --- a/extern/sector-storage/ffiwrapper/sealer_test.go +++ b/extern/sector-storage/ffiwrapper/sealer_test.go @@ -18,6 +18,7 @@ import ( commpffi "github.com/filecoin-project/go-commp-utils/ffiwrapper" proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" "github.com/ipfs/go-cid" @@ -31,6 +32,7 @@ import ( ffi "github.com/filecoin-project/filecoin-ffi" + "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper/basicfs" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" "github.com/filecoin-project/lotus/extern/storage-sealing/lib/nullreader" @@ -83,9 +85,10 @@ 
func (s *seal) precommit(t *testing.T, sb *Sealer, id storage.SectorRef, done fu s.cids = cids } -func (s *seal) commit(t *testing.T, sb *Sealer, done func()) { +var seed = abi.InteractiveSealRandomness{0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9, 8, 7, 6, 45, 3, 2, 1, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9} + +func (s *seal) commit(t *testing.T, sb *Sealer, done func()) storage.Proof { defer done() - seed := abi.InteractiveSealRandomness{0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9, 8, 7, 6, 45, 3, 2, 1, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9} pc1, err := sb.SealCommit1(context.TODO(), s.ref, s.ticket, seed, []abi.PieceInfo{s.pi}, s.cids) if err != nil { @@ -112,6 +115,8 @@ func (s *seal) commit(t *testing.T, sb *Sealer, done func()) { if !ok { t.Fatal("proof failed to validate") } + + return proof } func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si storage.SectorRef, done func()) { @@ -229,7 +234,12 @@ func getGrothParamFileAndVerifyingKeys(s abi.SectorSize) { panic(err) } - err = paramfetch.GetParams(context.TODO(), dat, uint64(s)) + datSrs, err := ioutil.ReadFile("../../../build/proof-params/srs-inner-product.json") + if err != nil { + panic(err) + } + + err = paramfetch.GetParams(context.TODO(), dat, datSrs, uint64(s)) if err != nil { panic(xerrors.Errorf("failed to acquire Groth parameters for 2KiB sectors: %w", err)) } @@ -242,7 +252,7 @@ func getGrothParamFileAndVerifyingKeys(s abi.SectorSize) { // go test -run=^TestDownloadParams // func TestDownloadParams(t *testing.T) { - defer requireFDsClosed(t, openFDs(t)) + // defer requireFDsClosed(t, openFDs(t)) flaky likely cause of how go-embed works with param files getGrothParamFileAndVerifyingKeys(sectorSize) } @@ -462,6 +472,97 @@ func TestSealAndVerify3(t *testing.T) { post(t, sb, []abi.SectorID{si1.ID, si2.ID}, s1, s2, s3) } +func TestSealAndVerifyAggregate(t *testing.T) { + numAgg := 5 + + if testing.Short() { + t.Skip("skipping test in short mode") + } + + defer requireFDsClosed(t, openFDs(t)) + + if runtime.NumCPU() < 10 && os.Getenv("CI") == "" { // don't bother on slow hardware + t.Skip("this is slow") + } + _ = os.Setenv("RUST_LOG", "info") + + getGrothParamFileAndVerifyingKeys(sectorSize) + + cdir, err := ioutil.TempDir("", "sbtest-c-") + if err != nil { + t.Fatal(err) + } + miner := abi.ActorID(123) + + sp := &basicfs.Provider{ + Root: cdir, + } + sb, err := New(sp) + if err != nil { + t.Fatalf("%+v", err) + } + cleanup := func() { + if t.Failed() { + fmt.Printf("not removing %s\n", cdir) + return + } + if err := os.RemoveAll(cdir); err != nil { + t.Error(err) + } + } + defer cleanup() + + avi := proof5.AggregateSealVerifyProofAndInfos{ + Miner: miner, + SealProof: sealProofType, + AggregateProof: policy.GetDefaultAggregationProof(), + Proof: nil, + Infos: make([]proof5.AggregateSealVerifyInfo, numAgg), + } + + toAggregate := make([][]byte, numAgg) + for i := 0; i < numAgg; i++ { + si := storage.SectorRef{ + ID: abi.SectorID{Miner: miner, Number: abi.SectorNumber(i + 1)}, + ProofType: sealProofType, + } + + s := seal{ref: si} + s.precommit(t, sb, si, func() {}) + toAggregate[i] = s.commit(t, sb, func() {}) + + avi.Infos[i] = proof5.AggregateSealVerifyInfo{ + Number: abi.SectorNumber(i + 1), + Randomness: s.ticket, + InteractiveRandomness: seed, + SealedCID: s.cids.Sealed, + UnsealedCID: s.cids.Unsealed, + } + } + + aggStart := time.Now() + + avi.Proof, err = ProofProver.AggregateSealProofs(avi, toAggregate) + require.NoError(t, err) + + aggDone := time.Now() + + _, err = ProofProver.AggregateSealProofs(avi, toAggregate) + 
require.NoError(t, err) + + aggHot := time.Now() + + ok, err := ProofVerifier.VerifyAggregateSeals(avi) + require.NoError(t, err) + require.True(t, ok) + + verifDone := time.Now() + + fmt.Printf("Aggregate: %s\n", aggDone.Sub(aggStart).String()) + fmt.Printf("Hot: %s\n", aggHot.Sub(aggDone).String()) + fmt.Printf("Verify: %s\n", verifDone.Sub(aggHot).String()) +} + func BenchmarkWriteWithAlignment(b *testing.B) { bt := abi.UnpaddedPieceSize(2 * 127 * 1024 * 1024) b.SetBytes(int64(bt)) diff --git a/extern/sector-storage/ffiwrapper/types.go b/extern/sector-storage/ffiwrapper/types.go index b7e96636a..a5b2fdf1f 100644 --- a/extern/sector-storage/ffiwrapper/types.go +++ b/extern/sector-storage/ffiwrapper/types.go @@ -4,7 +4,7 @@ import ( "context" "io" - proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" "github.com/ipfs/go-cid" @@ -34,13 +34,21 @@ type Storage interface { } type Verifier interface { - VerifySeal(proof2.SealVerifyInfo) (bool, error) - VerifyWinningPoSt(ctx context.Context, info proof2.WinningPoStVerifyInfo) (bool, error) - VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStVerifyInfo) (bool, error) + VerifySeal(proof5.SealVerifyInfo) (bool, error) + VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) + VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) + VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) GenerateWinningPoStSectorChallenge(context.Context, abi.RegisteredPoStProof, abi.ActorID, abi.PoStRandomness, uint64) ([]uint64, error) } +// Prover contains cheap proving-related methods +type Prover interface { + // TODO: move GenerateWinningPoStSectorChallenge from the Verifier interface to here + + AggregateSealProofs(aggregateInfo proof5.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) +} + type SectorProvider interface { // * returns storiface.ErrSectorNotFound if a requested existing sector doesn't exist // * returns an error when allocate is set, and existing isn't, and the sector exists diff --git a/extern/sector-storage/ffiwrapper/verifier_cgo.go b/extern/sector-storage/ffiwrapper/verifier_cgo.go index 15e0e6ab3..95724bb7c 100644 --- a/extern/sector-storage/ffiwrapper/verifier_cgo.go +++ b/extern/sector-storage/ffiwrapper/verifier_cgo.go @@ -10,13 +10,13 @@ import ( ffi "github.com/filecoin-project/filecoin-ffi" "github.com/filecoin-project/go-state-types/abi" - proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) -func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof2.SectorInfo, randomness abi.PoStRandomness) ([]proof2.PoStProof, error) { +func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) ([]proof5.PoStProof, error) { randomness[31] &= 0x3f privsectors, skipped, done, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWinningPoStProof) // TODO: FAULTS? 
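
The new `Prover` interface above is deliberately small: it only folds already-computed seal proofs into one SnarkPack aggregate, while `Verifier.VerifyAggregateSeals` checks the result. Below is a minimal sketch of wiring the two together, mirroring the flow in `TestSealAndVerifyAggregate`; the `aggregateAndVerify` helper and its package are hypothetical, not part of this patch, and assume a cgo build (where `ProofProver` from `prover_cgo.go` is available).

```go
package example

import (
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-state-types/abi"
	proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"

	"github.com/filecoin-project/lotus/chain/actors/policy"
	"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
)

// aggregateAndVerify folds per-sector seal proofs into a single aggregate and
// immediately verifies it. infos[i] is expected to describe the sector whose
// proof is proofs[i]; all sectors use the same seal proof type and miner ID.
func aggregateAndVerify(miner abi.ActorID, spt abi.RegisteredSealProof, infos []proof5.AggregateSealVerifyInfo, proofs [][]byte) error {
	avi := proof5.AggregateSealVerifyProofAndInfos{
		Miner:          miner,
		SealProof:      spt,
		AggregateProof: policy.GetDefaultAggregationProof(), // SnarkPackV1
		Infos:          infos,
	}

	// Aggregate the individual PoRep proofs into one SnarkPack proof.
	agg, err := ffiwrapper.ProofProver.AggregateSealProofs(avi, proofs)
	if err != nil {
		return xerrors.Errorf("aggregating seal proofs: %w", err)
	}
	avi.Proof = agg

	// Verify the aggregate against the same per-sector infos.
	ok, err := ffiwrapper.ProofVerifier.VerifyAggregateSeals(avi)
	if err != nil {
		return xerrors.Errorf("verifying aggregate: %w", err)
	}
	if !ok {
		return xerrors.Errorf("aggregate seal proof failed to validate")
	}
	return nil
}
```
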
if err != nil { @@ -30,7 +30,7 @@ func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, return ffi.GenerateWinningPoSt(minerID, privsectors, randomness) } -func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof2.SectorInfo, randomness abi.PoStRandomness) ([]proof2.PoStProof, []abi.SectorID, error) { +func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) ([]proof5.PoStProof, []abi.SectorID, error) { randomness[31] &= 0x3f privsectors, skipped, done, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWindowPoStProof) if err != nil { @@ -55,7 +55,7 @@ func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, s return proof, faultyIDs, err } -func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorInfo []proof2.SectorInfo, faults []abi.SectorNumber, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error)) (ffi.SortedPrivateSectorInfo, []abi.SectorID, func(), error) { +func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorInfo []proof5.SectorInfo, faults []abi.SectorNumber, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error)) (ffi.SortedPrivateSectorInfo, []abi.SectorID, func(), error) { fmap := map[abi.SectorNumber]struct{}{} for _, fault := range faults { fmap[fault] = struct{}{} @@ -111,11 +111,15 @@ type proofVerifier struct{} var ProofVerifier = proofVerifier{} -func (proofVerifier) VerifySeal(info proof2.SealVerifyInfo) (bool, error) { +func (proofVerifier) VerifySeal(info proof5.SealVerifyInfo) (bool, error) { return ffi.VerifySeal(info) } -func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info proof2.WinningPoStVerifyInfo) (bool, error) { +func (proofVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) { + return ffi.VerifyAggregateSeals(aggregate) +} + +func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) { info.Randomness[31] &= 0x3f _, span := trace.StartSpan(ctx, "VerifyWinningPoSt") defer span.End() @@ -123,7 +127,7 @@ func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info proof2.WinningP return ffi.VerifyWinningPoSt(info) } -func (proofVerifier) VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStVerifyInfo) (bool, error) { +func (proofVerifier) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) { info.Randomness[31] &= 0x3f _, span := trace.StartSpan(ctx, "VerifyWindowPoSt") defer span.End() diff --git a/extern/sector-storage/mock/mock.go b/extern/sector-storage/mock/mock.go index ae7d54985..52496f836 100644 --- a/extern/sector-storage/mock/mock.go +++ b/extern/sector-storage/mock/mock.go @@ -9,7 +9,7 @@ import ( "math/rand" "sync" - proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" ffiwrapper2 "github.com/filecoin-project/go-commp-utils/ffiwrapper" commcid "github.com/filecoin-project/go-fil-commcid" @@ -34,7 +34,7 @@ type SectorMgr struct { lk sync.Mutex } -type mockVerif struct{} +type mockVerifProver struct{} func NewMockSectorMgr(genesisSectors []abi.SectorID) *SectorMgr { sectors := make(map[abi.SectorID]*sectorState) @@ -300,14 +300,14 @@ func AddOpFinish(ctx context.Context) (context.Context, func()) { } } -func (mgr *SectorMgr) 
GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof2.SectorInfo, randomness abi.PoStRandomness) ([]proof2.PoStProof, error) { +func (mgr *SectorMgr) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) ([]proof5.PoStProof, error) { mgr.lk.Lock() defer mgr.lk.Unlock() return generateFakePoSt(sectorInfo, abi.RegisteredSealProof.RegisteredWinningPoStProof, randomness), nil } -func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof2.SectorInfo, randomness abi.PoStRandomness) ([]proof2.PoStProof, []abi.SectorID, error) { +func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) ([]proof5.PoStProof, []abi.SectorID, error) { mgr.lk.Lock() defer mgr.lk.Unlock() @@ -315,7 +315,8 @@ func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorI return nil, nil, xerrors.Errorf("failed to post (mock)") } - si := make([]proof2.SectorInfo, 0, len(sectorInfo)) + si := make([]proof5.SectorInfo, 0, len(sectorInfo)) + var skipped []abi.SectorID var err error @@ -343,7 +344,7 @@ func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorI return generateFakePoSt(si, abi.RegisteredSealProof.RegisteredWindowPoStProof, randomness), skipped, nil } -func generateFakePoStProof(sectorInfo []proof2.SectorInfo, randomness abi.PoStRandomness) []byte { +func generateFakePoStProof(sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) []byte { randomness[31] &= 0x3f hasher := sha256.New() @@ -358,13 +359,13 @@ func generateFakePoStProof(sectorInfo []proof2.SectorInfo, randomness abi.PoStRa } -func generateFakePoSt(sectorInfo []proof2.SectorInfo, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error), randomness abi.PoStRandomness) []proof2.PoStProof { +func generateFakePoSt(sectorInfo []proof5.SectorInfo, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error), randomness abi.PoStRandomness) []proof5.PoStProof { wp, err := rpt(sectorInfo[0].SealProof) if err != nil { panic(err) } - return []proof2.PoStProof{ + return []proof5.PoStProof{ { PoStProof: wp, ProofBytes: generateFakePoStProof(sectorInfo, randomness), @@ -489,7 +490,7 @@ func (mgr *SectorMgr) ReturnFetch(ctx context.Context, callID storiface.CallID, panic("not supported") } -func (m mockVerif) VerifySeal(svi proof2.SealVerifyInfo) (bool, error) { +func (m mockVerifProver) VerifySeal(svi proof5.SealVerifyInfo) (bool, error) { plen, err := svi.SealProof.ProofSize() if err != nil { return false, err @@ -501,6 +502,7 @@ func (m mockVerif) VerifySeal(svi proof2.SealVerifyInfo) (bool, error) { // only the first 32 bytes, the rest are 0. 
for i, b := range svi.Proof[:32] { + // unsealed+sealed-seed*ticket if b != svi.UnsealedCID.Bytes()[i]+svi.SealedCID.Bytes()[31-i]-svi.InteractiveRandomness[i]*svi.Randomness[i] { return false, nil } @@ -509,12 +511,66 @@ func (m mockVerif) VerifySeal(svi proof2.SealVerifyInfo) (bool, error) { return true, nil } -func (m mockVerif) VerifyWinningPoSt(ctx context.Context, info proof2.WinningPoStVerifyInfo) (bool, error) { +func (m mockVerifProver) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) { + out := make([]byte, m.aggLen(len(aggregate.Infos))) + for pi, svi := range aggregate.Infos { + for i := 0; i < 32; i++ { + b := svi.UnsealedCID.Bytes()[i] + svi.SealedCID.Bytes()[31-i] - svi.InteractiveRandomness[i]*svi.Randomness[i] // raw proof byte + + b *= uint8(pi) // with aggregate index + out[i] += b + } + } + + return bytes.Equal(aggregate.Proof, out), nil +} + +func (m mockVerifProver) AggregateSealProofs(aggregateInfo proof5.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) { + out := make([]byte, m.aggLen(len(aggregateInfo.Infos))) // todo: figure out more real length + for pi, proof := range proofs { + for i := range proof[:32] { + out[i] += proof[i] * uint8(pi) + } + } + + return out, nil +} + +func (m mockVerifProver) aggLen(nproofs int) int { + switch { + case nproofs <= 8: + return 11220 + case nproofs <= 16: + return 14196 + case nproofs <= 32: + return 17172 + case nproofs <= 64: + return 20148 + case nproofs <= 128: + return 23124 + case nproofs <= 256: + return 26100 + case nproofs <= 512: + return 29076 + case nproofs <= 1024: + return 32052 + case nproofs <= 2048: + return 35028 + case nproofs <= 4096: + return 38004 + case nproofs <= 8192: + return 40980 + default: + panic("too many proofs") + } +} + +func (m mockVerifProver) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) { info.Randomness[31] &= 0x3f return true, nil } -func (m mockVerif) VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStVerifyInfo) (bool, error) { +func (m mockVerifProver) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) { if len(info.Proofs) != 1 { return false, xerrors.Errorf("expected 1 proof entry") } @@ -528,15 +584,17 @@ func (m mockVerif) VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStV return true, nil } -func (m mockVerif) GenerateDataCommitment(pt abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) { +func (m mockVerifProver) GenerateDataCommitment(pt abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) { return ffiwrapper.GenerateUnsealedCID(pt, pieces) } -func (m mockVerif) GenerateWinningPoStSectorChallenge(ctx context.Context, proofType abi.RegisteredPoStProof, minerID abi.ActorID, randomness abi.PoStRandomness, eligibleSectorCount uint64) ([]uint64, error) { +func (m mockVerifProver) GenerateWinningPoStSectorChallenge(ctx context.Context, proofType abi.RegisteredPoStProof, minerID abi.ActorID, randomness abi.PoStRandomness, eligibleSectorCount uint64) ([]uint64, error) { return []uint64{0}, nil } -var MockVerifier = mockVerif{} +var MockVerifier = mockVerifProver{} +var MockProver = mockVerifProver{} var _ storage.Sealer = &SectorMgr{} var _ ffiwrapper.Verifier = MockVerifier +var _ ffiwrapper.Prover = MockProver diff --git a/extern/storage-sealing/commit_batch.go b/extern/storage-sealing/commit_batch.go new file mode 100644 index 000000000..7d128fe76 --- /dev/null +++ b/extern/storage-sealing/commit_batch.go @@ 
-0,0 +1,495 @@ +package sealing + +import ( + "bytes" + "context" + "sort" + "sync" + "time" + + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" +) + +const arp = abi.RegisteredAggregationProof_SnarkPackV1 + +type CommitBatcherApi interface { + SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error) + StateMinerInfo(context.Context, address.Address, TipSetToken) (miner.MinerInfo, error) + ChainHead(ctx context.Context) (TipSetToken, abi.ChainEpoch, error) + + StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok TipSetToken) (*miner.SectorPreCommitOnChainInfo, error) + StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, TipSetToken) (big.Int, error) +} + +type AggregateInput struct { + spt abi.RegisteredSealProof + info proof5.AggregateSealVerifyInfo + proof []byte +} + +type CommitBatcher struct { + api CommitBatcherApi + maddr address.Address + mctx context.Context + addrSel AddrSel + feeCfg FeeConfig + getConfig GetSealingConfigFunc + prover ffiwrapper.Prover + + deadlines map[abi.SectorNumber]time.Time + todo map[abi.SectorNumber]AggregateInput + waiting map[abi.SectorNumber][]chan sealiface.CommitBatchRes + + notify, stop, stopped chan struct{} + force chan chan []sealiface.CommitBatchRes + lk sync.Mutex +} + +func NewCommitBatcher(mctx context.Context, maddr address.Address, api CommitBatcherApi, addrSel AddrSel, feeCfg FeeConfig, getConfig GetSealingConfigFunc, prov ffiwrapper.Prover) *CommitBatcher { + b := &CommitBatcher{ + api: api, + maddr: maddr, + mctx: mctx, + addrSel: addrSel, + feeCfg: feeCfg, + getConfig: getConfig, + prover: prov, + + deadlines: map[abi.SectorNumber]time.Time{}, + todo: map[abi.SectorNumber]AggregateInput{}, + waiting: map[abi.SectorNumber][]chan sealiface.CommitBatchRes{}, + + notify: make(chan struct{}, 1), + force: make(chan chan []sealiface.CommitBatchRes), + stop: make(chan struct{}), + stopped: make(chan struct{}), + } + + go b.run() + + return b +} + +func (b *CommitBatcher) run() { + var forceRes chan []sealiface.CommitBatchRes + var lastMsg []sealiface.CommitBatchRes + + cfg, err := b.getConfig() + if err != nil { + panic(err) + } + + for { + if forceRes != nil { + forceRes <- lastMsg + forceRes = nil + } + lastMsg = nil + + var sendAboveMax, sendAboveMin bool + select { + case <-b.stop: + close(b.stopped) + return + case <-b.notify: + sendAboveMax = true + case <-b.batchWait(cfg.CommitBatchWait, cfg.CommitBatchSlack): + sendAboveMin = true + case fr := <-b.force: // user triggered + forceRes = fr + } + + var err error + lastMsg, err = b.maybeStartBatch(sendAboveMax, sendAboveMin) + if err != nil { + log.Warnw("CommitBatcher processBatch error", "error", err) + } + } +} + +func (b 
*CommitBatcher) batchWait(maxWait, slack time.Duration) <-chan time.Time { + now := time.Now() + + b.lk.Lock() + defer b.lk.Unlock() + + if len(b.todo) == 0 { + return nil + } + + var deadline time.Time + for sn := range b.todo { + sectorDeadline := b.deadlines[sn] + if deadline.IsZero() || (!sectorDeadline.IsZero() && sectorDeadline.Before(deadline)) { + deadline = sectorDeadline + } + } + for sn := range b.waiting { + sectorDeadline := b.deadlines[sn] + if deadline.IsZero() || (!sectorDeadline.IsZero() && sectorDeadline.Before(deadline)) { + deadline = sectorDeadline + } + } + + if deadline.IsZero() { + return time.After(maxWait) + } + + deadline = deadline.Add(-slack) + if deadline.Before(now) { + return time.After(time.Nanosecond) // can't return 0 + } + + wait := deadline.Sub(now) + if wait > maxWait { + wait = maxWait + } + + return time.After(wait) +} + +func (b *CommitBatcher) maybeStartBatch(notif, after bool) ([]sealiface.CommitBatchRes, error) { + b.lk.Lock() + defer b.lk.Unlock() + + total := len(b.todo) + if total == 0 { + return nil, nil // nothing to do + } + + cfg, err := b.getConfig() + if err != nil { + return nil, xerrors.Errorf("getting config: %w", err) + } + + if notif && total < cfg.MaxCommitBatch { + return nil, nil + } + + if after && total < cfg.MinCommitBatch { + return nil, nil + } + + var res []sealiface.CommitBatchRes + + if total < cfg.MinCommitBatch || total < miner5.MinAggregatedSectors { + res, err = b.processIndividually() + } else { + res, err = b.processBatch(cfg) + } + if err != nil && len(res) == 0 { + return nil, err + } + + for _, r := range res { + if err != nil { + r.Error = err.Error() + } + + for _, sn := range r.Sectors { + for _, ch := range b.waiting[sn] { + ch <- r // buffered + } + + delete(b.waiting, sn) + delete(b.todo, sn) + delete(b.deadlines, sn) + } + } + + return res, nil +} + +func (b *CommitBatcher) processBatch(cfg sealiface.Config) ([]sealiface.CommitBatchRes, error) { + tok, _, err := b.api.ChainHead(b.mctx) + if err != nil { + return nil, err + } + + total := len(b.todo) + + var res sealiface.CommitBatchRes + + params := miner5.ProveCommitAggregateParams{ + SectorNumbers: bitfield.New(), + } + + proofs := make([][]byte, 0, total) + infos := make([]proof5.AggregateSealVerifyInfo, 0, total) + collateral := big.Zero() + + for id, p := range b.todo { + if len(infos) >= cfg.MaxCommitBatch { + log.Infow("commit batch full") + break + } + + sc, err := b.getSectorCollateral(id, tok) + if err != nil { + res.FailedSectors[id] = err.Error() + continue + } + + collateral = big.Add(collateral, sc) + + res.Sectors = append(res.Sectors, id) + params.SectorNumbers.Set(uint64(id)) + infos = append(infos, p.info) + } + + sort.Slice(infos, func(i, j int) bool { + return infos[i].Number < infos[j].Number + }) + + for _, info := range infos { + proofs = append(proofs, b.todo[info.Number].proof) + } + + mid, err := address.IDFromAddress(b.maddr) + if err != nil { + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("getting miner id: %w", err) + } + + params.AggregateProof, err = b.prover.AggregateSealProofs(proof5.AggregateSealVerifyProofAndInfos{ + Miner: abi.ActorID(mid), + SealProof: b.todo[infos[0].Number].spt, + AggregateProof: arp, + Infos: infos, + }, proofs) + if err != nil { + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("aggregating proofs: %w", err) + } + + enc := new(bytes.Buffer) + if err := params.MarshalCBOR(enc); err != nil { + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("couldn't serialize 
ProveCommitAggregateParams: %w", err) + } + + mi, err := b.api.StateMinerInfo(b.mctx, b.maddr, nil) + if err != nil { + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("couldn't get miner info: %w", err) + } + + goodFunds := big.Add(b.feeCfg.MaxCommitGasFee, collateral) + + from, _, err := b.addrSel(b.mctx, mi, api.CommitAddr, goodFunds, collateral) + if err != nil { + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("no good address found: %w", err) + } + + mcid, err := b.api.SendMsg(b.mctx, from, b.maddr, miner.Methods.ProveCommitAggregate, collateral, b.feeCfg.MaxCommitGasFee, enc.Bytes()) + if err != nil { + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("sending message failed: %w", err) + } + + res.Msg = &mcid + + log.Infow("Sent ProveCommitAggregate message", "cid", mcid, "from", from, "todo", total, "sectors", len(infos)) + + return []sealiface.CommitBatchRes{res}, nil +} + +func (b *CommitBatcher) processIndividually() ([]sealiface.CommitBatchRes, error) { + mi, err := b.api.StateMinerInfo(b.mctx, b.maddr, nil) + if err != nil { + return nil, xerrors.Errorf("couldn't get miner info: %w", err) + } + + tok, _, err := b.api.ChainHead(b.mctx) + if err != nil { + return nil, err + } + + var res []sealiface.CommitBatchRes + + for sn, info := range b.todo { + r := sealiface.CommitBatchRes{ + Sectors: []abi.SectorNumber{sn}, + } + + mcid, err := b.processSingle(mi, sn, info, tok) + if err != nil { + log.Errorf("process single error: %+v", err) // todo: return to user + r.FailedSectors[sn] = err.Error() + } else { + r.Msg = &mcid + } + + res = append(res, r) + } + + return res, nil +} + +func (b *CommitBatcher) processSingle(mi miner.MinerInfo, sn abi.SectorNumber, info AggregateInput, tok TipSetToken) (cid.Cid, error) { + enc := new(bytes.Buffer) + params := &miner.ProveCommitSectorParams{ + SectorNumber: sn, + Proof: info.proof, + } + + if err := params.MarshalCBOR(enc); err != nil { + return cid.Undef, xerrors.Errorf("marshaling commit params: %w", err) + } + + collateral, err := b.getSectorCollateral(sn, tok) + if err != nil { + return cid.Undef, err + } + + goodFunds := big.Add(collateral, b.feeCfg.MaxCommitGasFee) + + from, _, err := b.addrSel(b.mctx, mi, api.CommitAddr, goodFunds, collateral) + if err != nil { + return cid.Undef, xerrors.Errorf("no good address to send commit message from: %w", err) + } + + mcid, err := b.api.SendMsg(b.mctx, from, b.maddr, miner.Methods.ProveCommitSector, collateral, b.feeCfg.MaxCommitGasFee, enc.Bytes()) + if err != nil { + return cid.Undef, xerrors.Errorf("pushing message to mpool: %w", err) + } + + return mcid, nil +} + +// register commit, wait for batch message, return message CID +func (b *CommitBatcher) AddCommit(ctx context.Context, s SectorInfo, in AggregateInput) (res sealiface.CommitBatchRes, err error) { + _, curEpoch, err := b.api.ChainHead(b.mctx) + if err != nil { + log.Errorf("getting chain head: %s", err) + return sealiface.CommitBatchRes{}, nil + } + + sn := s.SectorNumber + + b.lk.Lock() + b.deadlines[sn] = getSectorDeadline(curEpoch, s) + b.todo[sn] = in + + sent := make(chan sealiface.CommitBatchRes, 1) + b.waiting[sn] = append(b.waiting[sn], sent) + + select { + case b.notify <- struct{}{}: + default: // already have a pending notification, don't need more + } + b.lk.Unlock() + + select { + case r := <-sent: + return r, nil + case <-ctx.Done(): + return sealiface.CommitBatchRes{}, ctx.Err() + } +} + +func (b *CommitBatcher) Flush(ctx context.Context) ([]sealiface.CommitBatchRes, error) { + resCh := 
make(chan []sealiface.CommitBatchRes, 1) + select { + case b.force <- resCh: + select { + case res := <-resCh: + return res, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + case <-ctx.Done(): + return nil, ctx.Err() + } +} + +func (b *CommitBatcher) Pending(ctx context.Context) ([]abi.SectorID, error) { + b.lk.Lock() + defer b.lk.Unlock() + + mid, err := address.IDFromAddress(b.maddr) + if err != nil { + return nil, err + } + + res := make([]abi.SectorID, 0) + for _, s := range b.todo { + res = append(res, abi.SectorID{ + Miner: abi.ActorID(mid), + Number: s.info.Number, + }) + } + + sort.Slice(res, func(i, j int) bool { + if res[i].Miner != res[j].Miner { + return res[i].Miner < res[j].Miner + } + + return res[i].Number < res[j].Number + }) + + return res, nil +} + +func (b *CommitBatcher) Stop(ctx context.Context) error { + close(b.stop) + + select { + case <-b.stopped: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +func getSectorDeadline(curEpoch abi.ChainEpoch, si SectorInfo) time.Time { + deadlineEpoch := si.TicketEpoch + policy.MaxPreCommitRandomnessLookback + for _, p := range si.Pieces { + if p.DealInfo == nil { + continue + } + + startEpoch := p.DealInfo.DealSchedule.StartEpoch + if startEpoch < deadlineEpoch { + deadlineEpoch = startEpoch + } + } + + if deadlineEpoch <= curEpoch { + return time.Now() + } + + return time.Now().Add(time.Duration(deadlineEpoch-curEpoch) * time.Duration(build.BlockDelaySecs) * time.Second) +} + +func (b *CommitBatcher) getSectorCollateral(sn abi.SectorNumber, tok TipSetToken) (abi.TokenAmount, error) { + pci, err := b.api.StateSectorPreCommitInfo(b.mctx, b.maddr, sn, tok) + if err != nil { + return big.Zero(), xerrors.Errorf("getting precommit info: %w", err) + } + if pci == nil { + return big.Zero(), xerrors.Errorf("precommit info not found on chain") + } + + collateral, err := b.api.StateMinerInitialPledgeCollateral(b.mctx, b.maddr, pci.Info, tok) + if err != nil { + return big.Zero(), xerrors.Errorf("getting initial pledge collateral: %w", err) + } + + collateral = big.Sub(collateral, pci.PreCommitDeposit) + if collateral.LessThan(big.Zero()) { + collateral = big.Zero() + } + + return collateral, nil +} diff --git a/extern/storage-sealing/fsm.go b/extern/storage-sealing/fsm.go index a3b0db1c4..640aaf19a 100644 --- a/extern/storage-sealing/fsm.go +++ b/extern/storage-sealing/fsm.go @@ -72,13 +72,27 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed), ), PreCommitting: planOne( - on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed), + on(SectorPreCommitBatch{}, SubmitPreCommitBatch), on(SectorPreCommitted{}, PreCommitWait), + on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed), on(SectorChainPreCommitFailed{}, PreCommitFailed), on(SectorPreCommitLanded{}, WaitSeed), on(SectorDealsExpired{}, DealsExpired), on(SectorInvalidDealIDs{}, RecoverDealIDs), ), + SubmitPreCommitBatch: planOne( + on(SectorPreCommitBatchSent{}, PreCommitBatchWait), + on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed), + on(SectorChainPreCommitFailed{}, PreCommitFailed), + on(SectorPreCommitLanded{}, WaitSeed), + on(SectorDealsExpired{}, DealsExpired), + on(SectorInvalidDealIDs{}, RecoverDealIDs), + ), + PreCommitBatchWait: planOne( + on(SectorChainPreCommitFailed{}, PreCommitFailed), + on(SectorPreCommitLanded{}, WaitSeed), + on(SectorRetryPreCommit{}, PreCommitting), + ), PreCommitWait: planOne( on(SectorChainPreCommitFailed{}, PreCommitFailed), 
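
Both batchers share the same wake-up rule (`batchWait` above, duplicated in `precommit_batch.go` below): fire at most `maxWait` from now, but pull the send forward so it happens `slack` before the earliest per-sector deadline computed by `getSectorDeadline`. A compact restatement as a standalone function; `nextBatchWake` is a hypothetical name, illustration only.

```go
package example

import "time"

// nextBatchWake restates the timing rule both batchers implement in batchWait:
// wake `slack` before the earliest per-sector deadline, never later than
// maxWait from now, and immediately if the safety margin is already gone.
func nextBatchWake(now time.Time, deadlines []time.Time, maxWait, slack time.Duration) time.Duration {
	var earliest time.Time
	for _, d := range deadlines {
		if earliest.IsZero() || (!d.IsZero() && d.Before(earliest)) {
			earliest = d
		}
	}
	if earliest.IsZero() {
		// No deadline information: fall back to the plain batch interval.
		// (The real batchWait returns a nil channel when nothing is queued.)
		return maxWait
	}

	wake := earliest.Add(-slack) // leave `slack` worth of time to land the message
	if !wake.After(now) {
		return time.Nanosecond // already inside the safety margin: send as soon as possible
	}
	if wait := wake.Sub(now); wait < maxWait {
		return wait
	}
	return maxWait
}
```

So with, say, a 24h `CommitBatchWait` and a 1h `CommitBatchSlack`, a queued sector whose earliest deal starts roughly five hours from now causes the batch to go out in about four hours instead of waiting out the full day.
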
on(SectorPreCommitLanded{}, WaitSeed), @@ -91,6 +105,11 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto Committing: planCommitting, SubmitCommit: planOne( on(SectorCommitSubmitted{}, CommitWait), + on(SectorSubmitCommitAggregate{}, SubmitCommitAggregate), + on(SectorCommitFailed{}, CommitFailed), + ), + SubmitCommitAggregate: planOne( + on(SectorCommitAggregateSent{}, CommitWait), on(SectorCommitFailed{}, CommitFailed), ), CommitWait: planOne( @@ -98,6 +117,11 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto on(SectorCommitFailed{}, CommitFailed), on(SectorRetrySubmitCommit{}, SubmitCommit), ), + CommitAggregateWait: planOne( + on(SectorProving{}, FinalizeSector), + on(SectorCommitFailed{}, CommitFailed), + on(SectorRetrySubmitCommit{}, SubmitCommit), + ), FinalizeSector: planOne( on(SectorFinalized{}, Proving), @@ -337,6 +361,10 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta return m.handlePreCommit2, processed, nil case PreCommitting: return m.handlePreCommitting, processed, nil + case SubmitPreCommitBatch: + return m.handleSubmitPreCommitBatch, processed, nil + case PreCommitBatchWait: + fallthrough case PreCommitWait: return m.handlePreCommitWait, processed, nil case WaitSeed: @@ -345,6 +373,10 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta return m.handleCommitting, processed, nil case SubmitCommit: return m.handleSubmitCommit, processed, nil + case SubmitCommitAggregate: + return m.handleSubmitCommitAggregate, processed, nil + case CommitAggregateWait: + fallthrough case CommitWait: return m.handleCommitWait, processed, nil case FinalizeSector: diff --git a/extern/storage-sealing/fsm_events.go b/extern/storage-sealing/fsm_events.go index 8d11b248b..7ec8f3dfc 100644 --- a/extern/storage-sealing/fsm_events.go +++ b/extern/storage-sealing/fsm_events.go @@ -150,6 +150,18 @@ func (evt SectorPreCommit2) apply(state *SectorInfo) { state.CommR = &commr } +type SectorPreCommitBatch struct{} + +func (evt SectorPreCommitBatch) apply(*SectorInfo) {} + +type SectorPreCommitBatchSent struct { + Message cid.Cid +} + +func (evt SectorPreCommitBatchSent) apply(state *SectorInfo) { + state.PreCommitMessage = &evt.Message +} + type SectorPreCommitLanded struct { TipSet TipSetToken } @@ -233,6 +245,10 @@ func (evt SectorCommitted) apply(state *SectorInfo) { state.Proof = evt.Proof } +type SectorSubmitCommitAggregate struct{} + +func (evt SectorSubmitCommitAggregate) apply(*SectorInfo) {} + type SectorCommitSubmitted struct { Message cid.Cid } @@ -241,6 +257,14 @@ func (evt SectorCommitSubmitted) apply(state *SectorInfo) { state.CommitMessage = &evt.Message } +type SectorCommitAggregateSent struct { + Message cid.Cid +} + +func (evt SectorCommitAggregateSent) apply(state *SectorInfo) { + state.CommitMessage = &evt.Message +} + type SectorProving struct{} func (evt SectorProving) apply(*SectorInfo) {} diff --git a/extern/storage-sealing/precommit_batch.go b/extern/storage-sealing/precommit_batch.go new file mode 100644 index 000000000..dd674d331 --- /dev/null +++ b/extern/storage-sealing/precommit_batch.go @@ -0,0 +1,332 @@ +package sealing + +import ( + "bytes" + "context" + "sort" + "sync" + "time" + + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + miner0 
"github.com/filecoin-project/specs-actors/actors/builtin/miner" + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" +) + +type PreCommitBatcherApi interface { + SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error) + StateMinerInfo(context.Context, address.Address, TipSetToken) (miner.MinerInfo, error) + ChainHead(ctx context.Context) (TipSetToken, abi.ChainEpoch, error) +} + +type preCommitEntry struct { + deposit abi.TokenAmount + pci *miner0.SectorPreCommitInfo +} + +type PreCommitBatcher struct { + api PreCommitBatcherApi + maddr address.Address + mctx context.Context + addrSel AddrSel + feeCfg FeeConfig + getConfig GetSealingConfigFunc + + deadlines map[abi.SectorNumber]time.Time + todo map[abi.SectorNumber]*preCommitEntry + waiting map[abi.SectorNumber][]chan sealiface.PreCommitBatchRes + + notify, stop, stopped chan struct{} + force chan chan []sealiface.PreCommitBatchRes + lk sync.Mutex +} + +func NewPreCommitBatcher(mctx context.Context, maddr address.Address, api PreCommitBatcherApi, addrSel AddrSel, feeCfg FeeConfig, getConfig GetSealingConfigFunc) *PreCommitBatcher { + b := &PreCommitBatcher{ + api: api, + maddr: maddr, + mctx: mctx, + addrSel: addrSel, + feeCfg: feeCfg, + getConfig: getConfig, + + deadlines: map[abi.SectorNumber]time.Time{}, + todo: map[abi.SectorNumber]*preCommitEntry{}, + waiting: map[abi.SectorNumber][]chan sealiface.PreCommitBatchRes{}, + + notify: make(chan struct{}, 1), + force: make(chan chan []sealiface.PreCommitBatchRes), + stop: make(chan struct{}), + stopped: make(chan struct{}), + } + + go b.run() + + return b +} + +func (b *PreCommitBatcher) run() { + var forceRes chan []sealiface.PreCommitBatchRes + var lastRes []sealiface.PreCommitBatchRes + + cfg, err := b.getConfig() + if err != nil { + panic(err) + } + + for { + if forceRes != nil { + forceRes <- lastRes + forceRes = nil + } + lastRes = nil + + var sendAboveMax, sendAboveMin bool + select { + case <-b.stop: + close(b.stopped) + return + case <-b.notify: + sendAboveMax = true + case <-b.batchWait(cfg.PreCommitBatchWait, cfg.PreCommitBatchSlack): + sendAboveMin = true + case fr := <-b.force: // user triggered + forceRes = fr + } + + var err error + lastRes, err = b.maybeStartBatch(sendAboveMax, sendAboveMin) + if err != nil { + log.Warnw("PreCommitBatcher processBatch error", "error", err) + } + } +} + +func (b *PreCommitBatcher) batchWait(maxWait, slack time.Duration) <-chan time.Time { + now := time.Now() + + b.lk.Lock() + defer b.lk.Unlock() + + if len(b.todo) == 0 { + return nil + } + + var deadline time.Time + for sn := range b.todo { + sectorDeadline := b.deadlines[sn] + if deadline.IsZero() || (!sectorDeadline.IsZero() && sectorDeadline.Before(deadline)) { + deadline = sectorDeadline + } + } + for sn := range b.waiting { + sectorDeadline := b.deadlines[sn] + if deadline.IsZero() || (!sectorDeadline.IsZero() && sectorDeadline.Before(deadline)) { + deadline = sectorDeadline + } + } + + if deadline.IsZero() { + return time.After(maxWait) + } + + deadline = deadline.Add(-slack) + if deadline.Before(now) { + return time.After(time.Nanosecond) // can't return 0 + } + + wait := deadline.Sub(now) + if wait > maxWait { + wait = maxWait + } + + return time.After(wait) +} + +func (b *PreCommitBatcher) 
maybeStartBatch(notif, after bool) ([]sealiface.PreCommitBatchRes, error) { + b.lk.Lock() + defer b.lk.Unlock() + + total := len(b.todo) + if total == 0 { + return nil, nil // nothing to do + } + + cfg, err := b.getConfig() + if err != nil { + return nil, xerrors.Errorf("getting config: %w", err) + } + + if notif && total < cfg.MaxPreCommitBatch { + return nil, nil + } + + if after && total < cfg.MinPreCommitBatch { + return nil, nil + } + + // todo support multiple batches + res, err := b.processBatch(cfg) + if err != nil && len(res) == 0 { + return nil, err + } + + for _, r := range res { + if err != nil { + r.Error = err.Error() + } + + for _, sn := range r.Sectors { + for _, ch := range b.waiting[sn] { + ch <- r // buffered + } + + delete(b.waiting, sn) + delete(b.todo, sn) + delete(b.deadlines, sn) + } + } + + return res, nil +} + +func (b *PreCommitBatcher) processBatch(cfg sealiface.Config) ([]sealiface.PreCommitBatchRes, error) { + params := miner5.PreCommitSectorBatchParams{} + deposit := big.Zero() + var res sealiface.PreCommitBatchRes + + for _, p := range b.todo { + if len(params.Sectors) >= cfg.MaxPreCommitBatch { + log.Infow("precommit batch full") + break + } + + res.Sectors = append(res.Sectors, p.pci.SectorNumber) + params.Sectors = append(params.Sectors, *p.pci) + deposit = big.Add(deposit, p.deposit) + } + + enc := new(bytes.Buffer) + if err := params.MarshalCBOR(enc); err != nil { + return []sealiface.PreCommitBatchRes{res}, xerrors.Errorf("couldn't serialize PreCommitSectorBatchParams: %w", err) + } + + mi, err := b.api.StateMinerInfo(b.mctx, b.maddr, nil) + if err != nil { + return []sealiface.PreCommitBatchRes{res}, xerrors.Errorf("couldn't get miner info: %w", err) + } + + goodFunds := big.Add(deposit, b.feeCfg.MaxPreCommitGasFee) + + from, _, err := b.addrSel(b.mctx, mi, api.PreCommitAddr, goodFunds, deposit) + if err != nil { + return []sealiface.PreCommitBatchRes{res}, xerrors.Errorf("no good address found: %w", err) + } + + mcid, err := b.api.SendMsg(b.mctx, from, b.maddr, miner.Methods.PreCommitSectorBatch, deposit, b.feeCfg.MaxPreCommitGasFee, enc.Bytes()) + if err != nil { + return []sealiface.PreCommitBatchRes{res}, xerrors.Errorf("sending message failed: %w", err) + } + + res.Msg = &mcid + + log.Infow("Sent ProveCommitAggregate message", "cid", mcid, "from", from, "sectors", len(b.todo)) + + return []sealiface.PreCommitBatchRes{res}, nil +} + +// register PreCommit, wait for batch message, return message CID +func (b *PreCommitBatcher) AddPreCommit(ctx context.Context, s SectorInfo, deposit abi.TokenAmount, in *miner0.SectorPreCommitInfo) (res sealiface.PreCommitBatchRes, err error) { + _, curEpoch, err := b.api.ChainHead(b.mctx) + if err != nil { + log.Errorf("getting chain head: %s", err) + return sealiface.PreCommitBatchRes{}, err + } + + sn := s.SectorNumber + + b.lk.Lock() + b.deadlines[sn] = getSectorDeadline(curEpoch, s) + b.todo[sn] = &preCommitEntry{ + deposit: deposit, + pci: in, + } + + sent := make(chan sealiface.PreCommitBatchRes, 1) + b.waiting[sn] = append(b.waiting[sn], sent) + + select { + case b.notify <- struct{}{}: + default: // already have a pending notification, don't need more + } + b.lk.Unlock() + + select { + case c := <-sent: + return c, nil + case <-ctx.Done(): + return sealiface.PreCommitBatchRes{}, ctx.Err() + } +} + +func (b *PreCommitBatcher) Flush(ctx context.Context) ([]sealiface.PreCommitBatchRes, error) { + resCh := make(chan []sealiface.PreCommitBatchRes, 1) + select { + case b.force <- resCh: + select { + case res := 
<-resCh: + return res, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + case <-ctx.Done(): + return nil, ctx.Err() + } +} + +func (b *PreCommitBatcher) Pending(ctx context.Context) ([]abi.SectorID, error) { + b.lk.Lock() + defer b.lk.Unlock() + + mid, err := address.IDFromAddress(b.maddr) + if err != nil { + return nil, err + } + + res := make([]abi.SectorID, 0) + for _, s := range b.todo { + res = append(res, abi.SectorID{ + Miner: abi.ActorID(mid), + Number: s.pci.SectorNumber, + }) + } + + sort.Slice(res, func(i, j int) bool { + if res[i].Miner != res[j].Miner { + return res[i].Miner < res[j].Miner + } + + return res[i].Number < res[j].Number + }) + + return res, nil +} + +func (b *PreCommitBatcher) Stop(ctx context.Context) error { + close(b.stop) + + select { + case <-b.stopped: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} diff --git a/extern/storage-sealing/sealiface/batching.go b/extern/storage-sealing/sealiface/batching.go new file mode 100644 index 000000000..d0e6d4178 --- /dev/null +++ b/extern/storage-sealing/sealiface/batching.go @@ -0,0 +1,23 @@ +package sealiface + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-state-types/abi" +) + +type CommitBatchRes struct { + Sectors []abi.SectorNumber + + FailedSectors map[abi.SectorNumber]string + + Msg *cid.Cid + Error string // if set, means that all sectors are failed, implies Msg==nil +} + +type PreCommitBatchRes struct { + Sectors []abi.SectorNumber + + Msg *cid.Cid + Error string // if set, means that all sectors are failed, implies Msg==nil +} diff --git a/extern/storage-sealing/sealiface/config.go b/extern/storage-sealing/sealiface/config.go index 7ac5f6160..54ba2ef58 100644 --- a/extern/storage-sealing/sealiface/config.go +++ b/extern/storage-sealing/sealiface/config.go @@ -17,4 +17,20 @@ type Config struct { WaitDealsDelay time.Duration AlwaysKeepUnsealedCopy bool + + BatchPreCommits bool + MaxPreCommitBatch int + MinPreCommitBatch int + PreCommitBatchWait time.Duration + PreCommitBatchSlack time.Duration + + AggregateCommits bool + MinCommitBatch int + MaxCommitBatch int + CommitBatchWait time.Duration + CommitBatchSlack time.Duration + + TerminateBatchMax uint64 + TerminateBatchMin uint64 + TerminateBatchWait time.Duration } diff --git a/extern/storage-sealing/sealing.go b/extern/storage-sealing/sealing.go index 7c118901b..69746268f 100644 --- a/extern/storage-sealing/sealing.go +++ b/extern/storage-sealing/sealing.go @@ -27,6 +27,7 @@ import ( "github.com/filecoin-project/lotus/chain/types" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" ) const SectorStorePrefix = "/sectors" @@ -103,7 +104,9 @@ type Sealing struct { stats SectorStats - terminator *TerminateBatcher + terminator *TerminateBatcher + precommiter *PreCommitBatcher + commiter *CommitBatcher getConfig GetSealingConfigFunc dealInfo *CurrentDealInfoManager @@ -131,7 +134,7 @@ type pendingPiece struct { accepted func(abi.SectorNumber, abi.UnpaddedPieceSize, error) } -func New(api SealingAPI, fc FeeConfig, events Events, maddr address.Address, ds datastore.Batching, sealer sectorstorage.SectorManager, sc SectorIDCounter, verif ffiwrapper.Verifier, pcp PreCommitPolicy, gc GetSealingConfigFunc, notifee SectorStateNotifee, as AddrSel) *Sealing { +func New(api SealingAPI, fc FeeConfig, events Events, maddr address.Address, ds datastore.Batching, sealer 
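
The new `sealiface.Config` fields control both batchers (and replace the previously hard-coded termination-batch constants). A filled-in sketch follows, with illustrative values only, not recommended defaults: a batch is sent early once `Max*` entries are queued, otherwise when the wait/slack timer fires with at least `Min*` entries.

```go
package example

import (
	"time"

	"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
)

// exampleBatchingConfig populates only the batching-related fields added in
// this change. Numbers are placeholders for illustration.
func exampleBatchingConfig() sealiface.Config {
	return sealiface.Config{
		BatchPreCommits:     true,
		MinPreCommitBatch:   1,              // send on timer even with a single sector
		MaxPreCommitBatch:   256,            // send immediately once this many are queued
		PreCommitBatchWait:  24 * time.Hour, // upper bound on how long a sector waits
		PreCommitBatchSlack: 3 * time.Hour,  // safety margin before a sector's deadline

		AggregateCommits: true,
		MinCommitBatch:   4, // aggregation needs a minimum number of proofs
		MaxCommitBatch:   256,
		CommitBatchWait:  24 * time.Hour,
		CommitBatchSlack: time.Hour,

		// Previously hard-coded in terminate_batch.go, now configurable.
		TerminateBatchMin:  1,
		TerminateBatchMax:  100,
		TerminateBatchWait: 5 * time.Minute,
	}
}
```
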
sectorstorage.SectorManager, sc SectorIDCounter, verif ffiwrapper.Verifier, prov ffiwrapper.Prover, pcp PreCommitPolicy, gc GetSealingConfigFunc, notifee SectorStateNotifee, as AddrSel) *Sealing { s := &Sealing{ api: api, feeCfg: fc, @@ -152,7 +155,9 @@ func New(api SealingAPI, fc FeeConfig, events Events, maddr address.Address, ds notifee: notifee, addrSel: as, - terminator: NewTerminationBatcher(context.TODO(), maddr, api, as, fc), + terminator: NewTerminationBatcher(context.TODO(), maddr, api, as, fc, gc), + precommiter: NewPreCommitBatcher(context.TODO(), maddr, api, as, fc, gc), + commiter: NewCommitBatcher(context.TODO(), maddr, api, as, fc, gc, prov), getConfig: gc, dealInfo: &CurrentDealInfoManager{api}, @@ -203,6 +208,22 @@ func (m *Sealing) TerminatePending(ctx context.Context) ([]abi.SectorID, error) return m.terminator.Pending(ctx) } +func (m *Sealing) SectorPreCommitFlush(ctx context.Context) ([]sealiface.PreCommitBatchRes, error) { + return m.precommiter.Flush(ctx) +} + +func (m *Sealing) SectorPreCommitPending(ctx context.Context) ([]abi.SectorID, error) { + return m.precommiter.Pending(ctx) +} + +func (m *Sealing) CommitFlush(ctx context.Context) ([]sealiface.CommitBatchRes, error) { + return m.commiter.Flush(ctx) +} + +func (m *Sealing) CommitPending(ctx context.Context) ([]abi.SectorID, error) { + return m.commiter.Pending(ctx) +} + func (m *Sealing) currentSealProof(ctx context.Context) (abi.RegisteredSealProof, error) { mi, err := m.api.StateMinerInfo(ctx, m.maddr, nil) if err != nil { diff --git a/extern/storage-sealing/sector_state.go b/extern/storage-sealing/sector_state.go index b636614d1..23c7695e7 100644 --- a/extern/storage-sealing/sector_state.go +++ b/extern/storage-sealing/sector_state.go @@ -3,61 +3,76 @@ package sealing type SectorState string var ExistSectorStateList = map[SectorState]struct{}{ - Empty: {}, - WaitDeals: {}, - Packing: {}, - AddPiece: {}, - AddPieceFailed: {}, - GetTicket: {}, - PreCommit1: {}, - PreCommit2: {}, - PreCommitting: {}, - PreCommitWait: {}, - WaitSeed: {}, - Committing: {}, - SubmitCommit: {}, - CommitWait: {}, - FinalizeSector: {}, - Proving: {}, - FailedUnrecoverable: {}, - SealPreCommit1Failed: {}, - SealPreCommit2Failed: {}, - PreCommitFailed: {}, - ComputeProofFailed: {}, - CommitFailed: {}, - PackingFailed: {}, - FinalizeFailed: {}, - DealsExpired: {}, - RecoverDealIDs: {}, - Faulty: {}, - FaultReported: {}, - FaultedFinal: {}, - Terminating: {}, - TerminateWait: {}, - TerminateFinality: {}, - TerminateFailed: {}, - Removing: {}, - RemoveFailed: {}, - Removed: {}, + Empty: {}, + WaitDeals: {}, + Packing: {}, + AddPiece: {}, + AddPieceFailed: {}, + GetTicket: {}, + PreCommit1: {}, + PreCommit2: {}, + PreCommitting: {}, + PreCommitWait: {}, + SubmitPreCommitBatch: {}, + PreCommitBatchWait: {}, + WaitSeed: {}, + Committing: {}, + SubmitCommit: {}, + CommitWait: {}, + SubmitCommitAggregate: {}, + CommitAggregateWait: {}, + FinalizeSector: {}, + Proving: {}, + FailedUnrecoverable: {}, + SealPreCommit1Failed: {}, + SealPreCommit2Failed: {}, + PreCommitFailed: {}, + ComputeProofFailed: {}, + CommitFailed: {}, + PackingFailed: {}, + FinalizeFailed: {}, + DealsExpired: {}, + RecoverDealIDs: {}, + Faulty: {}, + FaultReported: {}, + FaultedFinal: {}, + Terminating: {}, + TerminateWait: {}, + TerminateFinality: {}, + TerminateFailed: {}, + Removing: {}, + RemoveFailed: {}, + Removed: {}, } const ( UndefinedSectorState SectorState = "" // happy path - Empty SectorState = "Empty" // deprecated - WaitDeals SectorState = "WaitDeals" // 
waiting for more pieces (deals) to be added to the sector - AddPiece SectorState = "AddPiece" // put deal data (and padding if required) into the sector - Packing SectorState = "Packing" // sector not in sealStore, and not on chain - GetTicket SectorState = "GetTicket" // generate ticket - PreCommit1 SectorState = "PreCommit1" // do PreCommit1 - PreCommit2 SectorState = "PreCommit2" // do PreCommit2 - PreCommitting SectorState = "PreCommitting" // on chain pre-commit - PreCommitWait SectorState = "PreCommitWait" // waiting for precommit to land on chain - WaitSeed SectorState = "WaitSeed" // waiting for seed - Committing SectorState = "Committing" // compute PoRep - SubmitCommit SectorState = "SubmitCommit" // send commit message to the chain - CommitWait SectorState = "CommitWait" // wait for the commit message to land on chain + Empty SectorState = "Empty" // deprecated + WaitDeals SectorState = "WaitDeals" // waiting for more pieces (deals) to be added to the sector + AddPiece SectorState = "AddPiece" // put deal data (and padding if required) into the sector + Packing SectorState = "Packing" // sector not in sealStore, and not on chain + GetTicket SectorState = "GetTicket" // generate ticket + PreCommit1 SectorState = "PreCommit1" // do PreCommit1 + PreCommit2 SectorState = "PreCommit2" // do PreCommit2 + + PreCommitting SectorState = "PreCommitting" // on chain pre-commit + PreCommitWait SectorState = "PreCommitWait" // waiting for precommit to land on chain + + SubmitPreCommitBatch SectorState = "SubmitPreCommitBatch" + PreCommitBatchWait SectorState = "PreCommitBatchWait" + + WaitSeed SectorState = "WaitSeed" // waiting for seed + Committing SectorState = "Committing" // compute PoRep + + // single commit + SubmitCommit SectorState = "SubmitCommit" // send commit message to the chain + CommitWait SectorState = "CommitWait" // wait for the commit message to land on chain + + SubmitCommitAggregate SectorState = "SubmitCommitAggregate" + CommitAggregateWait SectorState = "CommitAggregateWait" + FinalizeSector SectorState = "FinalizeSector" Proving SectorState = "Proving" // error modes @@ -91,7 +106,7 @@ func toStatState(st SectorState) statSectorState { switch st { case UndefinedSectorState, Empty, WaitDeals, AddPiece: return sstStaging - case Packing, GetTicket, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, WaitSeed, Committing, SubmitCommit, CommitWait, FinalizeSector: + case Packing, GetTicket, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, SubmitPreCommitBatch, PreCommitBatchWait, WaitSeed, Committing, SubmitCommit, CommitWait, SubmitCommitAggregate, CommitAggregateWait, FinalizeSector: return sstSealing case Proving, Removed, Removing, Terminating, TerminateWait, TerminateFinality, TerminateFailed: return sstProving diff --git a/extern/storage-sealing/states_sealing.go b/extern/storage-sealing/states_sealing.go index e5449b422..5e8f5269b 100644 --- a/extern/storage-sealing/states_sealing.go +++ b/extern/storage-sealing/states_sealing.go @@ -11,7 +11,9 @@ import ( "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/go-statemachine" + "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/api" @@ -224,56 +226,50 @@ func (m *Sealing) remarkForUpgrade(sid abi.SectorNumber) { } } -func (m 
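
Putting the planner and state additions together, a sector that takes the batched route now moves through two new happy paths; the existing `PreCommitting -> PreCommitWait` and `SubmitCommit -> CommitWait` paths remain for the non-batched case. Summarized as data below, using the exported state constants (illustration only, not part of this patch).

```go
package example

import (
	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
)

// New happy-path sequences introduced by batching and aggregation.
var (
	preCommitBatchPath = []sealing.SectorState{
		sealing.PreCommitting,        // emits SectorPreCommitBatch when batching is enabled
		sealing.SubmitPreCommitBatch, // queued in the PreCommitBatcher; emits SectorPreCommitBatchSent
		sealing.PreCommitBatchWait,   // waiting for the PreCommitSectorBatch message to land
		sealing.WaitSeed,
	}

	commitAggregatePath = []sealing.SectorState{
		sealing.SubmitCommit,          // emits SectorSubmitCommitAggregate when aggregation is enabled
		sealing.SubmitCommitAggregate, // queued in the CommitBatcher; emits SectorCommitAggregateSent
		sealing.CommitWait,            // waiting for the ProveCommitAggregate message to land
		sealing.FinalizeSector,
	}
)
```
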
*Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInfo) error { +func (m *Sealing) preCommitParams(ctx statemachine.Context, sector SectorInfo) (*miner.SectorPreCommitInfo, big.Int, TipSetToken, error) { tok, height, err := m.api.ChainHead(ctx.Context()) if err != nil { log.Errorf("handlePreCommitting: api error, not proceeding: %+v", err) - return nil - } - - mi, err := m.api.StateMinerInfo(ctx.Context(), m.maddr, tok) - if err != nil { - log.Errorf("handlePreCommitting: api error, not proceeding: %+v", err) - return nil + return nil, big.Zero(), nil, nil } if err := checkPrecommit(ctx.Context(), m.Address(), sector, tok, height, m.api); err != nil { switch err := err.(type) { case *ErrApi: log.Errorf("handlePreCommitting: api error, not proceeding: %+v", err) - return nil + return nil, big.Zero(), nil, nil case *ErrBadCommD: // TODO: Should this just back to packing? (not really needed since handlePreCommit1 will do that too) - return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("bad CommD error: %w", err)}) + return nil, big.Zero(), nil, ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("bad CommD error: %w", err)}) case *ErrExpiredTicket: - return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("ticket expired: %w", err)}) + return nil, big.Zero(), nil, ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("ticket expired: %w", err)}) case *ErrBadTicket: - return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("bad ticket: %w", err)}) + return nil, big.Zero(), nil, ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("bad ticket: %w", err)}) case *ErrInvalidDeals: log.Warnf("invalid deals in sector %d: %v", sector.SectorNumber, err) - return ctx.Send(SectorInvalidDealIDs{Return: RetPreCommitting}) + return nil, big.Zero(), nil, ctx.Send(SectorInvalidDealIDs{Return: RetPreCommitting}) case *ErrExpiredDeals: - return ctx.Send(SectorDealsExpired{xerrors.Errorf("sector deals expired: %w", err)}) + return nil, big.Zero(), nil, ctx.Send(SectorDealsExpired{xerrors.Errorf("sector deals expired: %w", err)}) case *ErrPrecommitOnChain: - return ctx.Send(SectorPreCommitLanded{TipSet: tok}) // we re-did precommit + return nil, big.Zero(), nil, ctx.Send(SectorPreCommitLanded{TipSet: tok}) // we re-did precommit case *ErrSectorNumberAllocated: log.Errorf("handlePreCommitFailed: sector number already allocated, not proceeding: %+v", err) // TODO: check if the sector is committed (not sure how we'd end up here) - return nil + return nil, big.Zero(), nil, nil default: - return xerrors.Errorf("checkPrecommit sanity check error: %w", err) + return nil, big.Zero(), nil, xerrors.Errorf("checkPrecommit sanity check error: %w", err) } } expiration, err := m.pcp.Expiration(ctx.Context(), sector.Pieces...) if err != nil { - return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("handlePreCommitting: failed to compute pre-commit expiry: %w", err)}) + return nil, big.Zero(), nil, ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("handlePreCommitting: failed to compute pre-commit expiry: %w", err)}) } // Sectors must last _at least_ MinSectorExpiration + MaxSealDuration. // TODO: The "+10" allows the pre-commit to take 10 blocks to be accepted. 
nv, err := m.api.StateNetworkVersion(ctx.Context(), tok) if err != nil { - return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("failed to get network version: %w", err)}) + return nil, big.Zero(), nil, ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("failed to get network version: %w", err)}) } msd := policy.GetMaxProveCommitDuration(actors.VersionForNetwork(nv), sector.SectorType) @@ -295,17 +291,49 @@ func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInf depositMinimum := m.tryUpgradeSector(ctx.Context(), params) + collateral, err := m.api.StateMinerPreCommitDepositForPower(ctx.Context(), m.maddr, *params, tok) + if err != nil { + return nil, big.Zero(), nil, xerrors.Errorf("getting initial pledge collateral: %w", err) + } + + deposit := big.Max(depositMinimum, collateral) + + return params, deposit, tok, nil +} + +func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInfo) error { + cfg, err := m.getConfig() + if err != nil { + return xerrors.Errorf("getting config: %w", err) + } + + if cfg.BatchPreCommits { + nv, err := m.api.StateNetworkVersion(ctx.Context(), nil) + if err != nil { + return xerrors.Errorf("getting network version: %w", err) + } + + if nv >= network.Version13 { + return ctx.Send(SectorPreCommitBatch{}) + } + } + + params, deposit, tok, err := m.preCommitParams(ctx, sector) + if params == nil || err != nil { + return err + } + enc := new(bytes.Buffer) if err := params.MarshalCBOR(enc); err != nil { return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("could not serialize pre-commit sector parameters: %w", err)}) } - collateral, err := m.api.StateMinerPreCommitDepositForPower(ctx.Context(), m.maddr, *params, tok) + mi, err := m.api.StateMinerInfo(ctx.Context(), m.maddr, tok) if err != nil { - return xerrors.Errorf("getting initial pledge collateral: %w", err) + log.Errorf("handlePreCommitting: api error, not proceeding: %+v", err) + return nil } - deposit := big.Max(depositMinimum, collateral) goodFunds := big.Add(deposit, m.feeCfg.MaxPreCommitGasFee) from, _, err := m.addrSel(ctx.Context(), mi, api.PreCommitAddr, goodFunds, deposit) @@ -325,6 +353,32 @@ func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInf return ctx.Send(SectorPreCommitted{Message: mcid, PreCommitDeposit: deposit, PreCommitInfo: *params}) } +func (m *Sealing) handleSubmitPreCommitBatch(ctx statemachine.Context, sector SectorInfo) error { + if sector.CommD == nil || sector.CommR == nil { + return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("sector had nil commR or commD")}) + } + + params, deposit, _, err := m.preCommitParams(ctx, sector) + if params == nil || err != nil { + return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("preCommitParams: %w", err)}) + } + + res, err := m.precommiter.AddPreCommit(ctx.Context(), sector, deposit, params) + if err != nil { + return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("queuing precommit batch failed: %w", err)}) + } + + if res.Error != "" { + return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("precommit batch error: %s", res.Error)}) + } + + if res.Msg == nil { + return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("batch message was nil")}) + } + + return ctx.Send(SectorPreCommitBatchSent{*res.Msg}) +} + func (m *Sealing) handlePreCommitWait(ctx statemachine.Context, sector SectorInfo) error { if sector.PreCommitMessage == nil { return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("precommit message was nil")}) @@ -452,6 +506,22 @@ 
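
`handlePreCommitting` here (and `handleSubmitCommit` further down) only diverts sectors onto the batched path when two conditions hold: the feature is switched on in config, and the chain has reached network version 13. Restated as a pair of helpers; the names are hypothetical and the real checks live inline in `states_sealing.go`.

```go
package example

import (
	"github.com/filecoin-project/go-state-types/network"

	"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
)

// useBatchedPreCommit reports whether a sector should go through
// SubmitPreCommitBatch instead of sending its own PreCommitSector message.
func useBatchedPreCommit(cfg sealiface.Config, nv network.Version) bool {
	return cfg.BatchPreCommits && nv >= network.Version13
}

// useAggregatedCommit reports whether a sector should go through
// SubmitCommitAggregate instead of sending its own ProveCommitSector message.
func useAggregatedCommit(cfg sealiface.Config, nv network.Version) bool {
	return cfg.AggregateCommits && nv >= network.Version13
}
```
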
func (m *Sealing) handleCommitting(ctx statemachine.Context, sector SectorInfo) } func (m *Sealing) handleSubmitCommit(ctx statemachine.Context, sector SectorInfo) error { + cfg, err := m.getConfig() + if err != nil { + return xerrors.Errorf("getting config: %w", err) + } + + if cfg.AggregateCommits { + nv, err := m.api.StateNetworkVersion(ctx.Context(), nil) + if err != nil { + return xerrors.Errorf("getting network version: %w", err) + } + + if nv >= network.Version13 { + return ctx.Send(SectorSubmitCommitAggregate{}) + } + } + tok, _, err := m.api.ChainHead(ctx.Context()) if err != nil { log.Errorf("handleCommitting: api error, not proceeding: %+v", err) @@ -514,6 +584,41 @@ func (m *Sealing) handleSubmitCommit(ctx statemachine.Context, sector SectorInfo }) } +func (m *Sealing) handleSubmitCommitAggregate(ctx statemachine.Context, sector SectorInfo) error { + if sector.CommD == nil || sector.CommR == nil { + return ctx.Send(SectorCommitFailed{xerrors.Errorf("sector had nil commR or commD")}) + } + + res, err := m.commiter.AddCommit(ctx.Context(), sector, AggregateInput{ + info: proof.AggregateSealVerifyInfo{ + Number: sector.SectorNumber, + Randomness: sector.TicketValue, + InteractiveRandomness: sector.SeedValue, + SealedCID: *sector.CommR, + UnsealedCID: *sector.CommD, + }, + proof: sector.Proof, // todo: this correct?? + spt: sector.SectorType, + }) + if err != nil { + return ctx.Send(SectorCommitFailed{xerrors.Errorf("queuing commit for aggregation failed: %w", err)}) + } + + if res.Error != "" { + return ctx.Send(SectorCommitFailed{xerrors.Errorf("aggregate error: %s", res.Error)}) + } + + if e, found := res.FailedSectors[sector.SectorNumber]; found { + return ctx.Send(SectorCommitFailed{xerrors.Errorf("sector failed in aggregate processing: %s", e)}) + } + + if res.Msg == nil { + return ctx.Send(SectorCommitFailed{xerrors.Errorf("aggregate message was nil")}) + } + + return ctx.Send(SectorCommitAggregateSent{*res.Msg}) +} + func (m *Sealing) handleCommitWait(ctx statemachine.Context, sector SectorInfo) error { if sector.CommitMessage == nil { log.Errorf("sector %d entered commit wait state without a message cid", sector.SectorNumber) diff --git a/extern/storage-sealing/terminate_batch.go b/extern/storage-sealing/terminate_batch.go index 0e96e8384..d545f443f 100644 --- a/extern/storage-sealing/terminate_batch.go +++ b/extern/storage-sealing/terminate_batch.go @@ -21,14 +21,6 @@ import ( "github.com/filecoin-project/lotus/chain/actors/builtin/miner" ) -var ( - // TODO: config - - TerminateBatchMax uint64 = 100 // adjust based on real-world gas numbers, actors limit at 10k - TerminateBatchMin uint64 = 1 - TerminateBatchWait = 5 * time.Minute -) - type TerminateBatcherApi interface { StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok TipSetToken) (*SectorLocation, error) SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error) @@ -38,11 +30,12 @@ type TerminateBatcherApi interface { } type TerminateBatcher struct { - api TerminateBatcherApi - maddr address.Address - mctx context.Context - addrSel AddrSel - feeCfg FeeConfig + api TerminateBatcherApi + maddr address.Address + mctx context.Context + addrSel AddrSel + feeCfg FeeConfig + getConfig GetSealingConfigFunc todo map[SectorLocation]*bitfield.BitField // MinerSectorLocation -> BitField @@ -53,13 +46,14 @@ type TerminateBatcher struct { lk sync.Mutex } -func NewTerminationBatcher(mctx context.Context, 
maddr address.Address, api TerminateBatcherApi, addrSel AddrSel, feeCfg FeeConfig) *TerminateBatcher { +func NewTerminationBatcher(mctx context.Context, maddr address.Address, api TerminateBatcherApi, addrSel AddrSel, feeCfg FeeConfig, getConfig GetSealingConfigFunc) *TerminateBatcher { b := &TerminateBatcher{ - api: api, - maddr: maddr, - mctx: mctx, - addrSel: addrSel, - feeCfg: feeCfg, + api: api, + maddr: maddr, + mctx: mctx, + addrSel: addrSel, + feeCfg: feeCfg, + getConfig: getConfig, todo: map[SectorLocation]*bitfield.BitField{}, waiting: map[abi.SectorNumber][]chan cid.Cid{}, @@ -86,6 +80,11 @@ func (b *TerminateBatcher) run() { } lastMsg = nil + cfg, err := b.getConfig() + if err != nil { + log.Warnw("TerminateBatcher getconfig error", "error", err) + } + var sendAboveMax, sendAboveMin bool select { case <-b.stop: @@ -93,13 +92,12 @@ func (b *TerminateBatcher) run() { return case <-b.notify: sendAboveMax = true - case <-time.After(TerminateBatchWait): + case <-time.After(cfg.TerminateBatchWait): sendAboveMin = true case fr := <-b.force: // user triggered forceRes = fr } - var err error lastMsg, err = b.processBatch(sendAboveMax, sendAboveMin) if err != nil { log.Warnw("TerminateBatcher processBatch error", "error", err) @@ -113,6 +111,11 @@ func (b *TerminateBatcher) processBatch(notif, after bool) (*cid.Cid, error) { return nil, xerrors.Errorf("getting proving deadline info failed: %w", err) } + cfg, err := b.getConfig() + if err != nil { + return nil, xerrors.Errorf("getting sealing config: %w", err) + } + b.lk.Lock() defer b.lk.Unlock() params := miner2.TerminateSectorsParams{} @@ -180,7 +183,7 @@ func (b *TerminateBatcher) processBatch(notif, after bool) (*cid.Cid, error) { Sectors: toTerminate, }) - if total >= uint64(miner.AddressedSectorsMax) { + if total >= uint64(miner.AddressedSectorsMax) || total >= cfg.TerminateBatchMax { break } @@ -193,11 +196,11 @@ func (b *TerminateBatcher) processBatch(notif, after bool) (*cid.Cid, error) { return nil, nil // nothing to do } - if notif && total < TerminateBatchMax { + if notif && total < cfg.TerminateBatchMax { return nil, nil } - if after && total < TerminateBatchMin { + if after && total < cfg.TerminateBatchMin { return nil, nil } diff --git a/go.mod b/go.mod index cb4f4d161..21421345c 100644 --- a/go.mod +++ b/go.mod @@ -39,8 +39,8 @@ require ( github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec github.com/filecoin-project/go-multistore v0.0.3 github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20 - github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261 - github.com/filecoin-project/go-state-types v0.1.0 + github.com/filecoin-project/go-paramfetch v0.0.2-0.20210330140417-936748d3f5ec + github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48 github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe github.com/filecoin-project/go-statestore v0.1.1 github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b @@ -48,6 +48,7 @@ require ( github.com/filecoin-project/specs-actors/v2 v2.3.5 github.com/filecoin-project/specs-actors/v3 v3.1.1 github.com/filecoin-project/specs-actors/v4 v4.0.1 + github.com/filecoin-project/specs-actors/v5 v5.0.0-20210602024058-0c296bb386bf github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 github.com/filecoin-project/test-vectors/schema v0.0.5 github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1 diff --git a/go.sum b/go.sum index cd9b99a8b..8510e0363 100644
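The terminate batcher above now reads its limits from the sealing config instead of the removed package-level variables. The send decision processBatch makes can be summarised as a small pure function; the sketch below is illustration only, not code from the change:

```go
package example

// shouldSendTerminations mirrors the thresholds processBatch applies:
// "notif" is true when new terminations were just queued, "after" is true
// once TerminateBatchWait has elapsed; min and max correspond to
// cfg.TerminateBatchMin and cfg.TerminateBatchMax.
func shouldSendTerminations(total uint64, notif, after bool, min, max uint64) bool {
	if total == 0 {
		return false // nothing to do
	}
	if notif && total < max {
		return false // keep waiting for a fuller batch
	}
	if after && total < min {
		return false // timer fired, but still below the configured minimum
	}
	return true
}
```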
--- a/go.sum +++ b/go.sum @@ -254,8 +254,9 @@ github.com/filecoin-project/go-address v0.0.5/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+ github.com/filecoin-project/go-amt-ipld/v2 v2.1.0/go.mod h1:nfFPoGyX0CU9SkXX8EoCcSuHN1XcbN0c6KBh7yvP5fs= github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349 h1:pIuR0dnMD0i+as8wNnjjHyQrnhP5O5bmba/lmgQeRgU= github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349/go.mod h1:vgmwKBkx+ca5OIeEvstiQgzAZnb7R6QaqE1oEDSqa6g= -github.com/filecoin-project/go-amt-ipld/v3 v3.0.0 h1:Ou/q82QeHGOhpkedvaxxzpBYuqTxLCcj5OChkDNx4qc= github.com/filecoin-project/go-amt-ipld/v3 v3.0.0/go.mod h1:Qa95YNAbtoVCTSVtX38aAC1ptBnJfPma1R/zZsKmx4o= +github.com/filecoin-project/go-amt-ipld/v3 v3.1.0 h1:ZNJ9tEG5bE72vBWYiuh5bkxJVM3ViHNOmQ7qew9n6RE= +github.com/filecoin-project/go-amt-ipld/v3 v3.1.0/go.mod h1:UjM2QhDFrrjD5s1CdnkJkat4ga+LqZBZgTMniypABRo= github.com/filecoin-project/go-bitfield v0.2.0/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= github.com/filecoin-project/go-bitfield v0.2.3/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= github.com/filecoin-project/go-bitfield v0.2.4 h1:uZ7MeE+XfM5lqrHJZ93OnhQKc/rveW8p9au0C68JPgk= @@ -282,22 +283,24 @@ github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3 github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24= github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM= github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0/go.mod h1:7aWZdaQ1b16BVoQUYR+eEvrDCGJoPLxFpDynFjYfBjI= -github.com/filecoin-project/go-hamt-ipld/v3 v3.0.1 h1:zbzs46G7bOctkZ+JUX3xirrj0RaEsi+27dtlsgrTNBg= github.com/filecoin-project/go-hamt-ipld/v3 v3.0.1/go.mod h1:gXpNmr3oQx8l3o7qkGyDjJjYSRX7hp/FGOStdqrWyDI= +github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0 h1:rVVNq0x6RGQIzCo1iiJlGFm9AGIZzeifggxtKMU7zmI= +github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0/go.mod h1:bxmzgT8tmeVQA1/gvBwFmYdT8SOFUwB3ovSUfG1Ux0g= github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec h1:rGI5I7fdU4viManxmDdbk5deZO7afe6L1Wc04dAmlOM= github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec/go.mod h1:XBBpuKIMaXIIzeqzO1iucq4GvbF8CxmXRFoezRh+Cx4= github.com/filecoin-project/go-multistore v0.0.3 h1:vaRBY4YiA2UZFPK57RNuewypB8u0DzzQwqsL0XarpnI= github.com/filecoin-project/go-multistore v0.0.3/go.mod h1:kaNqCC4IhU4B1uyr7YWFHd23TL4KM32aChS0jNkyUvQ= github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20 h1:+/4aUeUoKr6AKfPE3mBhXA5spIV6UcKdTYDPNU2Tdmg= github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20/go.mod h1:mPn+LRRd5gEKNAtc+r3ScpW2JRU/pj4NBKdADYWHiak= -github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261 h1:A256QonvzRaknIIAuWhe/M2dpV2otzs3NBhi5TWa/UA= -github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= +github.com/filecoin-project/go-paramfetch v0.0.2-0.20210330140417-936748d3f5ec h1:gExwWUiT1TcARkxGneS4nvp9C+wBsKU0bFdg7qFpNco= +github.com/filecoin-project/go-paramfetch v0.0.2-0.20210330140417-936748d3f5ec/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= github.com/filecoin-project/go-state-types v0.0.0-20200903145444-247639ffa6ad/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= github.com/filecoin-project/go-state-types v0.0.0-20200904021452-1883f36ca2f4/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= 
github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= -github.com/filecoin-project/go-state-types v0.1.0 h1:9r2HCSMMCmyMfGyMKxQtv0GKp6VT/m5GgVk8EhYbLJU= github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= +github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48 h1:Jc4OprDp3bRDxbsrXNHPwJabZJM3iDy+ri8/1e0ZnX4= +github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe h1:dF8u+LEWeIcTcfUcCf3WFVlc81Fr2JKg8zPzIbBDKDw= github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig= github.com/filecoin-project/go-statestore v0.1.0/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI= @@ -318,8 +321,12 @@ github.com/filecoin-project/specs-actors/v2 v2.3.5/go.mod h1:LljnY2Mn2homxZsmokJ github.com/filecoin-project/specs-actors/v3 v3.1.0/go.mod h1:mpynccOLlIRy0QnR008BwYBwT9fen+sPR13MA1VmMww= github.com/filecoin-project/specs-actors/v3 v3.1.1 h1:BE8fsns1GnEOxt1DTE5LxBK2FThXtWmCChgcJoHTg0E= github.com/filecoin-project/specs-actors/v3 v3.1.1/go.mod h1:mpynccOLlIRy0QnR008BwYBwT9fen+sPR13MA1VmMww= +github.com/filecoin-project/specs-actors/v4 v4.0.0/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng= github.com/filecoin-project/specs-actors/v4 v4.0.1 h1:AiWrtvJZ63MHGe6rn7tPu4nSUY8bA1KDNszqJaD5+Fg= github.com/filecoin-project/specs-actors/v4 v4.0.1/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng= +github.com/filecoin-project/specs-actors/v5 v5.0.0-20210512015452-4fe3889fff57/go.mod h1:283yBMMUSDB2abcjP/hhrwTkhb9h3sfM6KGrep/ZlBI= +github.com/filecoin-project/specs-actors/v5 v5.0.0-20210602024058-0c296bb386bf h1:xt9A1omyhSDbQvpVk7Na1J15a/n8y0y4GQDLeiWLpFs= +github.com/filecoin-project/specs-actors/v5 v5.0.0-20210602024058-0c296bb386bf/go.mod h1:b/btpRl84Q9SeDKlyIoORBQwe2OTmq14POrYrVvBWCM= github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 h1:Ur/l2+6qN+lQiqjozWWc5p9UDaAMDZKTlDS98oRnlIw= github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g= github.com/filecoin-project/test-vectors/schema v0.0.5 h1:w3zHQhzM4pYxJDl21avXjOKBLF8egrvwUwjpT8TquDg= diff --git a/lotuspond/front/src/chain/methods.json b/lotuspond/front/src/chain/methods.json index a09d3ec91..5aced814a 100644 --- a/lotuspond/front/src/chain/methods.json +++ b/lotuspond/front/src/chain/methods.json @@ -410,5 +410,111 @@ "AddVerifiedClient", "UseBytes", "RestoreBytes" + ], + "fil/5/account": [ + "Send", + "Constructor", + "PubkeyAddress" + ], + "fil/5/cron": [ + "Send", + "Constructor", + "EpochTick" + ], + "fil/5/init": [ + "Send", + "Constructor", + "Exec" + ], + "fil/5/multisig": [ + "Send", + "Constructor", + "Propose", + "Approve", + "Cancel", + "AddSigner", + "RemoveSigner", + "SwapSigner", + "ChangeNumApprovalsThreshold", + "LockBalance" + ], + "fil/5/paymentchannel": [ + "Send", + "Constructor", + "UpdateChannelState", + "Settle", + "Collect" + ], + "fil/5/reward": [ + "Send", + "Constructor", + "AwardBlockReward", + "ThisEpochReward", + "UpdateNetworkKPI" + ], + "fil/5/storagemarket": [ + "Send", + "Constructor", + 
"AddBalance", + "WithdrawBalance", + "PublishStorageDeals", + "VerifyDealsForActivation", + "ActivateDeals", + "OnMinerSectorsTerminate", + "ComputeDataCommitment", + "CronTick" + ], + "fil/5/storageminer": [ + "Send", + "Constructor", + "ControlAddresses", + "ChangeWorkerAddress", + "ChangePeerID", + "SubmitWindowedPoSt", + "PreCommitSector", + "ProveCommitSector", + "ExtendSectorExpiration", + "TerminateSectors", + "DeclareFaults", + "DeclareFaultsRecovered", + "OnDeferredCronEvent", + "CheckSectorProven", + "ApplyRewards", + "ReportConsensusFault", + "WithdrawBalance", + "ConfirmSectorProofsValid", + "ChangeMultiaddrs", + "CompactPartitions", + "CompactSectorNumbers", + "ConfirmUpdateWorkerKey", + "RepayDebt", + "ChangeOwnerAddress", + "DisputeWindowedPoSt", + "PreCommitSectorBatch", + "ProveCommitAggregate" + ], + "fil/5/storagepower": [ + "Send", + "Constructor", + "CreateMiner", + "UpdateClaimedPower", + "EnrollCronEvent", + "OnEpochTickEnd", + "UpdatePledgeTotal", + "SubmitPoRepForBulkVerify", + "CurrentTotalPower" + ], + "fil/5/system": [ + "Send", + "Constructor" + ], + "fil/5/verifiedregistry": [ + "Send", + "Constructor", + "AddVerifier", + "RemoveVerifier", + "AddVerifiedClient", + "UseBytes", + "RestoreBytes" ] } \ No newline at end of file diff --git a/markets/storageadapter/ondealsectorcommitted.go b/markets/storageadapter/ondealsectorcommitted.go index b5f9c7510..31bc0b8bf 100644 --- a/markets/storageadapter/ondealsectorcommitted.go +++ b/markets/storageadapter/ondealsectorcommitted.go @@ -12,6 +12,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/abi" + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/builtin/market" @@ -109,7 +110,7 @@ func (mgr *SectorCommittedManager) OnDealSectorPreCommitted(ctx context.Context, // Watch for a pre-commit message to the provider. matchEvent := func(msg *types.Message) (bool, error) { - matched := msg.To == provider && msg.Method == miner.Methods.PreCommitSector + matched := msg.To == provider && (msg.Method == miner.Methods.PreCommitSector || msg.Method == miner.Methods.PreCommitSectorBatch) return matched, nil } @@ -137,12 +138,6 @@ func (mgr *SectorCommittedManager) OnDealSectorPreCommitted(ctx context.Context, return true, nil } - // Extract the message parameters - var params miner.SectorPreCommitInfo - if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { - return false, xerrors.Errorf("unmarshal pre commit: %w", err) - } - // When there is a reorg, the deal ID may change, so get the // current deal ID from the publish message CID res, err := mgr.dealInfo.GetCurrentDealInfo(ctx, ts.Key().Bytes(), &proposal, publishCid) @@ -150,13 +145,14 @@ func (mgr *SectorCommittedManager) OnDealSectorPreCommitted(ctx context.Context, return false, err } - // Check through the deal IDs associated with this message - for _, did := range params.DealIDs { - if did == res.DealID { - // Found the deal ID in this message. Callback with the sector ID. 
- cb(params.SectorNumber, false, nil) - return false, nil - } + // Extract the message parameters + sn, err := dealSectorInPreCommitMsg(msg, res) + if err != nil { + return false, err + } + + if sn != nil { + cb(*sn, false, nil) } // Didn't find the deal ID in this message, so keep looking @@ -207,16 +203,11 @@ func (mgr *SectorCommittedManager) OnDealSectorCommitted(ctx context.Context, pr // Match a prove-commit sent to the provider with the given sector number matchEvent := func(msg *types.Message) (matched bool, err error) { - if msg.To != provider || msg.Method != miner.Methods.ProveCommitSector { + if msg.To != provider { return false, nil } - var params miner.ProveCommitSectorParams - if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { - return false, xerrors.Errorf("failed to unmarshal prove commit sector params: %w", err) - } - - return params.SectorNumber == sectorNumber, nil + return sectorInCommitMsg(msg, sectorNumber) } // The deal must be accepted by the deal proposal start epoch, so timeout @@ -273,6 +264,73 @@ func (mgr *SectorCommittedManager) OnDealSectorCommitted(ctx context.Context, pr return nil } +// dealSectorInPreCommitMsg tries to find a sector containing the specified deal +func dealSectorInPreCommitMsg(msg *types.Message, res sealing.CurrentDealInfo) (*abi.SectorNumber, error) { + switch msg.Method { + case miner.Methods.PreCommitSector: + var params miner.SectorPreCommitInfo + if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { + return nil, xerrors.Errorf("unmarshal pre commit: %w", err) + } + + // Check through the deal IDs associated with this message + for _, did := range params.DealIDs { + if did == res.DealID { + // Found the deal ID in this message. Callback with the sector ID. + return ¶ms.SectorNumber, nil + } + } + case miner.Methods.PreCommitSectorBatch: + var params miner5.PreCommitSectorBatchParams + if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { + return nil, xerrors.Errorf("unmarshal pre commit: %w", err) + } + + for _, precommit := range params.Sectors { + // Check through the deal IDs associated with this message + for _, did := range precommit.DealIDs { + if did == res.DealID { + // Found the deal ID in this message. Callback with the sector ID. 
+ return &precommit.SectorNumber, nil + } + } + } + default: + return nil, xerrors.Errorf("unexpected method %d", msg.Method) + } + + return nil, nil +} + +// sectorInCommitMsg checks if the provided message commits specified sector +func sectorInCommitMsg(msg *types.Message, sectorNumber abi.SectorNumber) (bool, error) { + switch msg.Method { + case miner.Methods.ProveCommitSector: + var params miner.ProveCommitSectorParams + if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { + return false, xerrors.Errorf("failed to unmarshal prove commit sector params: %w", err) + } + + return params.SectorNumber == sectorNumber, nil + + case miner.Methods.ProveCommitAggregate: + var params miner5.ProveCommitAggregateParams + if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { + return false, xerrors.Errorf("failed to unmarshal prove commit sector params: %w", err) + } + + set, err := params.SectorNumbers.IsSet(uint64(sectorNumber)) + if err != nil { + return false, xerrors.Errorf("checking if sectorNumber is set in commit aggregate message: %w", err) + } + + return set, nil + + default: + return false, nil + } +} + func (mgr *SectorCommittedManager) checkIfDealAlreadyActive(ctx context.Context, ts *types.TipSet, proposal *market.DealProposal, publishCid cid.Cid) (sealing.CurrentDealInfo, bool, error) { res, err := mgr.dealInfo.GetCurrentDealInfo(ctx, ts.Key().Bytes(), proposal, publishCid) if err != nil { diff --git a/node/builder.go b/node/builder.go index 34be610f5..9d9c81a85 100644 --- a/node/builder.go +++ b/node/builder.go @@ -381,6 +381,7 @@ var MinerNode = Options( // Sector storage: Proofs Override(new(ffiwrapper.Verifier), ffiwrapper.ProofVerifier), + Override(new(ffiwrapper.Prover), ffiwrapper.ProofProver), Override(new(storage2.Prover), From(new(sectorstorage.SectorManager))), // Sealing diff --git a/node/config/def.go b/node/config/def.go index b4cf5e2fa..c18f60a7a 100644 --- a/node/config/def.go +++ b/node/config/def.go @@ -6,6 +6,8 @@ import ( "github.com/ipfs/go-cid" + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/types" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" ) @@ -82,6 +84,30 @@ type SealingConfig struct { AlwaysKeepUnsealedCopy bool + // enable / disable precommit batching (takes effect after nv13) + BatchPreCommits bool + // maximum precommit batch size - batches will be sent immediately above this size + MaxPreCommitBatch int + MinPreCommitBatch int + // how long to wait before submitting a batch after crossing the minimum batch size + PreCommitBatchWait Duration + // time buffer for forceful batch submission before sectors in batch would start expiring + PreCommitBatchSlack Duration + + // enable / disable commit aggregation (takes effect after nv13) + AggregateCommits bool + // maximum batched commit size - batches will be sent immediately above this size + MinCommitBatch int + MaxCommitBatch int + // how long to wait before submitting a batch after crossing the minimum batch size + CommitBatchWait Duration + // time buffer for forceful batch submission before sectors in batch would start expiring + CommitBatchSlack Duration + + TerminateBatchMax uint64 + TerminateBatchMin uint64 + TerminateBatchWait Duration + // Keep this many sectors in sealing pipeline, start CC if needed // todo TargetSealingSectors uint64 @@ -237,6 +263,22 @@ func DefaultStorageMiner() *StorageMiner { MaxSealingSectorsForDeals: 0, WaitDealsDelay: 
Duration(time.Hour * 6), AlwaysKeepUnsealedCopy: true, + + BatchPreCommits: true, + MinPreCommitBatch: 1, // we must have at least one proof to aggregate + MaxPreCommitBatch: miner5.PreCommitSectorBatchMaxSize, // + PreCommitBatchWait: Duration(24 * time.Hour), // this can be up to 6 days + PreCommitBatchSlack: Duration(3 * time.Hour), + + AggregateCommits: true, + MinCommitBatch: miner5.MinAggregatedSectors, // we must have at least four proofs to aggregate + MaxCommitBatch: miner5.MaxAggregatedSectors, // this is the maximum aggregation per FIP13 + CommitBatchWait: Duration(24 * time.Hour), // this can be up to 6 days + CommitBatchSlack: Duration(1 * time.Hour), + + TerminateBatchMin: 1, + TerminateBatchMax: 100, + TerminateBatchWait: Duration(5 * time.Minute), }, Storage: sectorstorage.SealerConfig{ diff --git a/node/impl/client/client.go b/node/impl/client/client.go index 370cde5da..b711bb696 100644 --- a/node/impl/client/client.go +++ b/node/impl/client/client.go @@ -67,7 +67,8 @@ import ( var DefaultHashFunction = uint64(mh.BLAKE2B_MIN + 31) -const dealStartBufferHours uint64 = 49 +// 8 days ~= SealDuration + PreCommit + MaxProveCommitDuration + 8 hour buffer +const dealStartBufferHours uint64 = 8 * 24 type API struct { fx.In diff --git a/node/impl/full/chain.go b/node/impl/full/chain.go index f7e28354a..d26c2d7ea 100644 --- a/node/impl/full/chain.go +++ b/node/impl/full/chain.go @@ -10,6 +10,8 @@ import ( "strings" "sync" + "github.com/filecoin-project/lotus/build" + "go.uber.org/fx" "golang.org/x/xerrors" @@ -97,7 +99,12 @@ func (a *ChainAPI) ChainGetRandomnessFromTickets(ctx context.Context, tsk types. return nil, xerrors.Errorf("loading tipset key: %w", err) } - return a.Chain.GetChainRandomness(ctx, pts.Cids(), personalization, randEpoch, entropy) + // Doing this here is slightly nicer than doing it in the chainstore directly, but it's still bad for ChainAPI to reason about network upgrades + if randEpoch > build.UpgradeHyperdriveHeight { + return a.Chain.GetChainRandomnessLookingForward(ctx, pts.Cids(), personalization, randEpoch, entropy) + } + + return a.Chain.GetChainRandomnessLookingBack(ctx, pts.Cids(), personalization, randEpoch, entropy) } func (a *ChainAPI) ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { @@ -106,7 +113,12 @@ func (a *ChainAPI) ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.T return nil, xerrors.Errorf("loading tipset key: %w", err) } - return a.Chain.GetBeaconRandomness(ctx, pts.Cids(), personalization, randEpoch, entropy) + // Doing this here is slightly nicer than doing it in the chainstore directly, but it's still bad for ChainAPI to reason about network upgrades + if randEpoch > build.UpgradeHyperdriveHeight { + return a.Chain.GetBeaconRandomnessLookingForward(ctx, pts.Cids(), personalization, randEpoch, entropy) + } + + return a.Chain.GetBeaconRandomnessLookingBack(ctx, pts.Cids(), personalization, randEpoch, entropy) } func (a *ChainAPI) ChainGetBlock(ctx context.Context, msg cid.Cid) (*types.BlockHeader, error) { diff --git a/node/impl/full/gas.go b/node/impl/full/gas.go index 7b624d39b..edf53ff63 100644 --- a/node/impl/full/gas.go +++ b/node/impl/full/gas.go @@ -267,7 +267,7 @@ func gasEstimateGasLimit( return -1, xerrors.Errorf("getting key address: %w", err) } - pending, ts := mpool.PendingFor(fromA) + pending, ts := mpool.PendingFor(ctx, fromA) priorMsgs := make([]types.ChainMsg, 0, len(pending)) 
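The deal/sector matching helpers introduced in ondealsectorcommitted.go above (dealSectorInPreCommitMsg and sectorInCommitMsg) now recognise PreCommitSectorBatch and ProveCommitAggregate messages alongside the single-sector methods. For aggregates, the membership test is a bitfield lookup rather than a struct field comparison; a small self-contained sketch of that check, using the same specs-actors v5 types:

```go
package main

import (
	"fmt"

	"github.com/filecoin-project/go-bitfield"
	miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
)

func main() {
	// A ProveCommitAggregate message carries its sector numbers as a bitfield.
	params := miner5.ProveCommitAggregateParams{
		SectorNumbers: bitfield.NewFromSet([]uint64{10, 11, 12}),
	}

	// sectorInCommitMsg performs exactly this membership test after
	// unmarshalling the message parameters.
	set, err := params.SectorNumbers.IsSet(11)
	if err != nil {
		panic(err)
	}
	fmt.Println(set) // true
}
```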
for _, m := range pending { if m.Message.Nonce == msg.Nonce { diff --git a/node/impl/full/mpool.go b/node/impl/full/mpool.go index 63d3c7d58..5fe5fbec1 100644 --- a/node/impl/full/mpool.go +++ b/node/impl/full/mpool.go @@ -60,7 +60,7 @@ func (a *MpoolAPI) MpoolSelect(ctx context.Context, tsk types.TipSetKey, ticketQ return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } - return a.Mpool.SelectMessages(ts, ticketQuality) + return a.Mpool.SelectMessages(ctx, ts, ticketQuality) } func (a *MpoolAPI) MpoolPending(ctx context.Context, tsk types.TipSetKey) ([]*types.SignedMessage, error) { @@ -68,7 +68,7 @@ func (a *MpoolAPI) MpoolPending(ctx context.Context, tsk types.TipSetKey) ([]*ty if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } - pending, mpts := a.Mpool.Pending() + pending, mpts := a.Mpool.Pending(ctx) haveCids := map[cid.Cid]struct{}{} for _, m := range pending { @@ -122,16 +122,16 @@ func (a *MpoolAPI) MpoolPending(ctx context.Context, tsk types.TipSetKey) ([]*ty } func (a *MpoolAPI) MpoolClear(ctx context.Context, local bool) error { - a.Mpool.Clear(local) + a.Mpool.Clear(ctx, local) return nil } func (m *MpoolModule) MpoolPush(ctx context.Context, smsg *types.SignedMessage) (cid.Cid, error) { - return m.Mpool.Push(smsg) + return m.Mpool.Push(ctx, smsg) } func (a *MpoolAPI) MpoolPushUntrusted(ctx context.Context, smsg *types.SignedMessage) (cid.Cid, error) { - return a.Mpool.PushUntrusted(smsg) + return a.Mpool.PushUntrusted(ctx, smsg) } func (a *MpoolAPI) MpoolPushMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) { @@ -192,7 +192,7 @@ func (a *MpoolAPI) MpoolPushMessage(ctx context.Context, msg *types.Message, spe func (a *MpoolAPI) MpoolBatchPush(ctx context.Context, smsgs []*types.SignedMessage) ([]cid.Cid, error) { var messageCids []cid.Cid for _, smsg := range smsgs { - smsgCid, err := a.Mpool.Push(smsg) + smsgCid, err := a.Mpool.Push(ctx, smsg) if err != nil { return messageCids, err } @@ -204,7 +204,7 @@ func (a *MpoolAPI) MpoolBatchPush(ctx context.Context, smsgs []*types.SignedMess func (a *MpoolAPI) MpoolBatchPushUntrusted(ctx context.Context, smsgs []*types.SignedMessage) ([]cid.Cid, error) { var messageCids []cid.Cid for _, smsg := range smsgs { - smsgCid, err := a.Mpool.PushUntrusted(smsg) + smsgCid, err := a.Mpool.PushUntrusted(ctx, smsg) if err != nil { return messageCids, err } diff --git a/node/impl/storminer.go b/node/impl/storminer.go index 553f5e459..61c69b2ba 100644 --- a/node/impl/storminer.go +++ b/node/impl/storminer.go @@ -32,6 +32,7 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/stores" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" "github.com/filecoin-project/lotus/api" apitypes "github.com/filecoin-project/lotus/api/types" @@ -373,10 +374,26 @@ func (sm *StorageMinerAPI) SectorTerminatePending(ctx context.Context) ([]abi.Se return sm.Miner.TerminatePending(ctx) } +func (sm *StorageMinerAPI) SectorPreCommitFlush(ctx context.Context) ([]sealiface.PreCommitBatchRes, error) { + return sm.Miner.SectorPreCommitFlush(ctx) +} + +func (sm *StorageMinerAPI) SectorPreCommitPending(ctx context.Context) ([]abi.SectorID, error) { + return sm.Miner.SectorPreCommitPending(ctx) +} + func (sm *StorageMinerAPI) SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber) error { return 
sm.Miner.MarkForUpgrade(id) } +func (sm *StorageMinerAPI) SectorCommitFlush(ctx context.Context) ([]sealiface.CommitBatchRes, error) { + return sm.Miner.CommitFlush(ctx) +} + +func (sm *StorageMinerAPI) SectorCommitPending(ctx context.Context) ([]abi.SectorID, error) { + return sm.Miner.CommitPending(ctx) +} + func (sm *StorageMinerAPI) WorkerConnect(ctx context.Context, url string) error { w, err := connectRemoteWorker(ctx, sm, url) if err != nil { diff --git a/node/modules/storageminer.go b/node/modules/storageminer.go index be949255f..711f1cbbe 100644 --- a/node/modules/storageminer.go +++ b/node/modules/storageminer.go @@ -100,7 +100,7 @@ func GetParams(spt abi.RegisteredSealProof) error { } // TODO: We should fetch the params for the actual proof type, not just based on the size. - if err := paramfetch.GetParams(context.TODO(), build.ParametersJSON(), uint64(ssize)); err != nil { + if err := paramfetch.GetParams(context.TODO(), build.ParametersJSON(), build.SrsJSON(), uint64(ssize)); err != nil { return xerrors.Errorf("fetching proof parameters: %w", err) } @@ -203,6 +203,7 @@ type StorageMinerParams struct { Sealer sectorstorage.SectorManager SectorIDCounter sealing.SectorIDCounter Verifier ffiwrapper.Verifier + Prover ffiwrapper.Prover GetSealingConfigFn dtypes.GetSealingConfigFunc Journal journal.Journal AddrSel *storage.AddressSelector @@ -219,6 +220,7 @@ func StorageMiner(fc config.MinerFeeConfig) func(params StorageMinerParams) (*st h = params.Host sc = params.SectorIDCounter verif = params.Verifier + prover = params.Prover gsd = params.GetSealingConfigFn j = params.Journal as = params.AddrSel @@ -236,7 +238,7 @@ func StorageMiner(fc config.MinerFeeConfig) func(params StorageMinerParams) (*st return nil, err } - sm, err := storage.NewMiner(api, maddr, h, ds, sealer, sc, verif, gsd, fc, j, as) + sm, err := storage.NewMiner(api, maddr, h, ds, sealer, sc, verif, prover, gsd, fc, j, as) if err != nil { return nil, err } @@ -825,6 +827,22 @@ func NewSetSealConfigFunc(r repo.LockedRepo) (dtypes.SetSealingConfigFunc, error MaxSealingSectorsForDeals: cfg.MaxSealingSectorsForDeals, WaitDealsDelay: config.Duration(cfg.WaitDealsDelay), AlwaysKeepUnsealedCopy: cfg.AlwaysKeepUnsealedCopy, + + BatchPreCommits: cfg.BatchPreCommits, + MinPreCommitBatch: cfg.MinPreCommitBatch, + MaxPreCommitBatch: cfg.MaxPreCommitBatch, + PreCommitBatchWait: config.Duration(cfg.PreCommitBatchWait), + PreCommitBatchSlack: config.Duration(cfg.PreCommitBatchSlack), + + AggregateCommits: cfg.AggregateCommits, + MinCommitBatch: cfg.MinCommitBatch, + MaxCommitBatch: cfg.MaxCommitBatch, + CommitBatchWait: config.Duration(cfg.CommitBatchWait), + CommitBatchSlack: config.Duration(cfg.CommitBatchSlack), + + TerminateBatchMax: cfg.TerminateBatchMax, + TerminateBatchMin: cfg.TerminateBatchMin, + TerminateBatchWait: config.Duration(cfg.TerminateBatchWait), } }) return @@ -840,6 +858,22 @@ func NewGetSealConfigFunc(r repo.LockedRepo) (dtypes.GetSealingConfigFunc, error MaxSealingSectorsForDeals: cfg.Sealing.MaxSealingSectorsForDeals, WaitDealsDelay: time.Duration(cfg.Sealing.WaitDealsDelay), AlwaysKeepUnsealedCopy: cfg.Sealing.AlwaysKeepUnsealedCopy, + + BatchPreCommits: cfg.Sealing.BatchPreCommits, + MinPreCommitBatch: cfg.Sealing.MinPreCommitBatch, + MaxPreCommitBatch: cfg.Sealing.MaxPreCommitBatch, + PreCommitBatchWait: time.Duration(cfg.Sealing.PreCommitBatchWait), + PreCommitBatchSlack: time.Duration(cfg.Sealing.PreCommitBatchSlack), + + AggregateCommits: cfg.Sealing.AggregateCommits, + MinCommitBatch: 
cfg.Sealing.MinCommitBatch, + MaxCommitBatch: cfg.Sealing.MaxCommitBatch, + CommitBatchWait: time.Duration(cfg.Sealing.CommitBatchWait), + CommitBatchSlack: time.Duration(cfg.Sealing.CommitBatchSlack), + + TerminateBatchMax: cfg.Sealing.TerminateBatchMax, + TerminateBatchMin: cfg.Sealing.TerminateBatchMin, + TerminateBatchWait: time.Duration(cfg.Sealing.TerminateBatchWait), } }) return diff --git a/node/node_test.go b/node/node_test.go index 19c72b057..dcbc70469 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -163,6 +163,15 @@ func TestPledgeSectors(t *testing.T) { }) } +func TestPledgeBatching(t *testing.T) { + t.Run("100", func(t *testing.T) { + test.TestPledgeBatching(t, builder.MockSbBuilder, 50*time.Millisecond, 100) + }) + t.Run("100-before-nv13", func(t *testing.T) { + test.TestPledgeBeforeNv13(t, builder.MockSbBuilder, 50*time.Millisecond, 100) + }) +} + func TestTapeFix(t *testing.T) { logging.SetLogLevel("miner", "ERROR") logging.SetLogLevel("chainstore", "ERROR") @@ -179,6 +188,7 @@ func TestWindowedPost(t *testing.T) { } logging.SetLogLevel("miner", "ERROR") + logging.SetLogLevel("gen", "ERROR") logging.SetLogLevel("chainstore", "ERROR") logging.SetLogLevel("chain", "ERROR") logging.SetLogLevel("sub", "ERROR") @@ -227,6 +237,7 @@ func TestWindowPostDispute(t *testing.T) { t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run") } logging.SetLogLevel("miner", "ERROR") + logging.SetLogLevel("gen", "ERROR") logging.SetLogLevel("chainstore", "ERROR") logging.SetLogLevel("chain", "ERROR") logging.SetLogLevel("sub", "ERROR") @@ -240,6 +251,7 @@ func TestWindowPostDisputeFails(t *testing.T) { t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run") } logging.SetLogLevel("miner", "ERROR") + logging.SetLogLevel("gen", "ERROR") logging.SetLogLevel("chainstore", "ERROR") logging.SetLogLevel("chain", "ERROR") logging.SetLogLevel("sub", "ERROR") @@ -248,11 +260,40 @@ func TestWindowPostDisputeFails(t *testing.T) { test.TestWindowPostDisputeFails(t, builder.MockSbBuilder, 2*time.Millisecond) } +func TestWindowPostBaseFeeNoBurn(t *testing.T) { + if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" { + t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run") + } + logging.SetLogLevel("miner", "ERROR") + logging.SetLogLevel("gen", "ERROR") + logging.SetLogLevel("chainstore", "ERROR") + logging.SetLogLevel("chain", "ERROR") + logging.SetLogLevel("sub", "ERROR") + logging.SetLogLevel("storageminer", "ERROR") + + test.TestWindowPostBaseFeeNoBurn(t, builder.MockSbBuilder, 2*time.Millisecond) +} + +func TestWindowPostBaseFeeBurn(t *testing.T) { + if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" { + t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run") + } + logging.SetLogLevel("miner", "ERROR") + logging.SetLogLevel("gen", "ERROR") + logging.SetLogLevel("chainstore", "ERROR") + logging.SetLogLevel("chain", "ERROR") + logging.SetLogLevel("sub", "ERROR") + logging.SetLogLevel("storageminer", "ERROR") + + test.TestWindowPostBaseFeeBurn(t, builder.MockSbBuilder, 2*time.Millisecond) +} + func TestDeadlineToggling(t *testing.T) { if os.Getenv("LOTUS_TEST_DEADLINE_TOGGLING") != "1" { t.Skip("this takes a few minutes, set LOTUS_TEST_DEADLINE_TOGGLING=1 to run") } logging.SetLogLevel("miner", "ERROR") + logging.SetLogLevel("gen", "ERROR") logging.SetLogLevel("chainstore", "ERROR") logging.SetLogLevel("chain", "ERROR") logging.SetLogLevel("sub", "ERROR") diff --git a/node/test/builder.go b/node/test/builder.go index 10828126d..6b0b9aa96 100644 --- 
a/node/test/builder.go +++ b/node/test/builder.go @@ -501,6 +501,7 @@ func mockSbBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []tes node.Test(), node.Override(new(ffiwrapper.Verifier), mock.MockVerifier), + node.Override(new(ffiwrapper.Prover), mock.MockProver), // so that we subscribe to pubsub topics immediately node.Override(new(dtypes.Bootstrapper), dtypes.Bootstrapper(true)), @@ -524,6 +525,7 @@ func mockSbBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []tes return mock.NewMockSectorMgr(nil), nil }), node.Override(new(ffiwrapper.Verifier), mock.MockVerifier), + node.Override(new(ffiwrapper.Prover), mock.MockProver), node.Unset(new(*sectorstorage.Manager)), )) } @@ -566,6 +568,7 @@ func mockSbBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []tes return mock.NewMockSectorMgr(sectors), nil }), node.Override(new(ffiwrapper.Verifier), mock.MockVerifier), + node.Override(new(ffiwrapper.Prover), mock.MockProver), node.Unset(new(*sectorstorage.Manager)), opts, )) diff --git a/storage/adapter_storage_miner.go b/storage/adapter_storage_miner.go index 502b9adb0..1376ae5fb 100644 --- a/storage/adapter_storage_miner.go +++ b/storage/adapter_storage_miner.go @@ -16,6 +16,7 @@ import ( "github.com/filecoin-project/go-state-types/network" market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" + market5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/market" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/blockstore" @@ -146,10 +147,28 @@ func (s SealingAPIAdapter) StateComputeDataCommitment(ctx context.Context, maddr return cid.Undef, xerrors.Errorf("failed to unmarshal TipSetToken to TipSetKey: %w", err) } - ccparams, err := actors.SerializeParams(&market2.ComputeDataCommitmentParams{ - DealIDs: deals, - SectorType: sectorType, - }) + nv, err := s.delegate.StateNetworkVersion(ctx, tsk) + if err != nil { + return cid.Cid{}, err + } + + var ccparams []byte + if nv < network.Version13 { + ccparams, err = actors.SerializeParams(&market2.ComputeDataCommitmentParams{ + DealIDs: deals, + SectorType: sectorType, + }) + } else { + ccparams, err = actors.SerializeParams(&market5.ComputeDataCommitmentParams{ + Inputs: []*market5.SectorDataSpec{ + { + DealIDs: deals, + SectorType: sectorType, + }, + }, + }) + } + if err != nil { return cid.Undef, xerrors.Errorf("computing params for ComputeDataCommitment: %w", err) } @@ -169,12 +188,25 @@ func (s SealingAPIAdapter) StateComputeDataCommitment(ctx context.Context, maddr return cid.Undef, xerrors.Errorf("receipt for ComputeDataCommitment had exit code %d", r.MsgRct.ExitCode) } - var c cbg.CborCid - if err := c.UnmarshalCBOR(bytes.NewReader(r.MsgRct.Return)); err != nil { + if nv < network.Version13 { + var c cbg.CborCid + if err := c.UnmarshalCBOR(bytes.NewReader(r.MsgRct.Return)); err != nil { + return cid.Undef, xerrors.Errorf("failed to unmarshal CBOR to CborCid: %w", err) + } + + return cid.Cid(c), nil + } + + var cr market5.ComputeDataCommitmentReturn + if err := cr.UnmarshalCBOR(bytes.NewReader(r.MsgRct.Return)); err != nil { return cid.Undef, xerrors.Errorf("failed to unmarshal CBOR to CborCid: %w", err) } - return cid.Cid(c), nil + if len(cr.CommDs) != 1 { + return cid.Undef, xerrors.Errorf("CommD output must have 1 entry") + } + + return cid.Cid(cr.CommDs[0]), nil } func (s SealingAPIAdapter) StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok sealing.TipSetToken) 
(*miner.SectorPreCommitOnChainInfo, error) { diff --git a/storage/miner.go b/storage/miner.go index 6eb1789dc..106c09291 100644 --- a/storage/miner.go +++ b/storage/miner.go @@ -57,6 +57,7 @@ type Miner struct { ds datastore.Batching sc sealing.SectorIDCounter verif ffiwrapper.Verifier + prover ffiwrapper.Prover addrSel *AddressSelector maddr address.Address @@ -134,6 +135,7 @@ func NewMiner(api fullNodeFilteredAPI, sealer sectorstorage.SectorManager, sc sealing.SectorIDCounter, verif ffiwrapper.Verifier, + prover ffiwrapper.Prover, gsd dtypes.GetSealingConfigFunc, feeCfg config.MinerFeeConfig, journal journal.Journal, @@ -146,6 +148,7 @@ func NewMiner(api fullNodeFilteredAPI, ds: ds, sc: sc, verif: verif, + prover: prover, addrSel: as, maddr: maddr, @@ -202,7 +205,7 @@ func (m *Miner) Run(ctx context.Context) error { ) // Instantiate the sealing FSM. - m.sealing = sealing.New(adaptedAPI, fc, evtsAdapter, m.maddr, m.ds, m.sealer, m.sc, m.verif, &pcp, cfg, m.handleSealingNotifications, as) + m.sealing = sealing.New(adaptedAPI, fc, evtsAdapter, m.maddr, m.ds, m.sealer, m.sc, m.verif, m.prover, &pcp, cfg, m.handleSealingNotifications, as) // Run the sealing FSM. go m.sealing.Run(ctx) //nolint:errcheck // logged intside the function diff --git a/storage/miner_sealing.go b/storage/miner_sealing.go index 8981c3738..6a1195826 100644 --- a/storage/miner_sealing.go +++ b/storage/miner_sealing.go @@ -11,6 +11,7 @@ import ( "github.com/filecoin-project/specs-storage/storage" sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" ) // TODO: refactor this to be direct somehow @@ -59,6 +60,22 @@ func (m *Miner) TerminatePending(ctx context.Context) ([]abi.SectorID, error) { return m.sealing.TerminatePending(ctx) } +func (m *Miner) SectorPreCommitFlush(ctx context.Context) ([]sealiface.PreCommitBatchRes, error) { + return m.sealing.SectorPreCommitFlush(ctx) +} + +func (m *Miner) SectorPreCommitPending(ctx context.Context) ([]abi.SectorID, error) { + return m.sealing.SectorPreCommitPending(ctx) +} + +func (m *Miner) CommitFlush(ctx context.Context) ([]sealiface.CommitBatchRes, error) { + return m.sealing.CommitFlush(ctx) +} + +func (m *Miner) CommitPending(ctx context.Context) ([]abi.SectorID, error) { + return m.sealing.CommitPending(ctx) +} + func (m *Miner) MarkForUpgrade(id abi.SectorNumber) error { return m.sealing.MarkForUpgrade(id) } diff --git a/storage/wdpost_run.go b/storage/wdpost_run.go index b4c702197..d62b5e851 100644 --- a/storage/wdpost_run.go +++ b/storage/wdpost_run.go @@ -534,9 +534,14 @@ func (s *WindowPoStScheduler) runPoStCycle(ctx context.Context, di dline.Info, t return nil, xerrors.Errorf("getting partitions: %w", err) } + nv, err := s.api.StateNetworkVersion(ctx, ts.Key()) + if err != nil { + return nil, xerrors.Errorf("getting network version: %w", err) + } + // Split partitions into batches, so as not to exceed the number of sectors // allowed in a single message - partitionBatches, err := s.batchPartitions(partitions) + partitionBatches, err := s.batchPartitions(partitions, nv) if err != nil { return nil, err } @@ -716,7 +721,7 @@ func (s *WindowPoStScheduler) runPoStCycle(ctx context.Context, di dline.Info, t return posts, nil } -func (s *WindowPoStScheduler) batchPartitions(partitions []api.Partition) ([][]api.Partition, error) { +func (s *WindowPoStScheduler) batchPartitions(partitions []api.Partition, nv network.Version) ([][]api.Partition, error) { // We don't want to exceed the number of 
sectors allowed in a message. // So given the number of sectors in a partition, work out the number of // partitions that can be in a message without exceeding sectors per @@ -732,6 +737,11 @@ func (s *WindowPoStScheduler) batchPartitions(partitions []api.Partition) ([][]a return nil, xerrors.Errorf("getting sectors per partition: %w", err) } + // Also respect the AddressedPartitionsMax (which is the same as DeclarationsMax (which is all really just MaxPartitionsPerDeadline)) + if partitionsPerMsg > policy.GetDeclarationsMax(nv) { + partitionsPerMsg = policy.GetDeclarationsMax(nv) + } + // The number of messages will be: // ceiling(number of partitions / partitions per message) batchCount := len(partitions) / partitionsPerMsg diff --git a/storage/wdpost_run_test.go b/storage/wdpost_run_test.go index 584369dff..b878ff97e 100644 --- a/storage/wdpost_run_test.go +++ b/storage/wdpost_run_test.go @@ -5,6 +5,9 @@ import ( "context" "testing" + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + "github.com/stretchr/testify/require" "golang.org/x/xerrors" @@ -23,6 +26,7 @@ import ( miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" tutils "github.com/filecoin-project/specs-actors/v2/support/testing" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" @@ -144,6 +148,10 @@ func (m mockVerif) VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStV return true, nil } +func (m mockVerif) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) { + panic("implement me") +} + func (m mockVerif) VerifySeal(proof2.SealVerifyInfo) (bool, error) { panic("implement me") } @@ -172,13 +180,16 @@ func TestWDPostDoPost(t *testing.T) { mockStgMinerAPI := newMockStorageMinerAPI() // Get the number of sectors allowed in a partition for this proof type - sectorsPerPartition, err := builtin2.PoStProofWindowPoStPartitionSectors(proofType) + sectorsPerPartition, err := builtin5.PoStProofWindowPoStPartitionSectors(proofType) require.NoError(t, err) // Work out the number of partitions that can be included in a message // without exceeding the message sector limit require.NoError(t, err) - partitionsPerMsg := int(miner2.AddressedSectorsMax / sectorsPerPartition) + partitionsPerMsg := int(miner5.AddressedSectorsMax / sectorsPerPartition) + if partitionsPerMsg > miner5.AddressedPartitionsMax { + partitionsPerMsg = miner5.AddressedPartitionsMax + } // Enough partitions to fill expectedMsgCount-1 messages partitionCount := (expectedMsgCount - 1) * partitionsPerMsg @@ -214,11 +225,11 @@ func TestWDPostDoPost(t *testing.T) { } di := &dline.Info{ - WPoStPeriodDeadlines: miner2.WPoStPeriodDeadlines, - WPoStProvingPeriod: miner2.WPoStProvingPeriod, - WPoStChallengeWindow: miner2.WPoStChallengeWindow, - WPoStChallengeLookback: miner2.WPoStChallengeLookback, - FaultDeclarationCutoff: miner2.FaultDeclarationCutoff, + WPoStPeriodDeadlines: miner5.WPoStPeriodDeadlines, + WPoStProvingPeriod: miner5.WPoStProvingPeriod, + WPoStChallengeWindow: miner5.WPoStChallengeWindow, + WPoStChallengeLookback: miner5.WPoStChallengeLookback, + FaultDeclarationCutoff: miner5.FaultDeclarationCutoff, } ts := mockTipSet(t) diff --git a/testplans/lotus-soup/go.mod b/testplans/lotus-soup/go.mod index ae9b4d4b6..0c8e92a1b 100644 
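batchPartitions above now also caps the number of partitions per SubmitWindowedPoSt message by policy.GetDeclarationsMax for the current network version, on top of the existing AddressedSectorsMax limit. The arithmetic, written out as a stand-alone sketch with illustrative numbers (stand-ins for the real miner/policy constants):

```go
package main

import "fmt"

// partitionsPerMessage works out how many partitions fit in one
// SubmitWindowedPoSt message: limited by the addressed-sectors cap divided by
// the partition size, and now additionally by the per-deadline declarations cap.
func partitionsPerMessage(sectorsPerPartition, addressedSectorsMax, declarationsMax int) int {
	perMsg := addressedSectorsMax / sectorsPerPartition
	if perMsg > declarationsMax {
		perMsg = declarationsMax
	}
	return perMsg
}

func main() {
	// Illustrative numbers only, not the real network parameters.
	perMsg := partitionsPerMessage(2349, 10000, 8)
	// 10000/2349 = 4 partitions fit by sector count; the declarations cap of 8
	// does not bite here, so each message carries 4 partitions.
	fmt.Println(perMsg)

	// The number of messages is ceiling(number of partitions / partitions per message).
	partitions := 10
	fmt.Println((partitions + perMsg - 1) / perMsg) // 3 messages
}
```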
--- a/testplans/lotus-soup/go.mod +++ b/testplans/lotus-soup/go.mod @@ -11,10 +11,11 @@ require ( github.com/filecoin-project/go-data-transfer v1.6.0 github.com/filecoin-project/go-fil-markets v1.4.0 github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec - github.com/filecoin-project/go-state-types v0.1.0 + github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48 github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b - github.com/filecoin-project/lotus v1.9.1-0.20210602101339-07b025a54f6d + github.com/filecoin-project/lotus v1.9.1-0.20210602131226-e1dc7ad6eb9e github.com/filecoin-project/specs-actors v0.9.14 + github.com/filecoin-project/specs-actors/v5 v5.0.0-20210602024058-0c296bb386bf // indirect github.com/google/uuid v1.1.2 github.com/gorilla/mux v1.7.4 github.com/hashicorp/go-multierror v1.1.0 diff --git a/testplans/lotus-soup/go.sum b/testplans/lotus-soup/go.sum index fc88afe6c..926f625cf 100644 --- a/testplans/lotus-soup/go.sum +++ b/testplans/lotus-soup/go.sum @@ -263,8 +263,9 @@ github.com/filecoin-project/go-address v0.0.5/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+ github.com/filecoin-project/go-amt-ipld/v2 v2.1.0/go.mod h1:nfFPoGyX0CU9SkXX8EoCcSuHN1XcbN0c6KBh7yvP5fs= github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349 h1:pIuR0dnMD0i+as8wNnjjHyQrnhP5O5bmba/lmgQeRgU= github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349/go.mod h1:vgmwKBkx+ca5OIeEvstiQgzAZnb7R6QaqE1oEDSqa6g= -github.com/filecoin-project/go-amt-ipld/v3 v3.0.0 h1:Ou/q82QeHGOhpkedvaxxzpBYuqTxLCcj5OChkDNx4qc= github.com/filecoin-project/go-amt-ipld/v3 v3.0.0/go.mod h1:Qa95YNAbtoVCTSVtX38aAC1ptBnJfPma1R/zZsKmx4o= +github.com/filecoin-project/go-amt-ipld/v3 v3.1.0 h1:ZNJ9tEG5bE72vBWYiuh5bkxJVM3ViHNOmQ7qew9n6RE= +github.com/filecoin-project/go-amt-ipld/v3 v3.1.0/go.mod h1:UjM2QhDFrrjD5s1CdnkJkat4ga+LqZBZgTMniypABRo= github.com/filecoin-project/go-bitfield v0.2.0/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= github.com/filecoin-project/go-bitfield v0.2.3/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= github.com/filecoin-project/go-bitfield v0.2.4 h1:uZ7MeE+XfM5lqrHJZ93OnhQKc/rveW8p9au0C68JPgk= @@ -291,22 +292,24 @@ github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3 github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24= github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM= github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0/go.mod h1:7aWZdaQ1b16BVoQUYR+eEvrDCGJoPLxFpDynFjYfBjI= -github.com/filecoin-project/go-hamt-ipld/v3 v3.0.1 h1:zbzs46G7bOctkZ+JUX3xirrj0RaEsi+27dtlsgrTNBg= github.com/filecoin-project/go-hamt-ipld/v3 v3.0.1/go.mod h1:gXpNmr3oQx8l3o7qkGyDjJjYSRX7hp/FGOStdqrWyDI= +github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0 h1:rVVNq0x6RGQIzCo1iiJlGFm9AGIZzeifggxtKMU7zmI= +github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0/go.mod h1:bxmzgT8tmeVQA1/gvBwFmYdT8SOFUwB3ovSUfG1Ux0g= github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec h1:rGI5I7fdU4viManxmDdbk5deZO7afe6L1Wc04dAmlOM= github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec/go.mod h1:XBBpuKIMaXIIzeqzO1iucq4GvbF8CxmXRFoezRh+Cx4= github.com/filecoin-project/go-multistore v0.0.3 h1:vaRBY4YiA2UZFPK57RNuewypB8u0DzzQwqsL0XarpnI= github.com/filecoin-project/go-multistore v0.0.3/go.mod h1:kaNqCC4IhU4B1uyr7YWFHd23TL4KM32aChS0jNkyUvQ= github.com/filecoin-project/go-padreader 
v0.0.0-20200903213702-ed5fae088b20 h1:+/4aUeUoKr6AKfPE3mBhXA5spIV6UcKdTYDPNU2Tdmg= github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20/go.mod h1:mPn+LRRd5gEKNAtc+r3ScpW2JRU/pj4NBKdADYWHiak= -github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261 h1:A256QonvzRaknIIAuWhe/M2dpV2otzs3NBhi5TWa/UA= -github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= +github.com/filecoin-project/go-paramfetch v0.0.2-0.20210330140417-936748d3f5ec h1:gExwWUiT1TcARkxGneS4nvp9C+wBsKU0bFdg7qFpNco= +github.com/filecoin-project/go-paramfetch v0.0.2-0.20210330140417-936748d3f5ec/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= github.com/filecoin-project/go-state-types v0.0.0-20200903145444-247639ffa6ad/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= github.com/filecoin-project/go-state-types v0.0.0-20200904021452-1883f36ca2f4/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= -github.com/filecoin-project/go-state-types v0.1.0 h1:9r2HCSMMCmyMfGyMKxQtv0GKp6VT/m5GgVk8EhYbLJU= github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= +github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48 h1:Jc4OprDp3bRDxbsrXNHPwJabZJM3iDy+ri8/1e0ZnX4= +github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe h1:dF8u+LEWeIcTcfUcCf3WFVlc81Fr2JKg8zPzIbBDKDw= github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig= github.com/filecoin-project/go-statestore v0.1.0/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI= @@ -314,8 +317,8 @@ github.com/filecoin-project/go-statestore v0.1.1 h1:ufMFq00VqnT2CAuDpcGnwLnCX1I/ github.com/filecoin-project/go-statestore v0.1.1/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI= github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b h1:fkRZSPrYpk42PV3/lIXiL0LHetxde7vyYYvSsttQtfg= github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b/go.mod h1:Q0GQOBtKf1oE10eSXSlhN45kDBdGvEcVOqMiffqX+N8= -github.com/filecoin-project/lotus v1.9.1-0.20210602101339-07b025a54f6d h1:gMkgi1SssdZWFpCHXcQvqcrsUJW+HHaO10w//HA9eyI= -github.com/filecoin-project/lotus v1.9.1-0.20210602101339-07b025a54f6d/go.mod h1:8YWF0BqH6g3O47qB5mI0Pk9zgC2uA6xUlKXYo5VScIk= +github.com/filecoin-project/lotus v1.9.1-0.20210602131226-e1dc7ad6eb9e h1:JvtYGk30nM7K0TD4sTOUKYUePcSzZNj5ZD6g5vdrqMI= +github.com/filecoin-project/lotus v1.9.1-0.20210602131226-e1dc7ad6eb9e/go.mod h1:/ZeMXR8jPxJslaHSIW3ZxO9YPIaxcnsP+niEoBatzo8= github.com/filecoin-project/specs-actors v0.9.4/go.mod h1:BStZQzx5x7TmCkLv0Bpa07U6cPKol6fd3w9KjMPZ6Z4= github.com/filecoin-project/specs-actors v0.9.12/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= github.com/filecoin-project/specs-actors v0.9.13/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= @@ -329,8 +332,13 @@ github.com/filecoin-project/specs-actors/v2 v2.3.5/go.mod h1:LljnY2Mn2homxZsmokJ github.com/filecoin-project/specs-actors/v3 v3.1.0/go.mod 
h1:mpynccOLlIRy0QnR008BwYBwT9fen+sPR13MA1VmMww= github.com/filecoin-project/specs-actors/v3 v3.1.1 h1:BE8fsns1GnEOxt1DTE5LxBK2FThXtWmCChgcJoHTg0E= github.com/filecoin-project/specs-actors/v3 v3.1.1/go.mod h1:mpynccOLlIRy0QnR008BwYBwT9fen+sPR13MA1VmMww= +github.com/filecoin-project/specs-actors/v4 v4.0.0/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng= github.com/filecoin-project/specs-actors/v4 v4.0.1 h1:AiWrtvJZ63MHGe6rn7tPu4nSUY8bA1KDNszqJaD5+Fg= github.com/filecoin-project/specs-actors/v4 v4.0.1/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng= +github.com/filecoin-project/specs-actors/v5 v5.0.0-20210512015452-4fe3889fff57/go.mod h1:283yBMMUSDB2abcjP/hhrwTkhb9h3sfM6KGrep/ZlBI= +github.com/filecoin-project/specs-actors/v5 v5.0.0-20210528202914-a9f9f95f5e93/go.mod h1:kSDmoQuO8jlhMVzKNoesbhka1e6gHKcLQjKm9mE9Qhw= +github.com/filecoin-project/specs-actors/v5 v5.0.0-20210602024058-0c296bb386bf h1:xt9A1omyhSDbQvpVk7Na1J15a/n8y0y4GQDLeiWLpFs= +github.com/filecoin-project/specs-actors/v5 v5.0.0-20210602024058-0c296bb386bf/go.mod h1:b/btpRl84Q9SeDKlyIoORBQwe2OTmq14POrYrVvBWCM= github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 h1:Ur/l2+6qN+lQiqjozWWc5p9UDaAMDZKTlDS98oRnlIw= github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g= github.com/filecoin-project/test-vectors/schema v0.0.5/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E= diff --git a/testplans/lotus-soup/init.go b/testplans/lotus-soup/init.go index cad15a4fb..7eada2ed6 100644 --- a/testplans/lotus-soup/init.go +++ b/testplans/lotus-soup/init.go @@ -59,5 +59,5 @@ func init() { build.UpgradeLiftoffHeight = -3 // We need to _run_ this upgrade because genesis doesn't support v2, so // we run it at height 0. - build.UpgradeActorsV2Height = 0 + build.UpgradeAssemblyHeight = 0 }