diff --git a/api/api_storage.go b/api/api_storage.go index 9662e8cd8..202b40f93 100644 --- a/api/api_storage.go +++ b/api/api_storage.go @@ -80,6 +80,16 @@ type StorageMiner interface { // SectorTerminatePending returns a list of pending sector terminations to be sent in the next batch message SectorTerminatePending(ctx context.Context) ([]abi.SectorID, error) //perm:admin SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber) error //perm:admin + // SectorPreCommitFlush immediately sends a PreCommit message with sectors batched for PreCommit. + // Returns null if message wasn't sent + SectorPreCommitFlush(ctx context.Context) (*cid.Cid, error) //perm:admin + // SectorPreCommitPending returns a list of pending PreCommit sectors to be sent in the next batch message + SectorPreCommitPending(ctx context.Context) ([]abi.SectorID, error) //perm:admin + // SectorCommitFlush immediately sends a Commit message with sectors aggregated for Commit. + // Returns null if message wasn't sent + SectorCommitFlush(ctx context.Context) (*cid.Cid, error) //perm:admin + // SectorCommitPending returns a list of pending Commit sectors to be sent in the next aggregate message + SectorCommitPending(ctx context.Context) ([]abi.SectorID, error) //perm:admin // WorkerConnect tells the node to connect to workers RPC WorkerConnect(context.Context, string) error //perm:admin retry:true diff --git a/api/apistruct/struct.go b/api/apistruct/struct.go index 21ed6d56a..13375cf72 100644 --- a/api/apistruct/struct.go +++ b/api/apistruct/struct.go @@ -639,12 +639,20 @@ type StorageMinerStruct struct { SealingSchedDiag func(p0 context.Context, p1 bool) (interface{}, error) `perm:"admin"` + SectorCommitFlush func(p0 context.Context) (*cid.Cid, error) `perm:"admin"` + + SectorCommitPending func(p0 context.Context) ([]abi.SectorID, error) `perm:"admin"` + SectorGetExpectedSealDuration func(p0 context.Context) (time.Duration, error) `perm:"read"` SectorGetSealDelay func(p0 context.Context) (time.Duration, error) `perm:"read"` SectorMarkForUpgrade func(p0 context.Context, p1 abi.SectorNumber) error `perm:"admin"` + SectorPreCommitFlush func(p0 context.Context) (*cid.Cid, error) `perm:"admin"` + + SectorPreCommitPending func(p0 context.Context) ([]abi.SectorID, error) `perm:"admin"` + SectorRemove func(p0 context.Context, p1 abi.SectorNumber) error `perm:"admin"` SectorSetExpectedSealDuration func(p0 context.Context, p1 time.Duration) error `perm:"write"` @@ -1923,6 +1931,14 @@ func (s *StorageMinerStruct) SealingSchedDiag(p0 context.Context, p1 bool) (inte return s.Internal.SealingSchedDiag(p0, p1) } +func (s *StorageMinerStruct) SectorCommitFlush(p0 context.Context) (*cid.Cid, error) { + return s.Internal.SectorCommitFlush(p0) +} + +func (s *StorageMinerStruct) SectorCommitPending(p0 context.Context) ([]abi.SectorID, error) { + return s.Internal.SectorCommitPending(p0) +} + func (s *StorageMinerStruct) SectorGetExpectedSealDuration(p0 context.Context) (time.Duration, error) { return s.Internal.SectorGetExpectedSealDuration(p0) } @@ -1935,6 +1951,14 @@ func (s *StorageMinerStruct) SectorMarkForUpgrade(p0 context.Context, p1 abi.Sec return s.Internal.SectorMarkForUpgrade(p0, p1) } +func (s *StorageMinerStruct) SectorPreCommitFlush(p0 context.Context) (*cid.Cid, error) { + return s.Internal.SectorPreCommitFlush(p0) +} + +func (s *StorageMinerStruct) SectorPreCommitPending(p0 context.Context) ([]abi.SectorID, error) { + return s.Internal.SectorPreCommitPending(p0) +} + func (s *StorageMinerStruct) SectorRemove(p0 
context.Context, p1 abi.SectorNumber) error { return s.Internal.SectorRemove(p0, p1) } diff --git a/api/test/deadlines.go b/api/test/deadlines.go index 8060c18f3..68186f0e1 100644 --- a/api/test/deadlines.go +++ b/api/test/deadlines.go @@ -61,7 +61,7 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - n, sn := b(t, []FullNodeOpts{FullNodeWithLatestActorsAt(upgradeH)}, OneMiner) + n, sn := b(t, []FullNodeOpts{FullNodeWithV4ActorsAt(upgradeH)}, OneMiner) client := n[0].FullNode.(*impl.FullNodeAPI) minerA := sn[0] diff --git a/api/test/deals.go b/api/test/deals.go index 7a9454bae..e3432ff0d 100644 --- a/api/test/deals.go +++ b/api/test/deals.go @@ -435,6 +435,8 @@ func startSealingWaiting(t *testing.T, ctx context.Context, miner TestStorageNod require.NoError(t, miner.SectorStartSealing(ctx, snum)) } } + + flushSealingBatches(t, ctx, miner) } func testRetrieval(t *testing.T, ctx context.Context, client api.FullNode, fcid cid.Cid, piece *cid.Cid, carExport bool, data []byte) { diff --git a/api/test/pledge.go b/api/test/pledge.go new file mode 100644 index 000000000..8752ad4ac --- /dev/null +++ b/api/test/pledge.go @@ -0,0 +1,389 @@ +package test + +import ( + "context" + "fmt" + "sort" + "strings" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/stmgr" + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + bminer "github.com/filecoin-project/lotus/miner" + "github.com/filecoin-project/lotus/node" + "github.com/filecoin-project/lotus/node/impl" +) + +func TestSDRUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + n, sn := b(t, []FullNodeOpts{FullNodeWithSDRAt(500, 1000)}, OneMiner) + client := n[0].FullNode.(*impl.FullNodeAPI) + miner := sn[0] + + addrinfo, err := client.NetAddrsListen(ctx) + if err != nil { + t.Fatal(err) + } + + if err := miner.NetConnect(ctx, addrinfo); err != nil { + t.Fatal(err) + } + build.Clock.Sleep(time.Second) + + pledge := make(chan struct{}) + mine := int64(1) + done := make(chan struct{}) + go func() { + defer close(done) + round := 0 + for atomic.LoadInt64(&mine) != 0 { + build.Clock.Sleep(blocktime) + if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) { + + }}); err != nil { + t.Error(err) + } + + // 3 sealing rounds: before, during after. + if round >= 3 { + continue + } + + head, err := client.ChainHead(ctx) + assert.NoError(t, err) + + // rounds happen every 100 blocks, with a 50 block offset. + if head.Height() >= abi.ChainEpoch(round*500+50) { + round++ + pledge <- struct{}{} + + ver, err := client.StateNetworkVersion(ctx, head.Key()) + assert.NoError(t, err) + switch round { + case 1: + assert.Equal(t, network.Version6, ver) + case 2: + assert.Equal(t, network.Version7, ver) + case 3: + assert.Equal(t, network.Version8, ver) + } + } + + } + }() + + // before. 
+ pledgeSectors(t, ctx, miner, 9, 0, pledge) + + s, err := miner.SectorsList(ctx) + require.NoError(t, err) + sort.Slice(s, func(i, j int) bool { + return s[i] < s[j] + }) + + for i, id := range s { + info, err := miner.SectorsStatus(ctx, id, true) + require.NoError(t, err) + expectProof := abi.RegisteredSealProof_StackedDrg2KiBV1 + if i >= 3 { + // after + expectProof = abi.RegisteredSealProof_StackedDrg2KiBV1_1 + } + assert.Equal(t, expectProof, info.SealProof, "sector %d, id %d", i, id) + } + + atomic.StoreInt64(&mine, 0) + <-done +} + +func TestPledgeBatching(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + n, sn := b(t, []FullNodeOpts{FullNodeWithLatestActorsAt(-1)}, OneMiner) + client := n[0].FullNode.(*impl.FullNodeAPI) + miner := sn[0] + + addrinfo, err := client.NetAddrsListen(ctx) + if err != nil { + t.Fatal(err) + } + + if err := miner.NetConnect(ctx, addrinfo); err != nil { + t.Fatal(err) + } + build.Clock.Sleep(time.Second) + + mine := int64(1) + done := make(chan struct{}) + go func() { + defer close(done) + for atomic.LoadInt64(&mine) != 0 { + build.Clock.Sleep(blocktime) + if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) { + + }}); err != nil { + t.Error(err) + } + } + }() + + for { + h, err := client.ChainHead(ctx) + require.NoError(t, err) + if h.Height() > 10 { + break + } + } + + toCheck := startPledge(t, ctx, miner, nSectors, 0, nil) + + for len(toCheck) > 0 { + states := map[api.SectorState]int{} + + for n := range toCheck { + st, err := miner.SectorsStatus(ctx, n, false) + require.NoError(t, err) + states[st.State]++ + if st.State == api.SectorState(sealing.Proving) { + delete(toCheck, n) + } + if strings.Contains(string(st.State), "Fail") { + t.Fatal("sector in a failed state", st.State) + } + } + if states[api.SectorState(sealing.SubmitPreCommitBatch)] == nSectors || + (states[api.SectorState(sealing.SubmitPreCommitBatch)] > 0 && states[api.SectorState(sealing.PreCommit1)] == 0 && states[api.SectorState(sealing.PreCommit2)] == 0) { + pcb, err := miner.SectorPreCommitFlush(ctx) + require.NoError(t, err) + if pcb != nil { + fmt.Printf("PRECOMMIT BATCH: %s\n", *pcb) + } + } + + if states[api.SectorState(sealing.SubmitCommitAggregate)] == nSectors || + (states[api.SectorState(sealing.SubmitCommitAggregate)] > 0 && states[api.SectorState(sealing.WaitSeed)] == 0 && states[api.SectorState(sealing.Committing)] == 0) { + cb, err := miner.SectorCommitFlush(ctx) + require.NoError(t, err) + if cb != nil { + fmt.Printf("COMMIT BATCH: %s\n", *cb) + } + } + + build.Clock.Sleep(100 * time.Millisecond) + fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states) + } + + atomic.StoreInt64(&mine, 0) + <-done +} + +func TestPledgeBeforeNv13(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + n, sn := b(t, []FullNodeOpts{ + { + Opts: func(nodes []TestNode) node.Option { + return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{ + Network: network.Version9, + Height: 1, + Migration: stmgr.UpgradeActorsV2, + }, { + Network: network.Version10, + Height: 2, + Migration: stmgr.UpgradeActorsV3, + }, { + Network: network.Version12, + Height: 3, + Migration: stmgr.UpgradeActorsV4, + }, { + Network: network.Version13, + Height: 1000000000, + Migration: stmgr.UpgradeActorsV5, + }}) + }, + }, + }, OneMiner) + client := n[0].FullNode.(*impl.FullNodeAPI) + 
miner := sn[0] + + addrinfo, err := client.NetAddrsListen(ctx) + if err != nil { + t.Fatal(err) + } + + if err := miner.NetConnect(ctx, addrinfo); err != nil { + t.Fatal(err) + } + build.Clock.Sleep(time.Second) + + mine := int64(1) + done := make(chan struct{}) + go func() { + defer close(done) + for atomic.LoadInt64(&mine) != 0 { + build.Clock.Sleep(blocktime) + if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) { + + }}); err != nil { + t.Error(err) + } + } + }() + + for { + h, err := client.ChainHead(ctx) + require.NoError(t, err) + if h.Height() > 10 { + break + } + } + + toCheck := startPledge(t, ctx, miner, nSectors, 0, nil) + + for len(toCheck) > 0 { + states := map[api.SectorState]int{} + + for n := range toCheck { + st, err := miner.SectorsStatus(ctx, n, false) + require.NoError(t, err) + states[st.State]++ + if st.State == api.SectorState(sealing.Proving) { + delete(toCheck, n) + } + if strings.Contains(string(st.State), "Fail") { + t.Fatal("sector in a failed state", st.State) + } + } + + build.Clock.Sleep(100 * time.Millisecond) + fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states) + } + + atomic.StoreInt64(&mine, 0) + <-done +} + +func TestPledgeSector(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + n, sn := b(t, OneFull, OneMiner) + client := n[0].FullNode.(*impl.FullNodeAPI) + miner := sn[0] + + addrinfo, err := client.NetAddrsListen(ctx) + if err != nil { + t.Fatal(err) + } + + if err := miner.NetConnect(ctx, addrinfo); err != nil { + t.Fatal(err) + } + build.Clock.Sleep(time.Second) + + mine := int64(1) + done := make(chan struct{}) + go func() { + defer close(done) + for atomic.LoadInt64(&mine) != 0 { + build.Clock.Sleep(blocktime) + if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) { + + }}); err != nil { + t.Error(err) + } + } + }() + + pledgeSectors(t, ctx, miner, nSectors, 0, nil) + + atomic.StoreInt64(&mine, 0) + <-done +} + +func flushSealingBatches(t *testing.T, ctx context.Context, miner TestStorageNode) { + pcb, err := miner.SectorPreCommitFlush(ctx) + require.NoError(t, err) + if pcb != nil { + fmt.Printf("PRECOMMIT BATCH: %s\n", *pcb) + } + + cb, err := miner.SectorCommitFlush(ctx) + require.NoError(t, err) + if cb != nil { + fmt.Printf("COMMIT BATCH: %s\n", *cb) + } +} + +func startPledge(t *testing.T, ctx context.Context, miner TestStorageNode, n, existing int, blockNotif <-chan struct{}) map[abi.SectorNumber]struct{} { + for i := 0; i < n; i++ { + if i%3 == 0 && blockNotif != nil { + <-blockNotif + log.Errorf("WAIT") + } + log.Errorf("PLEDGING %d", i) + _, err := miner.PledgeSector(ctx) + require.NoError(t, err) + } + + for { + s, err := miner.SectorsList(ctx) // Note - the test builder doesn't import genesis sectors into FSM + require.NoError(t, err) + fmt.Printf("Sectors: %d\n", len(s)) + if len(s) >= n+existing { + break + } + + build.Clock.Sleep(100 * time.Millisecond) + } + + fmt.Printf("All sectors is fsm\n") + + s, err := miner.SectorsList(ctx) + require.NoError(t, err) + + toCheck := map[abi.SectorNumber]struct{}{} + for _, number := range s { + toCheck[number] = struct{}{} + } + + return toCheck +} + +func pledgeSectors(t *testing.T, ctx context.Context, miner TestStorageNode, n, existing int, blockNotif <-chan struct{}) { + toCheck := startPledge(t, ctx, miner, n, existing, blockNotif) + + for len(toCheck) > 0 { + flushSealingBatches(t, ctx, miner) + + states := 
map[api.SectorState]int{} + for n := range toCheck { + st, err := miner.SectorsStatus(ctx, n, false) + require.NoError(t, err) + states[st.State]++ + if st.State == api.SectorState(sealing.Proving) { + delete(toCheck, n) + } + if strings.Contains(string(st.State), "Fail") { + t.Fatal("sector in a failed state", st.State) + } + } + + build.Clock.Sleep(100 * time.Millisecond) + fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states) + } +} diff --git a/api/test/test.go b/api/test/test.go index 44530191b..c064ca6fb 100644 --- a/api/test/test.go +++ b/api/test/test.go @@ -169,6 +169,31 @@ var FullNodeWithSDRAt = func(calico, persian abi.ChainEpoch) FullNodeOpts { } } +var FullNodeWithV4ActorsAt = func(upgradeHeight abi.ChainEpoch) FullNodeOpts { + if upgradeHeight == -1 { + upgradeHeight = 3 + } + + return FullNodeOpts{ + Opts: func(nodes []TestNode) node.Option { + return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{ + // prepare for upgrade. + Network: network.Version9, + Height: 1, + Migration: stmgr.UpgradeActorsV2, + }, { + Network: network.Version10, + Height: 2, + Migration: stmgr.UpgradeActorsV3, + }, { + Network: network.Version12, + Height: upgradeHeight, + Migration: stmgr.UpgradeActorsV4, + }}) + }, + } +} + var MineNext = miner.MineReq{ InjectNulls: 0, Done: func(bool, abi.ChainEpoch, error) {}, diff --git a/api/test/window_post.go b/api/test/window_post.go index c987fa1f9..e6e3ff2a3 100644 --- a/api/test/window_post.go +++ b/api/test/window_post.go @@ -3,14 +3,9 @@ package test import ( "context" "fmt" - "sort" - "sync/atomic" - - "strings" "testing" "time" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/filecoin-project/go-address" @@ -18,7 +13,6 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/dline" - "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/extern/sector-storage/mock" sealing "github.com/filecoin-project/lotus/extern/storage-sealing" proof3 "github.com/filecoin-project/specs-actors/v3/actors/runtime/proof" @@ -29,181 +23,9 @@ import ( "github.com/filecoin-project/lotus/chain/actors" minerActor "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" - bminer "github.com/filecoin-project/lotus/miner" "github.com/filecoin-project/lotus/node/impl" ) -func TestSDRUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, sn := b(t, []FullNodeOpts{FullNodeWithSDRAt(500, 1000)}, OneMiner) - client := n[0].FullNode.(*impl.FullNodeAPI) - miner := sn[0] - - addrinfo, err := client.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := miner.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - build.Clock.Sleep(time.Second) - - pledge := make(chan struct{}) - mine := int64(1) - done := make(chan struct{}) - go func() { - defer close(done) - round := 0 - for atomic.LoadInt64(&mine) != 0 { - build.Clock.Sleep(blocktime) - if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) { - - }}); err != nil { - t.Error(err) - } - - // 3 sealing rounds: before, during after. - if round >= 3 { - continue - } - - head, err := client.ChainHead(ctx) - assert.NoError(t, err) - - // rounds happen every 100 blocks, with a 50 block offset. 
- if head.Height() >= abi.ChainEpoch(round*500+50) { - round++ - pledge <- struct{}{} - - ver, err := client.StateNetworkVersion(ctx, head.Key()) - assert.NoError(t, err) - switch round { - case 1: - assert.Equal(t, network.Version6, ver) - case 2: - assert.Equal(t, network.Version7, ver) - case 3: - assert.Equal(t, network.Version8, ver) - } - } - - } - }() - - // before. - pledgeSectors(t, ctx, miner, 9, 0, pledge) - - s, err := miner.SectorsList(ctx) - require.NoError(t, err) - sort.Slice(s, func(i, j int) bool { - return s[i] < s[j] - }) - - for i, id := range s { - info, err := miner.SectorsStatus(ctx, id, true) - require.NoError(t, err) - expectProof := abi.RegisteredSealProof_StackedDrg2KiBV1 - if i >= 3 { - // after - expectProof = abi.RegisteredSealProof_StackedDrg2KiBV1_1 - } - assert.Equal(t, expectProof, info.SealProof, "sector %d, id %d", i, id) - } - - atomic.StoreInt64(&mine, 0) - <-done -} - -func TestPledgeSector(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, sn := b(t, OneFull, OneMiner) - client := n[0].FullNode.(*impl.FullNodeAPI) - miner := sn[0] - - addrinfo, err := client.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := miner.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - build.Clock.Sleep(time.Second) - - mine := int64(1) - done := make(chan struct{}) - go func() { - defer close(done) - for atomic.LoadInt64(&mine) != 0 { - build.Clock.Sleep(blocktime) - if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) { - - }}); err != nil { - t.Error(err) - } - } - }() - - pledgeSectors(t, ctx, miner, nSectors, 0, nil) - - atomic.StoreInt64(&mine, 0) - <-done -} - -func pledgeSectors(t *testing.T, ctx context.Context, miner TestStorageNode, n, existing int, blockNotif <-chan struct{}) { - for i := 0; i < n; i++ { - if i%3 == 0 && blockNotif != nil { - <-blockNotif - log.Errorf("WAIT") - } - log.Errorf("PLEDGING %d", i) - _, err := miner.PledgeSector(ctx) - require.NoError(t, err) - } - - for { - s, err := miner.SectorsList(ctx) // Note - the test builder doesn't import genesis sectors into FSM - require.NoError(t, err) - fmt.Printf("Sectors: %d\n", len(s)) - if len(s) >= n+existing { - break - } - - build.Clock.Sleep(100 * time.Millisecond) - } - - fmt.Printf("All sectors is fsm\n") - - s, err := miner.SectorsList(ctx) - require.NoError(t, err) - - toCheck := map[abi.SectorNumber]struct{}{} - for _, number := range s { - toCheck[number] = struct{}{} - } - - for len(toCheck) > 0 { - for n := range toCheck { - st, err := miner.SectorsStatus(ctx, n, false) - require.NoError(t, err) - if st.State == api.SectorState(sealing.Proving) { - delete(toCheck, n) - } - if strings.Contains(string(st.State), "Fail") { - t.Fatal("sector in a failed state", st.State) - } - } - - build.Clock.Sleep(100 * time.Millisecond) - fmt.Printf("WaitSeal: %d\n", len(s)) - } -} - func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) { for _, height := range []abi.ChainEpoch{ -1, // before @@ -617,7 +439,7 @@ func TestWindowPostDispute(t *testing.T, b APIBuilder, blocktime time.Duration) /// // Then we're going to manually submit bad proofs. 
n, sn := b(t, []FullNodeOpts{ - FullNodeWithLatestActorsAt(-1), + FullNodeWithV4ActorsAt(-1), }, []StorageMiner{ {Full: 0, Preseal: PresealGenesis}, {Full: 0}, @@ -900,7 +722,7 @@ func TestWindowPostDisputeFails(t *testing.T, b APIBuilder, blocktime time.Durat ctx, cancel := context.WithCancel(context.Background()) defer cancel() - n, sn := b(t, []FullNodeOpts{FullNodeWithLatestActorsAt(-1)}, OneMiner) + n, sn := b(t, []FullNodeOpts{FullNodeWithV4ActorsAt(-1)}, OneMiner) client := n[0].FullNode.(*impl.FullNodeAPI) miner := sn[0] diff --git a/build/openrpc/full.json.gz b/build/openrpc/full.json.gz index 65c5eddd3..a81cb1565 100644 Binary files a/build/openrpc/full.json.gz and b/build/openrpc/full.json.gz differ diff --git a/build/openrpc/miner.json.gz b/build/openrpc/miner.json.gz index 13b9917c8..1ddc27f12 100644 Binary files a/build/openrpc/miner.json.gz and b/build/openrpc/miner.json.gz differ diff --git a/build/openrpc/worker.json.gz b/build/openrpc/worker.json.gz index 798a6376c..5bb69aa49 100644 Binary files a/build/openrpc/worker.json.gz and b/build/openrpc/worker.json.gz differ diff --git a/build/parameters.go b/build/parameters.go index 7d34a7831..b70dad1c1 100644 --- a/build/parameters.go +++ b/build/parameters.go @@ -5,3 +5,7 @@ import rice "github.com/GeertJohan/go.rice" func ParametersJSON() []byte { return rice.MustFindBox("proof-params").MustBytes("parameters.json") } + +func SrsJSON() []byte { + return rice.MustFindBox("proof-params").MustBytes("srs-inner-product.json") +} diff --git a/build/params_2k.go b/build/params_2k.go index d93b26468..8ead218f5 100644 --- a/build/params_2k.go +++ b/build/params_2k.go @@ -24,22 +24,22 @@ var UpgradeIgnitionHeight = abi.ChainEpoch(-2) var UpgradeRefuelHeight = abi.ChainEpoch(-3) var UpgradeTapeHeight = abi.ChainEpoch(-4) -var UpgradeAssemblyHeight = abi.ChainEpoch(10) +var UpgradeAssemblyHeight = abi.ChainEpoch(5) var UpgradeLiftoffHeight = abi.ChainEpoch(-5) -var UpgradeKumquatHeight = abi.ChainEpoch(15) -var UpgradeCalicoHeight = abi.ChainEpoch(20) -var UpgradePersianHeight = abi.ChainEpoch(25) -var UpgradeOrangeHeight = abi.ChainEpoch(27) -var UpgradeClausHeight = abi.ChainEpoch(30) +var UpgradeKumquatHeight = abi.ChainEpoch(6) +var UpgradeCalicoHeight = abi.ChainEpoch(7) +var UpgradePersianHeight = abi.ChainEpoch(8) +var UpgradeOrangeHeight = abi.ChainEpoch(9) +var UpgradeClausHeight = abi.ChainEpoch(10) -var UpgradeTrustHeight = abi.ChainEpoch(35) +var UpgradeTrustHeight = abi.ChainEpoch(11) -var UpgradeNorwegianHeight = abi.ChainEpoch(40) +var UpgradeNorwegianHeight = abi.ChainEpoch(12) -var UpgradeTurboHeight = abi.ChainEpoch(45) +var UpgradeTurboHeight = abi.ChainEpoch(13) -var UpgradeHyperdriveHeight = abi.ChainEpoch(50) +var UpgradeHyperdriveHeight = abi.ChainEpoch(14) var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ 0: DrandMainnet, diff --git a/build/proof-params/srs-inner-product.json b/build/proof-params/srs-inner-product.json new file mode 100644 index 000000000..8566bf5fd --- /dev/null +++ b/build/proof-params/srs-inner-product.json @@ -0,0 +1,7 @@ +{ + "v28-fil-inner-product-v1.srs": { + "cid": "Qmdq44DjcQnFfU3PJcdX7J49GCqcUYszr1TxMbHtAkvQ3g", + "digest": "ae20310138f5ba81451d723f858e3797", + "sector_size": 0 + } +} diff --git a/chain/actors/policy/policy.go b/chain/actors/policy/policy.go index 191ffb5f5..113544c05 100644 --- a/chain/actors/policy/policy.go +++ b/chain/actors/policy/policy.go @@ -302,6 +302,10 @@ func GetDefaultSectorSize() abi.SectorSize { return szs[0] } +func GetDefaultAggregationProof() 
abi.RegisteredAggregationProof { + return abi.RegisteredAggregationProof_SnarkPackV1 +} + func GetSectorMaxLifetime(proof abi.RegisteredSealProof, nwVer network.Version) abi.ChainEpoch { if nwVer <= network.Version10 { return builtin5.SealProofPoliciesV0[proof].SectorMaxLifetime diff --git a/chain/actors/policy/policy.go.template b/chain/actors/policy/policy.go.template index d395d7132..f5055c423 100644 --- a/chain/actors/policy/policy.go.template +++ b/chain/actors/policy/policy.go.template @@ -197,6 +197,10 @@ func GetDefaultSectorSize() abi.SectorSize { return szs[0] } +func GetDefaultAggregationProof() abi.RegisteredAggregationProof { + return abi.RegisteredAggregationProof_SnarkPackV1 +} + func GetSectorMaxLifetime(proof abi.RegisteredSealProof, nwVer network.Version) abi.ChainEpoch { if nwVer <= network.Version10 { return builtin{{.latestVersion}}.SealProofPoliciesV0[proof].SectorMaxLifetime diff --git a/chain/gen/gen.go b/chain/gen/gen.go index b4e04424c..27feaeaaa 100644 --- a/chain/gen/gen.go +++ b/chain/gen/gen.go @@ -25,7 +25,7 @@ import ( "go.opencensus.io/trace" "golang.org/x/xerrors" - proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/blockstore" @@ -51,7 +51,7 @@ const msgsPerBlock = 20 //nolint:deadcode,varcheck var log = logging.Logger("gen") -var ValidWpostForTesting = []proof2.PoStProof{{ +var ValidWpostForTesting = []proof5.PoStProof{{ ProofBytes: []byte("valid proof"), }} @@ -460,7 +460,7 @@ func (cg *ChainGen) NextTipSetFromMinersWithMessages(base *types.TipSet, miners func (cg *ChainGen) makeBlock(parents *types.TipSet, m address.Address, vrfticket *types.Ticket, eticket *types.ElectionProof, bvals []types.BeaconEntry, height abi.ChainEpoch, - wpost []proof2.PoStProof, msgs []*types.SignedMessage) (*types.FullBlock, error) { + wpost []proof5.PoStProof, msgs []*types.SignedMessage) (*types.FullBlock, error) { var ts uint64 if cg.Timestamper != nil { @@ -598,7 +598,7 @@ func (mca mca) WalletSign(ctx context.Context, a address.Address, v []byte) (*cr type WinningPoStProver interface { GenerateCandidates(context.Context, abi.PoStRandomness, uint64) ([]uint64, error) - ComputeProof(context.Context, []proof2.SectorInfo, abi.PoStRandomness) ([]proof2.PoStProof, error) + ComputeProof(context.Context, []proof5.SectorInfo, abi.PoStRandomness) ([]proof5.PoStProof, error) } type wppProvider struct{} @@ -607,7 +607,7 @@ func (wpp *wppProvider) GenerateCandidates(ctx context.Context, _ abi.PoStRandom return []uint64{0}, nil } -func (wpp *wppProvider) ComputeProof(context.Context, []proof2.SectorInfo, abi.PoStRandomness) ([]proof2.PoStProof, error) { +func (wpp *wppProvider) ComputeProof(context.Context, []proof5.SectorInfo, abi.PoStRandomness) ([]proof5.PoStProof, error) { return ValidWpostForTesting, nil } @@ -685,15 +685,19 @@ type genFakeVerifier struct{} var _ ffiwrapper.Verifier = (*genFakeVerifier)(nil) -func (m genFakeVerifier) VerifySeal(svi proof2.SealVerifyInfo) (bool, error) { +func (m genFakeVerifier) VerifySeal(svi proof5.SealVerifyInfo) (bool, error) { return true, nil } -func (m genFakeVerifier) VerifyWinningPoSt(ctx context.Context, info proof2.WinningPoStVerifyInfo) (bool, error) { +func (m genFakeVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) { panic("not supported") } -func (m genFakeVerifier) VerifyWindowPoSt(ctx context.Context, info 
proof2.WindowPoStVerifyInfo) (bool, error) { +func (m genFakeVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) { + panic("not supported") +} + +func (m genFakeVerifier) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) { panic("not supported") } diff --git a/chain/gen/genesis/miners.go b/chain/gen/genesis/miners.go index 297543886..be33560e5 100644 --- a/chain/gen/genesis/miners.go +++ b/chain/gen/genesis/miners.go @@ -27,7 +27,7 @@ import ( miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" power0 "github.com/filecoin-project/specs-actors/actors/builtin/power" reward0 "github.com/filecoin-project/specs-actors/actors/builtin/reward" - runtime2 "github.com/filecoin-project/specs-actors/v2/actors/runtime" + runtime5 "github.com/filecoin-project/specs-actors/v5/actors/runtime" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/store" @@ -46,7 +46,7 @@ func MinerAddress(genesisIndex uint64) address.Address { } type fakedSigSyscalls struct { - runtime2.Syscalls + runtime5.Syscalls } func (fss *fakedSigSyscalls) VerifySignature(signature crypto.Signature, signer address.Address, plaintext []byte) error { @@ -54,7 +54,7 @@ func (fss *fakedSigSyscalls) VerifySignature(signature crypto.Signature, signer } func mkFakedSigSyscalls(base vm.SyscallBuilder) vm.SyscallBuilder { - return func(ctx context.Context, rt *vm.Runtime) runtime2.Syscalls { + return func(ctx context.Context, rt *vm.Runtime) runtime5.Syscalls { return &fakedSigSyscalls{ base(ctx, rt), } diff --git a/chain/state/statetree.go b/chain/state/statetree.go index 33a8116df..d783fff69 100644 --- a/chain/state/statetree.go +++ b/chain/state/statetree.go @@ -25,6 +25,7 @@ import ( states2 "github.com/filecoin-project/specs-actors/v2/actors/states" states3 "github.com/filecoin-project/specs-actors/v3/actors/states" states4 "github.com/filecoin-project/specs-actors/v4/actors/states" + states5 "github.com/filecoin-project/specs-actors/v5/actors/states" ) var log = logging.Logger("statetree") @@ -191,6 +192,12 @@ func NewStateTree(cst cbor.IpldStore, ver types.StateTreeVersion) (*StateTree, e return nil, xerrors.Errorf("failed to create state tree: %w", err) } hamt = tree.Map + case types.StateTreeVersion4: + tree, err := states5.NewTree(store) + if err != nil { + return nil, xerrors.Errorf("failed to create state tree: %w", err) + } + hamt = tree.Map default: return nil, xerrors.Errorf("unsupported state tree version: %d", ver) } @@ -246,6 +253,12 @@ func LoadStateTree(cst cbor.IpldStore, c cid.Cid) (*StateTree, error) { if tree != nil { hamt = tree.Map } + case types.StateTreeVersion4: + var tree *states5.Tree + tree, err = states5.LoadTree(store, root.Actors) + if tree != nil { + hamt = tree.Map + } default: return nil, xerrors.Errorf("unsupported state tree version: %d", root.Version) } diff --git a/chain/stmgr/forks.go b/chain/stmgr/forks.go index de5e91388..ee5a26dea 100644 --- a/chain/stmgr/forks.go +++ b/chain/stmgr/forks.go @@ -1247,7 +1247,7 @@ func upgradeActorsV5Common( // Persist the result. newRoot, err := store.Put(ctx, &types.StateRoot{ - Version: types.StateTreeVersion3, + Version: types.StateTreeVersion4, Actors: newHamtRoot, Info: stateRoot.Info, }) diff --git a/chain/types/state.go b/chain/types/state.go index b561aab71..c8f8f1cd9 100644 --- a/chain/types/state.go +++ b/chain/types/state.go @@ -13,8 +13,10 @@ const ( StateTreeVersion1 // StateTreeVersion2 corresponds to actors v3. 
StateTreeVersion2 - // StateTreeVersion3 corresponds to actors >= v4. + // StateTreeVersion3 corresponds to actors v4. StateTreeVersion3 + // StateTreeVersion4 corresponds to actors v5. + StateTreeVersion4 ) type StateRoot struct { diff --git a/chain/vm/gas.go b/chain/vm/gas.go index eef431aef..67dd7677e 100644 --- a/chain/vm/gas.go +++ b/chain/vm/gas.go @@ -9,8 +9,8 @@ import ( addr "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/crypto" - vmr2 "github.com/filecoin-project/specs-actors/v2/actors/runtime" - proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + vmr5 "github.com/filecoin-project/specs-actors/v5/actors/runtime" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" "github.com/ipfs/go-cid" ) @@ -74,8 +74,9 @@ type Pricelist interface { OnVerifySignature(sigType crypto.SigType, planTextSize int) (GasCharge, error) OnHashing(dataSize int) GasCharge OnComputeUnsealedSectorCid(proofType abi.RegisteredSealProof, pieces []abi.PieceInfo) GasCharge - OnVerifySeal(info proof2.SealVerifyInfo) GasCharge - OnVerifyPost(info proof2.WindowPoStVerifyInfo) GasCharge + OnVerifySeal(info proof5.SealVerifyInfo) GasCharge + OnVerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) GasCharge + OnVerifyPost(info proof5.WindowPoStVerifyInfo) GasCharge OnVerifyConsensusFault() GasCharge } @@ -111,6 +112,7 @@ var prices = map[abi.ChainEpoch]Pricelist{ hashingBase: 31355, computeUnsealedSectorCidBase: 98647, verifySealBase: 2000, // TODO gas , it VerifySeal syscall is not used + verifyAggregateSealBase: 0, verifyPostLookup: map[abi.RegisteredPoStProof]scalingCost{ abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: { flat: 123861062, @@ -158,7 +160,8 @@ var prices = map[abi.ChainEpoch]Pricelist{ hashingBase: 31355, computeUnsealedSectorCidBase: 98647, - verifySealBase: 2000, // TODO gas , it VerifySeal syscall is not used + verifySealBase: 2000, // TODO gas , it VerifySeal syscall is not used + verifyAggregateSealBase: 400_000_000, // TODO (~40ms, I think) verifyPostLookup: map[abi.RegisteredPoStProof]scalingCost{ abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: { flat: 117680921, @@ -198,7 +201,7 @@ func PricelistByEpoch(epoch abi.ChainEpoch) Pricelist { } type pricedSyscalls struct { - under vmr2.Syscalls + under vmr5.Syscalls pl Pricelist chargeGas func(GasCharge) } @@ -232,7 +235,7 @@ func (ps pricedSyscalls) ComputeUnsealedSectorCID(reg abi.RegisteredSealProof, p } // Verifies a sector seal proof. -func (ps pricedSyscalls) VerifySeal(vi proof2.SealVerifyInfo) error { +func (ps pricedSyscalls) VerifySeal(vi proof5.SealVerifyInfo) error { ps.chargeGas(ps.pl.OnVerifySeal(vi)) defer ps.chargeGas(gasOnActorExec) @@ -240,7 +243,7 @@ func (ps pricedSyscalls) VerifySeal(vi proof2.SealVerifyInfo) error { } // Verifies a proof of spacetime. -func (ps pricedSyscalls) VerifyPoSt(vi proof2.WindowPoStVerifyInfo) error { +func (ps pricedSyscalls) VerifyPoSt(vi proof5.WindowPoStVerifyInfo) error { ps.chargeGas(ps.pl.OnVerifyPost(vi)) defer ps.chargeGas(gasOnActorExec) @@ -257,14 +260,14 @@ func (ps pricedSyscalls) VerifyPoSt(vi proof2.WindowPoStVerifyInfo) error { // the "parent grinding fault", in which case it must be the sibling of h1 (same parent tipset) and one of the // blocks in the parent of h2 (i.e. h2's grandparent). // Returns nil and an error if the headers don't prove a fault. 
-func (ps pricedSyscalls) VerifyConsensusFault(h1 []byte, h2 []byte, extra []byte) (*vmr2.ConsensusFault, error) { +func (ps pricedSyscalls) VerifyConsensusFault(h1 []byte, h2 []byte, extra []byte) (*vmr5.ConsensusFault, error) { ps.chargeGas(ps.pl.OnVerifyConsensusFault()) defer ps.chargeGas(gasOnActorExec) return ps.under.VerifyConsensusFault(h1, h2, extra) } -func (ps pricedSyscalls) BatchVerifySeals(inp map[address.Address][]proof2.SealVerifyInfo) (map[address.Address][]bool, error) { +func (ps pricedSyscalls) BatchVerifySeals(inp map[address.Address][]proof5.SealVerifyInfo) (map[address.Address][]bool, error) { count := int64(0) for _, svis := range inp { count += int64(len(svis)) @@ -277,3 +280,10 @@ func (ps pricedSyscalls) BatchVerifySeals(inp map[address.Address][]proof2.SealV return ps.under.BatchVerifySeals(inp) } + +func (ps pricedSyscalls) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) error { + ps.chargeGas(ps.pl.OnVerifyAggregateSeals(aggregate)) + defer ps.chargeGas(gasOnActorExec) + + return ps.under.VerifyAggregateSeals(aggregate) +} diff --git a/chain/vm/gas_v0.go b/chain/vm/gas_v0.go index 7c864b7f9..c90ebaa73 100644 --- a/chain/vm/gas_v0.go +++ b/chain/vm/gas_v0.go @@ -4,6 +4,7 @@ import ( "fmt" proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" @@ -91,6 +92,7 @@ type pricelistV0 struct { computeUnsealedSectorCidBase int64 verifySealBase int64 + verifyAggregateSealBase int64 verifyPostLookup map[abi.RegisteredPoStProof]scalingCost verifyPostDiscount bool verifyConsensusFault int64 @@ -185,6 +187,12 @@ func (pl *pricelistV0) OnVerifySeal(info proof2.SealVerifyInfo) GasCharge { return newGasCharge("OnVerifySeal", pl.verifySealBase, 0) } +// OnVerifyAggregateSeals +func (pl *pricelistV0) OnVerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) GasCharge { + // TODO: this needs more cost tunning + return newGasCharge("OnVerifyAggregateSeals", pl.verifyAggregateSealBase, 0) +} + // OnVerifyPost func (pl *pricelistV0) OnVerifyPost(info proof2.WindowPoStVerifyInfo) GasCharge { sectorSize := "unknown" diff --git a/chain/vm/invoker.go b/chain/vm/invoker.go index 126b57090..e4b154031 100644 --- a/chain/vm/invoker.go +++ b/chain/vm/invoker.go @@ -16,10 +16,10 @@ import ( exported0 "github.com/filecoin-project/specs-actors/actors/builtin/exported" exported2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/exported" - vmr "github.com/filecoin-project/specs-actors/v2/actors/runtime" exported3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/exported" exported4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/exported" exported5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/exported" + vmr "github.com/filecoin-project/specs-actors/v5/actors/runtime" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/exitcode" @@ -155,7 +155,7 @@ func (*ActorRegistry) transform(instance invokee) (nativeCode, error) { "vmr.Runtime, ") } if !runtimeType.Implements(t.In(0)) { - return nil, newErr("first arguemnt should be vmr.Runtime") + return nil, newErr("first argument should be vmr.Runtime") } if t.In(1).Kind() != reflect.Ptr { return nil, newErr("second argument should be of kind reflect.Ptr") diff --git a/chain/vm/runtime.go b/chain/vm/runtime.go index cdb1720de..a3e2f293f 
100644 --- a/chain/vm/runtime.go +++ b/chain/vm/runtime.go @@ -16,7 +16,7 @@ import ( "github.com/filecoin-project/go-state-types/network" rtt "github.com/filecoin-project/go-state-types/rt" rt0 "github.com/filecoin-project/specs-actors/actors/runtime" - rt2 "github.com/filecoin-project/specs-actors/v2/actors/runtime" + rt5 "github.com/filecoin-project/specs-actors/v5/actors/runtime" "github.com/ipfs/go-cid" ipldcbor "github.com/ipfs/go-ipld-cbor" "go.opencensus.io/trace" @@ -54,8 +54,8 @@ func (m *Message) ValueReceived() abi.TokenAmount { var EnableGasTracing = false type Runtime struct { - rt2.Message - rt2.Syscalls + rt5.Message + rt5.Syscalls ctx context.Context @@ -136,7 +136,7 @@ func (rt *Runtime) StorePut(x cbor.Marshaler) cid.Cid { } var _ rt0.Runtime = (*Runtime)(nil) -var _ rt2.Runtime = (*Runtime)(nil) +var _ rt5.Runtime = (*Runtime)(nil) func (rt *Runtime) shimCall(f func() interface{}) (rval []byte, aerr aerrors.ActorError) { defer func() { diff --git a/chain/vm/syscalls.go b/chain/vm/syscalls.go index 0bcfe10a7..568197bc8 100644 --- a/chain/vm/syscalls.go +++ b/chain/vm/syscalls.go @@ -26,8 +26,8 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/lib/sigs" - runtime2 "github.com/filecoin-project/specs-actors/v2/actors/runtime" - proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + runtime5 "github.com/filecoin-project/specs-actors/v5/actors/runtime" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" ) func init() { @@ -36,10 +36,10 @@ func init() { // Actual type is defined in chain/types/vmcontext.go because the VMContext interface is there -type SyscallBuilder func(ctx context.Context, rt *Runtime) runtime2.Syscalls +type SyscallBuilder func(ctx context.Context, rt *Runtime) runtime5.Syscalls func Syscalls(verifier ffiwrapper.Verifier) SyscallBuilder { - return func(ctx context.Context, rt *Runtime) runtime2.Syscalls { + return func(ctx context.Context, rt *Runtime) runtime5.Syscalls { return &syscallShim{ ctx: ctx, @@ -90,7 +90,7 @@ func (ss *syscallShim) HashBlake2b(data []byte) [32]byte { // Checks validity of the submitted consensus fault with the two block headers needed to prove the fault // and an optional extra one to check common ancestry (as needed). // Note that the blocks are ordered: the method requires a.Epoch() <= b.Epoch(). -func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime2.ConsensusFault, error) { +func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime5.ConsensusFault, error) { // Note that block syntax is not validated. Any validly signed block will be accepted pursuant to the below conditions. // Whether or not it could ever have been accepted in a chain is not checked/does not matter here. 
// for that reason when checking block parent relationships, rather than instantiating a Tipset to do so @@ -133,14 +133,14 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime2.Conse } // (2) check for the consensus faults themselves - var consensusFault *runtime2.ConsensusFault + var consensusFault *runtime5.ConsensusFault // (a) double-fork mining fault if blockA.Height == blockB.Height { - consensusFault = &runtime2.ConsensusFault{ + consensusFault = &runtime5.ConsensusFault{ Target: blockA.Miner, Epoch: blockB.Height, - Type: runtime2.ConsensusFaultDoubleForkMining, + Type: runtime5.ConsensusFaultDoubleForkMining, } } @@ -148,10 +148,10 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime2.Conse // strictly speaking no need to compare heights based on double fork mining check above, // but at same height this would be a different fault. if types.CidArrsEqual(blockA.Parents, blockB.Parents) && blockA.Height != blockB.Height { - consensusFault = &runtime2.ConsensusFault{ + consensusFault = &runtime5.ConsensusFault{ Target: blockA.Miner, Epoch: blockB.Height, - Type: runtime2.ConsensusFaultTimeOffsetMining, + Type: runtime5.ConsensusFaultTimeOffsetMining, } } @@ -171,10 +171,10 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime2.Conse if types.CidArrsEqual(blockA.Parents, blockC.Parents) && blockA.Height == blockC.Height && types.CidArrsContains(blockB.Parents, blockC.Cid()) && !types.CidArrsContains(blockB.Parents, blockA.Cid()) { - consensusFault = &runtime2.ConsensusFault{ + consensusFault = &runtime5.ConsensusFault{ Target: blockA.Miner, Epoch: blockB.Height, - Type: runtime2.ConsensusFaultParentGrinding, + Type: runtime5.ConsensusFaultParentGrinding, } } } @@ -243,7 +243,7 @@ func (ss *syscallShim) workerKeyAtLookback(height abi.ChainEpoch) (address.Addre return ResolveToKeyAddr(ss.cstate, ss.cst, info.Worker) } -func (ss *syscallShim) VerifyPoSt(proof proof2.WindowPoStVerifyInfo) error { +func (ss *syscallShim) VerifyPoSt(proof proof5.WindowPoStVerifyInfo) error { ok, err := ss.verifier.VerifyWindowPoSt(context.TODO(), proof) if err != nil { return err @@ -254,7 +254,7 @@ func (ss *syscallShim) VerifyPoSt(proof proof2.WindowPoStVerifyInfo) error { return nil } -func (ss *syscallShim) VerifySeal(info proof2.SealVerifyInfo) error { +func (ss *syscallShim) VerifySeal(info proof5.SealVerifyInfo) error { //_, span := trace.StartSpan(ctx, "ValidatePoRep") //defer span.End() @@ -281,6 +281,18 @@ func (ss *syscallShim) VerifySeal(info proof2.SealVerifyInfo) error { return nil } +func (ss *syscallShim) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) error { + ok, err := ss.verifier.VerifyAggregateSeals(aggregate) + if err != nil { + return xerrors.Errorf("failed to verify aggregated PoRep: %w", err) + } + if !ok { + return fmt.Errorf("invalid aggredate proof") + } + + return nil +} + func (ss *syscallShim) VerifySignature(sig crypto.Signature, addr address.Address, input []byte) error { // TODO: in genesis setup, we are currently faking signatures @@ -294,7 +306,7 @@ func (ss *syscallShim) VerifySignature(sig crypto.Signature, addr address.Addres var BatchSealVerifyParallelism = goruntime.NumCPU() -func (ss *syscallShim) BatchVerifySeals(inp map[address.Address][]proof2.SealVerifyInfo) (map[address.Address][]bool, error) { +func (ss *syscallShim) BatchVerifySeals(inp map[address.Address][]proof5.SealVerifyInfo) (map[address.Address][]bool, error) { out := make(map[address.Address][]bool) sema 
:= make(chan struct{}, BatchSealVerifyParallelism) @@ -306,7 +318,7 @@ func (ss *syscallShim) BatchVerifySeals(inp map[address.Address][]proof2.SealVer for i, s := range seals { wg.Add(1) - go func(ma address.Address, ix int, svi proof2.SealVerifyInfo, res []bool) { + go func(ma address.Address, ix int, svi proof5.SealVerifyInfo, res []bool) { defer wg.Done() sema <- struct{}{} diff --git a/cli/params.go b/cli/params.go index 8419507b8..1aa6555c5 100644 --- a/cli/params.go +++ b/cli/params.go @@ -23,7 +23,7 @@ var FetchParamCmd = &cli.Command{ } sectorSize := uint64(sectorSizeInt) - err = paramfetch.GetParams(ReqContext(cctx), build.ParametersJSON(), sectorSize) + err = paramfetch.GetParams(ReqContext(cctx), build.ParametersJSON(), build.SrsJSON(), sectorSize) if err != nil { return xerrors.Errorf("fetching proof parameters: %w", err) } diff --git a/cmd/lotus-bench/caching_verifier.go b/cmd/lotus-bench/caching_verifier.go index 5b434c762..f4cc0f837 100644 --- a/cmd/lotus-bench/caching_verifier.go +++ b/cmd/lotus-bench/caching_verifier.go @@ -8,6 +8,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" "github.com/ipfs/go-datastore" "github.com/minio/blake2b-simd" cbg "github.com/whyrusleeping/cbor-gen" @@ -96,4 +97,8 @@ func (cv *cachingVerifier) GenerateWinningPoStSectorChallenge(ctx context.Contex return cv.backend.GenerateWinningPoStSectorChallenge(ctx, proofType, a, rnd, u) } +func (cv cachingVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) { + return cv.backend.VerifyAggregateSeals(aggregate) +} + var _ ffiwrapper.Verifier = (*cachingVerifier)(nil) diff --git a/cmd/lotus-bench/main.go b/cmd/lotus-bench/main.go index 81aa09a75..0b8ec6fe3 100644 --- a/cmd/lotus-bench/main.go +++ b/cmd/lotus-bench/main.go @@ -243,7 +243,7 @@ var sealBenchCmd = &cli.Command{ // Only fetch parameters if actually needed skipc2 := c.Bool("skip-commit2") if !skipc2 { - if err := paramfetch.GetParams(lcli.ReqContext(c), build.ParametersJSON(), uint64(sectorSize)); err != nil { + if err := paramfetch.GetParams(lcli.ReqContext(c), build.ParametersJSON(), build.SrsJSON(), uint64(sectorSize)); err != nil { return xerrors.Errorf("getting params: %w", err) } } @@ -738,7 +738,7 @@ var proveCmd = &cli.Command{ return xerrors.Errorf("unmarshalling input file: %w", err) } - if err := paramfetch.GetParams(lcli.ReqContext(c), build.ParametersJSON(), c2in.SectorSize); err != nil { + if err := paramfetch.GetParams(lcli.ReqContext(c), build.ParametersJSON(), build.SrsJSON(), c2in.SectorSize); err != nil { return xerrors.Errorf("getting params: %w", err) } diff --git a/cmd/lotus-seal-worker/main.go b/cmd/lotus-seal-worker/main.go index 24918e52a..5a78c6dac 100644 --- a/cmd/lotus-seal-worker/main.go +++ b/cmd/lotus-seal-worker/main.go @@ -228,7 +228,7 @@ var runCmd = &cli.Command{ } if cctx.Bool("commit") { - if err := paramfetch.GetParams(ctx, build.ParametersJSON(), uint64(ssize)); err != nil { + if err := paramfetch.GetParams(ctx, build.ParametersJSON(), build.SrsJSON(), uint64(ssize)); err != nil { return xerrors.Errorf("get params: %w", err) } } diff --git a/cmd/lotus-shed/params.go b/cmd/lotus-shed/params.go index 3f7e7b6fb..e45d9489c 100644 --- a/cmd/lotus-shed/params.go +++ b/cmd/lotus-shed/params.go @@ -25,7 +25,7 @@ var fetchParamCmd = 
&cli.Command{ return err } sectorSize := uint64(sectorSizeInt) - err = paramfetch.GetParams(lcli.ReqContext(cctx), build.ParametersJSON(), sectorSize) + err = paramfetch.GetParams(lcli.ReqContext(cctx), build.ParametersJSON(), build.SrsJSON(), sectorSize) if err != nil { return xerrors.Errorf("fetching proof parameters: %w", err) } diff --git a/cmd/lotus-storage-miner/info.go b/cmd/lotus-storage-miner/info.go index 7650de035..0fe14f1ff 100644 --- a/cmd/lotus-storage-miner/info.go +++ b/cmd/lotus-storage-miner/info.go @@ -291,10 +291,14 @@ var stateList = []stateMeta{ {col: color.FgYellow, state: sealing.PreCommit2}, {col: color.FgYellow, state: sealing.PreCommitting}, {col: color.FgYellow, state: sealing.PreCommitWait}, + {col: color.FgYellow, state: sealing.SubmitPreCommitBatch}, + {col: color.FgYellow, state: sealing.PreCommitBatchWait}, {col: color.FgYellow, state: sealing.WaitSeed}, {col: color.FgYellow, state: sealing.Committing}, {col: color.FgYellow, state: sealing.SubmitCommit}, {col: color.FgYellow, state: sealing.CommitWait}, + {col: color.FgYellow, state: sealing.SubmitCommitAggregate}, + {col: color.FgYellow, state: sealing.CommitAggregateWait}, {col: color.FgYellow, state: sealing.FinalizeSector}, {col: color.FgCyan, state: sealing.Terminating}, diff --git a/cmd/lotus-storage-miner/init.go b/cmd/lotus-storage-miner/init.go index 2e38dcc06..bac8444cc 100644 --- a/cmd/lotus-storage-miner/init.go +++ b/cmd/lotus-storage-miner/init.go @@ -143,7 +143,7 @@ var initCmd = &cli.Command{ log.Info("Checking proof parameters") - if err := paramfetch.GetParams(ctx, build.ParametersJSON(), uint64(ssize)); err != nil { + if err := paramfetch.GetParams(ctx, build.ParametersJSON(), build.SrsJSON(), uint64(ssize)); err != nil { return xerrors.Errorf("fetching proof parameters: %w", err) } diff --git a/cmd/lotus-storage-miner/init_restore.go b/cmd/lotus-storage-miner/init_restore.go index 12358e63a..af4c43c95 100644 --- a/cmd/lotus-storage-miner/init_restore.go +++ b/cmd/lotus-storage-miner/init_restore.go @@ -249,7 +249,7 @@ var initRestoreCmd = &cli.Command{ log.Info("Checking proof parameters") - if err := paramfetch.GetParams(ctx, build.ParametersJSON(), uint64(mi.SectorSize)); err != nil { + if err := paramfetch.GetParams(ctx, build.ParametersJSON(), build.SrsJSON(), uint64(mi.SectorSize)); err != nil { return xerrors.Errorf("fetching proof parameters: %w", err) } diff --git a/cmd/lotus-storage-miner/sectors.go b/cmd/lotus-storage-miner/sectors.go index 3791dbf07..2bb44b4f4 100644 --- a/cmd/lotus-storage-miner/sectors.go +++ b/cmd/lotus-storage-miner/sectors.go @@ -45,6 +45,7 @@ var sectorsCmd = &cli.Command{ sectorsStartSealCmd, sectorsSealDelayCmd, sectorsCapacityCollateralCmd, + sectorsBatching, }, } @@ -969,6 +970,109 @@ var sectorsUpdateCmd = &cli.Command{ }, } +var sectorsBatching = &cli.Command{ + Name: "batching", + Usage: "manage batch sector operations", + Subcommands: []*cli.Command{ + sectorsBatchingPendingCommit, + sectorsBatchingPendingPreCommit, + }, +} + +var sectorsBatchingPendingCommit = &cli.Command{ + Name: "pending-commit", + Usage: "list sectors waiting in commit batch queue", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "publish-now", + Usage: "send a batch now", + }, + }, + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := lcli.ReqContext(cctx) + + if cctx.Bool("publish-now") { + cid, err := api.SectorCommitFlush(ctx) + if err != nil { + return 
xerrors.Errorf("flush: %w", err) + } + if cid == nil { + return xerrors.Errorf("no sectors to publish") + } + + fmt.Println("sector batch published: ", cid) + return nil + } + + pending, err := api.SectorCommitPending(ctx) + if err != nil { + return xerrors.Errorf("getting pending deals: %w", err) + } + + if len(pending) > 0 { + for _, sector := range pending { + fmt.Println(sector.Number) + } + return nil + } + + fmt.Println("No sectors queued to be committed") + return nil + }, +} + +var sectorsBatchingPendingPreCommit = &cli.Command{ + Name: "pending-precommit", + Usage: "list sectors waiting in precommit batch queue", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "publish-now", + Usage: "send a batch now", + }, + }, + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := lcli.ReqContext(cctx) + + if cctx.Bool("publish-now") { + cid, err := api.SectorPreCommitFlush(ctx) + if err != nil { + return xerrors.Errorf("flush: %w", err) + } + if cid == nil { + return xerrors.Errorf("no sectors to publish") + } + + fmt.Println("sector batch published: ", cid) + return nil + } + + pending, err := api.SectorPreCommitPending(ctx) + if err != nil { + return xerrors.Errorf("getting pending deals: %w", err) + } + + if len(pending) > 0 { + for _, sector := range pending { + fmt.Println(sector.Number) + } + return nil + } + + fmt.Println("No sectors queued to be committed") + return nil + }, +} + func yesno(b bool) string { if b { return color.GreenString("YES") diff --git a/cmd/lotus/daemon.go b/cmd/lotus/daemon.go index 5a59ec816..644892ee2 100644 --- a/cmd/lotus/daemon.go +++ b/cmd/lotus/daemon.go @@ -231,7 +231,7 @@ var DaemonCmd = &cli.Command{ freshRepo := err != repo.ErrRepoExists if !isLite { - if err := paramfetch.GetParams(lcli.ReqContext(cctx), build.ParametersJSON(), 0); err != nil { + if err := paramfetch.GetParams(lcli.ReqContext(cctx), build.ParametersJSON(), build.SrsJSON(), 0); err != nil { return xerrors.Errorf("fetching proof parameters: %w", err) } } diff --git a/documentation/en/api-methods-miner.md b/documentation/en/api-methods-miner.md index d140d7d85..1e8e7cbc5 100644 --- a/documentation/en/api-methods-miner.md +++ b/documentation/en/api-methods-miner.md @@ -98,9 +98,13 @@ * [SealingAbort](#SealingAbort) * [SealingSchedDiag](#SealingSchedDiag) * [Sector](#Sector) + * [SectorCommitFlush](#SectorCommitFlush) + * [SectorCommitPending](#SectorCommitPending) * [SectorGetExpectedSealDuration](#SectorGetExpectedSealDuration) * [SectorGetSealDelay](#SectorGetSealDelay) * [SectorMarkForUpgrade](#SectorMarkForUpgrade) + * [SectorPreCommitFlush](#SectorPreCommitFlush) + * [SectorPreCommitPending](#SectorPreCommitPending) * [SectorRemove](#SectorRemove) * [SectorSetExpectedSealDuration](#SectorSetExpectedSealDuration) * [SectorSetSealDelay](#SectorSetSealDelay) @@ -1556,6 +1560,27 @@ Response: `{}` ## Sector +### SectorCommitFlush +SectorCommitFlush immediately sends a Commit message with sectors aggregated for Commit. 
+Returns null if message wasn't sent + + +Perms: admin + +Inputs: `null` + +Response: `null` + +### SectorCommitPending +SectorCommitPending returns a list of pending Commit sectors to be sent in the next aggregate message + + +Perms: admin + +Inputs: `null` + +Response: `null` + ### SectorGetExpectedSealDuration SectorGetExpectedSealDuration gets the expected time for a sector to seal @@ -1591,6 +1616,27 @@ Inputs: Response: `{}` +### SectorPreCommitFlush +SectorPreCommitFlush immediately sends a PreCommit message with sectors batched for PreCommit. +Returns null if message wasn't sent + + +Perms: admin + +Inputs: `null` + +Response: `null` + +### SectorPreCommitPending +SectorPreCommitPending returns a list of pending PreCommit sectors to be sent in the next batch message + + +Perms: admin + +Inputs: `null` + +Response: `null` + ### SectorRemove SectorRemove removes the sector from storage. It doesn't terminate it on-chain, which can be done with SectorTerminate. Removing and not terminating live sectors will cause additional penalties. diff --git a/documentation/en/cli-lotus-miner.md b/documentation/en/cli-lotus-miner.md new file mode 100644 index 000000000..dfa9072c9 --- /dev/null +++ b/documentation/en/cli-lotus-miner.md @@ -0,0 +1,1864 @@ +# lotus-miner +``` +NAME: + lotus-miner - Filecoin decentralized storage network miner + +USAGE: + lotus-miner [global options] command [command options] [arguments...] + +VERSION: + 1.11.0-dev + +COMMANDS: + init Initialize a lotus miner repo + run Start a lotus miner process + stop Stop a running lotus miner + config Output default configuration + backup Create node metadata backup + version Print version + help, h Shows a list of commands or help for one command + CHAIN: + actor manipulate the miner actor + info Print miner info + DEVELOPER: + auth Manage RPC permissions + log Manage logging + wait-api Wait for lotus api to come online + fetch-params Fetch proving parameters + MARKET: + storage-deals Manage storage deals and related configuration + retrieval-deals Manage retrieval deals and related configuration + data-transfers Manage data transfers + NETWORK: + net Manage P2P Network + RETRIEVAL: + pieces interact with the piecestore + STORAGE: + sectors interact with sector store + proving View proving information + storage manage sector storage + sealing interact with sealing pipeline + +GLOBAL OPTIONS: + --actor value, -a value specify other actor to check state for (read only) + --color (default: false) + --miner-repo value, --storagerepo value Specify miner repo path. flag(storagerepo) and env(LOTUS_STORAGE_PATH) are DEPRECATION, will REMOVE SOON (default: "~/.lotusminer") [$LOTUS_MINER_PATH, $LOTUS_STORAGE_PATH] + --help, -h show help (default: false) + --version, -v print the version (default: false) +``` + +## lotus-miner init +``` +NAME: + lotus-miner init - Initialize a lotus miner repo + +USAGE: + lotus-miner init command [command options] [arguments...] 
+ +COMMANDS: + restore Initialize a lotus miner repo from a backup + help, h Shows a list of commands or help for one command + +OPTIONS: + --actor value specify the address of an already created miner actor + --create-worker-key create separate worker key (default: false) + --worker value, -w value worker key to use (overrides --create-worker-key) + --owner value, -o value owner key to use + --sector-size value specify sector size to use (default: "32GiB") + --pre-sealed-sectors value specify set of presealed sectors for starting as a genesis miner + --pre-sealed-metadata value specify the metadata file for the presealed sectors + --nosync don't check full-node sync status (default: false) + --symlink-imported-sectors attempt to symlink to presealed sectors instead of copying them into place (default: false) + --no-local-storage don't use storageminer repo for sector storage (default: false) + --gas-premium value set gas premium for initialization messages in AttoFIL (default: "0") + --from value select which address to send actor creation message from + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus-miner init restore +``` +NAME: + lotus-miner init restore - Initialize a lotus miner repo from a backup + +USAGE: + lotus-miner init restore [command options] [backupFile] + +OPTIONS: + --nosync don't check full-node sync status (default: false) + --config value config file (config.toml) + --storage-config value storage paths config (storage.json) + --help, -h show help (default: false) + +``` + +## lotus-miner run +``` +NAME: + lotus-miner run - Start a lotus miner process + +USAGE: + lotus-miner run [command options] [arguments...] + +OPTIONS: + --miner-api value 2345 + --enable-gpu-proving enable use of GPU for mining operations (default: true) + --nosync don't check full-node sync status (default: false) + --manage-fdlimit manage open file limit (default: true) + --help, -h show help (default: false) + +``` + +## lotus-miner stop +``` +NAME: + lotus-miner stop - Stop a running lotus miner + +USAGE: + lotus-miner stop [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +## lotus-miner config +``` +NAME: + lotus-miner config - Output default configuration + +USAGE: + lotus-miner config [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +## lotus-miner backup +``` +NAME: + lotus-miner backup - Create node metadata backup + +USAGE: + lotus-miner backup [command options] [backup file path] + +DESCRIPTION: + The backup command writes a copy of node metadata under the specified path + +Online backups: +For security reasons, the daemon must have the LOTUS_BACKUP_BASE_PATH env var set +to a path where backup files are supposed to be saved, and the path specified in +this command must be within this base path + +OPTIONS: + --offline create backup without the node running (default: false) + --help, -h show help (default: false) + +``` + +## lotus-miner version +``` +NAME: + lotus-miner version - Print version + +USAGE: + lotus-miner version [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +## lotus-miner actor +``` +NAME: + lotus-miner actor - manipulate the miner actor + +USAGE: + lotus-miner actor command [command options] [arguments...]
+ +COMMANDS: + set-addrs set addresses that your miner can be publicly dialed on + withdraw withdraw available balance + repay-debt pay down a miner's debt + set-peer-id set the peer id of your miner + set-owner Set owner address (this command should be invoked twice, first with the old owner as the senderAddress, and then with the new owner) + control Manage control addresses + propose-change-worker Propose a worker address change + confirm-change-worker Confirm a worker address change + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus-miner actor set-addrs +``` +NAME: + lotus-miner actor set-addrs - set addresses that your miner can be publicly dialed on + +USAGE: + lotus-miner actor set-addrs [command options] [arguments...] + +OPTIONS: + --gas-limit value set gas limit (default: 0) + --unset unset address (default: false) + --help, -h show help (default: false) + +``` + +### lotus-miner actor withdraw +``` +NAME: + lotus-miner actor withdraw - withdraw available balance + +USAGE: + lotus-miner actor withdraw [command options] [amount (FIL)] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner actor repay-debt +``` +NAME: + lotus-miner actor repay-debt - pay down a miner's debt + +USAGE: + lotus-miner actor repay-debt [command options] [amount (FIL)] + +OPTIONS: + --from value optionally specify the account to send funds from + --help, -h show help (default: false) + +``` + +### lotus-miner actor set-peer-id +``` +NAME: + lotus-miner actor set-peer-id - set the peer id of your miner + +USAGE: + lotus-miner actor set-peer-id [command options] [arguments...] + +OPTIONS: + --gas-limit value set gas limit (default: 0) + --help, -h show help (default: false) + +``` + +### lotus-miner actor set-owner +``` +NAME: + lotus-miner actor set-owner - Set owner address (this command should be invoked twice, first with the old owner as the senderAddress, and then with the new owner) + +USAGE: + lotus-miner actor set-owner [command options] [newOwnerAddress senderAddress] + +OPTIONS: + --really-do-it Actually send transaction performing the action (default: false) + --help, -h show help (default: false) + +``` + +### lotus-miner actor control +``` +NAME: + lotus-miner actor control - Manage control addresses + +USAGE: + lotus-miner actor control command [command options] [arguments...] + +COMMANDS: + list Get currently set control addresses + set Set control address(-es) + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +#### lotus-miner actor control list +``` +NAME: + lotus-miner actor control list - Get currently set control addresses + +USAGE: + lotus-miner actor control list [command options] [arguments...] 
+ +OPTIONS: + --verbose (default: false) + --color (default: true) + --help, -h show help (default: false) + +``` + +#### lotus-miner actor control set +``` +NAME: + lotus-miner actor control set - Set control address(-es) + +USAGE: + lotus-miner actor control set [command options] [...address] + +OPTIONS: + --really-do-it Actually send transaction performing the action (default: false) + --help, -h show help (default: false) + +``` + +### lotus-miner actor propose-change-worker +``` +NAME: + lotus-miner actor propose-change-worker - Propose a worker address change + +USAGE: + lotus-miner actor propose-change-worker [command options] [address] + +OPTIONS: + --really-do-it Actually send transaction performing the action (default: false) + --help, -h show help (default: false) + +``` + +### lotus-miner actor confirm-change-worker +``` +NAME: + lotus-miner actor confirm-change-worker - Confirm a worker address change + +USAGE: + lotus-miner actor confirm-change-worker [command options] [address] + +OPTIONS: + --really-do-it Actually send transaction performing the action (default: false) + --help, -h show help (default: false) + +``` + +## lotus-miner info +``` +NAME: + lotus-miner info - Print miner info + +USAGE: + lotus-miner info command [command options] [arguments...] + +COMMANDS: + all dump all related miner info + help, h Shows a list of commands or help for one command + +OPTIONS: + --hide-sectors-info hide sectors info (default: false) + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus-miner info all +``` +NAME: + lotus-miner info all - dump all related miner info + +USAGE: + lotus-miner info all [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +## lotus-miner auth +``` +NAME: + lotus-miner auth - Manage RPC permissions + +USAGE: + lotus-miner auth command [command options] [arguments...] + +COMMANDS: + create-token Create token + api-info Get token with API info required to connect to this node + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus-miner auth create-token +``` +NAME: + lotus-miner auth create-token - Create token + +USAGE: + lotus-miner auth create-token [command options] [arguments...] + +OPTIONS: + --perm value permission to assign to the token, one of: read, write, sign, admin + --help, -h show help (default: false) + +``` + +### lotus-miner auth api-info +``` +NAME: + lotus-miner auth api-info - Get token with API info required to connect to this node + +USAGE: + lotus-miner auth api-info [command options] [arguments...] + +OPTIONS: + --perm value permission to assign to the token, one of: read, write, sign, admin + --help, -h show help (default: false) + +``` + +## lotus-miner log +``` +NAME: + lotus-miner log - Manage logging + +USAGE: + lotus-miner log command [command options] [arguments...] + +COMMANDS: + list List log systems + set-level Set log level + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus-miner log list +``` +NAME: + lotus-miner log list - List log systems + +USAGE: + lotus-miner log list [command options] [arguments...] 
+ +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner log set-level +``` +NAME: + lotus-miner log set-level - Set log level + +USAGE: + lotus-miner log set-level [command options] [level] + +DESCRIPTION: + Set the log level for logging systems: + + The system flag can be specified multiple times. + + eg) log set-level --system chain --system chainxchg debug + + Available Levels: + debug + info + warn + error + + Environment Variables: + GOLOG_LOG_LEVEL - Default log level for all log systems + GOLOG_LOG_FMT - Change output log format (json, nocolor) + GOLOG_FILE - Write logs to file + GOLOG_OUTPUT - Specify whether to output to file, stderr, stdout or a combination, i.e. file+stderr + + +OPTIONS: + --system value limit to log system + --help, -h show help (default: false) + +``` + +## lotus-miner wait-api +``` +NAME: + lotus-miner wait-api - Wait for lotus api to come online + +USAGE: + lotus-miner wait-api [command options] [arguments...] + +CATEGORY: + DEVELOPER + +OPTIONS: + --help, -h show help (default: false) + +``` + +## lotus-miner fetch-params +``` +NAME: + lotus-miner fetch-params - Fetch proving parameters + +USAGE: + lotus-miner fetch-params [command options] [sectorSize] + +CATEGORY: + DEVELOPER + +OPTIONS: + --help, -h show help (default: false) + +``` + +## lotus-miner storage-deals +``` +NAME: + lotus-miner storage-deals - Manage storage deals and related configuration + +USAGE: + lotus-miner storage-deals command [command options] [arguments...] + +COMMANDS: + import-data Manually import data for a deal + list List all deals for this miner + selection Configure acceptance criteria for storage deal proposals + set-ask Configure the miner's ask + get-ask Print the miner's ask + set-blocklist Set the miner's list of blocklisted piece CIDs + get-blocklist List the contents of the miner's piece CID blocklist + reset-blocklist Remove all entries from the miner's piece CID blocklist + set-seal-duration Set the expected time, in minutes, that you expect sealing sectors to take. Deals that start before this duration will be rejected. + pending-publish list deals waiting in publish queue + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus-miner storage-deals import-data +``` +NAME: + lotus-miner storage-deals import-data - Manually import data for a deal + +USAGE: + lotus-miner storage-deals import-data [command options] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner storage-deals list +``` +NAME: + lotus-miner storage-deals list - List all deals for this miner + +USAGE: + lotus-miner storage-deals list [command options] [arguments...] + +OPTIONS: + --verbose, -v (default: false) + --watch watch deal updates in real-time, rather than a one time list (default: false) + --help, -h show help (default: false) + +``` + +### lotus-miner storage-deals selection +``` +NAME: + lotus-miner storage-deals selection - Configure acceptance criteria for storage deal proposals + +USAGE: + lotus-miner storage-deals selection command [command options] [arguments...] 
+ +COMMANDS: + list List storage deal proposal selection criteria + reset Reset storage deal proposal selection criteria to default values + reject Configure criteria which necessitate automatic rejection + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +#### lotus-miner storage-deals selection list +``` +NAME: + lotus-miner storage-deals selection list - List storage deal proposal selection criteria + +USAGE: + lotus-miner storage-deals selection list [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +#### lotus-miner storage-deals selection reset +``` +NAME: + lotus-miner storage-deals selection reset - Reset storage deal proposal selection criteria to default values + +USAGE: + lotus-miner storage-deals selection reset [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +#### lotus-miner storage-deals selection reject +``` +NAME: + lotus-miner storage-deals selection reject - Configure criteria which necessitate automatic rejection + +USAGE: + lotus-miner storage-deals selection reject [command options] [arguments...] + +OPTIONS: + --online (default: false) + --offline (default: false) + --verified (default: false) + --unverified (default: false) + --help, -h show help (default: false) + +``` + +### lotus-miner storage-deals set-ask +``` +NAME: + lotus-miner storage-deals set-ask - Configure the miner's ask + +USAGE: + lotus-miner storage-deals set-ask [command options] [arguments...] + +OPTIONS: + --price PRICE Set the price of the ask for unverified deals (specified as FIL / GiB / Epoch) to PRICE. + --verified-price PRICE Set the price of the ask for verified deals (specified as FIL / GiB / Epoch) to PRICE + --min-piece-size SIZE Set minimum piece size (w/bit-padding, in bytes) in ask to SIZE (default: 256B) + --max-piece-size SIZE Set maximum piece size (w/bit-padding, in bytes) in ask to SIZE (default: miner sector size) + --help, -h show help (default: false) + +``` + +### lotus-miner storage-deals get-ask +``` +NAME: + lotus-miner storage-deals get-ask - Print the miner's ask + +USAGE: + lotus-miner storage-deals get-ask [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner storage-deals set-blocklist +``` +NAME: + lotus-miner storage-deals set-blocklist - Set the miner's list of blocklisted piece CIDs + +USAGE: + lotus-miner storage-deals set-blocklist [command options] [ (optional, will read from stdin if omitted)] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner storage-deals get-blocklist +``` +NAME: + lotus-miner storage-deals get-blocklist - List the contents of the miner's piece CID blocklist + +USAGE: + lotus-miner storage-deals get-blocklist [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner storage-deals reset-blocklist +``` +NAME: + lotus-miner storage-deals reset-blocklist - Remove all entries from the miner's piece CID blocklist + +USAGE: + lotus-miner storage-deals reset-blocklist [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner storage-deals set-seal-duration +``` +NAME: + lotus-miner storage-deals set-seal-duration - Set the expected time, in minutes, that you expect sealing sectors to take. Deals that start before this duration will be rejected. 
+ +USAGE: + lotus-miner storage-deals set-seal-duration [command options] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner storage-deals pending-publish +``` +NAME: + lotus-miner storage-deals pending-publish - list deals waiting in publish queue + +USAGE: + lotus-miner storage-deals pending-publish [command options] [arguments...] + +OPTIONS: + --publish-now send a publish message now (default: false) + --help, -h show help (default: false) + +``` + +## lotus-miner retrieval-deals +``` +NAME: + lotus-miner retrieval-deals - Manage retrieval deals and related configuration + +USAGE: + lotus-miner retrieval-deals command [command options] [arguments...] + +COMMANDS: + selection Configure acceptance criteria for retrieval deal proposals + list List all active retrieval deals for this miner + set-ask Configure the provider's retrieval ask + get-ask Get the provider's current retrieval ask + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus-miner retrieval-deals selection +``` +NAME: + lotus-miner retrieval-deals selection - Configure acceptance criteria for retrieval deal proposals + +USAGE: + lotus-miner retrieval-deals selection command [command options] [arguments...] + +COMMANDS: + list List retrieval deal proposal selection criteria + reset Reset retrieval deal proposal selection criteria to default values + reject Configure criteria which necessitate automatic rejection + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +#### lotus-miner retrieval-deals selection list +``` +NAME: + lotus-miner retrieval-deals selection list - List retrieval deal proposal selection criteria + +USAGE: + lotus-miner retrieval-deals selection list [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +#### lotus-miner retrieval-deals selection reset +``` +NAME: + lotus-miner retrieval-deals selection reset - Reset retrieval deal proposal selection criteria to default values + +USAGE: + lotus-miner retrieval-deals selection reset [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +#### lotus-miner retrieval-deals selection reject +``` +NAME: + lotus-miner retrieval-deals selection reject - Configure criteria which necessitate automatic rejection + +USAGE: + lotus-miner retrieval-deals selection reject [command options] [arguments...] + +OPTIONS: + --online (default: false) + --offline (default: false) + --help, -h show help (default: false) + +``` + +### lotus-miner retrieval-deals list +``` +NAME: + lotus-miner retrieval-deals list - List all active retrieval deals for this miner + +USAGE: + lotus-miner retrieval-deals list [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner retrieval-deals set-ask +``` +NAME: + lotus-miner retrieval-deals set-ask - Configure the provider's retrieval ask + +USAGE: + lotus-miner retrieval-deals set-ask [command options] [arguments...] 
+ +OPTIONS: + --price value Set the price of the ask for retrievals (FIL/GiB) + --unseal-price value Set the price to unseal + --payment-interval value Set the payment interval (in bytes) for retrieval (default: 1MiB) + --payment-interval-increase value Set the payment interval increase (in bytes) for retrieval (default: 1MiB) + --help, -h show help (default: false) + +``` + +### lotus-miner retrieval-deals get-ask +``` +NAME: + lotus-miner retrieval-deals get-ask - Get the provider's current retrieval ask + +USAGE: + lotus-miner retrieval-deals get-ask [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +## lotus-miner data-transfers +``` +NAME: + lotus-miner data-transfers - Manage data transfers + +USAGE: + lotus-miner data-transfers command [command options] [arguments...] + +COMMANDS: + list List ongoing data transfers for this miner + restart Force restart a stalled data transfer + cancel Force cancel a data transfer + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus-miner data-transfers list +``` +NAME: + lotus-miner data-transfers list - List ongoing data transfers for this miner + +USAGE: + lotus-miner data-transfers list [command options] [arguments...] + +OPTIONS: + --verbose, -v print verbose transfer details (default: false) + --color use color in display output (default: true) + --completed show completed data transfers (default: false) + --watch watch deal updates in real-time, rather than a one time list (default: false) + --show-failed show failed/cancelled transfers (default: false) + --help, -h show help (default: false) + +``` + +### lotus-miner data-transfers restart +``` +NAME: + lotus-miner data-transfers restart - Force restart a stalled data transfer + +USAGE: + lotus-miner data-transfers restart [command options] [arguments...] + +OPTIONS: + --peerid value narrow to transfer with specific peer + --initiator specify only transfers where peer is/is not initiator (default: false) + --help, -h show help (default: false) + +``` + +### lotus-miner data-transfers cancel +``` +NAME: + lotus-miner data-transfers cancel - Force cancel a data transfer + +USAGE: + lotus-miner data-transfers cancel [command options] [arguments...] + +OPTIONS: + --peerid value narrow to transfer with specific peer + --initiator specify only transfers where peer is/is not initiator (default: false) + --cancel-timeout value time to wait for cancel to be sent to client (default: 5s) + --help, -h show help (default: false) + +``` + +## lotus-miner net +``` +NAME: + lotus-miner net - Manage P2P Network + +USAGE: + lotus-miner net command [command options] [arguments...] + +COMMANDS: + peers Print peers + connect Connect to a peer + listen List listen addresses + id Get node identity + findpeer Find the addresses of a given peerID + scores Print peers' pubsub scores + reachability Print information about reachability from the internet + bandwidth Print bandwidth usage information + block Manage network connection gating rules + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus-miner net peers +``` +NAME: + lotus-miner net peers - Print peers + +USAGE: + lotus-miner net peers [command options] [arguments...] 
+ +OPTIONS: + --agent, -a Print agent name (default: false) + --extended, -x Print extended peer information in json (default: false) + --help, -h show help (default: false) + +``` + +### lotus-miner net connect +``` +NAME: + lotus-miner net connect - Connect to a peer + +USAGE: + lotus-miner net connect [command options] [peerMultiaddr|minerActorAddress] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner net listen +``` +NAME: + lotus-miner net listen - List listen addresses + +USAGE: + lotus-miner net listen [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner net id +``` +NAME: + lotus-miner net id - Get node identity + +USAGE: + lotus-miner net id [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner net findpeer +``` +NAME: + lotus-miner net findpeer - Find the addresses of a given peerID + +USAGE: + lotus-miner net findpeer [command options] [peerId] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner net scores +``` +NAME: + lotus-miner net scores - Print peers' pubsub scores + +USAGE: + lotus-miner net scores [command options] [arguments...] + +OPTIONS: + --extended, -x print extended peer scores in json (default: false) + --help, -h show help (default: false) + +``` + +### lotus-miner net reachability +``` +NAME: + lotus-miner net reachability - Print information about reachability from the internet + +USAGE: + lotus-miner net reachability [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner net bandwidth +``` +NAME: + lotus-miner net bandwidth - Print bandwidth usage information + +USAGE: + lotus-miner net bandwidth [command options] [arguments...] + +OPTIONS: + --by-peer list bandwidth usage by peer (default: false) + --by-protocol list bandwidth usage by protocol (default: false) + --help, -h show help (default: false) + +``` + +### lotus-miner net block +``` +NAME: + lotus-miner net block - Manage network connection gating rules + +USAGE: + lotus-miner net block command [command options] [arguments...] + +COMMANDS: + add Add connection gating rules + remove Remove connection gating rules + list list connection gating rules + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +#### lotus-miner net block add +``` +NAME: + lotus-miner net block add - Add connection gating rules + +USAGE: + lotus-miner net block add command [command options] [arguments...] + +COMMANDS: + peer Block a peer + ip Block an IP address + subnet Block an IP subnet + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +##### lotus-miner net block add peer +``` +NAME: + lotus-miner net block add peer - Block a peer + +USAGE: + lotus-miner net block add peer [command options] ... + +OPTIONS: + --help, -h show help (default: false) + +``` + +##### lotus-miner net block add ip +``` +NAME: + lotus-miner net block add ip - Block an IP address + +USAGE: + lotus-miner net block add ip [command options] ... + +OPTIONS: + --help, -h show help (default: false) + +``` + +##### lotus-miner net block add subnet +``` +NAME: + lotus-miner net block add subnet - Block an IP subnet + +USAGE: + lotus-miner net block add subnet [command options] ... 
+ +OPTIONS: + --help, -h show help (default: false) + +``` + +#### lotus-miner net block remove +``` +NAME: + lotus-miner net block remove - Remove connection gating rules + +USAGE: + lotus-miner net block remove command [command options] [arguments...] + +COMMANDS: + peer Unblock a peer + ip Unblock an IP address + subnet Unblock an IP subnet + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +##### lotus-miner net block remove peer +``` +NAME: + lotus-miner net block remove peer - Unblock a peer + +USAGE: + lotus-miner net block remove peer [command options] ... + +OPTIONS: + --help, -h show help (default: false) + +``` + +##### lotus-miner net block remove ip +``` +NAME: + lotus-miner net block remove ip - Unblock an IP address + +USAGE: + lotus-miner net block remove ip [command options] ... + +OPTIONS: + --help, -h show help (default: false) + +``` + +##### lotus-miner net block remove subnet +``` +NAME: + lotus-miner net block remove subnet - Unblock an IP subnet + +USAGE: + lotus-miner net block remove subnet [command options] ... + +OPTIONS: + --help, -h show help (default: false) + +``` + +#### lotus-miner net block list +``` +NAME: + lotus-miner net block list - list connection gating rules + +USAGE: + lotus-miner net block list [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +## lotus-miner pieces +``` +NAME: + lotus-miner pieces - interact with the piecestore + +USAGE: + lotus-miner pieces command [command options] [arguments...] + +DESCRIPTION: + The piecestore is a database that tracks and manages data that is made available to the retrieval market + +COMMANDS: + list-pieces list registered pieces + list-cids list registered payload CIDs + piece-info get registered information for a given piece CID + cid-info get registered information for a given payload CID + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus-miner pieces list-pieces +``` +NAME: + lotus-miner pieces list-pieces - list registered pieces + +USAGE: + lotus-miner pieces list-pieces [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner pieces list-cids +``` +NAME: + lotus-miner pieces list-cids - list registered payload CIDs + +USAGE: + lotus-miner pieces list-cids [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner pieces piece-info +``` +NAME: + lotus-miner pieces piece-info - get registered information for a given piece CID + +USAGE: + lotus-miner pieces piece-info [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner pieces cid-info +``` +NAME: + lotus-miner pieces cid-info - get registered information for a given payload CID + +USAGE: + lotus-miner pieces cid-info [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +## lotus-miner sectors +``` +NAME: + lotus-miner sectors - interact with sector store + +USAGE: + lotus-miner sectors command [command options] [arguments...] 
+ +COMMANDS: + status Get the seal status of a sector by its number + list List sectors + refs List References to sectors + update-state ADVANCED: manually update the state of a sector, this may aid in error recovery + pledge store random data in a sector + extend Extend sector expiration + terminate Terminate sector on-chain then remove (WARNING: This means losing power and collateral for the removed sector) + remove Forcefully remove a sector (WARNING: This means losing power and collateral for the removed sector (use 'terminate' for lower penalty)) + mark-for-upgrade Mark a committed capacity sector for replacement by a sector with deals + seal Manually start sealing a sector (filling any unused space with junk) + set-seal-delay Set the time, in minutes, that a new sector waits for deals before sealing starts + get-cc-collateral Get the collateral required to pledge a committed capacity sector + batching manage batch sector operations + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus-miner sectors status +``` +NAME: + lotus-miner sectors status - Get the seal status of a sector by its number + +USAGE: + lotus-miner sectors status [command options] + +OPTIONS: + --log display event log (default: false) + --on-chain-info show sector on chain info (default: false) + --help, -h show help (default: false) + +``` + +### lotus-miner sectors list +``` +NAME: + lotus-miner sectors list - List sectors + +USAGE: + lotus-miner sectors list [command options] [arguments...] + +OPTIONS: + --show-removed show removed sectors (default: false) + --color, -c (default: true) + --fast don't show on-chain info for better performance (default: false) + --events display number of events the sector has received (default: false) + --seal-time display how long it took for the sector to be sealed (default: false) + --states value filter sectors by a comma-separated list of states + --help, -h show help (default: false) + +``` + +### lotus-miner sectors refs +``` +NAME: + lotus-miner sectors refs - List References to sectors + +USAGE: + lotus-miner sectors refs [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner sectors update-state +``` +NAME: + lotus-miner sectors update-state - ADVANCED: manually update the state of a sector, this may aid in error recovery + +USAGE: + lotus-miner sectors update-state [command options] + +OPTIONS: + --really-do-it pass this flag if you know what you are doing (default: false) + --help, -h show help (default: false) + +``` + +### lotus-miner sectors pledge +``` +NAME: + lotus-miner sectors pledge - store random data in a sector + +USAGE: + lotus-miner sectors pledge [command options] [arguments...] 
+ +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner sectors extend +``` +NAME: + lotus-miner sectors extend - Extend sector expiration + +USAGE: + lotus-miner sectors extend [command options] + +OPTIONS: + --new-expiration value new expiration epoch (default: 0) + --v1-sectors renews all v1 sectors up to the maximum possible lifetime (default: false) + --tolerance value when extending v1 sectors, don't try to extend sectors by fewer than this number of epochs (default: 20160) + --expiration-cutoff value when extending v1 sectors, skip sectors whose current expiration is more than epochs from now (infinity if unspecified) (default: 0) + + --help, -h show help (default: false) + +``` + +### lotus-miner sectors terminate +``` +NAME: + lotus-miner sectors terminate - Terminate sector on-chain then remove (WARNING: This means losing power and collateral for the removed sector) + +USAGE: + lotus-miner sectors terminate command [command options] + +COMMANDS: + flush Send a terminate message if there are sectors queued for termination + pending List sector numbers of sectors pending termination + help, h Shows a list of commands or help for one command + +OPTIONS: + --really-do-it pass this flag if you know what you are doing (default: false) + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +#### lotus-miner sectors terminate flush +``` +NAME: + lotus-miner sectors terminate flush - Send a terminate message if there are sectors queued for termination + +USAGE: + lotus-miner sectors terminate flush [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +#### lotus-miner sectors terminate pending +``` +NAME: + lotus-miner sectors terminate pending - List sector numbers of sectors pending termination + +USAGE: + lotus-miner sectors terminate pending [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner sectors remove +``` +NAME: + lotus-miner sectors remove - Forcefully remove a sector (WARNING: This means losing power and collateral for the removed sector (use 'terminate' for lower penalty)) + +USAGE: + lotus-miner sectors remove [command options] + +OPTIONS: + --really-do-it pass this flag if you know what you are doing (default: false) + --help, -h show help (default: false) + +``` + +### lotus-miner sectors mark-for-upgrade +``` +NAME: + lotus-miner sectors mark-for-upgrade - Mark a committed capacity sector for replacement by a sector with deals + +USAGE: + lotus-miner sectors mark-for-upgrade [command options] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner sectors seal +``` +NAME: + lotus-miner sectors seal - Manually start sealing a sector (filling any unused space with junk) + +USAGE: + lotus-miner sectors seal [command options] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner sectors set-seal-delay +``` +NAME: + lotus-miner sectors set-seal-delay - Set the time, in minutes, that a new sector waits for deals before sealing starts + +USAGE: + lotus-miner sectors set-seal-delay [command options] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner sectors get-cc-collateral +``` +NAME: + lotus-miner sectors get-cc-collateral - Get the collateral required to pledge a committed capacity sector + +USAGE: + lotus-miner sectors get-cc-collateral [command options] [arguments...] 
+ +OPTIONS: + --expiration value the epoch when the sector will expire (default: 0) + --help, -h show help (default: false) + +``` + +### lotus-miner sectors batching +``` +NAME: + lotus-miner sectors batching - manage batch sector operations + +USAGE: + lotus-miner sectors batching command [command options] [arguments...] + +COMMANDS: + pending-commit list sectors waiting in commit batch queue + pending-precommit list sectors waiting in precommit batch queue + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +#### lotus-miner sectors batching pending-commit +``` +NAME: + lotus-miner sectors batching pending-commit - list sectors waiting in commit batch queue + +USAGE: + lotus-miner sectors batching pending-commit [command options] [arguments...] + +OPTIONS: + --publish-now send a batch now (default: false) + --help, -h show help (default: false) + +``` + +#### lotus-miner sectors batching pending-precommit +``` +NAME: + lotus-miner sectors batching pending-precommit - list sectors waiting in precommit batch queue + +USAGE: + lotus-miner sectors batching pending-precommit [command options] [arguments...] + +OPTIONS: + --publish-now send a batch now (default: false) + --help, -h show help (default: false) + +``` + +## lotus-miner proving +``` +NAME: + lotus-miner proving - View proving information + +USAGE: + lotus-miner proving command [command options] [arguments...] + +COMMANDS: + info View current state information + deadlines View the current proving period deadlines information + deadline View the current proving period deadline information by its index + faults View the currently known proving faulty sectors information + check Check sectors provable + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus-miner proving info +``` +NAME: + lotus-miner proving info - View current state information + +USAGE: + lotus-miner proving info [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner proving deadlines +``` +NAME: + lotus-miner proving deadlines - View the current proving period deadlines information + +USAGE: + lotus-miner proving deadlines [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner proving deadline +``` +NAME: + lotus-miner proving deadline - View the current proving period deadline information by its index + +USAGE: + lotus-miner proving deadline [command options] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner proving faults +``` +NAME: + lotus-miner proving faults - View the currently known proving faulty sectors information + +USAGE: + lotus-miner proving faults [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner proving check +``` +NAME: + lotus-miner proving check - Check sectors provable + +USAGE: + lotus-miner proving check [command options] + +OPTIONS: + --only-bad print only bad sectors (default: false) + --slow run slower checks (default: false) + --help, -h show help (default: false) + +``` + +## lotus-miner storage +``` +NAME: + lotus-miner storage - manage sector storage + +USAGE: + lotus-miner storage command [command options] [arguments...] + +DESCRIPTION: + Sectors can be stored across many filesystem paths. 
These +commands provide ways to manage the storage the miner will use to store sectors +long term for proving (referenced as 'store') as well as how sectors will be +stored while moving through the sealing pipeline (referenced as 'seal'). + +COMMANDS: + attach attach local storage path + list list local storage paths + find find sector in the storage system + cleanup trigger cleanup actions + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus-miner storage attach +``` +NAME: + lotus-miner storage attach - attach local storage path + +USAGE: + lotus-miner storage attach [command options] [arguments...] + +DESCRIPTION: + Storage can be attached to the miner using this command. The storage volume +list is stored local to the miner in $LOTUS_MINER_PATH/storage.json. We do not +recommend manually modifying this value without further understanding of the +storage system. + +Each storage volume contains a configuration file which describes the +capabilities of the volume. When the '--init' flag is provided, this file will +be created using the additional flags. + +Weight +A high weight value means data will be more likely to be stored in this path + +Seal +Data for the sealing process will be stored here + +Store +Finalized sectors that will be moved here for long term storage and be proven +over time + + +OPTIONS: + --init initialize the path first (default: false) + --weight value (for init) path weight (default: 10) + --seal (for init) use path for sealing (default: false) + --store (for init) use path for long-term storage (default: false) + --max-storage value (for init) limit storage space for sectors (expensive for very large paths!) + --help, -h show help (default: false) + +``` + +### lotus-miner storage list +``` +NAME: + lotus-miner storage list - list local storage paths + +USAGE: + lotus-miner storage list command [command options] [arguments...] + +COMMANDS: + sectors get list of all sector files + help, h Shows a list of commands or help for one command + +OPTIONS: + --color (default: false) + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +#### lotus-miner storage list sectors +``` +NAME: + lotus-miner storage list sectors - get list of all sector files + +USAGE: + lotus-miner storage list sectors [command options] [arguments...] + +OPTIONS: + --color (default: true) + --help, -h show help (default: false) + +``` + +### lotus-miner storage find +``` +NAME: + lotus-miner storage find - find sector in the storage system + +USAGE: + lotus-miner storage find [command options] [sector number] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner storage cleanup +``` +NAME: + lotus-miner storage cleanup - trigger cleanup actions + +USAGE: + lotus-miner storage cleanup [command options] [arguments...] + +OPTIONS: + --removed cleanup remaining files from removed sectors (default: true) + --help, -h show help (default: false) + +``` + +## lotus-miner sealing +``` +NAME: + lotus-miner sealing - interact with sealing pipeline + +USAGE: + lotus-miner sealing command [command options] [arguments...]
+ +COMMANDS: + jobs list running jobs + workers list workers + sched-diag Dump internal scheduler state + abort Abort a running job + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus-miner sealing jobs +``` +NAME: + lotus-miner sealing jobs - list running jobs + +USAGE: + lotus-miner sealing jobs [command options] [arguments...] + +OPTIONS: + --color (default: false) + --show-ret-done show returned but not consumed calls (default: false) + --help, -h show help (default: false) + +``` + +### lotus-miner sealing workers +``` +NAME: + lotus-miner sealing workers - list workers + +USAGE: + lotus-miner sealing workers [command options] [arguments...] + +OPTIONS: + --color (default: false) + --help, -h show help (default: false) + +``` + +### lotus-miner sealing sched-diag +``` +NAME: + lotus-miner sealing sched-diag - Dump internal scheduler state + +USAGE: + lotus-miner sealing sched-diag [command options] [arguments...] + +OPTIONS: + --force-sched (default: false) + --help, -h show help (default: false) + +``` + +### lotus-miner sealing abort +``` +NAME: + lotus-miner sealing abort - Abort a running job + +USAGE: + lotus-miner sealing abort [command options] [callid] + +OPTIONS: + --help, -h show help (default: false) + +``` diff --git a/documentation/misc/actors_version_checklist.md b/documentation/misc/actors_version_checklist.md index 6c06441ab..7931acbcc 100644 --- a/documentation/misc/actors_version_checklist.md +++ b/documentation/misc/actors_version_checklist.md @@ -8,7 +8,7 @@ - [ ] Register in `chain/vm/invoker.go` - [ ] Register in `chain/vm/mkactor.go` - [ ] Update `chain/types/state.go` -- [ ] Update `chain/state/statetree.go` +- [ ] Update `chain/state/statetree.go` (New / Load) - [ ] Update `chain/stmgr/forks.go` - [ ] Schedule - [ ] Migration diff --git a/extern/filecoin-ffi b/extern/filecoin-ffi index dc4e4e8dc..58771ba4d 160000 --- a/extern/filecoin-ffi +++ b/extern/filecoin-ffi @@ -1 +1 @@ -Subproject commit dc4e4e8dc9554dedb6f48304f7f0c6328331f9ec +Subproject commit 58771ba4d942badc306925160a945022ad335161 diff --git a/extern/sector-storage/ffiwrapper/prover_cgo.go b/extern/sector-storage/ffiwrapper/prover_cgo.go new file mode 100644 index 000000000..3ad73c81c --- /dev/null +++ b/extern/sector-storage/ffiwrapper/prover_cgo.go @@ -0,0 +1,18 @@ +//+build cgo + +package ffiwrapper + +import ( + ffi "github.com/filecoin-project/filecoin-ffi" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" +) + +var ProofProver = proofProver{} + +var _ Prover = ProofProver + +type proofProver struct{} + +func (v proofProver) AggregateSealProofs(aggregateInfo proof5.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) { + return ffi.AggregateSealProofs(aggregateInfo, proofs) +} diff --git a/extern/sector-storage/ffiwrapper/sealer_test.go b/extern/sector-storage/ffiwrapper/sealer_test.go index 2efcfc6a0..df657f097 100644 --- a/extern/sector-storage/ffiwrapper/sealer_test.go +++ b/extern/sector-storage/ffiwrapper/sealer_test.go @@ -18,6 +18,7 @@ import ( commpffi "github.com/filecoin-project/go-commp-utils/ffiwrapper" proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" "github.com/ipfs/go-cid" @@ -31,6 +32,7 @@ import ( ffi "github.com/filecoin-project/filecoin-ffi" + "github.com/filecoin-project/lotus/chain/actors/policy" 
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper/basicfs" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" "github.com/filecoin-project/lotus/extern/storage-sealing/lib/nullreader" @@ -83,9 +85,10 @@ func (s *seal) precommit(t *testing.T, sb *Sealer, id storage.SectorRef, done fu s.cids = cids } -func (s *seal) commit(t *testing.T, sb *Sealer, done func()) { +var seed = abi.InteractiveSealRandomness{0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9, 8, 7, 6, 45, 3, 2, 1, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9} + +func (s *seal) commit(t *testing.T, sb *Sealer, done func()) storage.Proof { defer done() - seed := abi.InteractiveSealRandomness{0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9, 8, 7, 6, 45, 3, 2, 1, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9} pc1, err := sb.SealCommit1(context.TODO(), s.ref, s.ticket, seed, []abi.PieceInfo{s.pi}, s.cids) if err != nil { @@ -112,6 +115,8 @@ func (s *seal) commit(t *testing.T, sb *Sealer, done func()) { if !ok { t.Fatal("proof failed to validate") } + + return proof } func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si storage.SectorRef, done func()) { @@ -229,7 +234,12 @@ func getGrothParamFileAndVerifyingKeys(s abi.SectorSize) { panic(err) } - err = paramfetch.GetParams(context.TODO(), dat, uint64(s)) + datSrs, err := ioutil.ReadFile("../../../build/proof-params/srs-inner-product.json") + if err != nil { + panic(err) + } + + err = paramfetch.GetParams(context.TODO(), dat, datSrs, uint64(s)) if err != nil { panic(xerrors.Errorf("failed to acquire Groth parameters for 2KiB sectors: %w", err)) } @@ -462,6 +472,97 @@ func TestSealAndVerify3(t *testing.T) { post(t, sb, []abi.SectorID{si1.ID, si2.ID}, s1, s2, s3) } +func TestSealAndVerifyAggregate(t *testing.T) { + numAgg := 5 + + if testing.Short() { + t.Skip("skipping test in short mode") + } + + defer requireFDsClosed(t, openFDs(t)) + + if runtime.NumCPU() < 10 && os.Getenv("CI") == "" { // don't bother on slow hardware + t.Skip("this is slow") + } + _ = os.Setenv("RUST_LOG", "info") + + getGrothParamFileAndVerifyingKeys(sectorSize) + + cdir, err := ioutil.TempDir("", "sbtest-c-") + if err != nil { + t.Fatal(err) + } + miner := abi.ActorID(123) + + sp := &basicfs.Provider{ + Root: cdir, + } + sb, err := New(sp) + if err != nil { + t.Fatalf("%+v", err) + } + cleanup := func() { + if t.Failed() { + fmt.Printf("not removing %s\n", cdir) + return + } + if err := os.RemoveAll(cdir); err != nil { + t.Error(err) + } + } + defer cleanup() + + avi := proof5.AggregateSealVerifyProofAndInfos{ + Miner: miner, + SealProof: sealProofType, + AggregateProof: policy.GetDefaultAggregationProof(), + Proof: nil, + Infos: make([]proof5.AggregateSealVerifyInfo, numAgg), + } + + toAggregate := make([][]byte, numAgg) + for i := 0; i < numAgg; i++ { + si := storage.SectorRef{ + ID: abi.SectorID{Miner: miner, Number: abi.SectorNumber(i + 1)}, + ProofType: sealProofType, + } + + s := seal{ref: si} + s.precommit(t, sb, si, func() {}) + toAggregate[i] = s.commit(t, sb, func() {}) + + avi.Infos[i] = proof5.AggregateSealVerifyInfo{ + Number: abi.SectorNumber(i + 1), + Randomness: s.ticket, + InteractiveRandomness: seed, + SealedCID: s.cids.Sealed, + UnsealedCID: s.cids.Unsealed, + } + } + + aggStart := time.Now() + + avi.Proof, err = ProofProver.AggregateSealProofs(avi, toAggregate) + require.NoError(t, err) + + aggDone := time.Now() + + _, err = ProofProver.AggregateSealProofs(avi, toAggregate) + require.NoError(t, err) + + aggHot := time.Now() + + ok, err := ProofVerifier.VerifyAggregateSeals(avi) + 
require.NoError(t, err) + require.True(t, ok) + + verifDone := time.Now() + + fmt.Printf("Aggregate: %s\n", aggDone.Sub(aggStart).String()) + fmt.Printf("Hot: %s\n", aggHot.Sub(aggDone).String()) + fmt.Printf("Verify: %s\n", verifDone.Sub(aggHot).String()) +} + func BenchmarkWriteWithAlignment(b *testing.B) { bt := abi.UnpaddedPieceSize(2 * 127 * 1024 * 1024) b.SetBytes(int64(bt)) diff --git a/extern/sector-storage/ffiwrapper/types.go b/extern/sector-storage/ffiwrapper/types.go index b7e96636a..a5b2fdf1f 100644 --- a/extern/sector-storage/ffiwrapper/types.go +++ b/extern/sector-storage/ffiwrapper/types.go @@ -4,7 +4,7 @@ import ( "context" "io" - proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" "github.com/ipfs/go-cid" @@ -34,13 +34,21 @@ type Storage interface { } type Verifier interface { - VerifySeal(proof2.SealVerifyInfo) (bool, error) - VerifyWinningPoSt(ctx context.Context, info proof2.WinningPoStVerifyInfo) (bool, error) - VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStVerifyInfo) (bool, error) + VerifySeal(proof5.SealVerifyInfo) (bool, error) + VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) + VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) + VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) GenerateWinningPoStSectorChallenge(context.Context, abi.RegisteredPoStProof, abi.ActorID, abi.PoStRandomness, uint64) ([]uint64, error) } +// Prover contains cheap proving-related methods +type Prover interface { + // TODO: move GenerateWinningPoStSectorChallenge from the Verifier interface to here + + AggregateSealProofs(aggregateInfo proof5.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) +} + type SectorProvider interface { // * returns storiface.ErrSectorNotFound if a requested existing sector doesn't exist // * returns an error when allocate is set, and existing isn't, and the sector exists diff --git a/extern/sector-storage/ffiwrapper/verifier_cgo.go b/extern/sector-storage/ffiwrapper/verifier_cgo.go index 15e0e6ab3..95724bb7c 100644 --- a/extern/sector-storage/ffiwrapper/verifier_cgo.go +++ b/extern/sector-storage/ffiwrapper/verifier_cgo.go @@ -10,13 +10,13 @@ import ( ffi "github.com/filecoin-project/filecoin-ffi" "github.com/filecoin-project/go-state-types/abi" - proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) -func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof2.SectorInfo, randomness abi.PoStRandomness) ([]proof2.PoStProof, error) { +func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) ([]proof5.PoStProof, error) { randomness[31] &= 0x3f privsectors, skipped, done, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWinningPoStProof) // TODO: FAULTS? 
if err != nil { @@ -30,7 +30,7 @@ func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, return ffi.GenerateWinningPoSt(minerID, privsectors, randomness) } -func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof2.SectorInfo, randomness abi.PoStRandomness) ([]proof2.PoStProof, []abi.SectorID, error) { +func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) ([]proof5.PoStProof, []abi.SectorID, error) { randomness[31] &= 0x3f privsectors, skipped, done, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWindowPoStProof) if err != nil { @@ -55,7 +55,7 @@ func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, s return proof, faultyIDs, err } -func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorInfo []proof2.SectorInfo, faults []abi.SectorNumber, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error)) (ffi.SortedPrivateSectorInfo, []abi.SectorID, func(), error) { +func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorInfo []proof5.SectorInfo, faults []abi.SectorNumber, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error)) (ffi.SortedPrivateSectorInfo, []abi.SectorID, func(), error) { fmap := map[abi.SectorNumber]struct{}{} for _, fault := range faults { fmap[fault] = struct{}{} @@ -111,11 +111,15 @@ type proofVerifier struct{} var ProofVerifier = proofVerifier{} -func (proofVerifier) VerifySeal(info proof2.SealVerifyInfo) (bool, error) { +func (proofVerifier) VerifySeal(info proof5.SealVerifyInfo) (bool, error) { return ffi.VerifySeal(info) } -func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info proof2.WinningPoStVerifyInfo) (bool, error) { +func (proofVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) { + return ffi.VerifyAggregateSeals(aggregate) +} + +func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) { info.Randomness[31] &= 0x3f _, span := trace.StartSpan(ctx, "VerifyWinningPoSt") defer span.End() @@ -123,7 +127,7 @@ func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info proof2.WinningP return ffi.VerifyWinningPoSt(info) } -func (proofVerifier) VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStVerifyInfo) (bool, error) { +func (proofVerifier) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) { info.Randomness[31] &= 0x3f _, span := trace.StartSpan(ctx, "VerifyWindowPoSt") defer span.End() diff --git a/extern/sector-storage/mock/mock.go b/extern/sector-storage/mock/mock.go index ae7d54985..52496f836 100644 --- a/extern/sector-storage/mock/mock.go +++ b/extern/sector-storage/mock/mock.go @@ -9,7 +9,7 @@ import ( "math/rand" "sync" - proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" ffiwrapper2 "github.com/filecoin-project/go-commp-utils/ffiwrapper" commcid "github.com/filecoin-project/go-fil-commcid" @@ -34,7 +34,7 @@ type SectorMgr struct { lk sync.Mutex } -type mockVerif struct{} +type mockVerifProver struct{} func NewMockSectorMgr(genesisSectors []abi.SectorID) *SectorMgr { sectors := make(map[abi.SectorID]*sectorState) @@ -300,14 +300,14 @@ func AddOpFinish(ctx context.Context) (context.Context, func()) { } } -func (mgr *SectorMgr) 
GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof2.SectorInfo, randomness abi.PoStRandomness) ([]proof2.PoStProof, error) { +func (mgr *SectorMgr) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) ([]proof5.PoStProof, error) { mgr.lk.Lock() defer mgr.lk.Unlock() return generateFakePoSt(sectorInfo, abi.RegisteredSealProof.RegisteredWinningPoStProof, randomness), nil } -func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof2.SectorInfo, randomness abi.PoStRandomness) ([]proof2.PoStProof, []abi.SectorID, error) { +func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) ([]proof5.PoStProof, []abi.SectorID, error) { mgr.lk.Lock() defer mgr.lk.Unlock() @@ -315,7 +315,8 @@ func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorI return nil, nil, xerrors.Errorf("failed to post (mock)") } - si := make([]proof2.SectorInfo, 0, len(sectorInfo)) + si := make([]proof5.SectorInfo, 0, len(sectorInfo)) + var skipped []abi.SectorID var err error @@ -343,7 +344,7 @@ func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorI return generateFakePoSt(si, abi.RegisteredSealProof.RegisteredWindowPoStProof, randomness), skipped, nil } -func generateFakePoStProof(sectorInfo []proof2.SectorInfo, randomness abi.PoStRandomness) []byte { +func generateFakePoStProof(sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) []byte { randomness[31] &= 0x3f hasher := sha256.New() @@ -358,13 +359,13 @@ func generateFakePoStProof(sectorInfo []proof2.SectorInfo, randomness abi.PoStRa } -func generateFakePoSt(sectorInfo []proof2.SectorInfo, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error), randomness abi.PoStRandomness) []proof2.PoStProof { +func generateFakePoSt(sectorInfo []proof5.SectorInfo, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error), randomness abi.PoStRandomness) []proof5.PoStProof { wp, err := rpt(sectorInfo[0].SealProof) if err != nil { panic(err) } - return []proof2.PoStProof{ + return []proof5.PoStProof{ { PoStProof: wp, ProofBytes: generateFakePoStProof(sectorInfo, randomness), @@ -489,7 +490,7 @@ func (mgr *SectorMgr) ReturnFetch(ctx context.Context, callID storiface.CallID, panic("not supported") } -func (m mockVerif) VerifySeal(svi proof2.SealVerifyInfo) (bool, error) { +func (m mockVerifProver) VerifySeal(svi proof5.SealVerifyInfo) (bool, error) { plen, err := svi.SealProof.ProofSize() if err != nil { return false, err @@ -501,6 +502,7 @@ func (m mockVerif) VerifySeal(svi proof2.SealVerifyInfo) (bool, error) { // only the first 32 bytes, the rest are 0. 
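// Each mock proof byte i is expected to equal unsealedCID[i] + sealedCID[31-i] - seed[i]*ticket[i]; the aggregation helpers below reuse these bytes, scaling each sector's contribution by its index in the aggregate and summing them.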
for i, b := range svi.Proof[:32] { + // unsealed+sealed-seed*ticket if b != svi.UnsealedCID.Bytes()[i]+svi.SealedCID.Bytes()[31-i]-svi.InteractiveRandomness[i]*svi.Randomness[i] { return false, nil } @@ -509,12 +511,66 @@ func (m mockVerif) VerifySeal(svi proof2.SealVerifyInfo) (bool, error) { return true, nil } -func (m mockVerif) VerifyWinningPoSt(ctx context.Context, info proof2.WinningPoStVerifyInfo) (bool, error) { +func (m mockVerifProver) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) { + out := make([]byte, m.aggLen(len(aggregate.Infos))) + for pi, svi := range aggregate.Infos { + for i := 0; i < 32; i++ { + b := svi.UnsealedCID.Bytes()[i] + svi.SealedCID.Bytes()[31-i] - svi.InteractiveRandomness[i]*svi.Randomness[i] // raw proof byte + + b *= uint8(pi) // with aggregate index + out[i] += b + } + } + + return bytes.Equal(aggregate.Proof, out), nil +} + +func (m mockVerifProver) AggregateSealProofs(aggregateInfo proof5.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) { + out := make([]byte, m.aggLen(len(aggregateInfo.Infos))) // todo: figure out more real length + for pi, proof := range proofs { + for i := range proof[:32] { + out[i] += proof[i] * uint8(pi) + } + } + + return out, nil +} + +func (m mockVerifProver) aggLen(nproofs int) int { + switch { + case nproofs <= 8: + return 11220 + case nproofs <= 16: + return 14196 + case nproofs <= 32: + return 17172 + case nproofs <= 64: + return 20148 + case nproofs <= 128: + return 23124 + case nproofs <= 256: + return 26100 + case nproofs <= 512: + return 29076 + case nproofs <= 1024: + return 32052 + case nproofs <= 2048: + return 35028 + case nproofs <= 4096: + return 38004 + case nproofs <= 8192: + return 40980 + default: + panic("too many proofs") + } +} + +func (m mockVerifProver) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) { info.Randomness[31] &= 0x3f return true, nil } -func (m mockVerif) VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStVerifyInfo) (bool, error) { +func (m mockVerifProver) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) { if len(info.Proofs) != 1 { return false, xerrors.Errorf("expected 1 proof entry") } @@ -528,15 +584,17 @@ func (m mockVerif) VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStV return true, nil } -func (m mockVerif) GenerateDataCommitment(pt abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) { +func (m mockVerifProver) GenerateDataCommitment(pt abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) { return ffiwrapper.GenerateUnsealedCID(pt, pieces) } -func (m mockVerif) GenerateWinningPoStSectorChallenge(ctx context.Context, proofType abi.RegisteredPoStProof, minerID abi.ActorID, randomness abi.PoStRandomness, eligibleSectorCount uint64) ([]uint64, error) { +func (m mockVerifProver) GenerateWinningPoStSectorChallenge(ctx context.Context, proofType abi.RegisteredPoStProof, minerID abi.ActorID, randomness abi.PoStRandomness, eligibleSectorCount uint64) ([]uint64, error) { return []uint64{0}, nil } -var MockVerifier = mockVerif{} +var MockVerifier = mockVerifProver{} +var MockProver = mockVerifProver{} var _ storage.Sealer = &SectorMgr{} var _ ffiwrapper.Verifier = MockVerifier +var _ ffiwrapper.Prover = MockProver diff --git a/extern/storage-sealing/commit_batch.go b/extern/storage-sealing/commit_batch.go new file mode 100644 index 000000000..845400ccf --- /dev/null +++ b/extern/storage-sealing/commit_batch.go @@ 
-0,0 +1,366 @@ +package sealing + +import ( + "bytes" + "context" + "sort" + "sync" + "time" + + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" +) + +const arp = abi.RegisteredAggregationProof_SnarkPackV1 + +type CommitBatcherApi interface { + SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error) + StateMinerInfo(context.Context, address.Address, TipSetToken) (miner.MinerInfo, error) + ChainHead(ctx context.Context) (TipSetToken, abi.ChainEpoch, error) +} + +type AggregateInput struct { + spt abi.RegisteredSealProof + info proof5.AggregateSealVerifyInfo + proof []byte +} + +type CommitBatcher struct { + api CommitBatcherApi + maddr address.Address + mctx context.Context + addrSel AddrSel + feeCfg FeeConfig + getConfig GetSealingConfigFunc + prover ffiwrapper.Prover + + deadlines map[abi.SectorNumber]time.Time + todo map[abi.SectorNumber]AggregateInput + waiting map[abi.SectorNumber][]chan cid.Cid + + notify, stop, stopped chan struct{} + force chan chan *cid.Cid + lk sync.Mutex +} + +func NewCommitBatcher(mctx context.Context, maddr address.Address, api CommitBatcherApi, addrSel AddrSel, feeCfg FeeConfig, getConfig GetSealingConfigFunc, prov ffiwrapper.Prover) *CommitBatcher { + b := &CommitBatcher{ + api: api, + maddr: maddr, + mctx: mctx, + addrSel: addrSel, + feeCfg: feeCfg, + getConfig: getConfig, + prover: prov, + + deadlines: map[abi.SectorNumber]time.Time{}, + todo: map[abi.SectorNumber]AggregateInput{}, + waiting: map[abi.SectorNumber][]chan cid.Cid{}, + + notify: make(chan struct{}, 1), + force: make(chan chan *cid.Cid), + stop: make(chan struct{}), + stopped: make(chan struct{}), + } + + go b.run() + + return b +} + +func (b *CommitBatcher) run() { + var forceRes chan *cid.Cid + var lastMsg *cid.Cid + + cfg, err := b.getConfig() + if err != nil { + panic(err) + } + + for { + if forceRes != nil { + forceRes <- lastMsg + forceRes = nil + } + lastMsg = nil + + var sendAboveMax, sendAboveMin bool + select { + case <-b.stop: + close(b.stopped) + return + case <-b.notify: + sendAboveMax = true + case <-b.batchWait(cfg.CommitBatchWait, cfg.CommitBatchSlack): + sendAboveMin = true + case fr := <-b.force: // user triggered + forceRes = fr + } + + var err error + lastMsg, err = b.processBatch(sendAboveMax, sendAboveMin) + if err != nil { + log.Warnw("CommitBatcher processBatch error", "error", err) + } + } +} + +func (b *CommitBatcher) batchWait(maxWait, slack time.Duration) <-chan time.Time { + now := time.Now() + + b.lk.Lock() + defer b.lk.Unlock() + + if len(b.todo) == 0 { + return nil + } + + var deadline time.Time + for sn := range b.todo { + sectorDeadline := b.deadlines[sn] + if deadline.IsZero() || (!sectorDeadline.IsZero() && sectorDeadline.Before(deadline)) { + deadline = sectorDeadline + } + } + for sn := range b.waiting { + sectorDeadline := b.deadlines[sn] + if 
deadline.IsZero() || (!sectorDeadline.IsZero() && sectorDeadline.Before(deadline)) { + deadline = sectorDeadline + } + } + + if deadline.IsZero() { + return time.After(maxWait) + } + + deadline = deadline.Add(-slack) + if deadline.Before(now) { + return time.After(time.Nanosecond) // can't return 0 + } + + wait := deadline.Sub(now) + if wait > maxWait { + wait = maxWait + } + + return time.After(wait) +} + +func (b *CommitBatcher) processBatch(notif, after bool) (*cid.Cid, error) { + b.lk.Lock() + defer b.lk.Unlock() + params := miner5.ProveCommitAggregateParams{ + SectorNumbers: bitfield.New(), + } + + total := len(b.todo) + if total == 0 { + return nil, nil // nothing to do + } + + cfg, err := b.getConfig() + if err != nil { + return nil, xerrors.Errorf("getting config: %w", err) + } + + if notif && total < cfg.MaxCommitBatch { + return nil, nil + } + + if after && total < cfg.MinCommitBatch { + return nil, nil + } + + proofs := make([][]byte, 0, total) + infos := make([]proof5.AggregateSealVerifyInfo, 0, total) + + for id, p := range b.todo { + if len(infos) >= cfg.MaxCommitBatch { + log.Infow("commit batch full") + break + } + + params.SectorNumbers.Set(uint64(id)) + infos = append(infos, p.info) + } + + sort.Slice(infos, func(i, j int) bool { + return infos[i].Number < infos[j].Number + }) + + for _, info := range infos { + proofs = append(proofs, b.todo[info.Number].proof) + } + + mid, err := address.IDFromAddress(b.maddr) + if err != nil { + return nil, xerrors.Errorf("getting miner id: %w", err) + } + + params.AggregateProof, err = b.prover.AggregateSealProofs(proof5.AggregateSealVerifyProofAndInfos{ + Miner: abi.ActorID(mid), + SealProof: b.todo[infos[0].Number].spt, + AggregateProof: arp, + Infos: infos, + }, proofs) + if err != nil { + return nil, xerrors.Errorf("aggregating proofs: %w", err) + } + + enc := new(bytes.Buffer) + if err := params.MarshalCBOR(enc); err != nil { + return nil, xerrors.Errorf("couldn't serialize ProveCommitAggregateParams: %w", err) + } + + mi, err := b.api.StateMinerInfo(b.mctx, b.maddr, nil) + if err != nil { + return nil, xerrors.Errorf("couldn't get miner info: %w", err) + } + + from, _, err := b.addrSel(b.mctx, mi, api.CommitAddr, b.feeCfg.MaxCommitGasFee, b.feeCfg.MaxCommitGasFee) + if err != nil { + return nil, xerrors.Errorf("no good address found: %w", err) + } + + mcid, err := b.api.SendMsg(b.mctx, from, b.maddr, miner.Methods.ProveCommitAggregate, big.Zero(), b.feeCfg.MaxCommitGasFee, enc.Bytes()) + if err != nil { + return nil, xerrors.Errorf("sending message failed: %w", err) + } + + log.Infow("Sent ProveCommitAggregate message", "cid", mcid, "from", from, "todo", total, "sectors", len(infos)) + + err = params.SectorNumbers.ForEach(func(us uint64) error { + sn := abi.SectorNumber(us) + + for _, ch := range b.waiting[sn] { + ch <- mcid // buffered + } + delete(b.waiting, sn) + delete(b.todo, sn) + delete(b.deadlines, sn) + return nil + }) + if err != nil { + return nil, xerrors.Errorf("done sectors foreach: %w", err) + } + + return &mcid, nil +} + +// register commit, wait for batch message, return message CID +func (b *CommitBatcher) AddCommit(ctx context.Context, s SectorInfo, in AggregateInput) (mcid cid.Cid, err error) { + _, curEpoch, err := b.api.ChainHead(b.mctx) + if err != nil { + log.Errorf("getting chain head: %s", err) + return cid.Undef, nil + } + + sn := s.SectorNumber + + b.lk.Lock() + b.deadlines[sn] = getSectorDeadline(curEpoch, s) + b.todo[sn] = in + + sent := make(chan cid.Cid, 1) + b.waiting[sn] = append(b.waiting[sn], 
sent) + + select { + case b.notify <- struct{}{}: + default: // already have a pending notification, don't need more + } + b.lk.Unlock() + + select { + case c := <-sent: + return c, nil + case <-ctx.Done(): + return cid.Undef, ctx.Err() + } +} + +func (b *CommitBatcher) Flush(ctx context.Context) (*cid.Cid, error) { + resCh := make(chan *cid.Cid, 1) + select { + case b.force <- resCh: + select { + case res := <-resCh: + return res, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + case <-ctx.Done(): + return nil, ctx.Err() + } +} + +func (b *CommitBatcher) Pending(ctx context.Context) ([]abi.SectorID, error) { + b.lk.Lock() + defer b.lk.Unlock() + + mid, err := address.IDFromAddress(b.maddr) + if err != nil { + return nil, err + } + + res := make([]abi.SectorID, 0) + for _, s := range b.todo { + res = append(res, abi.SectorID{ + Miner: abi.ActorID(mid), + Number: s.info.Number, + }) + } + + sort.Slice(res, func(i, j int) bool { + if res[i].Miner != res[j].Miner { + return res[i].Miner < res[j].Miner + } + + return res[i].Number < res[j].Number + }) + + return res, nil +} + +func (b *CommitBatcher) Stop(ctx context.Context) error { + close(b.stop) + + select { + case <-b.stopped: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +func getSectorDeadline(curEpoch abi.ChainEpoch, si SectorInfo) time.Time { + deadlineEpoch := si.TicketEpoch + policy.MaxPreCommitRandomnessLookback + for _, p := range si.Pieces { + if p.DealInfo == nil { + continue + } + + startEpoch := p.DealInfo.DealSchedule.StartEpoch + if startEpoch < deadlineEpoch { + deadlineEpoch = startEpoch + } + } + + if deadlineEpoch <= curEpoch { + return time.Now() + } + + return time.Now().Add(time.Duration(deadlineEpoch-curEpoch) * time.Duration(build.BlockDelaySecs) * time.Second) +} diff --git a/extern/storage-sealing/fsm.go b/extern/storage-sealing/fsm.go index d14d363e5..594f9f2f5 100644 --- a/extern/storage-sealing/fsm.go +++ b/extern/storage-sealing/fsm.go @@ -71,13 +71,27 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed), ), PreCommitting: planOne( - on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed), + on(SectorPreCommitBatch{}, SubmitPreCommitBatch), on(SectorPreCommitted{}, PreCommitWait), + on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed), on(SectorChainPreCommitFailed{}, PreCommitFailed), on(SectorPreCommitLanded{}, WaitSeed), on(SectorDealsExpired{}, DealsExpired), on(SectorInvalidDealIDs{}, RecoverDealIDs), ), + SubmitPreCommitBatch: planOne( + on(SectorPreCommitBatchSent{}, PreCommitBatchWait), + on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed), + on(SectorChainPreCommitFailed{}, PreCommitFailed), + on(SectorPreCommitLanded{}, WaitSeed), + on(SectorDealsExpired{}, DealsExpired), + on(SectorInvalidDealIDs{}, RecoverDealIDs), + ), + PreCommitBatchWait: planOne( + on(SectorChainPreCommitFailed{}, PreCommitFailed), + on(SectorPreCommitLanded{}, WaitSeed), + on(SectorRetryPreCommit{}, PreCommitting), + ), PreCommitWait: planOne( on(SectorChainPreCommitFailed{}, PreCommitFailed), on(SectorPreCommitLanded{}, WaitSeed), @@ -90,6 +104,11 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto Committing: planCommitting, SubmitCommit: planOne( on(SectorCommitSubmitted{}, CommitWait), + on(SectorSubmitCommitAggregate{}, SubmitCommitAggregate), + on(SectorCommitFailed{}, CommitFailed), + ), + SubmitCommitAggregate: planOne( + on(SectorCommitAggregateSent{}, CommitWait), 
on(SectorCommitFailed{}, CommitFailed), ), CommitWait: planOne( @@ -97,6 +116,11 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto on(SectorCommitFailed{}, CommitFailed), on(SectorRetrySubmitCommit{}, SubmitCommit), ), + CommitAggregateWait: planOne( + on(SectorProving{}, FinalizeSector), + on(SectorCommitFailed{}, CommitFailed), + on(SectorRetrySubmitCommit{}, SubmitCommit), + ), FinalizeSector: planOne( on(SectorFinalized{}, Proving), @@ -330,6 +354,10 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta return m.handlePreCommit2, processed, nil case PreCommitting: return m.handlePreCommitting, processed, nil + case SubmitPreCommitBatch: + return m.handleSubmitPreCommitBatch, processed, nil + case PreCommitBatchWait: + fallthrough case PreCommitWait: return m.handlePreCommitWait, processed, nil case WaitSeed: @@ -338,6 +366,10 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta return m.handleCommitting, processed, nil case SubmitCommit: return m.handleSubmitCommit, processed, nil + case SubmitCommitAggregate: + return m.handleSubmitCommitAggregate, processed, nil + case CommitAggregateWait: + fallthrough case CommitWait: return m.handleCommitWait, processed, nil case FinalizeSector: diff --git a/extern/storage-sealing/fsm_events.go b/extern/storage-sealing/fsm_events.go index 8d11b248b..7ec8f3dfc 100644 --- a/extern/storage-sealing/fsm_events.go +++ b/extern/storage-sealing/fsm_events.go @@ -150,6 +150,18 @@ func (evt SectorPreCommit2) apply(state *SectorInfo) { state.CommR = &commr } +type SectorPreCommitBatch struct{} + +func (evt SectorPreCommitBatch) apply(*SectorInfo) {} + +type SectorPreCommitBatchSent struct { + Message cid.Cid +} + +func (evt SectorPreCommitBatchSent) apply(state *SectorInfo) { + state.PreCommitMessage = &evt.Message +} + type SectorPreCommitLanded struct { TipSet TipSetToken } @@ -233,6 +245,10 @@ func (evt SectorCommitted) apply(state *SectorInfo) { state.Proof = evt.Proof } +type SectorSubmitCommitAggregate struct{} + +func (evt SectorSubmitCommitAggregate) apply(*SectorInfo) {} + type SectorCommitSubmitted struct { Message cid.Cid } @@ -241,6 +257,14 @@ func (evt SectorCommitSubmitted) apply(state *SectorInfo) { state.CommitMessage = &evt.Message } +type SectorCommitAggregateSent struct { + Message cid.Cid +} + +func (evt SectorCommitAggregateSent) apply(state *SectorInfo) { + state.CommitMessage = &evt.Message +} + type SectorProving struct{} func (evt SectorProving) apply(*SectorInfo) {} diff --git a/extern/storage-sealing/precommit_batch.go b/extern/storage-sealing/precommit_batch.go new file mode 100644 index 000000000..69ba47c60 --- /dev/null +++ b/extern/storage-sealing/precommit_batch.go @@ -0,0 +1,312 @@ +package sealing + +import ( + "bytes" + "context" + "sort" + "sync" + "time" + + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" +) + +type PreCommitBatcherApi interface { + SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error) + StateMinerInfo(context.Context, 
address.Address, TipSetToken) (miner.MinerInfo, error) + ChainHead(ctx context.Context) (TipSetToken, abi.ChainEpoch, error) +} + +type preCommitEntry struct { + deposit abi.TokenAmount + pci *miner0.SectorPreCommitInfo +} + +type PreCommitBatcher struct { + api PreCommitBatcherApi + maddr address.Address + mctx context.Context + addrSel AddrSel + feeCfg FeeConfig + getConfig GetSealingConfigFunc + + deadlines map[abi.SectorNumber]time.Time + todo map[abi.SectorNumber]*preCommitEntry + waiting map[abi.SectorNumber][]chan cid.Cid + + notify, stop, stopped chan struct{} + force chan chan *cid.Cid + lk sync.Mutex +} + +func NewPreCommitBatcher(mctx context.Context, maddr address.Address, api PreCommitBatcherApi, addrSel AddrSel, feeCfg FeeConfig, getConfig GetSealingConfigFunc) *PreCommitBatcher { + b := &PreCommitBatcher{ + api: api, + maddr: maddr, + mctx: mctx, + addrSel: addrSel, + feeCfg: feeCfg, + getConfig: getConfig, + + deadlines: map[abi.SectorNumber]time.Time{}, + todo: map[abi.SectorNumber]*preCommitEntry{}, + waiting: map[abi.SectorNumber][]chan cid.Cid{}, + + notify: make(chan struct{}, 1), + force: make(chan chan *cid.Cid), + stop: make(chan struct{}), + stopped: make(chan struct{}), + } + + go b.run() + + return b +} + +func (b *PreCommitBatcher) run() { + var forceRes chan *cid.Cid + var lastMsg *cid.Cid + + cfg, err := b.getConfig() + if err != nil { + panic(err) + } + + for { + if forceRes != nil { + forceRes <- lastMsg + forceRes = nil + } + lastMsg = nil + + var sendAboveMax, sendAboveMin bool + select { + case <-b.stop: + close(b.stopped) + return + case <-b.notify: + sendAboveMax = true + case <-b.batchWait(cfg.PreCommitBatchWait, cfg.PreCommitBatchSlack): + sendAboveMin = true + case fr := <-b.force: // user triggered + forceRes = fr + } + + var err error + lastMsg, err = b.processBatch(sendAboveMax, sendAboveMin) + if err != nil { + log.Warnw("PreCommitBatcher processBatch error", "error", err) + } + } +} + +func (b *PreCommitBatcher) batchWait(maxWait, slack time.Duration) <-chan time.Time { + now := time.Now() + + b.lk.Lock() + defer b.lk.Unlock() + + if len(b.todo) == 0 { + return nil + } + + var deadline time.Time + for sn := range b.todo { + sectorDeadline := b.deadlines[sn] + if deadline.IsZero() || (!sectorDeadline.IsZero() && sectorDeadline.Before(deadline)) { + deadline = sectorDeadline + } + } + for sn := range b.waiting { + sectorDeadline := b.deadlines[sn] + if deadline.IsZero() || (!sectorDeadline.IsZero() && sectorDeadline.Before(deadline)) { + deadline = sectorDeadline + } + } + + if deadline.IsZero() { + return time.After(maxWait) + } + + deadline = deadline.Add(-slack) + if deadline.Before(now) { + return time.After(time.Nanosecond) // can't return 0 + } + + wait := deadline.Sub(now) + if wait > maxWait { + wait = maxWait + } + + return time.After(wait) +} + +func (b *PreCommitBatcher) processBatch(notif, after bool) (*cid.Cid, error) { + b.lk.Lock() + defer b.lk.Unlock() + params := miner5.PreCommitSectorBatchParams{} + + total := len(b.todo) + if total == 0 { + return nil, nil // nothing to do + } + + cfg, err := b.getConfig() + if err != nil { + return nil, xerrors.Errorf("getting config: %w", err) + } + + if notif && total < cfg.MaxPreCommitBatch { + return nil, nil + } + + if after && total < cfg.MinPreCommitBatch { + return nil, nil + } + + deposit := big.Zero() + + for _, p := range b.todo { + if len(params.Sectors) >= cfg.MaxPreCommitBatch { + log.Infow("precommit batch full") + break + } + + params.Sectors = append(params.Sectors, p.pci) + 
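// sum up the PreCommit deposits of every sector included in the batch; the total is attached as the message value when PreCommitSectorBatch is sent below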
deposit = big.Add(deposit, p.deposit) + } + + enc := new(bytes.Buffer) + if err := params.MarshalCBOR(enc); err != nil { + return nil, xerrors.Errorf("couldn't serialize PreCommitSectorBatchParams: %w", err) + } + + mi, err := b.api.StateMinerInfo(b.mctx, b.maddr, nil) + if err != nil { + return nil, xerrors.Errorf("couldn't get miner info: %w", err) + } + + goodFunds := big.Add(deposit, b.feeCfg.MaxPreCommitGasFee) + + from, _, err := b.addrSel(b.mctx, mi, api.PreCommitAddr, goodFunds, deposit) + if err != nil { + return nil, xerrors.Errorf("no good address found: %w", err) + } + + mcid, err := b.api.SendMsg(b.mctx, from, b.maddr, miner.Methods.PreCommitSectorBatch, deposit, b.feeCfg.MaxPreCommitGasFee, enc.Bytes()) + if err != nil { + return nil, xerrors.Errorf("sending message failed: %w", err) + } + + log.Infow("Sent PreCommitSectorBatch message", "cid", mcid, "from", from, "sectors", total) + + for _, sector := range params.Sectors { + sn := sector.SectorNumber + + for _, ch := range b.waiting[sn] { + ch <- mcid // buffered + } + delete(b.waiting, sn) + delete(b.todo, sn) + delete(b.deadlines, sn) + } + + return &mcid, nil +} + +// register PreCommit, wait for batch message, return message CID +func (b *PreCommitBatcher) AddPreCommit(ctx context.Context, s SectorInfo, deposit abi.TokenAmount, in *miner0.SectorPreCommitInfo) (mcid cid.Cid, err error) { + _, curEpoch, err := b.api.ChainHead(b.mctx) + if err != nil { + log.Errorf("getting chain head: %s", err) + return cid.Undef, nil + } + + sn := s.SectorNumber + + b.lk.Lock() + b.deadlines[sn] = getSectorDeadline(curEpoch, s) + b.todo[sn] = &preCommitEntry{ + deposit: deposit, + pci: in, + } + + sent := make(chan cid.Cid, 1) + b.waiting[sn] = append(b.waiting[sn], sent) + + select { + case b.notify <- struct{}{}: + default: // already have a pending notification, don't need more + } + b.lk.Unlock() + + select { + case c := <-sent: + return c, nil + case <-ctx.Done(): + return cid.Undef, ctx.Err() + } +} + +func (b *PreCommitBatcher) Flush(ctx context.Context) (*cid.Cid, error) { + resCh := make(chan *cid.Cid, 1) + select { + case b.force <- resCh: + select { + case res := <-resCh: + return res, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + case <-ctx.Done(): + return nil, ctx.Err() + } +} + +func (b *PreCommitBatcher) Pending(ctx context.Context) ([]abi.SectorID, error) { + b.lk.Lock() + defer b.lk.Unlock() + + mid, err := address.IDFromAddress(b.maddr) + if err != nil { + return nil, err + } + + res := make([]abi.SectorID, 0) + for _, s := range b.todo { + res = append(res, abi.SectorID{ + Miner: abi.ActorID(mid), + Number: s.pci.SectorNumber, + }) + } + + sort.Slice(res, func(i, j int) bool { + if res[i].Miner != res[j].Miner { + return res[i].Miner < res[j].Miner + } + + return res[i].Number < res[j].Number + }) + + return res, nil +} + +func (b *PreCommitBatcher) Stop(ctx context.Context) error { + close(b.stop) + + select { + case <-b.stopped: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} diff --git a/extern/storage-sealing/sealiface/config.go b/extern/storage-sealing/sealiface/config.go index 7ac5f6160..54ba2ef58 100644 --- a/extern/storage-sealing/sealiface/config.go +++ b/extern/storage-sealing/sealiface/config.go @@ -17,4 +17,20 @@ type Config struct { WaitDealsDelay time.Duration AlwaysKeepUnsealedCopy bool + + BatchPreCommits bool + MaxPreCommitBatch int + MinPreCommitBatch int + PreCommitBatchWait time.Duration + PreCommitBatchSlack time.Duration + + AggregateCommits bool + MinCommitBatch int + 
MaxCommitBatch int + CommitBatchWait time.Duration + CommitBatchSlack time.Duration + + TerminateBatchMax uint64 + TerminateBatchMin uint64 + TerminateBatchWait time.Duration } diff --git a/extern/storage-sealing/sealing.go b/extern/storage-sealing/sealing.go index 8feca3b7b..fc452cc6f 100644 --- a/extern/storage-sealing/sealing.go +++ b/extern/storage-sealing/sealing.go @@ -102,7 +102,9 @@ type Sealing struct { stats SectorStats - terminator *TerminateBatcher + terminator *TerminateBatcher + precommiter *PreCommitBatcher + commiter *CommitBatcher getConfig GetSealingConfigFunc dealInfo *CurrentDealInfoManager @@ -130,7 +132,7 @@ type pendingPiece struct { accepted func(abi.SectorNumber, abi.UnpaddedPieceSize, error) } -func New(api SealingAPI, fc FeeConfig, events Events, maddr address.Address, ds datastore.Batching, sealer sectorstorage.SectorManager, sc SectorIDCounter, verif ffiwrapper.Verifier, pcp PreCommitPolicy, gc GetSealingConfigFunc, notifee SectorStateNotifee, as AddrSel) *Sealing { +func New(api SealingAPI, fc FeeConfig, events Events, maddr address.Address, ds datastore.Batching, sealer sectorstorage.SectorManager, sc SectorIDCounter, verif ffiwrapper.Verifier, prov ffiwrapper.Prover, pcp PreCommitPolicy, gc GetSealingConfigFunc, notifee SectorStateNotifee, as AddrSel) *Sealing { s := &Sealing{ api: api, feeCfg: fc, @@ -151,7 +153,9 @@ func New(api SealingAPI, fc FeeConfig, events Events, maddr address.Address, ds notifee: notifee, addrSel: as, - terminator: NewTerminationBatcher(context.TODO(), maddr, api, as, fc), + terminator: NewTerminationBatcher(context.TODO(), maddr, api, as, fc, gc), + precommiter: NewPreCommitBatcher(context.TODO(), maddr, api, as, fc, gc), + commiter: NewCommitBatcher(context.TODO(), maddr, api, as, fc, gc, prov), getConfig: gc, dealInfo: &CurrentDealInfoManager{api}, @@ -202,6 +206,22 @@ func (m *Sealing) TerminatePending(ctx context.Context) ([]abi.SectorID, error) return m.terminator.Pending(ctx) } +func (m *Sealing) SectorPreCommitFlush(ctx context.Context) (*cid.Cid, error) { + return m.precommiter.Flush(ctx) +} + +func (m *Sealing) SectorPreCommitPending(ctx context.Context) ([]abi.SectorID, error) { + return m.precommiter.Pending(ctx) +} + +func (m *Sealing) CommitFlush(ctx context.Context) (*cid.Cid, error) { + return m.commiter.Flush(ctx) +} + +func (m *Sealing) CommitPending(ctx context.Context) ([]abi.SectorID, error) { + return m.commiter.Pending(ctx) +} + func (m *Sealing) currentSealProof(ctx context.Context) (abi.RegisteredSealProof, error) { mi, err := m.api.StateMinerInfo(ctx, m.maddr, nil) if err != nil { diff --git a/extern/storage-sealing/sector_state.go b/extern/storage-sealing/sector_state.go index b636614d1..23c7695e7 100644 --- a/extern/storage-sealing/sector_state.go +++ b/extern/storage-sealing/sector_state.go @@ -3,61 +3,76 @@ package sealing type SectorState string var ExistSectorStateList = map[SectorState]struct{}{ - Empty: {}, - WaitDeals: {}, - Packing: {}, - AddPiece: {}, - AddPieceFailed: {}, - GetTicket: {}, - PreCommit1: {}, - PreCommit2: {}, - PreCommitting: {}, - PreCommitWait: {}, - WaitSeed: {}, - Committing: {}, - SubmitCommit: {}, - CommitWait: {}, - FinalizeSector: {}, - Proving: {}, - FailedUnrecoverable: {}, - SealPreCommit1Failed: {}, - SealPreCommit2Failed: {}, - PreCommitFailed: {}, - ComputeProofFailed: {}, - CommitFailed: {}, - PackingFailed: {}, - FinalizeFailed: {}, - DealsExpired: {}, - RecoverDealIDs: {}, - Faulty: {}, - FaultReported: {}, - FaultedFinal: {}, - Terminating: {}, - TerminateWait: 
{}, - TerminateFinality: {}, - TerminateFailed: {}, - Removing: {}, - RemoveFailed: {}, - Removed: {}, + Empty: {}, + WaitDeals: {}, + Packing: {}, + AddPiece: {}, + AddPieceFailed: {}, + GetTicket: {}, + PreCommit1: {}, + PreCommit2: {}, + PreCommitting: {}, + PreCommitWait: {}, + SubmitPreCommitBatch: {}, + PreCommitBatchWait: {}, + WaitSeed: {}, + Committing: {}, + SubmitCommit: {}, + CommitWait: {}, + SubmitCommitAggregate: {}, + CommitAggregateWait: {}, + FinalizeSector: {}, + Proving: {}, + FailedUnrecoverable: {}, + SealPreCommit1Failed: {}, + SealPreCommit2Failed: {}, + PreCommitFailed: {}, + ComputeProofFailed: {}, + CommitFailed: {}, + PackingFailed: {}, + FinalizeFailed: {}, + DealsExpired: {}, + RecoverDealIDs: {}, + Faulty: {}, + FaultReported: {}, + FaultedFinal: {}, + Terminating: {}, + TerminateWait: {}, + TerminateFinality: {}, + TerminateFailed: {}, + Removing: {}, + RemoveFailed: {}, + Removed: {}, } const ( UndefinedSectorState SectorState = "" // happy path - Empty SectorState = "Empty" // deprecated - WaitDeals SectorState = "WaitDeals" // waiting for more pieces (deals) to be added to the sector - AddPiece SectorState = "AddPiece" // put deal data (and padding if required) into the sector - Packing SectorState = "Packing" // sector not in sealStore, and not on chain - GetTicket SectorState = "GetTicket" // generate ticket - PreCommit1 SectorState = "PreCommit1" // do PreCommit1 - PreCommit2 SectorState = "PreCommit2" // do PreCommit2 - PreCommitting SectorState = "PreCommitting" // on chain pre-commit - PreCommitWait SectorState = "PreCommitWait" // waiting for precommit to land on chain - WaitSeed SectorState = "WaitSeed" // waiting for seed - Committing SectorState = "Committing" // compute PoRep - SubmitCommit SectorState = "SubmitCommit" // send commit message to the chain - CommitWait SectorState = "CommitWait" // wait for the commit message to land on chain + Empty SectorState = "Empty" // deprecated + WaitDeals SectorState = "WaitDeals" // waiting for more pieces (deals) to be added to the sector + AddPiece SectorState = "AddPiece" // put deal data (and padding if required) into the sector + Packing SectorState = "Packing" // sector not in sealStore, and not on chain + GetTicket SectorState = "GetTicket" // generate ticket + PreCommit1 SectorState = "PreCommit1" // do PreCommit1 + PreCommit2 SectorState = "PreCommit2" // do PreCommit2 + + PreCommitting SectorState = "PreCommitting" // on chain pre-commit + PreCommitWait SectorState = "PreCommitWait" // waiting for precommit to land on chain + + SubmitPreCommitBatch SectorState = "SubmitPreCommitBatch" + PreCommitBatchWait SectorState = "PreCommitBatchWait" + + WaitSeed SectorState = "WaitSeed" // waiting for seed + Committing SectorState = "Committing" // compute PoRep + + // single commit + SubmitCommit SectorState = "SubmitCommit" // send commit message to the chain + CommitWait SectorState = "CommitWait" // wait for the commit message to land on chain + + SubmitCommitAggregate SectorState = "SubmitCommitAggregate" + CommitAggregateWait SectorState = "CommitAggregateWait" + FinalizeSector SectorState = "FinalizeSector" Proving SectorState = "Proving" // error modes @@ -91,7 +106,7 @@ func toStatState(st SectorState) statSectorState { switch st { case UndefinedSectorState, Empty, WaitDeals, AddPiece: return sstStaging - case Packing, GetTicket, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, WaitSeed, Committing, SubmitCommit, CommitWait, FinalizeSector: + case Packing, GetTicket, PreCommit1, 
PreCommit2, PreCommitting, PreCommitWait, SubmitPreCommitBatch, PreCommitBatchWait, WaitSeed, Committing, SubmitCommit, CommitWait, SubmitCommitAggregate, CommitAggregateWait, FinalizeSector: return sstSealing case Proving, Removed, Removing, Terminating, TerminateWait, TerminateFinality, TerminateFailed: return sstProving diff --git a/extern/storage-sealing/states_sealing.go b/extern/storage-sealing/states_sealing.go index e371ab33f..391951dea 100644 --- a/extern/storage-sealing/states_sealing.go +++ b/extern/storage-sealing/states_sealing.go @@ -11,7 +11,9 @@ import ( "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/go-statemachine" + "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/api" @@ -224,56 +226,50 @@ func (m *Sealing) remarkForUpgrade(sid abi.SectorNumber) { } } -func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInfo) error { +func (m *Sealing) preCommitParams(ctx statemachine.Context, sector SectorInfo) (*miner.SectorPreCommitInfo, big.Int, TipSetToken, error) { tok, height, err := m.api.ChainHead(ctx.Context()) if err != nil { log.Errorf("handlePreCommitting: api error, not proceeding: %+v", err) - return nil - } - - mi, err := m.api.StateMinerInfo(ctx.Context(), m.maddr, tok) - if err != nil { - log.Errorf("handlePreCommitting: api error, not proceeding: %+v", err) - return nil + return nil, big.Zero(), nil, nil } if err := checkPrecommit(ctx.Context(), m.Address(), sector, tok, height, m.api); err != nil { switch err := err.(type) { case *ErrApi: log.Errorf("handlePreCommitting: api error, not proceeding: %+v", err) - return nil + return nil, big.Zero(), nil, nil case *ErrBadCommD: // TODO: Should this just back to packing? 
(not really needed since handlePreCommit1 will do that too) - return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("bad CommD error: %w", err)}) + return nil, big.Zero(), nil, ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("bad CommD error: %w", err)}) case *ErrExpiredTicket: - return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("ticket expired: %w", err)}) + return nil, big.Zero(), nil, ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("ticket expired: %w", err)}) case *ErrBadTicket: - return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("bad ticket: %w", err)}) + return nil, big.Zero(), nil, ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("bad ticket: %w", err)}) case *ErrInvalidDeals: log.Warnf("invalid deals in sector %d: %v", sector.SectorNumber, err) - return ctx.Send(SectorInvalidDealIDs{Return: RetPreCommitting}) + return nil, big.Zero(), nil, ctx.Send(SectorInvalidDealIDs{Return: RetPreCommitting}) case *ErrExpiredDeals: - return ctx.Send(SectorDealsExpired{xerrors.Errorf("sector deals expired: %w", err)}) + return nil, big.Zero(), nil, ctx.Send(SectorDealsExpired{xerrors.Errorf("sector deals expired: %w", err)}) case *ErrPrecommitOnChain: - return ctx.Send(SectorPreCommitLanded{TipSet: tok}) // we re-did precommit + return nil, big.Zero(), nil, ctx.Send(SectorPreCommitLanded{TipSet: tok}) // we re-did precommit case *ErrSectorNumberAllocated: log.Errorf("handlePreCommitFailed: sector number already allocated, not proceeding: %+v", err) // TODO: check if the sector is committed (not sure how we'd end up here) - return nil + return nil, big.Zero(), nil, nil default: - return xerrors.Errorf("checkPrecommit sanity check error: %w", err) + return nil, big.Zero(), nil, xerrors.Errorf("checkPrecommit sanity check error: %w", err) } } expiration, err := m.pcp.Expiration(ctx.Context(), sector.Pieces...) if err != nil { - return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("handlePreCommitting: failed to compute pre-commit expiry: %w", err)}) + return nil, big.Zero(), nil, ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("handlePreCommitting: failed to compute pre-commit expiry: %w", err)}) } // Sectors must last _at least_ MinSectorExpiration + MaxSealDuration. // TODO: The "+10" allows the pre-commit to take 10 blocks to be accepted. 
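// the maximum prove-commit duration is network-version dependent, hence the StateNetworkVersion lookup below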
nv, err := m.api.StateNetworkVersion(ctx.Context(), tok) if err != nil { - return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("failed to get network version: %w", err)}) + return nil, big.Zero(), nil, ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("failed to get network version: %w", err)}) } msd := policy.GetMaxProveCommitDuration(actors.VersionForNetwork(nv), sector.SectorType) @@ -295,17 +291,49 @@ func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInf depositMinimum := m.tryUpgradeSector(ctx.Context(), params) + collateral, err := m.api.StateMinerPreCommitDepositForPower(ctx.Context(), m.maddr, *params, tok) + if err != nil { + return nil, big.Zero(), nil, xerrors.Errorf("getting initial pledge collateral: %w", err) + } + + deposit := big.Max(depositMinimum, collateral) + + return params, deposit, tok, nil +} + +func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInfo) error { + cfg, err := m.getConfig() + if err != nil { + return xerrors.Errorf("getting config: %w", err) + } + + if cfg.BatchPreCommits { + nv, err := m.api.StateNetworkVersion(ctx.Context(), nil) + if err != nil { + return xerrors.Errorf("getting network version: %w", err) + } + + if nv >= network.Version13 { + return ctx.Send(SectorPreCommitBatch{}) + } + } + + params, deposit, tok, err := m.preCommitParams(ctx, sector) + if params == nil || err != nil { + return err + } + enc := new(bytes.Buffer) if err := params.MarshalCBOR(enc); err != nil { return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("could not serialize pre-commit sector parameters: %w", err)}) } - collateral, err := m.api.StateMinerPreCommitDepositForPower(ctx.Context(), m.maddr, *params, tok) + mi, err := m.api.StateMinerInfo(ctx.Context(), m.maddr, tok) if err != nil { - return xerrors.Errorf("getting initial pledge collateral: %w", err) + log.Errorf("handlePreCommitting: api error, not proceeding: %+v", err) + return nil } - deposit := big.Max(depositMinimum, collateral) goodFunds := big.Add(deposit, m.feeCfg.MaxPreCommitGasFee) from, _, err := m.addrSel(ctx.Context(), mi, api.PreCommitAddr, goodFunds, deposit) @@ -325,6 +353,24 @@ func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInf return ctx.Send(SectorPreCommitted{Message: mcid, PreCommitDeposit: deposit, PreCommitInfo: *params}) } +func (m *Sealing) handleSubmitPreCommitBatch(ctx statemachine.Context, sector SectorInfo) error { + if sector.CommD == nil || sector.CommR == nil { + return ctx.Send(SectorCommitFailed{xerrors.Errorf("sector had nil commR or commD")}) + } + + params, deposit, _, err := m.preCommitParams(ctx, sector) + if params == nil || err != nil { + return err + } + + mcid, err := m.precommiter.AddPreCommit(ctx.Context(), sector, deposit, params) + if err != nil { + return ctx.Send(SectorCommitFailed{xerrors.Errorf("queuing precommit batch failed: %w", err)}) + } + + return ctx.Send(SectorPreCommitBatchSent{mcid}) +} + func (m *Sealing) handlePreCommitWait(ctx statemachine.Context, sector SectorInfo) error { if sector.PreCommitMessage == nil { return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("precommit message was nil")}) @@ -452,6 +498,22 @@ func (m *Sealing) handleCommitting(ctx statemachine.Context, sector SectorInfo) } func (m *Sealing) handleSubmitCommit(ctx statemachine.Context, sector SectorInfo) error { + cfg, err := m.getConfig() + if err != nil { + return xerrors.Errorf("getting config: %w", err) + } + + if cfg.AggregateCommits { + nv, err := 
m.api.StateNetworkVersion(ctx.Context(), nil) + if err != nil { + return xerrors.Errorf("getting network version: %w", err) + } + + if nv >= network.Version13 { + return ctx.Send(SectorSubmitCommitAggregate{}) + } + } + tok, _, err := m.api.ChainHead(ctx.Context()) if err != nil { log.Errorf("handleCommitting: api error, not proceeding: %+v", err) @@ -514,6 +576,29 @@ func (m *Sealing) handleSubmitCommit(ctx statemachine.Context, sector SectorInfo }) } +func (m *Sealing) handleSubmitCommitAggregate(ctx statemachine.Context, sector SectorInfo) error { + if sector.CommD == nil || sector.CommR == nil { + return ctx.Send(SectorCommitFailed{xerrors.Errorf("sector had nil commR or commD")}) + } + + mcid, err := m.commiter.AddCommit(ctx.Context(), sector, AggregateInput{ + info: proof.AggregateSealVerifyInfo{ + Number: sector.SectorNumber, + Randomness: sector.TicketValue, + InteractiveRandomness: sector.SeedValue, + SealedCID: *sector.CommR, + UnsealedCID: *sector.CommD, + }, + proof: sector.Proof, // todo: this correct?? + spt: sector.SectorType, + }) + if err != nil { + return ctx.Send(SectorCommitFailed{xerrors.Errorf("queuing commit for aggregation failed: %w", err)}) + } + + return ctx.Send(SectorCommitAggregateSent{mcid}) +} + func (m *Sealing) handleCommitWait(ctx statemachine.Context, sector SectorInfo) error { if sector.CommitMessage == nil { log.Errorf("sector %d entered commit wait state without a message cid", sector.SectorNumber) diff --git a/extern/storage-sealing/terminate_batch.go b/extern/storage-sealing/terminate_batch.go index 0e96e8384..d545f443f 100644 --- a/extern/storage-sealing/terminate_batch.go +++ b/extern/storage-sealing/terminate_batch.go @@ -21,14 +21,6 @@ import ( "github.com/filecoin-project/lotus/chain/actors/builtin/miner" ) -var ( - // TODO: config - - TerminateBatchMax uint64 = 100 // adjust based on real-world gas numbers, actors limit at 10k - TerminateBatchMin uint64 = 1 - TerminateBatchWait = 5 * time.Minute -) - type TerminateBatcherApi interface { StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok TipSetToken) (*SectorLocation, error) SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error) @@ -38,11 +30,12 @@ type TerminateBatcherApi interface { } type TerminateBatcher struct { - api TerminateBatcherApi - maddr address.Address - mctx context.Context - addrSel AddrSel - feeCfg FeeConfig + api TerminateBatcherApi + maddr address.Address + mctx context.Context + addrSel AddrSel + feeCfg FeeConfig + getConfig GetSealingConfigFunc todo map[SectorLocation]*bitfield.BitField // MinerSectorLocation -> BitField @@ -53,13 +46,14 @@ type TerminateBatcher struct { lk sync.Mutex } -func NewTerminationBatcher(mctx context.Context, maddr address.Address, api TerminateBatcherApi, addrSel AddrSel, feeCfg FeeConfig) *TerminateBatcher { +func NewTerminationBatcher(mctx context.Context, maddr address.Address, api TerminateBatcherApi, addrSel AddrSel, feeCfg FeeConfig, getConfig GetSealingConfigFunc) *TerminateBatcher { b := &TerminateBatcher{ - api: api, - maddr: maddr, - mctx: mctx, - addrSel: addrSel, - feeCfg: feeCfg, + api: api, + maddr: maddr, + mctx: mctx, + addrSel: addrSel, + feeCfg: feeCfg, + getConfig: getConfig, todo: map[SectorLocation]*bitfield.BitField{}, waiting: map[abi.SectorNumber][]chan cid.Cid{}, @@ -86,6 +80,11 @@ func (b *TerminateBatcher) run() { } lastMsg = nil + cfg, err := b.getConfig() + if err != nil { + 
log.Warnw("TerminateBatcher getconfig error", "error", err) + } + var sendAboveMax, sendAboveMin bool select { case <-b.stop: @@ -93,13 +92,12 @@ func (b *TerminateBatcher) run() { return case <-b.notify: sendAboveMax = true - case <-time.After(TerminateBatchWait): + case <-time.After(cfg.TerminateBatchWait): sendAboveMin = true case fr := <-b.force: // user triggered forceRes = fr } - var err error lastMsg, err = b.processBatch(sendAboveMax, sendAboveMin) if err != nil { log.Warnw("TerminateBatcher processBatch error", "error", err) @@ -113,6 +111,11 @@ func (b *TerminateBatcher) processBatch(notif, after bool) (*cid.Cid, error) { return nil, xerrors.Errorf("getting proving deadline info failed: %w", err) } + cfg, err := b.getConfig() + if err != nil { + return nil, xerrors.Errorf("getting sealing config: %W", err) + } + b.lk.Lock() defer b.lk.Unlock() params := miner2.TerminateSectorsParams{} @@ -180,7 +183,7 @@ func (b *TerminateBatcher) processBatch(notif, after bool) (*cid.Cid, error) { Sectors: toTerminate, }) - if total >= uint64(miner.AddressedSectorsMax) { + if total >= uint64(miner.AddressedSectorsMax) || total >= cfg.TerminateBatchMax { break } @@ -193,11 +196,11 @@ func (b *TerminateBatcher) processBatch(notif, after bool) (*cid.Cid, error) { return nil, nil // nothing to do } - if notif && total < TerminateBatchMax { + if notif && total < cfg.TerminateBatchMax { return nil, nil } - if after && total < TerminateBatchMin { + if after && total < cfg.TerminateBatchMin { return nil, nil } diff --git a/go.mod b/go.mod index af2ee9c3e..98e9d3691 100644 --- a/go.mod +++ b/go.mod @@ -38,7 +38,7 @@ require ( github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec github.com/filecoin-project/go-multistore v0.0.3 github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20 - github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261 + github.com/filecoin-project/go-paramfetch v0.0.2-0.20210330140417-936748d3f5ec github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48 github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe github.com/filecoin-project/go-statestore v0.1.1 @@ -47,7 +47,7 @@ require ( github.com/filecoin-project/specs-actors/v2 v2.3.5-0.20210114162132-5b58b773f4fb github.com/filecoin-project/specs-actors/v3 v3.1.0 github.com/filecoin-project/specs-actors/v4 v4.0.0 - github.com/filecoin-project/specs-actors/v5 v5.0.0-20210510162709-3255bdd9f2bb + github.com/filecoin-project/specs-actors/v5 v5.0.0-20210517165532-c7cff61d07fb github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 github.com/filecoin-project/test-vectors/schema v0.0.5 github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1 diff --git a/go.sum b/go.sum index ecdab7a65..3ef546ba4 100644 --- a/go.sum +++ b/go.sum @@ -287,13 +287,12 @@ github.com/filecoin-project/go-multistore v0.0.3 h1:vaRBY4YiA2UZFPK57RNuewypB8u0 github.com/filecoin-project/go-multistore v0.0.3/go.mod h1:kaNqCC4IhU4B1uyr7YWFHd23TL4KM32aChS0jNkyUvQ= github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20 h1:+/4aUeUoKr6AKfPE3mBhXA5spIV6UcKdTYDPNU2Tdmg= github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20/go.mod h1:mPn+LRRd5gEKNAtc+r3ScpW2JRU/pj4NBKdADYWHiak= -github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261 h1:A256QonvzRaknIIAuWhe/M2dpV2otzs3NBhi5TWa/UA= -github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261/go.mod 
h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= +github.com/filecoin-project/go-paramfetch v0.0.2-0.20210330140417-936748d3f5ec h1:gExwWUiT1TcARkxGneS4nvp9C+wBsKU0bFdg7qFpNco= +github.com/filecoin-project/go-paramfetch v0.0.2-0.20210330140417-936748d3f5ec/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= github.com/filecoin-project/go-state-types v0.0.0-20200903145444-247639ffa6ad/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= github.com/filecoin-project/go-state-types v0.0.0-20200904021452-1883f36ca2f4/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= -github.com/filecoin-project/go-state-types v0.1.0 h1:9r2HCSMMCmyMfGyMKxQtv0GKp6VT/m5GgVk8EhYbLJU= github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48 h1:Jc4OprDp3bRDxbsrXNHPwJabZJM3iDy+ri8/1e0ZnX4= github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= @@ -316,8 +315,9 @@ github.com/filecoin-project/specs-actors/v3 v3.1.0 h1:s4qiPw8pgypqBGAy853u/zdZJ7 github.com/filecoin-project/specs-actors/v3 v3.1.0/go.mod h1:mpynccOLlIRy0QnR008BwYBwT9fen+sPR13MA1VmMww= github.com/filecoin-project/specs-actors/v4 v4.0.0 h1:vMALksY5G3J5rj3q9rbcyB+f4Tk1xrLqSgdB3jOok4s= github.com/filecoin-project/specs-actors/v4 v4.0.0/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng= -github.com/filecoin-project/specs-actors/v5 v5.0.0-20210510162709-3255bdd9f2bb h1:i2ZBHLiNYyyhNlfjfB/TGtGLlb8dgiGiVCDZlGpUtUc= -github.com/filecoin-project/specs-actors/v5 v5.0.0-20210510162709-3255bdd9f2bb/go.mod h1:XAgQWq5pu0MBwx3MI5uJ6fK/Q8jCkZnKNNLxvDcbXew= +github.com/filecoin-project/specs-actors/v5 v5.0.0-20210512015452-4fe3889fff57/go.mod h1:283yBMMUSDB2abcjP/hhrwTkhb9h3sfM6KGrep/ZlBI= +github.com/filecoin-project/specs-actors/v5 v5.0.0-20210517165532-c7cff61d07fb h1:818gGdeEC+7aHGl2X7ptdtYuqoEgRsY3jwz+DvUYUFk= +github.com/filecoin-project/specs-actors/v5 v5.0.0-20210517165532-c7cff61d07fb/go.mod h1:283yBMMUSDB2abcjP/hhrwTkhb9h3sfM6KGrep/ZlBI= github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 h1:Ur/l2+6qN+lQiqjozWWc5p9UDaAMDZKTlDS98oRnlIw= github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g= github.com/filecoin-project/test-vectors/schema v0.0.5 h1:w3zHQhzM4pYxJDl21avXjOKBLF8egrvwUwjpT8TquDg= diff --git a/lotuspond/front/src/chain/methods.json b/lotuspond/front/src/chain/methods.json index 12e0e8abf..5aced814a 100644 --- a/lotuspond/front/src/chain/methods.json +++ b/lotuspond/front/src/chain/methods.json @@ -489,7 +489,9 @@ "ConfirmUpdateWorkerKey", "RepayDebt", "ChangeOwnerAddress", - "DisputeWindowedPoSt" + "DisputeWindowedPoSt", + "PreCommitSectorBatch", + "ProveCommitAggregate" ], "fil/5/storagepower": [ "Send", diff --git a/markets/storageadapter/ondealsectorcommitted.go b/markets/storageadapter/ondealsectorcommitted.go index b5f9c7510..31bc0b8bf 100644 --- a/markets/storageadapter/ondealsectorcommitted.go +++ b/markets/storageadapter/ondealsectorcommitted.go @@ -12,6 +12,7 @@ import ( "github.com/filecoin-project/go-address" 
"github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/abi" + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/builtin/market" @@ -109,7 +110,7 @@ func (mgr *SectorCommittedManager) OnDealSectorPreCommitted(ctx context.Context, // Watch for a pre-commit message to the provider. matchEvent := func(msg *types.Message) (bool, error) { - matched := msg.To == provider && msg.Method == miner.Methods.PreCommitSector + matched := msg.To == provider && (msg.Method == miner.Methods.PreCommitSector || msg.Method == miner.Methods.PreCommitSectorBatch) return matched, nil } @@ -137,12 +138,6 @@ func (mgr *SectorCommittedManager) OnDealSectorPreCommitted(ctx context.Context, return true, nil } - // Extract the message parameters - var params miner.SectorPreCommitInfo - if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { - return false, xerrors.Errorf("unmarshal pre commit: %w", err) - } - // When there is a reorg, the deal ID may change, so get the // current deal ID from the publish message CID res, err := mgr.dealInfo.GetCurrentDealInfo(ctx, ts.Key().Bytes(), &proposal, publishCid) @@ -150,13 +145,14 @@ func (mgr *SectorCommittedManager) OnDealSectorPreCommitted(ctx context.Context, return false, err } - // Check through the deal IDs associated with this message - for _, did := range params.DealIDs { - if did == res.DealID { - // Found the deal ID in this message. Callback with the sector ID. - cb(params.SectorNumber, false, nil) - return false, nil - } + // Extract the message parameters + sn, err := dealSectorInPreCommitMsg(msg, res) + if err != nil { + return false, err + } + + if sn != nil { + cb(*sn, false, nil) } // Didn't find the deal ID in this message, so keep looking @@ -207,16 +203,11 @@ func (mgr *SectorCommittedManager) OnDealSectorCommitted(ctx context.Context, pr // Match a prove-commit sent to the provider with the given sector number matchEvent := func(msg *types.Message) (matched bool, err error) { - if msg.To != provider || msg.Method != miner.Methods.ProveCommitSector { + if msg.To != provider { return false, nil } - var params miner.ProveCommitSectorParams - if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { - return false, xerrors.Errorf("failed to unmarshal prove commit sector params: %w", err) - } - - return params.SectorNumber == sectorNumber, nil + return sectorInCommitMsg(msg, sectorNumber) } // The deal must be accepted by the deal proposal start epoch, so timeout @@ -273,6 +264,73 @@ func (mgr *SectorCommittedManager) OnDealSectorCommitted(ctx context.Context, pr return nil } +// dealSectorInPreCommitMsg tries to find a sector containing the specified deal +func dealSectorInPreCommitMsg(msg *types.Message, res sealing.CurrentDealInfo) (*abi.SectorNumber, error) { + switch msg.Method { + case miner.Methods.PreCommitSector: + var params miner.SectorPreCommitInfo + if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { + return nil, xerrors.Errorf("unmarshal pre commit: %w", err) + } + + // Check through the deal IDs associated with this message + for _, did := range params.DealIDs { + if did == res.DealID { + // Found the deal ID in this message. Callback with the sector ID. 
+ return &params.SectorNumber, nil + } + } + case miner.Methods.PreCommitSectorBatch: + var params miner5.PreCommitSectorBatchParams + if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { + return nil, xerrors.Errorf("unmarshal pre commit: %w", err) + } + + for _, precommit := range params.Sectors { + // Check through the deal IDs associated with this message + for _, did := range precommit.DealIDs { + if did == res.DealID { + // Found the deal ID in this message. Callback with the sector ID. + return &precommit.SectorNumber, nil + } + } + } + default: + return nil, xerrors.Errorf("unexpected method %d", msg.Method) + } + + return nil, nil +} + +// sectorInCommitMsg checks if the provided message commits specified sector +func sectorInCommitMsg(msg *types.Message, sectorNumber abi.SectorNumber) (bool, error) { + switch msg.Method { + case miner.Methods.ProveCommitSector: + var params miner.ProveCommitSectorParams + if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { + return false, xerrors.Errorf("failed to unmarshal prove commit sector params: %w", err) + } + + return params.SectorNumber == sectorNumber, nil + + case miner.Methods.ProveCommitAggregate: + var params miner5.ProveCommitAggregateParams + if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { + return false, xerrors.Errorf("failed to unmarshal prove commit sector params: %w", err) + } + + set, err := params.SectorNumbers.IsSet(uint64(sectorNumber)) + if err != nil { + return false, xerrors.Errorf("checking if sectorNumber is set in commit aggregate message: %w", err) + } + + return set, nil + + default: + return false, nil + } +} + func (mgr *SectorCommittedManager) checkIfDealAlreadyActive(ctx context.Context, ts *types.TipSet, proposal *market.DealProposal, publishCid cid.Cid) (sealing.CurrentDealInfo, bool, error) { res, err := mgr.dealInfo.GetCurrentDealInfo(ctx, ts.Key().Bytes(), proposal, publishCid) if err != nil { diff --git a/node/builder.go b/node/builder.go index c884b169b..ce00fc18d 100644 --- a/node/builder.go +++ b/node/builder.go @@ -379,6 +379,7 @@ var MinerNode = Options( // Sector storage: Proofs Override(new(ffiwrapper.Verifier), ffiwrapper.ProofVerifier), + Override(new(ffiwrapper.Prover), ffiwrapper.ProofProver), Override(new(storage2.Prover), From(new(sectorstorage.SectorManager))), // Sealing diff --git a/node/config/def.go b/node/config/def.go index 63099516b..988b0a6c9 100644 --- a/node/config/def.go +++ b/node/config/def.go @@ -6,6 +6,8 @@ import ( "github.com/ipfs/go-cid" + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/types" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" ) @@ -82,6 +84,30 @@ type SealingConfig struct { AlwaysKeepUnsealedCopy bool + + // enable / disable precommit batching (takes effect after nv13) + BatchPreCommits bool + // maximum precommit batch size - batches will be sent immediately above this size + MaxPreCommitBatch int + MinPreCommitBatch int + // how long to wait before submitting a batch after crossing the minimum batch size + PreCommitBatchWait Duration + // time buffer for forceful batch submission before sectors in batch would start expiring + PreCommitBatchSlack Duration + + // enable / disable commit aggregation (takes effect after nv13) + AggregateCommits bool + // maximum batched commit size - batches will be sent immediately above this size + MinCommitBatch int + MaxCommitBatch int + // how long to wait before 
diff --git a/node/impl/storminer.go b/node/impl/storminer.go
index cad886e2d..8660f1efb 100644
--- a/node/impl/storminer.go
+++ b/node/impl/storminer.go
@@ -374,10 +374,26 @@ func (sm *StorageMinerAPI) SectorTerminatePending(ctx context.Context) ([]abi.Se
 	return sm.Miner.TerminatePending(ctx)
 }
 
+func (sm *StorageMinerAPI) SectorPreCommitFlush(ctx context.Context) (*cid.Cid, error) {
+	return sm.Miner.SectorPreCommitFlush(ctx)
+}
+
+func (sm *StorageMinerAPI) SectorPreCommitPending(ctx context.Context) ([]abi.SectorID, error) {
+	return sm.Miner.SectorPreCommitPending(ctx)
+}
+
 func (sm *StorageMinerAPI) SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber) error {
 	return sm.Miner.MarkForUpgrade(id)
 }
 
+func (sm *StorageMinerAPI) SectorCommitFlush(ctx context.Context) (*cid.Cid, error) {
+	return sm.Miner.CommitFlush(ctx)
+}
+
+func (sm *StorageMinerAPI) SectorCommitPending(ctx context.Context) ([]abi.SectorID, error) {
+	return sm.Miner.CommitPending(ctx)
+}
+
 func (sm *StorageMinerAPI) WorkerConnect(ctx context.Context, url string) error {
 	w, err := connectRemoteWorker(ctx, sm, url)
 	if err != nil {
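These handlers are thin passthroughs to the sealing subsystem, which makes the new batching queues observable and controllable from any client holding a StorageMiner API handle. A rough sketch of a caller that inspects and drains both queues, for example before shutting a miner down; the helper name is hypothetical, and both Flush calls return a nil CID when nothing was queued:

package example

import (
    "context"
    "fmt"

    "github.com/filecoin-project/lotus/api"
)

// flushSealingQueues is a hypothetical helper built on the API added above.
func flushSealingQueues(ctx context.Context, m api.StorageMiner) error {
    pre, err := m.SectorPreCommitPending(ctx)
    if err != nil {
        return err
    }
    com, err := m.SectorCommitPending(ctx)
    if err != nil {
        return err
    }
    fmt.Printf("pending: %d precommits, %d commits\n", len(pre), len(com))

    if c, err := m.SectorPreCommitFlush(ctx); err != nil {
        return err
    } else if c != nil {
        fmt.Println("precommit batch message:", c)
    }

    if c, err := m.SectorCommitFlush(ctx); err != nil {
        return err
    } else if c != nil {
        fmt.Println("commit batch message:", c)
    }
    return nil
}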
diff --git a/node/modules/storageminer.go b/node/modules/storageminer.go
index 1781d0493..122bec519 100644
--- a/node/modules/storageminer.go
+++ b/node/modules/storageminer.go
@@ -99,7 +99,7 @@ func GetParams(spt abi.RegisteredSealProof) error {
 	}
 
 	// TODO: We should fetch the params for the actual proof type, not just based on the size.
-	if err := paramfetch.GetParams(context.TODO(), build.ParametersJSON(), uint64(ssize)); err != nil {
+	if err := paramfetch.GetParams(context.TODO(), build.ParametersJSON(), build.SrsJSON(), uint64(ssize)); err != nil {
 		return xerrors.Errorf("fetching proof parameters: %w", err)
 	}
 
@@ -202,6 +202,7 @@ type StorageMinerParams struct {
 	Sealer             sectorstorage.SectorManager
 	SectorIDCounter    sealing.SectorIDCounter
 	Verifier           ffiwrapper.Verifier
+	Prover             ffiwrapper.Prover
 	GetSealingConfigFn dtypes.GetSealingConfigFunc
 	Journal            journal.Journal
 	AddrSel            *storage.AddressSelector
@@ -218,6 +219,7 @@ func StorageMiner(fc config.MinerFeeConfig) func(params StorageMinerParams) (*st
 		h      = params.Host
 		sc     = params.SectorIDCounter
 		verif  = params.Verifier
+		prover = params.Prover
 		gsd    = params.GetSealingConfigFn
 		j      = params.Journal
 		as     = params.AddrSel
@@ -235,7 +237,7 @@ func StorageMiner(fc config.MinerFeeConfig) func(params StorageMinerParams) (*st
 			return nil, err
 		}
 
-		sm, err := storage.NewMiner(api, maddr, h, ds, sealer, sc, verif, gsd, fc, j, as)
+		sm, err := storage.NewMiner(api, maddr, h, ds, sealer, sc, verif, prover, gsd, fc, j, as)
 		if err != nil {
 			return nil, err
 		}
@@ -824,6 +826,22 @@ func NewSetSealConfigFunc(r repo.LockedRepo) (dtypes.SetSealingConfigFunc, error
 			MaxSealingSectorsForDeals: cfg.MaxSealingSectorsForDeals,
 			WaitDealsDelay:            config.Duration(cfg.WaitDealsDelay),
 			AlwaysKeepUnsealedCopy:    cfg.AlwaysKeepUnsealedCopy,
+
+			BatchPreCommits:     cfg.BatchPreCommits,
+			MinPreCommitBatch:   cfg.MinPreCommitBatch,
+			MaxPreCommitBatch:   cfg.MaxPreCommitBatch,
+			PreCommitBatchWait:  config.Duration(cfg.PreCommitBatchWait),
+			PreCommitBatchSlack: config.Duration(cfg.PreCommitBatchSlack),
+
+			AggregateCommits: cfg.AggregateCommits,
+			MinCommitBatch:   cfg.MinCommitBatch,
+			MaxCommitBatch:   cfg.MaxCommitBatch,
+			CommitBatchWait:  config.Duration(cfg.CommitBatchWait),
+			CommitBatchSlack: config.Duration(cfg.CommitBatchSlack),
+
+			TerminateBatchMax:  cfg.TerminateBatchMax,
+			TerminateBatchMin:  cfg.TerminateBatchMin,
+			TerminateBatchWait: config.Duration(cfg.TerminateBatchWait),
 		}
 	})
 	return
@@ -839,6 +857,22 @@ func NewGetSealConfigFunc(r repo.LockedRepo) (dtypes.GetSealingConfigFunc, error
 			MaxSealingSectorsForDeals: cfg.Sealing.MaxSealingSectorsForDeals,
 			WaitDealsDelay:            time.Duration(cfg.Sealing.WaitDealsDelay),
 			AlwaysKeepUnsealedCopy:    cfg.Sealing.AlwaysKeepUnsealedCopy,
+
+			BatchPreCommits:     cfg.Sealing.BatchPreCommits,
+			MinPreCommitBatch:   cfg.Sealing.MinPreCommitBatch,
+			MaxPreCommitBatch:   cfg.Sealing.MaxPreCommitBatch,
+			PreCommitBatchWait:  time.Duration(cfg.Sealing.PreCommitBatchWait),
+			PreCommitBatchSlack: time.Duration(cfg.Sealing.PreCommitBatchSlack),
+
+			AggregateCommits: cfg.Sealing.AggregateCommits,
+			MinCommitBatch:   cfg.Sealing.MinCommitBatch,
+			MaxCommitBatch:   cfg.Sealing.MaxCommitBatch,
+			CommitBatchWait:  time.Duration(cfg.Sealing.CommitBatchWait),
+			CommitBatchSlack: time.Duration(cfg.Sealing.CommitBatchSlack),
+
+			TerminateBatchMax:  cfg.Sealing.TerminateBatchMax,
+			TerminateBatchMin:  cfg.Sealing.TerminateBatchMin,
+			TerminateBatchWait: time.Duration(cfg.Sealing.TerminateBatchWait),
 		}
 	})
 	return
diff --git a/node/node_test.go b/node/node_test.go
index 91348647d..5ae15fd8c 100644
--- a/node/node_test.go
+++ b/node/node_test.go
@@ -148,6 +148,15 @@ func TestPledgeSectors(t *testing.T) {
 	})
 }
 
+func TestPledgeBatching(t *testing.T) {
+	t.Run("100", func(t *testing.T) {
+		test.TestPledgeBatching(t, builder.MockSbBuilder, 50*time.Millisecond, 100)
+	})
+	t.Run("100-before-nv13", func(t *testing.T) {
+		test.TestPledgeBeforeNv13(t, builder.MockSbBuilder, 50*time.Millisecond, 100)
+	})
+}
+
 func TestTapeFix(t *testing.T) {
 	logging.SetLogLevel("miner", "ERROR")
 	logging.SetLogLevel("chainstore", "ERROR")
@@ -164,6 +173,7 @@ func TestWindowedPost(t *testing.T) {
 	}
 
 	logging.SetLogLevel("miner", "ERROR")
+	logging.SetLogLevel("gen", "ERROR")
 	logging.SetLogLevel("chainstore", "ERROR")
 	logging.SetLogLevel("chain", "ERROR")
 	logging.SetLogLevel("sub", "ERROR")
@@ -212,6 +222,7 @@ func TestWindowPostDispute(t *testing.T) {
 		t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
 	}
 	logging.SetLogLevel("miner", "ERROR")
+	logging.SetLogLevel("gen", "ERROR")
 	logging.SetLogLevel("chainstore", "ERROR")
 	logging.SetLogLevel("chain", "ERROR")
 	logging.SetLogLevel("sub", "ERROR")
@@ -225,6 +236,7 @@ func TestWindowPostDisputeFails(t *testing.T) {
 		t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
 	}
 	logging.SetLogLevel("miner", "ERROR")
+	logging.SetLogLevel("gen", "ERROR")
 	logging.SetLogLevel("chainstore", "ERROR")
 	logging.SetLogLevel("chain", "ERROR")
 	logging.SetLogLevel("sub", "ERROR")
@@ -238,6 +250,7 @@ func TestDeadlineToggling(t *testing.T) {
 		t.Skip("this takes a few minutes, set LOTUS_TEST_DEADLINE_TOGGLING=1 to run")
 	}
 	logging.SetLogLevel("miner", "ERROR")
+	logging.SetLogLevel("gen", "ERROR")
 	logging.SetLogLevel("chainstore", "ERROR")
 	logging.SetLogLevel("chain", "ERROR")
 	logging.SetLogLevel("sub", "ERROR")
diff --git a/node/test/builder.go b/node/test/builder.go
index cd0ecc55b..174f07592 100644
--- a/node/test/builder.go
+++ b/node/test/builder.go
@@ -463,6 +463,7 @@ func mockSbBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []tes
 			node.Test(),
 
 			node.Override(new(ffiwrapper.Verifier), mock.MockVerifier),
+			node.Override(new(ffiwrapper.Prover), mock.MockProver),
 
 			// so that we subscribe to pubsub topics immediately
 			node.Override(new(dtypes.Bootstrapper), dtypes.Bootstrapper(true)),
@@ -486,6 +487,7 @@ func mockSbBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []tes
 					return mock.NewMockSectorMgr(nil), nil
 				}),
 				node.Override(new(ffiwrapper.Verifier), mock.MockVerifier),
+				node.Override(new(ffiwrapper.Prover), mock.MockProver),
 				node.Unset(new(*sectorstorage.Manager)),
 			))
 		}
@@ -524,6 +526,7 @@ func mockSbBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []tes
 				return mock.NewMockSectorMgr(sectors), nil
 			}),
 			node.Override(new(ffiwrapper.Verifier), mock.MockVerifier),
+			node.Override(new(ffiwrapper.Prover), mock.MockProver),
 			node.Unset(new(*sectorstorage.Manager)),
 			opts,
 		))
diff --git a/storage/adapter_storage_miner.go b/storage/adapter_storage_miner.go
index 41d7461a8..f4d1e0038 100644
--- a/storage/adapter_storage_miner.go
+++ b/storage/adapter_storage_miner.go
@@ -16,6 +16,7 @@ import (
 	"github.com/filecoin-project/go-state-types/network"
 
 	market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
+	market5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/market"
 
 	"github.com/filecoin-project/lotus/api"
 	"github.com/filecoin-project/lotus/blockstore"
@@ -146,10 +147,36 @@ func (s SealingAPIAdapter) StateComputeDataCommitment(ctx context.Context, maddr
 		return cid.Undef, xerrors.Errorf("failed to unmarshal TipSetToken to TipSetKey: %w", err)
 	}
 
-	ccparams, err := actors.SerializeParams(&market2.ComputeDataCommitmentParams{
-		DealIDs:    deals,
-		SectorType: sectorType,
-	})
+	ts, err := s.delegate.ChainGetTipSet(ctx, tsk)
+	if err != nil {
+		return cid.Cid{}, err
+	}
+
+	// using parent ts because the migration won't be run on the first nv13
+	// tipset we apply StateCall to (because we don't run migrations in StateCall
+	// and just apply to parent state)
+	nv, err := s.delegate.StateNetworkVersion(ctx, ts.Parents())
+	if err != nil {
+		return cid.Cid{}, err
+	}
+
+	var ccparams []byte
+	if nv < network.Version13 {
+		ccparams, err = actors.SerializeParams(&market2.ComputeDataCommitmentParams{
+			DealIDs:    deals,
+			SectorType: sectorType,
+		})
+	} else {
+		ccparams, err = actors.SerializeParams(&market5.ComputeDataCommitmentParams{
+			Inputs: []*market5.SectorDataSpec{
+				{
+					DealIDs:    deals,
+					SectorType: sectorType,
+				},
+			},
+		})
+	}
+
 	if err != nil {
 		return cid.Undef, xerrors.Errorf("computing params for ComputeDataCommitment: %w", err)
 	}
@@ -169,12 +196,25 @@ func (s SealingAPIAdapter) StateComputeDataCommitment(ctx context.Context, maddr
 		return cid.Undef, xerrors.Errorf("receipt for ComputeDataCommitment had exit code %d", r.MsgRct.ExitCode)
 	}
 
-	var c cbg.CborCid
-	if err := c.UnmarshalCBOR(bytes.NewReader(r.MsgRct.Return)); err != nil {
+	if nv < network.Version13 {
+		var c cbg.CborCid
+		if err := c.UnmarshalCBOR(bytes.NewReader(r.MsgRct.Return)); err != nil {
+			return cid.Undef, xerrors.Errorf("failed to unmarshal CBOR to CborCid: %w", err)
+		}
+
+		return cid.Cid(c), nil
+	}
+
+	var cr market5.ComputeDataCommitmentReturn
+	if err := cr.UnmarshalCBOR(bytes.NewReader(r.MsgRct.Return)); err != nil {
 		return cid.Undef, xerrors.Errorf("failed to unmarshal CBOR to CborCid: %w", err)
 	}
 
-	return cid.Cid(c), nil
+	if len(cr.CommDs) != 1 {
+		return cid.Undef, xerrors.Errorf("CommD output must have 1 entry")
+	}
+
+	return cid.Cid(cr.CommDs[0]), nil
 }
 
 func (s SealingAPIAdapter) StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok sealing.TipSetToken) (*miner.SectorPreCommitOnChainInfo, error) {
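Note that the v5 params are already batch-shaped: the adapter above submits a single SectorDataSpec per call, but the same actor method can compute several CommDs at once. A sketch of how a batched request could be serialized; the package name, deal IDs, and proof type are placeholders:

package example

import (
    "github.com/filecoin-project/go-state-types/abi"
    market5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/market"

    "github.com/filecoin-project/lotus/chain/actors"
)

// buildBatchedCommDParams is a hypothetical helper: one SectorDataSpec per
// sector, all computed by a single ComputeDataCommitment call.
func buildBatchedCommDParams() ([]byte, error) {
    params := &market5.ComputeDataCommitmentParams{
        Inputs: []*market5.SectorDataSpec{
            {DealIDs: []abi.DealID{1, 2}, SectorType: abi.RegisteredSealProof_StackedDrg32GiBV1_1},
            {DealIDs: []abi.DealID{3}, SectorType: abi.RegisteredSealProof_StackedDrg32GiBV1_1},
        },
    }

    enc, aerr := actors.SerializeParams(params)
    if aerr != nil {
        return nil, aerr
    }
    return enc, nil
}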
diff --git a/storage/miner.go b/storage/miner.go
index 52be4d7b8..1c1a9a0bc 100644
--- a/storage/miner.go
+++ b/storage/miner.go
@@ -48,6 +48,7 @@ type Miner struct {
 	ds     datastore.Batching
 	sc     sealing.SectorIDCounter
 	verif  ffiwrapper.Verifier
+	prover ffiwrapper.Prover
 	addrSel *AddressSelector
 
 	maddr address.Address
@@ -116,7 +117,7 @@ type storageMinerApi interface {
 	WalletHas(context.Context, address.Address) (bool, error)
 }
 
-func NewMiner(api storageMinerApi, maddr address.Address, h host.Host, ds datastore.Batching, sealer sectorstorage.SectorManager, sc sealing.SectorIDCounter, verif ffiwrapper.Verifier, gsd dtypes.GetSealingConfigFunc, feeCfg config.MinerFeeConfig, journal journal.Journal, as *AddressSelector) (*Miner, error) {
+func NewMiner(api storageMinerApi, maddr address.Address, h host.Host, ds datastore.Batching, sealer sectorstorage.SectorManager, sc sealing.SectorIDCounter, verif ffiwrapper.Verifier, prover ffiwrapper.Prover, gsd dtypes.GetSealingConfigFunc, feeCfg config.MinerFeeConfig, journal journal.Journal, as *AddressSelector) (*Miner, error) {
 	m := &Miner{
 		api:    api,
 		feeCfg: feeCfg,
@@ -125,6 +126,7 @@ func NewMiner(api storageMinerApi, maddr address.Address, h host.Host, ds datast
 		ds:     ds,
 		sc:     sc,
 		verif:  verif,
+		prover: prover,
 		addrSel: as,
 
 		maddr: maddr,
@@ -161,7 +163,7 @@ func (m *Miner) Run(ctx context.Context) error {
 		return m.addrSel.AddressFor(ctx, m.api, mi, use, goodFunds, minFunds)
 	}
 
-	m.sealing = sealing.New(adaptedAPI, fc, NewEventsAdapter(evts), m.maddr, m.ds, m.sealer, m.sc, m.verif, &pcp, sealing.GetSealingConfigFunc(m.getSealConfig), m.handleSealingNotifications, as)
+	m.sealing = sealing.New(adaptedAPI, fc, NewEventsAdapter(evts), m.maddr, m.ds, m.sealer, m.sc, m.verif, m.prover, &pcp, sealing.GetSealingConfigFunc(m.getSealConfig), m.handleSealingNotifications, as)
 
 	go m.sealing.Run(ctx) //nolint:errcheck // logged intside the function
diff --git a/storage/sealing.go b/storage/sealing.go
index 8981c3738..cd215f238 100644
--- a/storage/sealing.go
+++ b/storage/sealing.go
@@ -59,6 +59,22 @@ func (m *Miner) TerminatePending(ctx context.Context) ([]abi.SectorID, error) {
 	return m.sealing.TerminatePending(ctx)
 }
 
+func (m *Miner) SectorPreCommitFlush(ctx context.Context) (*cid.Cid, error) {
+	return m.sealing.SectorPreCommitFlush(ctx)
+}
+
+func (m *Miner) SectorPreCommitPending(ctx context.Context) ([]abi.SectorID, error) {
+	return m.sealing.SectorPreCommitPending(ctx)
+}
+
+func (m *Miner) CommitFlush(ctx context.Context) (*cid.Cid, error) {
+	return m.sealing.CommitFlush(ctx)
+}
+
+func (m *Miner) CommitPending(ctx context.Context) ([]abi.SectorID, error) {
+	return m.sealing.CommitPending(ctx)
+}
+
 func (m *Miner) MarkForUpgrade(id abi.SectorNumber) error {
 	return m.sealing.MarkForUpgrade(id)
 }
diff --git a/storage/wdpost_run_test.go b/storage/wdpost_run_test.go
index 4bf30e3e9..5117e718a 100644
--- a/storage/wdpost_run_test.go
+++ b/storage/wdpost_run_test.go
@@ -23,6 +23,7 @@ import (
 	miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
 	proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
 	tutils "github.com/filecoin-project/specs-actors/v2/support/testing"
+	proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
 
 	"github.com/filecoin-project/lotus/api"
 	"github.com/filecoin-project/lotus/build"
@@ -144,6 +145,10 @@ func (m mockVerif) VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStV
 	return true, nil
 }
 
+func (m mockVerif) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) {
+	panic("implement me")
+}
+
 func (m mockVerif) VerifySeal(proof2.SealVerifyInfo) (bool, error) {
 	panic("implement me")
 }
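The aggregate-seal stub above only needs to exist so that mockVerif keeps satisfying the verifier interface; the window PoSt tests never verify aggregates. If a future test did, the stub could simply accept everything, mirroring VerifyWindowPoSt. A sketch of such a drop-in replacement for the panicking method in wdpost_run_test.go (an assumption, not part of this change):

func (m mockVerif) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) {
    // Accept every aggregate unconditionally; enough for tests that only
    // exercise the happy path, just like the VerifyWindowPoSt mock above.
    return true, nil
}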