Merge pull request #6235 from filecoin-project/feat/fip-0013

WIP: Integrate FIP0013
This commit is contained in:
Łukasz Magiera 2021-05-26 11:04:15 +02:00 committed by GitHub
commit 2b93bcde72
67 changed files with 4071 additions and 414 deletions

View File

@ -80,6 +80,16 @@ type StorageMiner interface {
// SectorTerminatePending returns a list of pending sector terminations to be sent in the next batch message
SectorTerminatePending(ctx context.Context) ([]abi.SectorID, error) //perm:admin
SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber) error //perm:admin
// SectorPreCommitFlush immediately sends a PreCommit message with sectors batched for PreCommit.
// Returns nil if the message wasn't sent
SectorPreCommitFlush(ctx context.Context) (*cid.Cid, error) //perm:admin
// SectorPreCommitPending returns a list of pending PreCommit sectors to be sent in the next batch message
SectorPreCommitPending(ctx context.Context) ([]abi.SectorID, error) //perm:admin
// SectorCommitFlush immediately sends a Commit message with sectors aggregated for Commit.
// Returns nil if the message wasn't sent
SectorCommitFlush(ctx context.Context) (*cid.Cid, error) //perm:admin
// SectorCommitPending returns a list of pending Commit sectors to be sent in the next aggregate message
SectorCommitPending(ctx context.Context) ([]abi.SectorID, error) //perm:admin
// WorkerConnect tells the node to connect to workers RPC
WorkerConnect(context.Context, string) error //perm:admin retry:true
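
Together, these four endpoints let an operator inspect and manually drain the new batching queues over RPC. A minimal sketch of how a client could drive them (the flushBatches helper is hypothetical and assumes a connected api.StorageMiner handle; only the four methods above come from the interface):

// flushBatches is an illustrative helper, not part of this change.
func flushBatches(ctx context.Context, m api.StorageMiner) error {
	pending, err := m.SectorPreCommitPending(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("%d sectors queued for PreCommit batching\n", len(pending))
	// Flush both queues; a nil CID means no message was sent.
	if msg, err := m.SectorPreCommitFlush(ctx); err != nil {
		return err
	} else if msg != nil {
		fmt.Println("sent PreCommit batch:", msg)
	}
	if msg, err := m.SectorCommitFlush(ctx); err != nil {
		return err
	} else if msg != nil {
		fmt.Println("sent Commit aggregate:", msg)
	}
	return nil
}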

View File

@ -639,12 +639,20 @@ type StorageMinerStruct struct {
SealingSchedDiag func(p0 context.Context, p1 bool) (interface{}, error) `perm:"admin"`
SectorCommitFlush func(p0 context.Context) (*cid.Cid, error) `perm:"admin"`
SectorCommitPending func(p0 context.Context) ([]abi.SectorID, error) `perm:"admin"`
SectorGetExpectedSealDuration func(p0 context.Context) (time.Duration, error) `perm:"read"`
SectorGetSealDelay func(p0 context.Context) (time.Duration, error) `perm:"read"`
SectorMarkForUpgrade func(p0 context.Context, p1 abi.SectorNumber) error `perm:"admin"`
SectorPreCommitFlush func(p0 context.Context) (*cid.Cid, error) `perm:"admin"`
SectorPreCommitPending func(p0 context.Context) ([]abi.SectorID, error) `perm:"admin"`
SectorRemove func(p0 context.Context, p1 abi.SectorNumber) error `perm:"admin"`
SectorSetExpectedSealDuration func(p0 context.Context, p1 time.Duration) error `perm:"write"`
@ -1923,6 +1931,14 @@ func (s *StorageMinerStruct) SealingSchedDiag(p0 context.Context, p1 bool) (inte
return s.Internal.SealingSchedDiag(p0, p1)
}
func (s *StorageMinerStruct) SectorCommitFlush(p0 context.Context) (*cid.Cid, error) {
return s.Internal.SectorCommitFlush(p0)
}
func (s *StorageMinerStruct) SectorCommitPending(p0 context.Context) ([]abi.SectorID, error) {
return s.Internal.SectorCommitPending(p0)
}
func (s *StorageMinerStruct) SectorGetExpectedSealDuration(p0 context.Context) (time.Duration, error) {
return s.Internal.SectorGetExpectedSealDuration(p0)
}
@ -1935,6 +1951,14 @@ func (s *StorageMinerStruct) SectorMarkForUpgrade(p0 context.Context, p1 abi.Sec
return s.Internal.SectorMarkForUpgrade(p0, p1)
}
func (s *StorageMinerStruct) SectorPreCommitFlush(p0 context.Context) (*cid.Cid, error) {
return s.Internal.SectorPreCommitFlush(p0)
}
func (s *StorageMinerStruct) SectorPreCommitPending(p0 context.Context) ([]abi.SectorID, error) {
return s.Internal.SectorPreCommitPending(p0)
}
func (s *StorageMinerStruct) SectorRemove(p0 context.Context, p1 abi.SectorNumber) error {
return s.Internal.SectorRemove(p0, p1)
}

View File

@ -61,7 +61,7 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- n, sn := b(t, []FullNodeOpts{FullNodeWithLatestActorsAt(upgradeH)}, OneMiner)
+ n, sn := b(t, []FullNodeOpts{FullNodeWithV4ActorsAt(upgradeH)}, OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
minerA := sn[0]

View File

@ -435,6 +435,8 @@ func startSealingWaiting(t *testing.T, ctx context.Context, miner TestStorageNod
require.NoError(t, miner.SectorStartSealing(ctx, snum))
}
}
flushSealingBatches(t, ctx, miner)
}
func testRetrieval(t *testing.T, ctx context.Context, client api.FullNode, fcid cid.Cid, piece *cid.Cid, carExport bool, data []byte) {

api/test/pledge.go (new file, 389 lines)
View File

@ -0,0 +1,389 @@
package test
import (
"context"
"fmt"
"sort"
"strings"
"sync/atomic"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/stmgr"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
bminer "github.com/filecoin-project/lotus/miner"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/impl"
)
func TestSDRUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n, sn := b(t, []FullNodeOpts{FullNodeWithSDRAt(500, 1000)}, OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
build.Clock.Sleep(time.Second)
pledge := make(chan struct{})
mine := int64(1)
done := make(chan struct{})
go func() {
defer close(done)
round := 0
for atomic.LoadInt64(&mine) != 0 {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
}}); err != nil {
t.Error(err)
}
// 3 sealing rounds: before, during, after.
if round >= 3 {
continue
}
head, err := client.ChainHead(ctx)
assert.NoError(t, err)
// rounds happen every 500 blocks, with a 50 block offset.
if head.Height() >= abi.ChainEpoch(round*500+50) {
round++
pledge <- struct{}{}
ver, err := client.StateNetworkVersion(ctx, head.Key())
assert.NoError(t, err)
switch round {
case 1:
assert.Equal(t, network.Version6, ver)
case 2:
assert.Equal(t, network.Version7, ver)
case 3:
assert.Equal(t, network.Version8, ver)
}
}
}
}()
// before.
pledgeSectors(t, ctx, miner, 9, 0, pledge)
s, err := miner.SectorsList(ctx)
require.NoError(t, err)
sort.Slice(s, func(i, j int) bool {
return s[i] < s[j]
})
for i, id := range s {
info, err := miner.SectorsStatus(ctx, id, true)
require.NoError(t, err)
expectProof := abi.RegisteredSealProof_StackedDrg2KiBV1
if i >= 3 {
// after
expectProof = abi.RegisteredSealProof_StackedDrg2KiBV1_1
}
assert.Equal(t, expectProof, info.SealProof, "sector %d, id %d", i, id)
}
atomic.StoreInt64(&mine, 0)
<-done
}
func TestPledgeBatching(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n, sn := b(t, []FullNodeOpts{FullNodeWithLatestActorsAt(-1)}, OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
build.Clock.Sleep(time.Second)
mine := int64(1)
done := make(chan struct{})
go func() {
defer close(done)
for atomic.LoadInt64(&mine) != 0 {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
}}); err != nil {
t.Error(err)
}
}
}()
for {
h, err := client.ChainHead(ctx)
require.NoError(t, err)
if h.Height() > 10 {
break
}
}
toCheck := startPledge(t, ctx, miner, nSectors, 0, nil)
for len(toCheck) > 0 {
states := map[api.SectorState]int{}
for n := range toCheck {
st, err := miner.SectorsStatus(ctx, n, false)
require.NoError(t, err)
states[st.State]++
if st.State == api.SectorState(sealing.Proving) {
delete(toCheck, n)
}
if strings.Contains(string(st.State), "Fail") {
t.Fatal("sector in a failed state", st.State)
}
}
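// Once every sector is waiting in SubmitPreCommitBatch (or none are still sealing toward it), force the batch out.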
if states[api.SectorState(sealing.SubmitPreCommitBatch)] == nSectors ||
(states[api.SectorState(sealing.SubmitPreCommitBatch)] > 0 && states[api.SectorState(sealing.PreCommit1)] == 0 && states[api.SectorState(sealing.PreCommit2)] == 0) {
pcb, err := miner.SectorPreCommitFlush(ctx)
require.NoError(t, err)
if pcb != nil {
fmt.Printf("PRECOMMIT BATCH: %s\n", *pcb)
}
}
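// Likewise, force the aggregate commit once all sectors are waiting in SubmitCommitAggregate.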
if states[api.SectorState(sealing.SubmitCommitAggregate)] == nSectors ||
(states[api.SectorState(sealing.SubmitCommitAggregate)] > 0 && states[api.SectorState(sealing.WaitSeed)] == 0 && states[api.SectorState(sealing.Committing)] == 0) {
cb, err := miner.SectorCommitFlush(ctx)
require.NoError(t, err)
if cb != nil {
fmt.Printf("COMMIT BATCH: %s\n", *cb)
}
}
build.Clock.Sleep(100 * time.Millisecond)
fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states)
}
atomic.StoreInt64(&mine, 0)
<-done
}
func TestPledgeBeforeNv13(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n, sn := b(t, []FullNodeOpts{
{
Opts: func(nodes []TestNode) node.Option {
return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
Network: network.Version9,
Height: 1,
Migration: stmgr.UpgradeActorsV2,
}, {
Network: network.Version10,
Height: 2,
Migration: stmgr.UpgradeActorsV3,
}, {
Network: network.Version12,
Height: 3,
Migration: stmgr.UpgradeActorsV4,
}, {
Network: network.Version13,
Height: 1000000000,
Migration: stmgr.UpgradeActorsV5,
}})
},
},
}, OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
build.Clock.Sleep(time.Second)
mine := int64(1)
done := make(chan struct{})
go func() {
defer close(done)
for atomic.LoadInt64(&mine) != 0 {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
}}); err != nil {
t.Error(err)
}
}
}()
for {
h, err := client.ChainHead(ctx)
require.NoError(t, err)
if h.Height() > 10 {
break
}
}
toCheck := startPledge(t, ctx, miner, nSectors, 0, nil)
for len(toCheck) > 0 {
states := map[api.SectorState]int{}
for n := range toCheck {
st, err := miner.SectorsStatus(ctx, n, false)
require.NoError(t, err)
states[st.State]++
if st.State == api.SectorState(sealing.Proving) {
delete(toCheck, n)
}
if strings.Contains(string(st.State), "Fail") {
t.Fatal("sector in a failed state", st.State)
}
}
build.Clock.Sleep(100 * time.Millisecond)
fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states)
}
atomic.StoreInt64(&mine, 0)
<-done
}
func TestPledgeSector(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n, sn := b(t, OneFull, OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
build.Clock.Sleep(time.Second)
mine := int64(1)
done := make(chan struct{})
go func() {
defer close(done)
for atomic.LoadInt64(&mine) != 0 {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
}}); err != nil {
t.Error(err)
}
}
}()
pledgeSectors(t, ctx, miner, nSectors, 0, nil)
atomic.StoreInt64(&mine, 0)
<-done
}
func flushSealingBatches(t *testing.T, ctx context.Context, miner TestStorageNode) {
pcb, err := miner.SectorPreCommitFlush(ctx)
require.NoError(t, err)
if pcb != nil {
fmt.Printf("PRECOMMIT BATCH: %s\n", *pcb)
}
cb, err := miner.SectorCommitFlush(ctx)
require.NoError(t, err)
if cb != nil {
fmt.Printf("COMMIT BATCH: %s\n", *cb)
}
}
func startPledge(t *testing.T, ctx context.Context, miner TestStorageNode, n, existing int, blockNotif <-chan struct{}) map[abi.SectorNumber]struct{} {
for i := 0; i < n; i++ {
if i%3 == 0 && blockNotif != nil {
<-blockNotif
log.Errorf("WAIT")
}
log.Errorf("PLEDGING %d", i)
_, err := miner.PledgeSector(ctx)
require.NoError(t, err)
}
for {
s, err := miner.SectorsList(ctx) // Note - the test builder doesn't import genesis sectors into FSM
require.NoError(t, err)
fmt.Printf("Sectors: %d\n", len(s))
if len(s) >= n+existing {
break
}
build.Clock.Sleep(100 * time.Millisecond)
}
fmt.Printf("All sectors is fsm\n")
s, err := miner.SectorsList(ctx)
require.NoError(t, err)
toCheck := map[abi.SectorNumber]struct{}{}
for _, number := range s {
toCheck[number] = struct{}{}
}
return toCheck
}
func pledgeSectors(t *testing.T, ctx context.Context, miner TestStorageNode, n, existing int, blockNotif <-chan struct{}) {
toCheck := startPledge(t, ctx, miner, n, existing, blockNotif)
for len(toCheck) > 0 {
flushSealingBatches(t, ctx, miner)
states := map[api.SectorState]int{}
for n := range toCheck {
st, err := miner.SectorsStatus(ctx, n, false)
require.NoError(t, err)
states[st.State]++
if st.State == api.SectorState(sealing.Proving) {
delete(toCheck, n)
}
if strings.Contains(string(st.State), "Fail") {
t.Fatal("sector in a failed state", st.State)
}
}
build.Clock.Sleep(100 * time.Millisecond)
fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states)
}
}

View File

@ -169,6 +169,31 @@ var FullNodeWithSDRAt = func(calico, persian abi.ChainEpoch) FullNodeOpts {
}
}
var FullNodeWithV4ActorsAt = func(upgradeHeight abi.ChainEpoch) FullNodeOpts {
if upgradeHeight == -1 {
upgradeHeight = 3
}
return FullNodeOpts{
Opts: func(nodes []TestNode) node.Option {
return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
// prepare for upgrade.
Network: network.Version9,
Height: 1,
Migration: stmgr.UpgradeActorsV2,
}, {
Network: network.Version10,
Height: 2,
Migration: stmgr.UpgradeActorsV3,
}, {
Network: network.Version12,
Height: upgradeHeight,
Migration: stmgr.UpgradeActorsV4,
}})
},
}
}
var MineNext = miner.MineReq{
InjectNulls: 0,
Done: func(bool, abi.ChainEpoch, error) {},

View File

@ -3,14 +3,9 @@ package test
import (
"context"
"fmt"
- "sort"
- "sync/atomic"
- "strings"
"testing"
"time"
- "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-address"
@ -18,7 +13,6 @@ import (
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/dline" "github.com/filecoin-project/go-state-types/dline"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/extern/sector-storage/mock" "github.com/filecoin-project/lotus/extern/sector-storage/mock"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing" sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
proof3 "github.com/filecoin-project/specs-actors/v3/actors/runtime/proof" proof3 "github.com/filecoin-project/specs-actors/v3/actors/runtime/proof"
@ -29,181 +23,9 @@ import (
"github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors"
minerActor "github.com/filecoin-project/lotus/chain/actors/builtin/miner" minerActor "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
bminer "github.com/filecoin-project/lotus/miner"
"github.com/filecoin-project/lotus/node/impl" "github.com/filecoin-project/lotus/node/impl"
) )
func TestSDRUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n, sn := b(t, []FullNodeOpts{FullNodeWithSDRAt(500, 1000)}, OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
build.Clock.Sleep(time.Second)
pledge := make(chan struct{})
mine := int64(1)
done := make(chan struct{})
go func() {
defer close(done)
round := 0
for atomic.LoadInt64(&mine) != 0 {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
}}); err != nil {
t.Error(err)
}
// 3 sealing rounds: before, during after.
if round >= 3 {
continue
}
head, err := client.ChainHead(ctx)
assert.NoError(t, err)
// rounds happen every 100 blocks, with a 50 block offset.
if head.Height() >= abi.ChainEpoch(round*500+50) {
round++
pledge <- struct{}{}
ver, err := client.StateNetworkVersion(ctx, head.Key())
assert.NoError(t, err)
switch round {
case 1:
assert.Equal(t, network.Version6, ver)
case 2:
assert.Equal(t, network.Version7, ver)
case 3:
assert.Equal(t, network.Version8, ver)
}
}
}
}()
// before.
pledgeSectors(t, ctx, miner, 9, 0, pledge)
s, err := miner.SectorsList(ctx)
require.NoError(t, err)
sort.Slice(s, func(i, j int) bool {
return s[i] < s[j]
})
for i, id := range s {
info, err := miner.SectorsStatus(ctx, id, true)
require.NoError(t, err)
expectProof := abi.RegisteredSealProof_StackedDrg2KiBV1
if i >= 3 {
// after
expectProof = abi.RegisteredSealProof_StackedDrg2KiBV1_1
}
assert.Equal(t, expectProof, info.SealProof, "sector %d, id %d", i, id)
}
atomic.StoreInt64(&mine, 0)
<-done
}
func TestPledgeSector(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n, sn := b(t, OneFull, OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
build.Clock.Sleep(time.Second)
mine := int64(1)
done := make(chan struct{})
go func() {
defer close(done)
for atomic.LoadInt64(&mine) != 0 {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
}}); err != nil {
t.Error(err)
}
}
}()
pledgeSectors(t, ctx, miner, nSectors, 0, nil)
atomic.StoreInt64(&mine, 0)
<-done
}
func pledgeSectors(t *testing.T, ctx context.Context, miner TestStorageNode, n, existing int, blockNotif <-chan struct{}) {
for i := 0; i < n; i++ {
if i%3 == 0 && blockNotif != nil {
<-blockNotif
log.Errorf("WAIT")
}
log.Errorf("PLEDGING %d", i)
_, err := miner.PledgeSector(ctx)
require.NoError(t, err)
}
for {
s, err := miner.SectorsList(ctx) // Note - the test builder doesn't import genesis sectors into FSM
require.NoError(t, err)
fmt.Printf("Sectors: %d\n", len(s))
if len(s) >= n+existing {
break
}
build.Clock.Sleep(100 * time.Millisecond)
}
fmt.Printf("All sectors is fsm\n")
s, err := miner.SectorsList(ctx)
require.NoError(t, err)
toCheck := map[abi.SectorNumber]struct{}{}
for _, number := range s {
toCheck[number] = struct{}{}
}
for len(toCheck) > 0 {
for n := range toCheck {
st, err := miner.SectorsStatus(ctx, n, false)
require.NoError(t, err)
if st.State == api.SectorState(sealing.Proving) {
delete(toCheck, n)
}
if strings.Contains(string(st.State), "Fail") {
t.Fatal("sector in a failed state", st.State)
}
}
build.Clock.Sleep(100 * time.Millisecond)
fmt.Printf("WaitSeal: %d\n", len(s))
}
}
func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
for _, height := range []abi.ChainEpoch{
-1, // before
@ -617,7 +439,7 @@ func TestWindowPostDispute(t *testing.T, b APIBuilder, blocktime time.Duration)
///
// Then we're going to manually submit bad proofs.
n, sn := b(t, []FullNodeOpts{
- FullNodeWithLatestActorsAt(-1),
+ FullNodeWithV4ActorsAt(-1),
}, []StorageMiner{
{Full: 0, Preseal: PresealGenesis},
{Full: 0},
@ -900,7 +722,7 @@ func TestWindowPostDisputeFails(t *testing.T, b APIBuilder, blocktime time.Durat
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- n, sn := b(t, []FullNodeOpts{FullNodeWithLatestActorsAt(-1)}, OneMiner)
+ n, sn := b(t, []FullNodeOpts{FullNodeWithV4ActorsAt(-1)}, OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -5,3 +5,7 @@ import rice "github.com/GeertJohan/go.rice"
func ParametersJSON() []byte {
return rice.MustFindBox("proof-params").MustBytes("parameters.json")
}
func SrsJSON() []byte {
return rice.MustFindBox("proof-params").MustBytes("srs-inner-product.json")
}
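
Call sites that fetch proof parameters now pass this SRS manifest as well, so the SnarkPack inner-product SRS is downloaded and checksum-verified alongside the Groth parameters. Every changed paramfetch call site below follows the same pattern:

// Pattern used at each paramfetch call site in this change:
if err := paramfetch.GetParams(ctx, build.ParametersJSON(), build.SrsJSON(), uint64(sectorSize)); err != nil {
	return xerrors.Errorf("fetching proof parameters: %w", err)
}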

View File

@ -24,22 +24,22 @@ var UpgradeIgnitionHeight = abi.ChainEpoch(-2)
var UpgradeRefuelHeight = abi.ChainEpoch(-3)
var UpgradeTapeHeight = abi.ChainEpoch(-4)
- var UpgradeAssemblyHeight = abi.ChainEpoch(10)
+ var UpgradeAssemblyHeight = abi.ChainEpoch(5)
var UpgradeLiftoffHeight = abi.ChainEpoch(-5)
- var UpgradeKumquatHeight = abi.ChainEpoch(15)
+ var UpgradeKumquatHeight = abi.ChainEpoch(6)
- var UpgradeCalicoHeight = abi.ChainEpoch(20)
+ var UpgradeCalicoHeight = abi.ChainEpoch(7)
- var UpgradePersianHeight = abi.ChainEpoch(25)
+ var UpgradePersianHeight = abi.ChainEpoch(8)
- var UpgradeOrangeHeight = abi.ChainEpoch(27)
+ var UpgradeOrangeHeight = abi.ChainEpoch(9)
- var UpgradeClausHeight = abi.ChainEpoch(30)
+ var UpgradeClausHeight = abi.ChainEpoch(10)
- var UpgradeTrustHeight = abi.ChainEpoch(35)
+ var UpgradeTrustHeight = abi.ChainEpoch(11)
- var UpgradeNorwegianHeight = abi.ChainEpoch(40)
+ var UpgradeNorwegianHeight = abi.ChainEpoch(12)
- var UpgradeTurboHeight = abi.ChainEpoch(45)
+ var UpgradeTurboHeight = abi.ChainEpoch(13)
- var UpgradeHyperdriveHeight = abi.ChainEpoch(50)
+ var UpgradeHyperdriveHeight = abi.ChainEpoch(14)
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet,

View File

@ -0,0 +1,7 @@
{
"v28-fil-inner-product-v1.srs": {
"cid": "Qmdq44DjcQnFfU3PJcdX7J49GCqcUYszr1TxMbHtAkvQ3g",
"digest": "ae20310138f5ba81451d723f858e3797",
"sector_size": 0
}
}

View File

@ -302,6 +302,10 @@ func GetDefaultSectorSize() abi.SectorSize {
return szs[0]
}
func GetDefaultAggregationProof() abi.RegisteredAggregationProof {
return abi.RegisteredAggregationProof_SnarkPackV1
}
func GetSectorMaxLifetime(proof abi.RegisteredSealProof, nwVer network.Version) abi.ChainEpoch {
if nwVer <= network.Version10 {
return builtin5.SealProofPoliciesV0[proof].SectorMaxLifetime

View File

@ -197,6 +197,10 @@ func GetDefaultSectorSize() abi.SectorSize {
return szs[0]
}
func GetDefaultAggregationProof() abi.RegisteredAggregationProof {
return abi.RegisteredAggregationProof_SnarkPackV1
}
func GetSectorMaxLifetime(proof abi.RegisteredSealProof, nwVer network.Version) abi.ChainEpoch {
if nwVer <= network.Version10 {
return builtin{{.latestVersion}}.SealProofPoliciesV0[proof].SectorMaxLifetime

View File

@ -25,7 +25,7 @@ import (
"go.opencensus.io/trace" "go.opencensus.io/trace"
"golang.org/x/xerrors" "golang.org/x/xerrors"
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/blockstore"
@ -51,7 +51,7 @@ const msgsPerBlock = 20
//nolint:deadcode,varcheck
var log = logging.Logger("gen")
- var ValidWpostForTesting = []proof2.PoStProof{{
+ var ValidWpostForTesting = []proof5.PoStProof{{
ProofBytes: []byte("valid proof"),
}}
@ -460,7 +460,7 @@ func (cg *ChainGen) NextTipSetFromMinersWithMessages(base *types.TipSet, miners
func (cg *ChainGen) makeBlock(parents *types.TipSet, m address.Address, vrfticket *types.Ticket,
eticket *types.ElectionProof, bvals []types.BeaconEntry, height abi.ChainEpoch,
- wpost []proof2.PoStProof, msgs []*types.SignedMessage) (*types.FullBlock, error) {
+ wpost []proof5.PoStProof, msgs []*types.SignedMessage) (*types.FullBlock, error) {
var ts uint64
if cg.Timestamper != nil {
@ -598,7 +598,7 @@ func (mca mca) WalletSign(ctx context.Context, a address.Address, v []byte) (*cr
type WinningPoStProver interface {
GenerateCandidates(context.Context, abi.PoStRandomness, uint64) ([]uint64, error)
- ComputeProof(context.Context, []proof2.SectorInfo, abi.PoStRandomness) ([]proof2.PoStProof, error)
+ ComputeProof(context.Context, []proof5.SectorInfo, abi.PoStRandomness) ([]proof5.PoStProof, error)
}
type wppProvider struct{}
@ -607,7 +607,7 @@ func (wpp *wppProvider) GenerateCandidates(ctx context.Context, _ abi.PoStRandom
return []uint64{0}, nil
}
- func (wpp *wppProvider) ComputeProof(context.Context, []proof2.SectorInfo, abi.PoStRandomness) ([]proof2.PoStProof, error) {
+ func (wpp *wppProvider) ComputeProof(context.Context, []proof5.SectorInfo, abi.PoStRandomness) ([]proof5.PoStProof, error) {
return ValidWpostForTesting, nil
}
@ -685,15 +685,19 @@ type genFakeVerifier struct{}
var _ ffiwrapper.Verifier = (*genFakeVerifier)(nil)
- func (m genFakeVerifier) VerifySeal(svi proof2.SealVerifyInfo) (bool, error) {
+ func (m genFakeVerifier) VerifySeal(svi proof5.SealVerifyInfo) (bool, error) {
return true, nil
}
+ func (m genFakeVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) {
+ panic("not supported")
+ }
- func (m genFakeVerifier) VerifyWinningPoSt(ctx context.Context, info proof2.WinningPoStVerifyInfo) (bool, error) {
+ func (m genFakeVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) {
panic("not supported")
}
- func (m genFakeVerifier) VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStVerifyInfo) (bool, error) {
+ func (m genFakeVerifier) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) {
panic("not supported")
}

View File

@ -27,7 +27,7 @@ import (
miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
power0 "github.com/filecoin-project/specs-actors/actors/builtin/power" power0 "github.com/filecoin-project/specs-actors/actors/builtin/power"
reward0 "github.com/filecoin-project/specs-actors/actors/builtin/reward" reward0 "github.com/filecoin-project/specs-actors/actors/builtin/reward"
runtime2 "github.com/filecoin-project/specs-actors/v2/actors/runtime" runtime5 "github.com/filecoin-project/specs-actors/v5/actors/runtime"
"github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/store"
@ -46,7 +46,7 @@ func MinerAddress(genesisIndex uint64) address.Address {
}
type fakedSigSyscalls struct {
- runtime2.Syscalls
+ runtime5.Syscalls
}
func (fss *fakedSigSyscalls) VerifySignature(signature crypto.Signature, signer address.Address, plaintext []byte) error {
@ -54,7 +54,7 @@ func (fss *fakedSigSyscalls) VerifySignature(signature crypto.Signature, signer
}
func mkFakedSigSyscalls(base vm.SyscallBuilder) vm.SyscallBuilder {
- return func(ctx context.Context, rt *vm.Runtime) runtime2.Syscalls {
+ return func(ctx context.Context, rt *vm.Runtime) runtime5.Syscalls {
return &fakedSigSyscalls{
base(ctx, rt),
}

View File

@ -25,6 +25,7 @@ import (
states2 "github.com/filecoin-project/specs-actors/v2/actors/states" states2 "github.com/filecoin-project/specs-actors/v2/actors/states"
states3 "github.com/filecoin-project/specs-actors/v3/actors/states" states3 "github.com/filecoin-project/specs-actors/v3/actors/states"
states4 "github.com/filecoin-project/specs-actors/v4/actors/states" states4 "github.com/filecoin-project/specs-actors/v4/actors/states"
states5 "github.com/filecoin-project/specs-actors/v5/actors/states"
)
var log = logging.Logger("statetree")
@ -191,6 +192,12 @@ func NewStateTree(cst cbor.IpldStore, ver types.StateTreeVersion) (*StateTree, e
return nil, xerrors.Errorf("failed to create state tree: %w", err) return nil, xerrors.Errorf("failed to create state tree: %w", err)
} }
hamt = tree.Map hamt = tree.Map
case types.StateTreeVersion4:
tree, err := states5.NewTree(store)
if err != nil {
return nil, xerrors.Errorf("failed to create state tree: %w", err)
}
hamt = tree.Map
default:
return nil, xerrors.Errorf("unsupported state tree version: %d", ver)
}
@ -246,6 +253,12 @@ func LoadStateTree(cst cbor.IpldStore, c cid.Cid) (*StateTree, error) {
if tree != nil {
hamt = tree.Map
}
case types.StateTreeVersion4:
var tree *states5.Tree
tree, err = states5.LoadTree(store, root.Actors)
if tree != nil {
hamt = tree.Map
}
default:
return nil, xerrors.Errorf("unsupported state tree version: %d", root.Version)
}

View File

@ -1247,7 +1247,7 @@ func upgradeActorsV5Common(
// Persist the result.
newRoot, err := store.Put(ctx, &types.StateRoot{
- Version: types.StateTreeVersion3,
+ Version: types.StateTreeVersion4,
Actors: newHamtRoot,
Info: stateRoot.Info,
})

View File

@ -13,8 +13,10 @@ const (
StateTreeVersion1
// StateTreeVersion2 corresponds to actors v3.
StateTreeVersion2
- // StateTreeVersion3 corresponds to actors >= v4.
+ // StateTreeVersion3 corresponds to actors v4.
StateTreeVersion3
+ // StateTreeVersion4 corresponds to actors v5.
+ StateTreeVersion4
)
type StateRoot struct {

View File

@ -9,8 +9,8 @@ import (
addr "github.com/filecoin-project/go-address" addr "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/crypto"
vmr2 "github.com/filecoin-project/specs-actors/v2/actors/runtime" vmr5 "github.com/filecoin-project/specs-actors/v5/actors/runtime"
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
) )
@ -74,8 +74,9 @@ type Pricelist interface {
OnVerifySignature(sigType crypto.SigType, planTextSize int) (GasCharge, error)
OnHashing(dataSize int) GasCharge
OnComputeUnsealedSectorCid(proofType abi.RegisteredSealProof, pieces []abi.PieceInfo) GasCharge
- OnVerifySeal(info proof2.SealVerifyInfo) GasCharge
+ OnVerifySeal(info proof5.SealVerifyInfo) GasCharge
+ OnVerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) GasCharge
- OnVerifyPost(info proof2.WindowPoStVerifyInfo) GasCharge
+ OnVerifyPost(info proof5.WindowPoStVerifyInfo) GasCharge
OnVerifyConsensusFault() GasCharge
}
@ -111,6 +112,7 @@ var prices = map[abi.ChainEpoch]Pricelist{
hashingBase: 31355,
computeUnsealedSectorCidBase: 98647,
verifySealBase: 2000, // TODO gas , it VerifySeal syscall is not used
verifyAggregateSealBase: 0,
verifyPostLookup: map[abi.RegisteredPoStProof]scalingCost{
abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: {
flat: 123861062,
@ -158,7 +160,8 @@ var prices = map[abi.ChainEpoch]Pricelist{
hashingBase: 31355,
computeUnsealedSectorCidBase: 98647,
verifySealBase: 2000, // TODO gas , it VerifySeal syscall is not used
verifyAggregateSealBase: 400_000_000, // TODO (~40ms, I think)
verifyPostLookup: map[abi.RegisteredPoStProof]scalingCost{
abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: {
flat: 117680921,
@ -198,7 +201,7 @@ func PricelistByEpoch(epoch abi.ChainEpoch) Pricelist {
}
type pricedSyscalls struct {
- under vmr2.Syscalls
+ under vmr5.Syscalls
pl Pricelist
chargeGas func(GasCharge)
}
@ -232,7 +235,7 @@ func (ps pricedSyscalls) ComputeUnsealedSectorCID(reg abi.RegisteredSealProof, p
}
// Verifies a sector seal proof.
- func (ps pricedSyscalls) VerifySeal(vi proof2.SealVerifyInfo) error {
+ func (ps pricedSyscalls) VerifySeal(vi proof5.SealVerifyInfo) error {
ps.chargeGas(ps.pl.OnVerifySeal(vi))
defer ps.chargeGas(gasOnActorExec)
@ -240,7 +243,7 @@ func (ps pricedSyscalls) VerifySeal(vi proof2.SealVerifyInfo) error {
}
// Verifies a proof of spacetime.
- func (ps pricedSyscalls) VerifyPoSt(vi proof2.WindowPoStVerifyInfo) error {
+ func (ps pricedSyscalls) VerifyPoSt(vi proof5.WindowPoStVerifyInfo) error {
ps.chargeGas(ps.pl.OnVerifyPost(vi))
defer ps.chargeGas(gasOnActorExec)
@ -257,14 +260,14 @@ func (ps pricedSyscalls) VerifyPoSt(vi proof2.WindowPoStVerifyInfo) error {
// the "parent grinding fault", in which case it must be the sibling of h1 (same parent tipset) and one of the // the "parent grinding fault", in which case it must be the sibling of h1 (same parent tipset) and one of the
// blocks in the parent of h2 (i.e. h2's grandparent). // blocks in the parent of h2 (i.e. h2's grandparent).
// Returns nil and an error if the headers don't prove a fault. // Returns nil and an error if the headers don't prove a fault.
func (ps pricedSyscalls) VerifyConsensusFault(h1 []byte, h2 []byte, extra []byte) (*vmr2.ConsensusFault, error) { func (ps pricedSyscalls) VerifyConsensusFault(h1 []byte, h2 []byte, extra []byte) (*vmr5.ConsensusFault, error) {
ps.chargeGas(ps.pl.OnVerifyConsensusFault()) ps.chargeGas(ps.pl.OnVerifyConsensusFault())
defer ps.chargeGas(gasOnActorExec) defer ps.chargeGas(gasOnActorExec)
return ps.under.VerifyConsensusFault(h1, h2, extra) return ps.under.VerifyConsensusFault(h1, h2, extra)
} }
- func (ps pricedSyscalls) BatchVerifySeals(inp map[address.Address][]proof2.SealVerifyInfo) (map[address.Address][]bool, error) {
+ func (ps pricedSyscalls) BatchVerifySeals(inp map[address.Address][]proof5.SealVerifyInfo) (map[address.Address][]bool, error) {
count := int64(0)
for _, svis := range inp {
count += int64(len(svis))
@ -277,3 +280,10 @@ func (ps pricedSyscalls) BatchVerifySeals(inp map[address.Address][]proof2.SealV
return ps.under.BatchVerifySeals(inp)
}
func (ps pricedSyscalls) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) error {
ps.chargeGas(ps.pl.OnVerifyAggregateSeals(aggregate))
defer ps.chargeGas(gasOnActorExec)
return ps.under.VerifyAggregateSeals(aggregate)
}
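
For context, the payload these syscalls price bundles one SnarkPack proof with the per-sector seal statements it covers. A rough sketch of how a caller would assemble and charge it (struct fields per specs-actors v5; exampleAggregateCharge and its arguments are hypothetical, values illustrative):

// Illustrative only: how a caller assembles the payload these syscalls price.
func exampleAggregateCharge(epoch abi.ChainEpoch, proofBytes []byte, infos []proof5.AggregateSealVerifyInfo) GasCharge {
	aggregate := proof5.AggregateSealVerifyProofAndInfos{
		Miner:          abi.ActorID(1000), // placeholder miner ID
		SealProof:      abi.RegisteredSealProof_StackedDrg32GiBV1_1,
		AggregateProof: abi.RegisteredAggregationProof_SnarkPackV1,
		Proof:          proofBytes, // SnarkPack aggregate produced at commit time
		Infos:          infos,      // one entry per sector covered by the proof
	}
	// Charged as a flat verifyAggregateSealBase for now, independent of len(Infos).
	return PricelistByEpoch(epoch).OnVerifyAggregateSeals(aggregate)
}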

View File

@ -4,6 +4,7 @@ import (
"fmt" "fmt"
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/big"
@ -91,6 +92,7 @@ type pricelistV0 struct {
computeUnsealedSectorCidBase int64
verifySealBase int64
verifyAggregateSealBase int64
verifyPostLookup map[abi.RegisteredPoStProof]scalingCost
verifyPostDiscount bool
verifyConsensusFault int64
@ -185,6 +187,12 @@ func (pl *pricelistV0) OnVerifySeal(info proof2.SealVerifyInfo) GasCharge {
return newGasCharge("OnVerifySeal", pl.verifySealBase, 0) return newGasCharge("OnVerifySeal", pl.verifySealBase, 0)
} }
// OnVerifyAggregateSeals
func (pl *pricelistV0) OnVerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) GasCharge {
// TODO: this needs more cost tuning
return newGasCharge("OnVerifyAggregateSeals", pl.verifyAggregateSealBase, 0)
}
// OnVerifyPost
func (pl *pricelistV0) OnVerifyPost(info proof2.WindowPoStVerifyInfo) GasCharge {
sectorSize := "unknown"

View File

@ -16,10 +16,10 @@ import (
exported0 "github.com/filecoin-project/specs-actors/actors/builtin/exported" exported0 "github.com/filecoin-project/specs-actors/actors/builtin/exported"
exported2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/exported" exported2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/exported"
vmr "github.com/filecoin-project/specs-actors/v2/actors/runtime"
exported3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/exported" exported3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/exported"
exported4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/exported" exported4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/exported"
exported5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/exported" exported5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/exported"
vmr "github.com/filecoin-project/specs-actors/v5/actors/runtime"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/go-state-types/exitcode"
@ -155,7 +155,7 @@ func (*ActorRegistry) transform(instance invokee) (nativeCode, error) {
"vmr.Runtime, <parameter>") "vmr.Runtime, <parameter>")
} }
if !runtimeType.Implements(t.In(0)) { if !runtimeType.Implements(t.In(0)) {
return nil, newErr("first arguemnt should be vmr.Runtime") return nil, newErr("first argument should be vmr.Runtime")
} }
if t.In(1).Kind() != reflect.Ptr { if t.In(1).Kind() != reflect.Ptr {
return nil, newErr("second argument should be of kind reflect.Ptr") return nil, newErr("second argument should be of kind reflect.Ptr")

View File

@ -16,7 +16,7 @@ import (
"github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/go-state-types/network"
rtt "github.com/filecoin-project/go-state-types/rt" rtt "github.com/filecoin-project/go-state-types/rt"
rt0 "github.com/filecoin-project/specs-actors/actors/runtime" rt0 "github.com/filecoin-project/specs-actors/actors/runtime"
rt2 "github.com/filecoin-project/specs-actors/v2/actors/runtime" rt5 "github.com/filecoin-project/specs-actors/v5/actors/runtime"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
ipldcbor "github.com/ipfs/go-ipld-cbor" ipldcbor "github.com/ipfs/go-ipld-cbor"
"go.opencensus.io/trace" "go.opencensus.io/trace"
@ -54,8 +54,8 @@ func (m *Message) ValueReceived() abi.TokenAmount {
var EnableGasTracing = false
type Runtime struct {
- rt2.Message
- rt2.Syscalls
+ rt5.Message
+ rt5.Syscalls
ctx context.Context
@ -136,7 +136,7 @@ func (rt *Runtime) StorePut(x cbor.Marshaler) cid.Cid {
}
var _ rt0.Runtime = (*Runtime)(nil)
- var _ rt2.Runtime = (*Runtime)(nil)
+ var _ rt5.Runtime = (*Runtime)(nil)
func (rt *Runtime) shimCall(f func() interface{}) (rval []byte, aerr aerrors.ActorError) {
defer func() {

View File

@ -26,8 +26,8 @@ import (
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/lib/sigs" "github.com/filecoin-project/lotus/lib/sigs"
runtime2 "github.com/filecoin-project/specs-actors/v2/actors/runtime" runtime5 "github.com/filecoin-project/specs-actors/v5/actors/runtime"
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
) )
func init() { func init() {
@ -36,10 +36,10 @@ func init() {
// Actual type is defined in chain/types/vmcontext.go because the VMContext interface is there
- type SyscallBuilder func(ctx context.Context, rt *Runtime) runtime2.Syscalls
+ type SyscallBuilder func(ctx context.Context, rt *Runtime) runtime5.Syscalls
func Syscalls(verifier ffiwrapper.Verifier) SyscallBuilder {
- return func(ctx context.Context, rt *Runtime) runtime2.Syscalls {
+ return func(ctx context.Context, rt *Runtime) runtime5.Syscalls {
return &syscallShim{
ctx: ctx,
@ -90,7 +90,7 @@ func (ss *syscallShim) HashBlake2b(data []byte) [32]byte {
// Checks validity of the submitted consensus fault with the two block headers needed to prove the fault
// and an optional extra one to check common ancestry (as needed).
// Note that the blocks are ordered: the method requires a.Epoch() <= b.Epoch().
- func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime2.ConsensusFault, error) {
+ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime5.ConsensusFault, error) {
// Note that block syntax is not validated. Any validly signed block will be accepted pursuant to the below conditions.
// Whether or not it could ever have been accepted in a chain is not checked/does not matter here.
// for that reason when checking block parent relationships, rather than instantiating a Tipset to do so
@ -133,14 +133,14 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime2.Conse
}
// (2) check for the consensus faults themselves
- var consensusFault *runtime2.ConsensusFault
+ var consensusFault *runtime5.ConsensusFault
// (a) double-fork mining fault
if blockA.Height == blockB.Height {
- consensusFault = &runtime2.ConsensusFault{
+ consensusFault = &runtime5.ConsensusFault{
Target: blockA.Miner,
Epoch: blockB.Height,
- Type: runtime2.ConsensusFaultDoubleForkMining,
+ Type: runtime5.ConsensusFaultDoubleForkMining,
}
}
@ -148,10 +148,10 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime2.Conse
// strictly speaking no need to compare heights based on double fork mining check above,
// but at same height this would be a different fault.
if types.CidArrsEqual(blockA.Parents, blockB.Parents) && blockA.Height != blockB.Height {
- consensusFault = &runtime2.ConsensusFault{
+ consensusFault = &runtime5.ConsensusFault{
Target: blockA.Miner,
Epoch: blockB.Height,
- Type: runtime2.ConsensusFaultTimeOffsetMining,
+ Type: runtime5.ConsensusFaultTimeOffsetMining,
}
}
@ -171,10 +171,10 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime2.Conse
if types.CidArrsEqual(blockA.Parents, blockC.Parents) && blockA.Height == blockC.Height &&
types.CidArrsContains(blockB.Parents, blockC.Cid()) && !types.CidArrsContains(blockB.Parents, blockA.Cid()) {
- consensusFault = &runtime2.ConsensusFault{
+ consensusFault = &runtime5.ConsensusFault{
Target: blockA.Miner,
Epoch: blockB.Height,
- Type: runtime2.ConsensusFaultParentGrinding,
+ Type: runtime5.ConsensusFaultParentGrinding,
}
}
}
@ -243,7 +243,7 @@ func (ss *syscallShim) workerKeyAtLookback(height abi.ChainEpoch) (address.Addre
return ResolveToKeyAddr(ss.cstate, ss.cst, info.Worker)
}
- func (ss *syscallShim) VerifyPoSt(proof proof2.WindowPoStVerifyInfo) error {
+ func (ss *syscallShim) VerifyPoSt(proof proof5.WindowPoStVerifyInfo) error {
ok, err := ss.verifier.VerifyWindowPoSt(context.TODO(), proof)
if err != nil {
return err
@ -254,7 +254,7 @@ func (ss *syscallShim) VerifyPoSt(proof proof2.WindowPoStVerifyInfo) error {
return nil
}
- func (ss *syscallShim) VerifySeal(info proof2.SealVerifyInfo) error {
+ func (ss *syscallShim) VerifySeal(info proof5.SealVerifyInfo) error {
//_, span := trace.StartSpan(ctx, "ValidatePoRep")
//defer span.End()
@ -281,6 +281,18 @@ func (ss *syscallShim) VerifySeal(info proof2.SealVerifyInfo) error {
return nil
}
func (ss *syscallShim) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) error {
ok, err := ss.verifier.VerifyAggregateSeals(aggregate)
if err != nil {
return xerrors.Errorf("failed to verify aggregated PoRep: %w", err)
}
if !ok {
return fmt.Errorf("invalid aggredate proof")
}
return nil
}
func (ss *syscallShim) VerifySignature(sig crypto.Signature, addr address.Address, input []byte) error {
// TODO: in genesis setup, we are currently faking signatures
@ -294,7 +306,7 @@ func (ss *syscallShim) VerifySignature(sig crypto.Signature, addr address.Addres
var BatchSealVerifyParallelism = goruntime.NumCPU()
- func (ss *syscallShim) BatchVerifySeals(inp map[address.Address][]proof2.SealVerifyInfo) (map[address.Address][]bool, error) {
+ func (ss *syscallShim) BatchVerifySeals(inp map[address.Address][]proof5.SealVerifyInfo) (map[address.Address][]bool, error) {
out := make(map[address.Address][]bool)
sema := make(chan struct{}, BatchSealVerifyParallelism)
@ -306,7 +318,7 @@ func (ss *syscallShim) BatchVerifySeals(inp map[address.Address][]proof2.SealVer
for i, s := range seals {
wg.Add(1)
- go func(ma address.Address, ix int, svi proof2.SealVerifyInfo, res []bool) {
+ go func(ma address.Address, ix int, svi proof5.SealVerifyInfo, res []bool) {
defer wg.Done()
sema <- struct{}{}

View File

@ -23,7 +23,7 @@ var FetchParamCmd = &cli.Command{
}
sectorSize := uint64(sectorSizeInt)
- err = paramfetch.GetParams(ReqContext(cctx), build.ParametersJSON(), sectorSize)
+ err = paramfetch.GetParams(ReqContext(cctx), build.ParametersJSON(), build.SrsJSON(), sectorSize)
if err != nil {
return xerrors.Errorf("fetching proof parameters: %w", err)
}

View File

@ -8,6 +8,7 @@ import (
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
"github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore"
"github.com/minio/blake2b-simd" "github.com/minio/blake2b-simd"
cbg "github.com/whyrusleeping/cbor-gen" cbg "github.com/whyrusleeping/cbor-gen"
@ -96,4 +97,8 @@ func (cv *cachingVerifier) GenerateWinningPoStSectorChallenge(ctx context.Contex
return cv.backend.GenerateWinningPoStSectorChallenge(ctx, proofType, a, rnd, u)
}
func (cv cachingVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) {
return cv.backend.VerifyAggregateSeals(aggregate)
}
var _ ffiwrapper.Verifier = (*cachingVerifier)(nil)

View File

@ -243,7 +243,7 @@ var sealBenchCmd = &cli.Command{
// Only fetch parameters if actually needed
skipc2 := c.Bool("skip-commit2")
if !skipc2 {
- if err := paramfetch.GetParams(lcli.ReqContext(c), build.ParametersJSON(), uint64(sectorSize)); err != nil {
+ if err := paramfetch.GetParams(lcli.ReqContext(c), build.ParametersJSON(), build.SrsJSON(), uint64(sectorSize)); err != nil {
return xerrors.Errorf("getting params: %w", err)
}
}
@ -738,7 +738,7 @@ var proveCmd = &cli.Command{
return xerrors.Errorf("unmarshalling input file: %w", err) return xerrors.Errorf("unmarshalling input file: %w", err)
} }
if err := paramfetch.GetParams(lcli.ReqContext(c), build.ParametersJSON(), c2in.SectorSize); err != nil { if err := paramfetch.GetParams(lcli.ReqContext(c), build.ParametersJSON(), build.SrsJSON(), c2in.SectorSize); err != nil {
return xerrors.Errorf("getting params: %w", err) return xerrors.Errorf("getting params: %w", err)
} }

View File

@ -228,7 +228,7 @@ var runCmd = &cli.Command{
}
if cctx.Bool("commit") {
- if err := paramfetch.GetParams(ctx, build.ParametersJSON(), uint64(ssize)); err != nil {
+ if err := paramfetch.GetParams(ctx, build.ParametersJSON(), build.SrsJSON(), uint64(ssize)); err != nil {
return xerrors.Errorf("get params: %w", err)
}
}

View File

@ -25,7 +25,7 @@ var fetchParamCmd = &cli.Command{
return err
}
sectorSize := uint64(sectorSizeInt)
- err = paramfetch.GetParams(lcli.ReqContext(cctx), build.ParametersJSON(), sectorSize)
+ err = paramfetch.GetParams(lcli.ReqContext(cctx), build.ParametersJSON(), build.SrsJSON(), sectorSize)
if err != nil {
return xerrors.Errorf("fetching proof parameters: %w", err)
}

View File

@ -291,10 +291,14 @@ var stateList = []stateMeta{
{col: color.FgYellow, state: sealing.PreCommit2},
{col: color.FgYellow, state: sealing.PreCommitting},
{col: color.FgYellow, state: sealing.PreCommitWait},
{col: color.FgYellow, state: sealing.SubmitPreCommitBatch},
{col: color.FgYellow, state: sealing.PreCommitBatchWait},
{col: color.FgYellow, state: sealing.WaitSeed},
{col: color.FgYellow, state: sealing.Committing},
{col: color.FgYellow, state: sealing.SubmitCommit},
{col: color.FgYellow, state: sealing.CommitWait},
{col: color.FgYellow, state: sealing.SubmitCommitAggregate},
{col: color.FgYellow, state: sealing.CommitAggregateWait},
{col: color.FgYellow, state: sealing.FinalizeSector},
{col: color.FgCyan, state: sealing.Terminating},

View File

@ -143,7 +143,7 @@ var initCmd = &cli.Command{
log.Info("Checking proof parameters") log.Info("Checking proof parameters")
if err := paramfetch.GetParams(ctx, build.ParametersJSON(), uint64(ssize)); err != nil { if err := paramfetch.GetParams(ctx, build.ParametersJSON(), build.SrsJSON(), uint64(ssize)); err != nil {
return xerrors.Errorf("fetching proof parameters: %w", err) return xerrors.Errorf("fetching proof parameters: %w", err)
} }


@ -249,7 +249,7 @@ var initRestoreCmd = &cli.Command{
log.Info("Checking proof parameters") log.Info("Checking proof parameters")
if err := paramfetch.GetParams(ctx, build.ParametersJSON(), uint64(mi.SectorSize)); err != nil { if err := paramfetch.GetParams(ctx, build.ParametersJSON(), build.SrsJSON(), uint64(mi.SectorSize)); err != nil {
return xerrors.Errorf("fetching proof parameters: %w", err) return xerrors.Errorf("fetching proof parameters: %w", err)
} }


@ -45,6 +45,7 @@ var sectorsCmd = &cli.Command{
sectorsStartSealCmd, sectorsStartSealCmd,
sectorsSealDelayCmd, sectorsSealDelayCmd,
sectorsCapacityCollateralCmd, sectorsCapacityCollateralCmd,
sectorsBatching,
}, },
} }
@ -969,6 +970,109 @@ var sectorsUpdateCmd = &cli.Command{
}, },
} }
var sectorsBatching = &cli.Command{
Name: "batching",
Usage: "manage batch sector operations",
Subcommands: []*cli.Command{
sectorsBatchingPendingCommit,
sectorsBatchingPendingPreCommit,
},
}
var sectorsBatchingPendingCommit = &cli.Command{
Name: "pending-commit",
Usage: "list sectors waiting in commit batch queue",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "publish-now",
Usage: "send a batch now",
},
},
Action: func(cctx *cli.Context) error {
api, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := lcli.ReqContext(cctx)
if cctx.Bool("publish-now") {
cid, err := api.SectorCommitFlush(ctx)
if err != nil {
return xerrors.Errorf("flush: %w", err)
}
if cid == nil {
return xerrors.Errorf("no sectors to publish")
}
fmt.Println("sector batch published: ", cid)
return nil
}
pending, err := api.SectorCommitPending(ctx)
if err != nil {
return xerrors.Errorf("getting pending deals: %w", err)
}
if len(pending) > 0 {
for _, sector := range pending {
fmt.Println(sector.Number)
}
return nil
}
fmt.Println("No sectors queued to be committed")
return nil
},
}
var sectorsBatchingPendingPreCommit = &cli.Command{
Name: "pending-precommit",
Usage: "list sectors waiting in precommit batch queue",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "publish-now",
Usage: "send a batch now",
},
},
Action: func(cctx *cli.Context) error {
api, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := lcli.ReqContext(cctx)
if cctx.Bool("publish-now") {
cid, err := api.SectorPreCommitFlush(ctx)
if err != nil {
return xerrors.Errorf("flush: %w", err)
}
if cid == nil {
return xerrors.Errorf("no sectors to publish")
}
fmt.Println("sector batch published: ", cid)
return nil
}
pending, err := api.SectorPreCommitPending(ctx)
if err != nil {
return xerrors.Errorf("getting pending deals: %w", err)
}
if len(pending) > 0 {
for _, sector := range pending {
fmt.Println(sector.Number)
}
return nil
}
fmt.Println("No sectors queued to be committed")
return nil
},
}
func yesno(b bool) string { func yesno(b bool) string {
if b { if b {
return color.GreenString("YES") return color.GreenString("YES")


@ -231,7 +231,7 @@ var DaemonCmd = &cli.Command{
freshRepo := err != repo.ErrRepoExists freshRepo := err != repo.ErrRepoExists
if !isLite { if !isLite {
if err := paramfetch.GetParams(lcli.ReqContext(cctx), build.ParametersJSON(), 0); err != nil { if err := paramfetch.GetParams(lcli.ReqContext(cctx), build.ParametersJSON(), build.SrsJSON(), 0); err != nil {
return xerrors.Errorf("fetching proof parameters: %w", err) return xerrors.Errorf("fetching proof parameters: %w", err)
} }
} }


@ -98,9 +98,13 @@
* [SealingAbort](#SealingAbort) * [SealingAbort](#SealingAbort)
* [SealingSchedDiag](#SealingSchedDiag) * [SealingSchedDiag](#SealingSchedDiag)
* [Sector](#Sector) * [Sector](#Sector)
* [SectorCommitFlush](#SectorCommitFlush)
* [SectorCommitPending](#SectorCommitPending)
* [SectorGetExpectedSealDuration](#SectorGetExpectedSealDuration) * [SectorGetExpectedSealDuration](#SectorGetExpectedSealDuration)
* [SectorGetSealDelay](#SectorGetSealDelay) * [SectorGetSealDelay](#SectorGetSealDelay)
* [SectorMarkForUpgrade](#SectorMarkForUpgrade) * [SectorMarkForUpgrade](#SectorMarkForUpgrade)
* [SectorPreCommitFlush](#SectorPreCommitFlush)
* [SectorPreCommitPending](#SectorPreCommitPending)
* [SectorRemove](#SectorRemove) * [SectorRemove](#SectorRemove)
* [SectorSetExpectedSealDuration](#SectorSetExpectedSealDuration) * [SectorSetExpectedSealDuration](#SectorSetExpectedSealDuration)
* [SectorSetSealDelay](#SectorSetSealDelay) * [SectorSetSealDelay](#SectorSetSealDelay)
@ -1556,6 +1560,27 @@ Response: `{}`
## Sector ## Sector
### SectorCommitFlush
SectorCommitFlush immediately sends a Commit message with sectors aggregated for Commit.
Returns null if the message wasn't sent
Perms: admin
Inputs: `null`
Response: `null`
### SectorCommitPending
SectorCommitPending returns a list of pending Commit sectors to be sent in the next aggregate message
Perms: admin
Inputs: `null`
Response: `null`
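
For illustration, a minimal Go sketch of driving these two endpoints, assuming you already hold an `api.StorageMiner` handle (the CLI code earlier in this diff obtains one via `lcli.GetStorageMinerAPI`); `flushCommits` is a hypothetical helper, not part of this change:

```go
import (
	"context"
	"fmt"

	"github.com/filecoin-project/lotus/api"
)

// flushCommits lists the sectors queued for aggregation, then forces the
// batch out; the programmatic equivalent of
// `lotus-miner sectors batching pending-commit --publish-now`.
func flushCommits(ctx context.Context, m api.StorageMiner) error {
	pending, err := m.SectorCommitPending(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("%d sectors queued for aggregation\n", len(pending))

	msg, err := m.SectorCommitFlush(ctx)
	if err != nil {
		return err
	}
	if msg == nil { // batch was empty, no message sent
		return nil
	}
	fmt.Println("aggregate message:", msg)
	return nil
}
```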
### SectorGetExpectedSealDuration ### SectorGetExpectedSealDuration
SectorGetExpectedSealDuration gets the expected time for a sector to seal SectorGetExpectedSealDuration gets the expected time for a sector to seal
@ -1591,6 +1616,27 @@ Inputs:
Response: `{}` Response: `{}`
### SectorPreCommitFlush
SectorPreCommitFlush immediately sends a PreCommit message with sectors batched for PreCommit.
Returns null if the message wasn't sent
Perms: admin
Inputs: `null`
Response: `null`
### SectorPreCommitPending
SectorPreCommitPending returns a list of pending PreCommit sectors to be sent in the next batch message
Perms: admin
Inputs: `null`
Response: `null`
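
The same endpoints back the `sectors batching` CLI added earlier in this diff; an illustrative session (sector numbers and message CID are made up):

```
$ lotus-miner sectors batching pending-precommit
101
102
$ lotus-miner sectors batching pending-precommit --publish-now
sector batch published: bafy2bza...
```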
### SectorRemove ### SectorRemove
SectorRemove removes the sector from storage. It doesn't terminate it on-chain, which can SectorRemove removes the sector from storage. It doesn't terminate it on-chain, which can
be done with SectorTerminate. Removing and not terminating live sectors will cause additional penalties. be done with SectorTerminate. Removing and not terminating live sectors will cause additional penalties.

File diff suppressed because it is too large


@ -8,7 +8,7 @@
- [ ] Register in `chain/vm/invoker.go` - [ ] Register in `chain/vm/invoker.go`
- [ ] Register in `chain/vm/mkactor.go` - [ ] Register in `chain/vm/mkactor.go`
- [ ] Update `chain/types/state.go` - [ ] Update `chain/types/state.go`
- [ ] Update `chain/state/statetree.go` - [ ] Update `chain/state/statetree.go` (New / Load)
- [ ] Update `chain/stmgr/forks.go` - [ ] Update `chain/stmgr/forks.go`
- [ ] Schedule - [ ] Schedule
- [ ] Migration - [ ] Migration

extern/filecoin-ffi vendored

@ -1 +1 @@
Subproject commit dc4e4e8dc9554dedb6f48304f7f0c6328331f9ec Subproject commit 58771ba4d942badc306925160a945022ad335161


@ -0,0 +1,18 @@
//+build cgo
package ffiwrapper
import (
ffi "github.com/filecoin-project/filecoin-ffi"
proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
)
var ProofProver = proofProver{}
var _ Prover = ProofProver
type proofProver struct{}
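// AggregateSealProofs combines the given seal proofs into a single SnarkPack
// aggregate covering the sectors described in aggregateInfo, delegating the
// actual aggregation to filecoin-ffi.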
func (v proofProver) AggregateSealProofs(aggregateInfo proof5.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) {
return ffi.AggregateSealProofs(aggregateInfo, proofs)
}


@ -18,6 +18,7 @@ import (
commpffi "github.com/filecoin-project/go-commp-utils/ffiwrapper" commpffi "github.com/filecoin-project/go-commp-utils/ffiwrapper"
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
@ -31,6 +32,7 @@ import (
ffi "github.com/filecoin-project/filecoin-ffi" ffi "github.com/filecoin-project/filecoin-ffi"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper/basicfs" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper/basicfs"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface" "github.com/filecoin-project/lotus/extern/sector-storage/storiface"
"github.com/filecoin-project/lotus/extern/storage-sealing/lib/nullreader" "github.com/filecoin-project/lotus/extern/storage-sealing/lib/nullreader"
@ -83,9 +85,10 @@ func (s *seal) precommit(t *testing.T, sb *Sealer, id storage.SectorRef, done fu
s.cids = cids s.cids = cids
} }
func (s *seal) commit(t *testing.T, sb *Sealer, done func()) { var seed = abi.InteractiveSealRandomness{0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9, 8, 7, 6, 45, 3, 2, 1, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9}
func (s *seal) commit(t *testing.T, sb *Sealer, done func()) storage.Proof {
defer done() defer done()
seed := abi.InteractiveSealRandomness{0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9, 8, 7, 6, 45, 3, 2, 1, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9}
pc1, err := sb.SealCommit1(context.TODO(), s.ref, s.ticket, seed, []abi.PieceInfo{s.pi}, s.cids) pc1, err := sb.SealCommit1(context.TODO(), s.ref, s.ticket, seed, []abi.PieceInfo{s.pi}, s.cids)
if err != nil { if err != nil {
@ -112,6 +115,8 @@ func (s *seal) commit(t *testing.T, sb *Sealer, done func()) {
if !ok { if !ok {
t.Fatal("proof failed to validate") t.Fatal("proof failed to validate")
} }
return proof
} }
func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si storage.SectorRef, done func()) { func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si storage.SectorRef, done func()) {
@ -229,7 +234,12 @@ func getGrothParamFileAndVerifyingKeys(s abi.SectorSize) {
panic(err) panic(err)
} }
err = paramfetch.GetParams(context.TODO(), dat, uint64(s)) datSrs, err := ioutil.ReadFile("../../../build/proof-params/srs-inner-product.json")
if err != nil {
panic(err)
}
err = paramfetch.GetParams(context.TODO(), dat, datSrs, uint64(s))
if err != nil { if err != nil {
panic(xerrors.Errorf("failed to acquire Groth parameters for 2KiB sectors: %w", err)) panic(xerrors.Errorf("failed to acquire Groth parameters for 2KiB sectors: %w", err))
} }
@ -462,6 +472,97 @@ func TestSealAndVerify3(t *testing.T) {
post(t, sb, []abi.SectorID{si1.ID, si2.ID}, s1, s2, s3) post(t, sb, []abi.SectorID{si1.ID, si2.ID}, s1, s2, s3)
} }
func TestSealAndVerifyAggregate(t *testing.T) {
numAgg := 5
if testing.Short() {
t.Skip("skipping test in short mode")
}
defer requireFDsClosed(t, openFDs(t))
if runtime.NumCPU() < 10 && os.Getenv("CI") == "" { // don't bother on slow hardware
t.Skip("this is slow")
}
_ = os.Setenv("RUST_LOG", "info")
getGrothParamFileAndVerifyingKeys(sectorSize)
cdir, err := ioutil.TempDir("", "sbtest-c-")
if err != nil {
t.Fatal(err)
}
miner := abi.ActorID(123)
sp := &basicfs.Provider{
Root: cdir,
}
sb, err := New(sp)
if err != nil {
t.Fatalf("%+v", err)
}
cleanup := func() {
if t.Failed() {
fmt.Printf("not removing %s\n", cdir)
return
}
if err := os.RemoveAll(cdir); err != nil {
t.Error(err)
}
}
defer cleanup()
avi := proof5.AggregateSealVerifyProofAndInfos{
Miner: miner,
SealProof: sealProofType,
AggregateProof: policy.GetDefaultAggregationProof(),
Proof: nil,
Infos: make([]proof5.AggregateSealVerifyInfo, numAgg),
}
toAggregate := make([][]byte, numAgg)
for i := 0; i < numAgg; i++ {
si := storage.SectorRef{
ID: abi.SectorID{Miner: miner, Number: abi.SectorNumber(i + 1)},
ProofType: sealProofType,
}
s := seal{ref: si}
s.precommit(t, sb, si, func() {})
toAggregate[i] = s.commit(t, sb, func() {})
avi.Infos[i] = proof5.AggregateSealVerifyInfo{
Number: abi.SectorNumber(i + 1),
Randomness: s.ticket,
InteractiveRandomness: seed,
SealedCID: s.cids.Sealed,
UnsealedCID: s.cids.Unsealed,
}
}
aggStart := time.Now()
avi.Proof, err = ProofProver.AggregateSealProofs(avi, toAggregate)
require.NoError(t, err)
aggDone := time.Now()
_, err = ProofProver.AggregateSealProofs(avi, toAggregate)
require.NoError(t, err)
aggHot := time.Now()
ok, err := ProofVerifier.VerifyAggregateSeals(avi)
require.NoError(t, err)
require.True(t, ok)
verifDone := time.Now()
fmt.Printf("Aggregate: %s\n", aggDone.Sub(aggStart).String())
fmt.Printf("Hot: %s\n", aggHot.Sub(aggDone).String())
fmt.Printf("Verify: %s\n", verifDone.Sub(aggHot).String())
}
func BenchmarkWriteWithAlignment(b *testing.B) { func BenchmarkWriteWithAlignment(b *testing.B) {
bt := abi.UnpaddedPieceSize(2 * 127 * 1024 * 1024) bt := abi.UnpaddedPieceSize(2 * 127 * 1024 * 1024)
b.SetBytes(int64(bt)) b.SetBytes(int64(bt))


@ -4,7 +4,7 @@ import (
"context" "context"
"io" "io"
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
@ -34,13 +34,21 @@ type Storage interface {
} }
type Verifier interface { type Verifier interface {
VerifySeal(proof2.SealVerifyInfo) (bool, error) VerifySeal(proof5.SealVerifyInfo) (bool, error)
VerifyWinningPoSt(ctx context.Context, info proof2.WinningPoStVerifyInfo) (bool, error) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error)
VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStVerifyInfo) (bool, error) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error)
VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error)
GenerateWinningPoStSectorChallenge(context.Context, abi.RegisteredPoStProof, abi.ActorID, abi.PoStRandomness, uint64) ([]uint64, error) GenerateWinningPoStSectorChallenge(context.Context, abi.RegisteredPoStProof, abi.ActorID, abi.PoStRandomness, uint64) ([]uint64, error)
} }
// Prover contains cheap proving-related methods
type Prover interface {
// TODO: move GenerateWinningPoStSectorChallenge from the Verifier interface to here
AggregateSealProofs(aggregateInfo proof5.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error)
}
type SectorProvider interface { type SectorProvider interface {
// * returns storiface.ErrSectorNotFound if a requested existing sector doesn't exist // * returns storiface.ErrSectorNotFound if a requested existing sector doesn't exist
// * returns an error when allocate is set, and existing isn't, and the sector exists // * returns an error when allocate is set, and existing isn't, and the sector exists


@ -10,13 +10,13 @@ import (
ffi "github.com/filecoin-project/filecoin-ffi" ffi "github.com/filecoin-project/filecoin-ffi"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
"github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/specs-storage/storage"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface" "github.com/filecoin-project/lotus/extern/sector-storage/storiface"
) )
func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof2.SectorInfo, randomness abi.PoStRandomness) ([]proof2.PoStProof, error) { func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) ([]proof5.PoStProof, error) {
randomness[31] &= 0x3f randomness[31] &= 0x3f
privsectors, skipped, done, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWinningPoStProof) // TODO: FAULTS? privsectors, skipped, done, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWinningPoStProof) // TODO: FAULTS?
if err != nil { if err != nil {
@ -30,7 +30,7 @@ func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID,
return ffi.GenerateWinningPoSt(minerID, privsectors, randomness) return ffi.GenerateWinningPoSt(minerID, privsectors, randomness)
} }
func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof2.SectorInfo, randomness abi.PoStRandomness) ([]proof2.PoStProof, []abi.SectorID, error) { func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) ([]proof5.PoStProof, []abi.SectorID, error) {
randomness[31] &= 0x3f randomness[31] &= 0x3f
privsectors, skipped, done, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWindowPoStProof) privsectors, skipped, done, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWindowPoStProof)
if err != nil { if err != nil {
@ -55,7 +55,7 @@ func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, s
return proof, faultyIDs, err return proof, faultyIDs, err
} }
func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorInfo []proof2.SectorInfo, faults []abi.SectorNumber, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error)) (ffi.SortedPrivateSectorInfo, []abi.SectorID, func(), error) { func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorInfo []proof5.SectorInfo, faults []abi.SectorNumber, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error)) (ffi.SortedPrivateSectorInfo, []abi.SectorID, func(), error) {
fmap := map[abi.SectorNumber]struct{}{} fmap := map[abi.SectorNumber]struct{}{}
for _, fault := range faults { for _, fault := range faults {
fmap[fault] = struct{}{} fmap[fault] = struct{}{}
@ -111,11 +111,15 @@ type proofVerifier struct{}
var ProofVerifier = proofVerifier{} var ProofVerifier = proofVerifier{}
func (proofVerifier) VerifySeal(info proof2.SealVerifyInfo) (bool, error) { func (proofVerifier) VerifySeal(info proof5.SealVerifyInfo) (bool, error) {
return ffi.VerifySeal(info) return ffi.VerifySeal(info)
} }
func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info proof2.WinningPoStVerifyInfo) (bool, error) { func (proofVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) {
return ffi.VerifyAggregateSeals(aggregate)
}
func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) {
info.Randomness[31] &= 0x3f info.Randomness[31] &= 0x3f
_, span := trace.StartSpan(ctx, "VerifyWinningPoSt") _, span := trace.StartSpan(ctx, "VerifyWinningPoSt")
defer span.End() defer span.End()
@ -123,7 +127,7 @@ func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info proof2.WinningP
return ffi.VerifyWinningPoSt(info) return ffi.VerifyWinningPoSt(info)
} }
func (proofVerifier) VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStVerifyInfo) (bool, error) { func (proofVerifier) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) {
info.Randomness[31] &= 0x3f info.Randomness[31] &= 0x3f
_, span := trace.StartSpan(ctx, "VerifyWindowPoSt") _, span := trace.StartSpan(ctx, "VerifyWindowPoSt")
defer span.End() defer span.End()


@ -9,7 +9,7 @@ import (
"math/rand" "math/rand"
"sync" "sync"
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
ffiwrapper2 "github.com/filecoin-project/go-commp-utils/ffiwrapper" ffiwrapper2 "github.com/filecoin-project/go-commp-utils/ffiwrapper"
commcid "github.com/filecoin-project/go-fil-commcid" commcid "github.com/filecoin-project/go-fil-commcid"
@ -34,7 +34,7 @@ type SectorMgr struct {
lk sync.Mutex lk sync.Mutex
} }
type mockVerif struct{} type mockVerifProver struct{}
func NewMockSectorMgr(genesisSectors []abi.SectorID) *SectorMgr { func NewMockSectorMgr(genesisSectors []abi.SectorID) *SectorMgr {
sectors := make(map[abi.SectorID]*sectorState) sectors := make(map[abi.SectorID]*sectorState)
@ -300,14 +300,14 @@ func AddOpFinish(ctx context.Context) (context.Context, func()) {
} }
} }
func (mgr *SectorMgr) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof2.SectorInfo, randomness abi.PoStRandomness) ([]proof2.PoStProof, error) { func (mgr *SectorMgr) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) ([]proof5.PoStProof, error) {
mgr.lk.Lock() mgr.lk.Lock()
defer mgr.lk.Unlock() defer mgr.lk.Unlock()
return generateFakePoSt(sectorInfo, abi.RegisteredSealProof.RegisteredWinningPoStProof, randomness), nil return generateFakePoSt(sectorInfo, abi.RegisteredSealProof.RegisteredWinningPoStProof, randomness), nil
} }
func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof2.SectorInfo, randomness abi.PoStRandomness) ([]proof2.PoStProof, []abi.SectorID, error) { func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) ([]proof5.PoStProof, []abi.SectorID, error) {
mgr.lk.Lock() mgr.lk.Lock()
defer mgr.lk.Unlock() defer mgr.lk.Unlock()
@ -315,7 +315,8 @@ func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorI
return nil, nil, xerrors.Errorf("failed to post (mock)") return nil, nil, xerrors.Errorf("failed to post (mock)")
} }
si := make([]proof2.SectorInfo, 0, len(sectorInfo)) si := make([]proof5.SectorInfo, 0, len(sectorInfo))
var skipped []abi.SectorID var skipped []abi.SectorID
var err error var err error
@ -343,7 +344,7 @@ func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorI
return generateFakePoSt(si, abi.RegisteredSealProof.RegisteredWindowPoStProof, randomness), skipped, nil return generateFakePoSt(si, abi.RegisteredSealProof.RegisteredWindowPoStProof, randomness), skipped, nil
} }
func generateFakePoStProof(sectorInfo []proof2.SectorInfo, randomness abi.PoStRandomness) []byte { func generateFakePoStProof(sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) []byte {
randomness[31] &= 0x3f randomness[31] &= 0x3f
hasher := sha256.New() hasher := sha256.New()
@ -358,13 +359,13 @@ func generateFakePoStProof(sectorInfo []proof2.SectorInfo, randomness abi.PoStRa
} }
func generateFakePoSt(sectorInfo []proof2.SectorInfo, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error), randomness abi.PoStRandomness) []proof2.PoStProof { func generateFakePoSt(sectorInfo []proof5.SectorInfo, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error), randomness abi.PoStRandomness) []proof5.PoStProof {
wp, err := rpt(sectorInfo[0].SealProof) wp, err := rpt(sectorInfo[0].SealProof)
if err != nil { if err != nil {
panic(err) panic(err)
} }
return []proof2.PoStProof{ return []proof5.PoStProof{
{ {
PoStProof: wp, PoStProof: wp,
ProofBytes: generateFakePoStProof(sectorInfo, randomness), ProofBytes: generateFakePoStProof(sectorInfo, randomness),
@ -489,7 +490,7 @@ func (mgr *SectorMgr) ReturnFetch(ctx context.Context, callID storiface.CallID,
panic("not supported") panic("not supported")
} }
func (m mockVerif) VerifySeal(svi proof2.SealVerifyInfo) (bool, error) { func (m mockVerifProver) VerifySeal(svi proof5.SealVerifyInfo) (bool, error) {
plen, err := svi.SealProof.ProofSize() plen, err := svi.SealProof.ProofSize()
if err != nil { if err != nil {
return false, err return false, err
@ -501,6 +502,7 @@ func (m mockVerif) VerifySeal(svi proof2.SealVerifyInfo) (bool, error) {
// only the first 32 bytes, the rest are 0. // only the first 32 bytes, the rest are 0.
for i, b := range svi.Proof[:32] { for i, b := range svi.Proof[:32] {
// unsealed+sealed-seed*ticket
if b != svi.UnsealedCID.Bytes()[i]+svi.SealedCID.Bytes()[31-i]-svi.InteractiveRandomness[i]*svi.Randomness[i] { if b != svi.UnsealedCID.Bytes()[i]+svi.SealedCID.Bytes()[31-i]-svi.InteractiveRandomness[i]*svi.Randomness[i] {
return false, nil return false, nil
} }
@ -509,12 +511,66 @@ func (m mockVerif) VerifySeal(svi proof2.SealVerifyInfo) (bool, error) {
return true, nil return true, nil
} }
func (m mockVerif) VerifyWinningPoSt(ctx context.Context, info proof2.WinningPoStVerifyInfo) (bool, error) { func (m mockVerifProver) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) {
out := make([]byte, m.aggLen(len(aggregate.Infos)))
for pi, svi := range aggregate.Infos {
for i := 0; i < 32; i++ {
b := svi.UnsealedCID.Bytes()[i] + svi.SealedCID.Bytes()[31-i] - svi.InteractiveRandomness[i]*svi.Randomness[i] // raw proof byte
b *= uint8(pi) // with aggregate index
out[i] += b
}
}
return bytes.Equal(aggregate.Proof, out), nil
}
func (m mockVerifProver) AggregateSealProofs(aggregateInfo proof5.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) {
out := make([]byte, m.aggLen(len(aggregateInfo.Infos))) // TODO: pick a more realistic length
for pi, proof := range proofs {
for i := range proof[:32] {
out[i] += proof[i] * uint8(pi)
}
}
return out, nil
}
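// aggLen mimics SnarkPack aggregate sizes: a fixed 11220-byte base for up to
// 8 proofs, growing by 2976 bytes each time the proof count doubles (real
// aggregates grow logarithmically in the number of proofs).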
func (m mockVerifProver) aggLen(nproofs int) int {
switch {
case nproofs <= 8:
return 11220
case nproofs <= 16:
return 14196
case nproofs <= 32:
return 17172
case nproofs <= 64:
return 20148
case nproofs <= 128:
return 23124
case nproofs <= 256:
return 26100
case nproofs <= 512:
return 29076
case nproofs <= 1024:
return 32052
case nproofs <= 2048:
return 35028
case nproofs <= 4096:
return 38004
case nproofs <= 8192:
return 40980
default:
panic("too many proofs")
}
}
func (m mockVerifProver) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) {
info.Randomness[31] &= 0x3f info.Randomness[31] &= 0x3f
return true, nil return true, nil
} }
func (m mockVerif) VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStVerifyInfo) (bool, error) { func (m mockVerifProver) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) {
if len(info.Proofs) != 1 { if len(info.Proofs) != 1 {
return false, xerrors.Errorf("expected 1 proof entry") return false, xerrors.Errorf("expected 1 proof entry")
} }
@ -528,15 +584,17 @@ func (m mockVerif) VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStV
return true, nil return true, nil
} }
func (m mockVerif) GenerateDataCommitment(pt abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) { func (m mockVerifProver) GenerateDataCommitment(pt abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) {
return ffiwrapper.GenerateUnsealedCID(pt, pieces) return ffiwrapper.GenerateUnsealedCID(pt, pieces)
} }
func (m mockVerif) GenerateWinningPoStSectorChallenge(ctx context.Context, proofType abi.RegisteredPoStProof, minerID abi.ActorID, randomness abi.PoStRandomness, eligibleSectorCount uint64) ([]uint64, error) { func (m mockVerifProver) GenerateWinningPoStSectorChallenge(ctx context.Context, proofType abi.RegisteredPoStProof, minerID abi.ActorID, randomness abi.PoStRandomness, eligibleSectorCount uint64) ([]uint64, error) {
return []uint64{0}, nil return []uint64{0}, nil
} }
var MockVerifier = mockVerif{} var MockVerifier = mockVerifProver{}
var MockProver = mockVerifProver{}
var _ storage.Sealer = &SectorMgr{} var _ storage.Sealer = &SectorMgr{}
var _ ffiwrapper.Verifier = MockVerifier var _ ffiwrapper.Verifier = MockVerifier
var _ ffiwrapper.Prover = MockProver

extern/storage-sealing/commit_batch.go (new file)

@ -0,0 +1,366 @@
package sealing
import (
"bytes"
"context"
"sort"
"sync"
"time"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
)
const arp = abi.RegisteredAggregationProof_SnarkPackV1
type CommitBatcherApi interface {
SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error)
StateMinerInfo(context.Context, address.Address, TipSetToken) (miner.MinerInfo, error)
ChainHead(ctx context.Context) (TipSetToken, abi.ChainEpoch, error)
}
type AggregateInput struct {
spt abi.RegisteredSealProof
info proof5.AggregateSealVerifyInfo
proof []byte
}
type CommitBatcher struct {
api CommitBatcherApi
maddr address.Address
mctx context.Context
addrSel AddrSel
feeCfg FeeConfig
getConfig GetSealingConfigFunc
prover ffiwrapper.Prover
deadlines map[abi.SectorNumber]time.Time
todo map[abi.SectorNumber]AggregateInput
waiting map[abi.SectorNumber][]chan cid.Cid
notify, stop, stopped chan struct{}
force chan chan *cid.Cid
lk sync.Mutex
}
func NewCommitBatcher(mctx context.Context, maddr address.Address, api CommitBatcherApi, addrSel AddrSel, feeCfg FeeConfig, getConfig GetSealingConfigFunc, prov ffiwrapper.Prover) *CommitBatcher {
b := &CommitBatcher{
api: api,
maddr: maddr,
mctx: mctx,
addrSel: addrSel,
feeCfg: feeCfg,
getConfig: getConfig,
prover: prov,
deadlines: map[abi.SectorNumber]time.Time{},
todo: map[abi.SectorNumber]AggregateInput{},
waiting: map[abi.SectorNumber][]chan cid.Cid{},
notify: make(chan struct{}, 1),
force: make(chan chan *cid.Cid),
stop: make(chan struct{}),
stopped: make(chan struct{}),
}
go b.run()
return b
}
func (b *CommitBatcher) run() {
var forceRes chan *cid.Cid
var lastMsg *cid.Cid
cfg, err := b.getConfig()
if err != nil {
panic(err)
}
for {
if forceRes != nil {
forceRes <- lastMsg
forceRes = nil
}
lastMsg = nil
var sendAboveMax, sendAboveMin bool
select {
case <-b.stop:
close(b.stopped)
return
case <-b.notify:
sendAboveMax = true
case <-b.batchWait(cfg.CommitBatchWait, cfg.CommitBatchSlack):
sendAboveMin = true
case fr := <-b.force: // user triggered
forceRes = fr
}
var err error
lastMsg, err = b.processBatch(sendAboveMax, sendAboveMin)
if err != nil {
log.Warnw("CommitBatcher processBatch error", "error", err)
}
}
}
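// batchWait returns a timer channel for the next point at which the batch
// should be sent even if below the maximum size: `slack` before the earliest
// per-sector deadline, capped at maxWait after the first pending entry (nil
// if there is nothing to send).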
func (b *CommitBatcher) batchWait(maxWait, slack time.Duration) <-chan time.Time {
now := time.Now()
b.lk.Lock()
defer b.lk.Unlock()
if len(b.todo) == 0 {
return nil
}
var deadline time.Time
for sn := range b.todo {
sectorDeadline := b.deadlines[sn]
if deadline.IsZero() || (!sectorDeadline.IsZero() && sectorDeadline.Before(deadline)) {
deadline = sectorDeadline
}
}
for sn := range b.waiting {
sectorDeadline := b.deadlines[sn]
if deadline.IsZero() || (!sectorDeadline.IsZero() && sectorDeadline.Before(deadline)) {
deadline = sectorDeadline
}
}
if deadline.IsZero() {
return time.After(maxWait)
}
deadline = deadline.Add(-slack)
if deadline.Before(now) {
return time.After(time.Nanosecond) // can't return 0
}
wait := deadline.Sub(now)
if wait > maxWait {
wait = maxWait
}
return time.After(wait)
}
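// processBatch sends the aggregate when the batch is big enough: a wake-up
// from a new entry (notif) requires a full MaxCommitBatch, while a timer
// wake-up (after) only requires MinCommitBatch.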
func (b *CommitBatcher) processBatch(notif, after bool) (*cid.Cid, error) {
b.lk.Lock()
defer b.lk.Unlock()
params := miner5.ProveCommitAggregateParams{
SectorNumbers: bitfield.New(),
}
total := len(b.todo)
if total == 0 {
return nil, nil // nothing to do
}
cfg, err := b.getConfig()
if err != nil {
return nil, xerrors.Errorf("getting config: %w", err)
}
if notif && total < cfg.MaxCommitBatch {
return nil, nil
}
if after && total < cfg.MinCommitBatch {
return nil, nil
}
proofs := make([][]byte, 0, total)
infos := make([]proof5.AggregateSealVerifyInfo, 0, total)
for id, p := range b.todo {
if len(infos) >= cfg.MaxCommitBatch {
log.Infow("commit batch full")
break
}
params.SectorNumbers.Set(uint64(id))
infos = append(infos, p.info)
}
sort.Slice(infos, func(i, j int) bool {
return infos[i].Number < infos[j].Number
})
for _, info := range infos {
proofs = append(proofs, b.todo[info.Number].proof)
}
mid, err := address.IDFromAddress(b.maddr)
if err != nil {
return nil, xerrors.Errorf("getting miner id: %w", err)
}
params.AggregateProof, err = b.prover.AggregateSealProofs(proof5.AggregateSealVerifyProofAndInfos{
Miner: abi.ActorID(mid),
SealProof: b.todo[infos[0].Number].spt,
AggregateProof: arp,
Infos: infos,
}, proofs)
if err != nil {
return nil, xerrors.Errorf("aggregating proofs: %w", err)
}
enc := new(bytes.Buffer)
if err := params.MarshalCBOR(enc); err != nil {
return nil, xerrors.Errorf("couldn't serialize ProveCommitAggregateParams: %w", err)
}
mi, err := b.api.StateMinerInfo(b.mctx, b.maddr, nil)
if err != nil {
return nil, xerrors.Errorf("couldn't get miner info: %w", err)
}
from, _, err := b.addrSel(b.mctx, mi, api.CommitAddr, b.feeCfg.MaxCommitGasFee, b.feeCfg.MaxCommitGasFee)
if err != nil {
return nil, xerrors.Errorf("no good address found: %w", err)
}
mcid, err := b.api.SendMsg(b.mctx, from, b.maddr, miner.Methods.ProveCommitAggregate, big.Zero(), b.feeCfg.MaxCommitGasFee, enc.Bytes())
if err != nil {
return nil, xerrors.Errorf("sending message failed: %w", err)
}
log.Infow("Sent ProveCommitAggregate message", "cid", mcid, "from", from, "todo", total, "sectors", len(infos))
err = params.SectorNumbers.ForEach(func(us uint64) error {
sn := abi.SectorNumber(us)
for _, ch := range b.waiting[sn] {
ch <- mcid // buffered
}
delete(b.waiting, sn)
delete(b.todo, sn)
delete(b.deadlines, sn)
return nil
})
if err != nil {
return nil, xerrors.Errorf("done sectors foreach: %w", err)
}
return &mcid, nil
}
// register commit, wait for batch message, return message CID
func (b *CommitBatcher) AddCommit(ctx context.Context, s SectorInfo, in AggregateInput) (mcid cid.Cid, err error) {
_, curEpoch, err := b.api.ChainHead(b.mctx)
if err != nil {
log.Errorf("getting chain head: %s", err)
return cid.Undef, nil
}
sn := s.SectorNumber
b.lk.Lock()
b.deadlines[sn] = getSectorDeadline(curEpoch, s)
b.todo[sn] = in
sent := make(chan cid.Cid, 1)
b.waiting[sn] = append(b.waiting[sn], sent)
select {
case b.notify <- struct{}{}:
default: // already have a pending notification, don't need more
}
b.lk.Unlock()
select {
case c := <-sent:
return c, nil
case <-ctx.Done():
return cid.Undef, ctx.Err()
}
}
func (b *CommitBatcher) Flush(ctx context.Context) (*cid.Cid, error) {
resCh := make(chan *cid.Cid, 1)
select {
case b.force <- resCh:
select {
case res := <-resCh:
return res, nil
case <-ctx.Done():
return nil, ctx.Err()
}
case <-ctx.Done():
return nil, ctx.Err()
}
}
func (b *CommitBatcher) Pending(ctx context.Context) ([]abi.SectorID, error) {
b.lk.Lock()
defer b.lk.Unlock()
mid, err := address.IDFromAddress(b.maddr)
if err != nil {
return nil, err
}
res := make([]abi.SectorID, 0)
for _, s := range b.todo {
res = append(res, abi.SectorID{
Miner: abi.ActorID(mid),
Number: s.info.Number,
})
}
sort.Slice(res, func(i, j int) bool {
if res[i].Miner != res[j].Miner {
return res[i].Miner < res[j].Miner
}
return res[i].Number < res[j].Number
})
return res, nil
}
func (b *CommitBatcher) Stop(ctx context.Context) error {
close(b.stop)
select {
case <-b.stopped:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
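// getSectorDeadline estimates the wall-clock time by which this sector's
// message should land on chain: the ticket expiry epoch (TicketEpoch +
// MaxPreCommitRandomnessLookback) or the earliest deal start epoch, whichever
// comes first, converted to time at one block per build.BlockDelaySecs.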
func getSectorDeadline(curEpoch abi.ChainEpoch, si SectorInfo) time.Time {
deadlineEpoch := si.TicketEpoch + policy.MaxPreCommitRandomnessLookback
for _, p := range si.Pieces {
if p.DealInfo == nil {
continue
}
startEpoch := p.DealInfo.DealSchedule.StartEpoch
if startEpoch < deadlineEpoch {
deadlineEpoch = startEpoch
}
}
if deadlineEpoch <= curEpoch {
return time.Now()
}
return time.Now().Add(time.Duration(deadlineEpoch-curEpoch) * time.Duration(build.BlockDelaySecs) * time.Second)
}


@ -71,13 +71,27 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto
on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed), on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed),
), ),
PreCommitting: planOne( PreCommitting: planOne(
on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed), on(SectorPreCommitBatch{}, SubmitPreCommitBatch),
on(SectorPreCommitted{}, PreCommitWait), on(SectorPreCommitted{}, PreCommitWait),
on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed),
on(SectorChainPreCommitFailed{}, PreCommitFailed), on(SectorChainPreCommitFailed{}, PreCommitFailed),
on(SectorPreCommitLanded{}, WaitSeed), on(SectorPreCommitLanded{}, WaitSeed),
on(SectorDealsExpired{}, DealsExpired), on(SectorDealsExpired{}, DealsExpired),
on(SectorInvalidDealIDs{}, RecoverDealIDs), on(SectorInvalidDealIDs{}, RecoverDealIDs),
), ),
SubmitPreCommitBatch: planOne(
on(SectorPreCommitBatchSent{}, PreCommitBatchWait),
on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed),
on(SectorChainPreCommitFailed{}, PreCommitFailed),
on(SectorPreCommitLanded{}, WaitSeed),
on(SectorDealsExpired{}, DealsExpired),
on(SectorInvalidDealIDs{}, RecoverDealIDs),
),
PreCommitBatchWait: planOne(
on(SectorChainPreCommitFailed{}, PreCommitFailed),
on(SectorPreCommitLanded{}, WaitSeed),
on(SectorRetryPreCommit{}, PreCommitting),
),
PreCommitWait: planOne( PreCommitWait: planOne(
on(SectorChainPreCommitFailed{}, PreCommitFailed), on(SectorChainPreCommitFailed{}, PreCommitFailed),
on(SectorPreCommitLanded{}, WaitSeed), on(SectorPreCommitLanded{}, WaitSeed),
@ -90,6 +104,11 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto
Committing: planCommitting, Committing: planCommitting,
SubmitCommit: planOne( SubmitCommit: planOne(
on(SectorCommitSubmitted{}, CommitWait), on(SectorCommitSubmitted{}, CommitWait),
on(SectorSubmitCommitAggregate{}, SubmitCommitAggregate),
on(SectorCommitFailed{}, CommitFailed),
),
SubmitCommitAggregate: planOne(
on(SectorCommitAggregateSent{}, CommitWait),
on(SectorCommitFailed{}, CommitFailed), on(SectorCommitFailed{}, CommitFailed),
), ),
CommitWait: planOne( CommitWait: planOne(
@ -97,6 +116,11 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto
on(SectorCommitFailed{}, CommitFailed), on(SectorCommitFailed{}, CommitFailed),
on(SectorRetrySubmitCommit{}, SubmitCommit), on(SectorRetrySubmitCommit{}, SubmitCommit),
), ),
CommitAggregateWait: planOne(
on(SectorProving{}, FinalizeSector),
on(SectorCommitFailed{}, CommitFailed),
on(SectorRetrySubmitCommit{}, SubmitCommit),
),
FinalizeSector: planOne( FinalizeSector: planOne(
on(SectorFinalized{}, Proving), on(SectorFinalized{}, Proving),
@ -330,6 +354,10 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta
return m.handlePreCommit2, processed, nil return m.handlePreCommit2, processed, nil
case PreCommitting: case PreCommitting:
return m.handlePreCommitting, processed, nil return m.handlePreCommitting, processed, nil
case SubmitPreCommitBatch:
return m.handleSubmitPreCommitBatch, processed, nil
case PreCommitBatchWait:
fallthrough
case PreCommitWait: case PreCommitWait:
return m.handlePreCommitWait, processed, nil return m.handlePreCommitWait, processed, nil
case WaitSeed: case WaitSeed:
@ -338,6 +366,10 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta
return m.handleCommitting, processed, nil return m.handleCommitting, processed, nil
case SubmitCommit: case SubmitCommit:
return m.handleSubmitCommit, processed, nil return m.handleSubmitCommit, processed, nil
case SubmitCommitAggregate:
return m.handleSubmitCommitAggregate, processed, nil
case CommitAggregateWait:
fallthrough
case CommitWait: case CommitWait:
return m.handleCommitWait, processed, nil return m.handleCommitWait, processed, nil
case FinalizeSector: case FinalizeSector:


@ -150,6 +150,18 @@ func (evt SectorPreCommit2) apply(state *SectorInfo) {
state.CommR = &commr state.CommR = &commr
} }
type SectorPreCommitBatch struct{}
func (evt SectorPreCommitBatch) apply(*SectorInfo) {}
type SectorPreCommitBatchSent struct {
Message cid.Cid
}
func (evt SectorPreCommitBatchSent) apply(state *SectorInfo) {
state.PreCommitMessage = &evt.Message
}
type SectorPreCommitLanded struct { type SectorPreCommitLanded struct {
TipSet TipSetToken TipSet TipSetToken
} }
@ -233,6 +245,10 @@ func (evt SectorCommitted) apply(state *SectorInfo) {
state.Proof = evt.Proof state.Proof = evt.Proof
} }
type SectorSubmitCommitAggregate struct{}
func (evt SectorSubmitCommitAggregate) apply(*SectorInfo) {}
type SectorCommitSubmitted struct { type SectorCommitSubmitted struct {
Message cid.Cid Message cid.Cid
} }
@ -241,6 +257,14 @@ func (evt SectorCommitSubmitted) apply(state *SectorInfo) {
state.CommitMessage = &evt.Message state.CommitMessage = &evt.Message
} }
type SectorCommitAggregateSent struct {
Message cid.Cid
}
func (evt SectorCommitAggregateSent) apply(state *SectorInfo) {
state.CommitMessage = &evt.Message
}
type SectorProving struct{} type SectorProving struct{}
func (evt SectorProving) apply(*SectorInfo) {} func (evt SectorProving) apply(*SectorInfo) {}


@ -0,0 +1,312 @@
package sealing
import (
"bytes"
"context"
"sort"
"sync"
"time"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
)
type PreCommitBatcherApi interface {
SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error)
StateMinerInfo(context.Context, address.Address, TipSetToken) (miner.MinerInfo, error)
ChainHead(ctx context.Context) (TipSetToken, abi.ChainEpoch, error)
}
type preCommitEntry struct {
deposit abi.TokenAmount
pci *miner0.SectorPreCommitInfo
}
type PreCommitBatcher struct {
api PreCommitBatcherApi
maddr address.Address
mctx context.Context
addrSel AddrSel
feeCfg FeeConfig
getConfig GetSealingConfigFunc
deadlines map[abi.SectorNumber]time.Time
todo map[abi.SectorNumber]*preCommitEntry
waiting map[abi.SectorNumber][]chan cid.Cid
notify, stop, stopped chan struct{}
force chan chan *cid.Cid
lk sync.Mutex
}
func NewPreCommitBatcher(mctx context.Context, maddr address.Address, api PreCommitBatcherApi, addrSel AddrSel, feeCfg FeeConfig, getConfig GetSealingConfigFunc) *PreCommitBatcher {
b := &PreCommitBatcher{
api: api,
maddr: maddr,
mctx: mctx,
addrSel: addrSel,
feeCfg: feeCfg,
getConfig: getConfig,
deadlines: map[abi.SectorNumber]time.Time{},
todo: map[abi.SectorNumber]*preCommitEntry{},
waiting: map[abi.SectorNumber][]chan cid.Cid{},
notify: make(chan struct{}, 1),
force: make(chan chan *cid.Cid),
stop: make(chan struct{}),
stopped: make(chan struct{}),
}
go b.run()
return b
}
func (b *PreCommitBatcher) run() {
var forceRes chan *cid.Cid
var lastMsg *cid.Cid
cfg, err := b.getConfig()
if err != nil {
panic(err)
}
for {
if forceRes != nil {
forceRes <- lastMsg
forceRes = nil
}
lastMsg = nil
var sendAboveMax, sendAboveMin bool
select {
case <-b.stop:
close(b.stopped)
return
case <-b.notify:
sendAboveMax = true
case <-b.batchWait(cfg.PreCommitBatchWait, cfg.PreCommitBatchSlack):
sendAboveMin = true
case fr := <-b.force: // user triggered
forceRes = fr
}
var err error
lastMsg, err = b.processBatch(sendAboveMax, sendAboveMin)
if err != nil {
log.Warnw("PreCommitBatcher processBatch error", "error", err)
}
}
}
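// batchWait mirrors CommitBatcher.batchWait: fire `slack` before the earliest
// per-sector deadline, capped at maxWait after the first pending entry.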
func (b *PreCommitBatcher) batchWait(maxWait, slack time.Duration) <-chan time.Time {
now := time.Now()
b.lk.Lock()
defer b.lk.Unlock()
if len(b.todo) == 0 {
return nil
}
var deadline time.Time
for sn := range b.todo {
sectorDeadline := b.deadlines[sn]
if deadline.IsZero() || (!sectorDeadline.IsZero() && sectorDeadline.Before(deadline)) {
deadline = sectorDeadline
}
}
for sn := range b.waiting {
sectorDeadline := b.deadlines[sn]
if deadline.IsZero() || (!sectorDeadline.IsZero() && sectorDeadline.Before(deadline)) {
deadline = sectorDeadline
}
}
if deadline.IsZero() {
return time.After(maxWait)
}
deadline = deadline.Add(-slack)
if deadline.Before(now) {
return time.After(time.Nanosecond) // can't return 0
}
wait := deadline.Sub(now)
if wait > maxWait {
wait = maxWait
}
return time.After(wait)
}
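// processBatch mirrors the commit path: a wake-up from a new entry (notif)
// requires a full MaxPreCommitBatch, while a timer wake-up (after) only
// requires MinPreCommitBatch.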
func (b *PreCommitBatcher) processBatch(notif, after bool) (*cid.Cid, error) {
b.lk.Lock()
defer b.lk.Unlock()
params := miner5.PreCommitSectorBatchParams{}
total := len(b.todo)
if total == 0 {
return nil, nil // nothing to do
}
cfg, err := b.getConfig()
if err != nil {
return nil, xerrors.Errorf("getting config: %w", err)
}
if notif && total < cfg.MaxPreCommitBatch {
return nil, nil
}
if after && total < cfg.MinPreCommitBatch {
return nil, nil
}
deposit := big.Zero()
for _, p := range b.todo {
if len(params.Sectors) >= cfg.MaxPreCommitBatch {
log.Infow("precommit batch full")
break
}
params.Sectors = append(params.Sectors, p.pci)
deposit = big.Add(deposit, p.deposit)
}
enc := new(bytes.Buffer)
if err := params.MarshalCBOR(enc); err != nil {
return nil, xerrors.Errorf("couldn't serialize PreCommitSectorBatchParams: %w", err)
}
mi, err := b.api.StateMinerInfo(b.mctx, b.maddr, nil)
if err != nil {
return nil, xerrors.Errorf("couldn't get miner info: %w", err)
}
goodFunds := big.Add(deposit, b.feeCfg.MaxPreCommitGasFee)
from, _, err := b.addrSel(b.mctx, mi, api.PreCommitAddr, goodFunds, deposit)
if err != nil {
return nil, xerrors.Errorf("no good address found: %w", err)
}
mcid, err := b.api.SendMsg(b.mctx, from, b.maddr, miner.Methods.PreCommitSectorBatch, deposit, b.feeCfg.MaxPreCommitGasFee, enc.Bytes())
if err != nil {
return nil, xerrors.Errorf("sending message failed: %w", err)
}
log.Infow("Sent ProveCommitAggregate message", "cid", mcid, "from", from, "sectors", total)
for _, sector := range params.Sectors {
sn := sector.SectorNumber
for _, ch := range b.waiting[sn] {
ch <- mcid // buffered
}
delete(b.waiting, sn)
delete(b.todo, sn)
delete(b.deadlines, sn)
}
return &mcid, nil
}
// register PreCommit, wait for batch message, return message CID
func (b *PreCommitBatcher) AddPreCommit(ctx context.Context, s SectorInfo, deposit abi.TokenAmount, in *miner0.SectorPreCommitInfo) (mcid cid.Cid, err error) {
_, curEpoch, err := b.api.ChainHead(b.mctx)
if err != nil {
log.Errorf("getting chain head: %s", err)
return cid.Undef, nil
}
sn := s.SectorNumber
b.lk.Lock()
b.deadlines[sn] = getSectorDeadline(curEpoch, s)
b.todo[sn] = &preCommitEntry{
deposit: deposit,
pci: in,
}
sent := make(chan cid.Cid, 1)
b.waiting[sn] = append(b.waiting[sn], sent)
select {
case b.notify <- struct{}{}:
default: // already have a pending notification, don't need more
}
b.lk.Unlock()
select {
case c := <-sent:
return c, nil
case <-ctx.Done():
return cid.Undef, ctx.Err()
}
}
func (b *PreCommitBatcher) Flush(ctx context.Context) (*cid.Cid, error) {
resCh := make(chan *cid.Cid, 1)
select {
case b.force <- resCh:
select {
case res := <-resCh:
return res, nil
case <-ctx.Done():
return nil, ctx.Err()
}
case <-ctx.Done():
return nil, ctx.Err()
}
}
func (b *PreCommitBatcher) Pending(ctx context.Context) ([]abi.SectorID, error) {
b.lk.Lock()
defer b.lk.Unlock()
mid, err := address.IDFromAddress(b.maddr)
if err != nil {
return nil, err
}
res := make([]abi.SectorID, 0)
for _, s := range b.todo {
res = append(res, abi.SectorID{
Miner: abi.ActorID(mid),
Number: s.pci.SectorNumber,
})
}
sort.Slice(res, func(i, j int) bool {
if res[i].Miner != res[j].Miner {
return res[i].Miner < res[j].Miner
}
return res[i].Number < res[j].Number
})
return res, nil
}
func (b *PreCommitBatcher) Stop(ctx context.Context) error {
close(b.stop)
select {
case <-b.stopped:
return nil
case <-ctx.Done():
return ctx.Err()
}
}


@ -17,4 +17,20 @@ type Config struct {
WaitDealsDelay time.Duration WaitDealsDelay time.Duration
AlwaysKeepUnsealedCopy bool AlwaysKeepUnsealedCopy bool
BatchPreCommits bool
MaxPreCommitBatch int
MinPreCommitBatch int
PreCommitBatchWait time.Duration
PreCommitBatchSlack time.Duration
AggregateCommits bool
MinCommitBatch int
MaxCommitBatch int
CommitBatchWait time.Duration
CommitBatchSlack time.Duration
TerminateBatchMax uint64
TerminateBatchMin uint64
TerminateBatchWait time.Duration
} }
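
As a sketch, a batching-enabled configuration might look like the following; the values are illustrative only, not the defaults shipped with this change (assumes `time` and the sealing package are imported):

```go
cfg := sealing.Config{
	WaitDealsDelay:         time.Hour,
	AlwaysKeepUnsealedCopy: true,

	// batch PreCommitSector messages into a single PreCommitSectorBatch
	BatchPreCommits:     true,
	MinPreCommitBatch:   1,   // illustrative
	MaxPreCommitBatch:   256, // illustrative
	PreCommitBatchWait:  24 * time.Hour,
	PreCommitBatchSlack: 3 * time.Hour,

	// aggregate ProveCommitSector proofs into ProveCommitAggregate
	AggregateCommits: true,
	MinCommitBatch:   4,   // illustrative
	MaxCommitBatch:   204, // illustrative
	CommitBatchWait:  24 * time.Hour,
	CommitBatchSlack: time.Hour,

	TerminateBatchMin:  1,
	TerminateBatchMax:  100, // illustrative
	TerminateBatchWait: 5 * time.Minute,
}
```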


@ -102,7 +102,9 @@ type Sealing struct {
stats SectorStats stats SectorStats
terminator *TerminateBatcher terminator *TerminateBatcher
precommiter *PreCommitBatcher
commiter *CommitBatcher
getConfig GetSealingConfigFunc getConfig GetSealingConfigFunc
dealInfo *CurrentDealInfoManager dealInfo *CurrentDealInfoManager
@ -130,7 +132,7 @@ type pendingPiece struct {
accepted func(abi.SectorNumber, abi.UnpaddedPieceSize, error) accepted func(abi.SectorNumber, abi.UnpaddedPieceSize, error)
} }
func New(api SealingAPI, fc FeeConfig, events Events, maddr address.Address, ds datastore.Batching, sealer sectorstorage.SectorManager, sc SectorIDCounter, verif ffiwrapper.Verifier, pcp PreCommitPolicy, gc GetSealingConfigFunc, notifee SectorStateNotifee, as AddrSel) *Sealing { func New(api SealingAPI, fc FeeConfig, events Events, maddr address.Address, ds datastore.Batching, sealer sectorstorage.SectorManager, sc SectorIDCounter, verif ffiwrapper.Verifier, prov ffiwrapper.Prover, pcp PreCommitPolicy, gc GetSealingConfigFunc, notifee SectorStateNotifee, as AddrSel) *Sealing {
s := &Sealing{ s := &Sealing{
api: api, api: api,
feeCfg: fc, feeCfg: fc,
@ -151,7 +153,9 @@ func New(api SealingAPI, fc FeeConfig, events Events, maddr address.Address, ds
notifee: notifee, notifee: notifee,
addrSel: as, addrSel: as,
terminator: NewTerminationBatcher(context.TODO(), maddr, api, as, fc), terminator: NewTerminationBatcher(context.TODO(), maddr, api, as, fc, gc),
precommiter: NewPreCommitBatcher(context.TODO(), maddr, api, as, fc, gc),
commiter: NewCommitBatcher(context.TODO(), maddr, api, as, fc, gc, prov),
getConfig: gc, getConfig: gc,
dealInfo: &CurrentDealInfoManager{api}, dealInfo: &CurrentDealInfoManager{api},
@ -202,6 +206,22 @@ func (m *Sealing) TerminatePending(ctx context.Context) ([]abi.SectorID, error)
return m.terminator.Pending(ctx) return m.terminator.Pending(ctx)
} }
func (m *Sealing) SectorPreCommitFlush(ctx context.Context) (*cid.Cid, error) {
return m.precommiter.Flush(ctx)
}
func (m *Sealing) SectorPreCommitPending(ctx context.Context) ([]abi.SectorID, error) {
return m.precommiter.Pending(ctx)
}
func (m *Sealing) CommitFlush(ctx context.Context) (*cid.Cid, error) {
return m.commiter.Flush(ctx)
}
func (m *Sealing) CommitPending(ctx context.Context) ([]abi.SectorID, error) {
return m.commiter.Pending(ctx)
}
func (m *Sealing) currentSealProof(ctx context.Context) (abi.RegisteredSealProof, error) { func (m *Sealing) currentSealProof(ctx context.Context) (abi.RegisteredSealProof, error) {
mi, err := m.api.StateMinerInfo(ctx, m.maddr, nil) mi, err := m.api.StateMinerInfo(ctx, m.maddr, nil)
if err != nil { if err != nil {


@ -3,61 +3,76 @@ package sealing
type SectorState string type SectorState string
var ExistSectorStateList = map[SectorState]struct{}{
	Empty:                 {},
	WaitDeals:             {},
	Packing:               {},
	AddPiece:              {},
	AddPieceFailed:        {},
	GetTicket:             {},
	PreCommit1:            {},
	PreCommit2:            {},
	PreCommitting:         {},
	PreCommitWait:         {},
	SubmitPreCommitBatch:  {},
	PreCommitBatchWait:    {},
	WaitSeed:              {},
	Committing:            {},
	SubmitCommit:          {},
	CommitWait:            {},
	SubmitCommitAggregate: {},
	CommitAggregateWait:   {},
	FinalizeSector:        {},
	Proving:               {},
	FailedUnrecoverable:   {},
	SealPreCommit1Failed:  {},
	SealPreCommit2Failed:  {},
	PreCommitFailed:       {},
	ComputeProofFailed:    {},
	CommitFailed:          {},
	PackingFailed:         {},
	FinalizeFailed:        {},
	DealsExpired:          {},
	RecoverDealIDs:        {},
	Faulty:                {},
	FaultReported:         {},
	FaultedFinal:          {},
	Terminating:           {},
	TerminateWait:         {},
	TerminateFinality:     {},
	TerminateFailed:       {},
	Removing:              {},
	RemoveFailed:          {},
	Removed:               {},
}
const (
	UndefinedSectorState SectorState = ""

	// happy path
	Empty      SectorState = "Empty"      // deprecated
	WaitDeals  SectorState = "WaitDeals"  // waiting for more pieces (deals) to be added to the sector
	AddPiece   SectorState = "AddPiece"   // put deal data (and padding if required) into the sector
	Packing    SectorState = "Packing"    // sector not in sealStore, and not on chain
	GetTicket  SectorState = "GetTicket"  // generate ticket
	PreCommit1 SectorState = "PreCommit1" // do PreCommit1
	PreCommit2 SectorState = "PreCommit2" // do PreCommit2

	PreCommitting SectorState = "PreCommitting" // on chain pre-commit
	PreCommitWait SectorState = "PreCommitWait" // waiting for precommit to land on chain

	SubmitPreCommitBatch SectorState = "SubmitPreCommitBatch"
	PreCommitBatchWait   SectorState = "PreCommitBatchWait"

	WaitSeed   SectorState = "WaitSeed"   // waiting for seed
	Committing SectorState = "Committing" // compute PoRep

	// single commit
	SubmitCommit SectorState = "SubmitCommit" // send commit message to the chain
	CommitWait   SectorState = "CommitWait"   // wait for the commit message to land on chain

	SubmitCommitAggregate SectorState = "SubmitCommitAggregate"
	CommitAggregateWait   SectorState = "CommitAggregateWait"

	FinalizeSector SectorState = "FinalizeSector"
	Proving        SectorState = "Proving"
// error modes // error modes
@ -91,7 +106,7 @@ func toStatState(st SectorState) statSectorState {
switch st { switch st {
case UndefinedSectorState, Empty, WaitDeals, AddPiece: case UndefinedSectorState, Empty, WaitDeals, AddPiece:
return sstStaging return sstStaging
case Packing, GetTicket, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, WaitSeed, Committing, SubmitCommit, CommitWait, FinalizeSector: case Packing, GetTicket, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, SubmitPreCommitBatch, PreCommitBatchWait, WaitSeed, Committing, SubmitCommit, CommitWait, SubmitCommitAggregate, CommitAggregateWait, FinalizeSector:
return sstSealing return sstSealing
case Proving, Removed, Removing, Terminating, TerminateWait, TerminateFinality, TerminateFailed: case Proving, Removed, Removing, Terminating, TerminateWait, TerminateFinality, TerminateFailed:
return sstProving return sstProving


@ -11,7 +11,9 @@ import (
"github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/go-statemachine" "github.com/filecoin-project/go-statemachine"
"github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
"github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/specs-storage/storage"
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api"
@ -224,56 +226,50 @@ func (m *Sealing) remarkForUpgrade(sid abi.SectorNumber) {
}
}

func (m *Sealing) preCommitParams(ctx statemachine.Context, sector SectorInfo) (*miner.SectorPreCommitInfo, big.Int, TipSetToken, error) {
tok, height, err := m.api.ChainHead(ctx.Context())
if err != nil {
log.Errorf("handlePreCommitting: api error, not proceeding: %+v", err)
return nil, big.Zero(), nil, nil
}

if err := checkPrecommit(ctx.Context(), m.Address(), sector, tok, height, m.api); err != nil {
switch err := err.(type) {
case *ErrApi:
log.Errorf("handlePreCommitting: api error, not proceeding: %+v", err)
return nil, big.Zero(), nil, nil
case *ErrBadCommD: // TODO: Should this just back to packing? (not really needed since handlePreCommit1 will do that too)
return nil, big.Zero(), nil, ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("bad CommD error: %w", err)})
case *ErrExpiredTicket:
return nil, big.Zero(), nil, ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("ticket expired: %w", err)})
case *ErrBadTicket:
return nil, big.Zero(), nil, ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("bad ticket: %w", err)})
case *ErrInvalidDeals:
log.Warnf("invalid deals in sector %d: %v", sector.SectorNumber, err)
return nil, big.Zero(), nil, ctx.Send(SectorInvalidDealIDs{Return: RetPreCommitting})
case *ErrExpiredDeals:
return nil, big.Zero(), nil, ctx.Send(SectorDealsExpired{xerrors.Errorf("sector deals expired: %w", err)})
case *ErrPrecommitOnChain:
return nil, big.Zero(), nil, ctx.Send(SectorPreCommitLanded{TipSet: tok}) // we re-did precommit
case *ErrSectorNumberAllocated:
log.Errorf("handlePreCommitFailed: sector number already allocated, not proceeding: %+v", err)
// TODO: check if the sector is committed (not sure how we'd end up here)
return nil, big.Zero(), nil, nil
default:
return nil, big.Zero(), nil, xerrors.Errorf("checkPrecommit sanity check error: %w", err)
}
}

expiration, err := m.pcp.Expiration(ctx.Context(), sector.Pieces...)
if err != nil {
return nil, big.Zero(), nil, ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("handlePreCommitting: failed to compute pre-commit expiry: %w", err)})
}

// Sectors must last _at least_ MinSectorExpiration + MaxSealDuration.
// TODO: The "+10" allows the pre-commit to take 10 blocks to be accepted.
nv, err := m.api.StateNetworkVersion(ctx.Context(), tok)
if err != nil {
return nil, big.Zero(), nil, ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("failed to get network version: %w", err)})
}

msd := policy.GetMaxProveCommitDuration(actors.VersionForNetwork(nv), sector.SectorType)
@ -295,17 +291,49 @@ func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInf
depositMinimum := m.tryUpgradeSector(ctx.Context(), params)
collateral, err := m.api.StateMinerPreCommitDepositForPower(ctx.Context(), m.maddr, *params, tok)
if err != nil {
return nil, big.Zero(), nil, xerrors.Errorf("getting initial pledge collateral: %w", err)
}
deposit := big.Max(depositMinimum, collateral)
return params, deposit, tok, nil
}
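The deposit chosen above is simply the larger of the upgrade-path minimum from tryUpgradeSector and the chain's PreCommitDepositForPower. A tiny self-contained illustration of that selection with go-state-types big (amounts are made up):

package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/big"
)

func main() {
	depositMinimum := big.NewInt(100) // e.g. deposit carried over when upgrading a CC sector
	collateral := big.NewInt(250)     // e.g. StateMinerPreCommitDepositForPower result

	// Same selection as preCommitParams: never deposit less than either bound.
	deposit := big.Max(depositMinimum, collateral)
	fmt.Println(deposit) // 250
}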
func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInfo) error {
cfg, err := m.getConfig()
if err != nil {
return xerrors.Errorf("getting config: %w", err)
}
if cfg.BatchPreCommits {
nv, err := m.api.StateNetworkVersion(ctx.Context(), nil)
if err != nil {
return xerrors.Errorf("getting network version: %w", err)
}
if nv >= network.Version13 {
return ctx.Send(SectorPreCommitBatch{})
}
}
params, deposit, tok, err := m.preCommitParams(ctx, sector)
if params == nil || err != nil {
return err
}
enc := new(bytes.Buffer)
if err := params.MarshalCBOR(enc); err != nil {
return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("could not serialize pre-commit sector parameters: %w", err)})
}

mi, err := m.api.StateMinerInfo(ctx.Context(), m.maddr, tok)
if err != nil {
log.Errorf("handlePreCommitting: api error, not proceeding: %+v", err)
return nil
}

goodFunds := big.Add(deposit, m.feeCfg.MaxPreCommitGasFee)

from, _, err := m.addrSel(ctx.Context(), mi, api.PreCommitAddr, goodFunds, deposit)
@ -325,6 +353,24 @@ func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInf
return ctx.Send(SectorPreCommitted{Message: mcid, PreCommitDeposit: deposit, PreCommitInfo: *params})
}
func (m *Sealing) handleSubmitPreCommitBatch(ctx statemachine.Context, sector SectorInfo) error {
if sector.CommD == nil || sector.CommR == nil {
return ctx.Send(SectorCommitFailed{xerrors.Errorf("sector had nil commR or commD")})
}
params, deposit, _, err := m.preCommitParams(ctx, sector)
if params == nil || err != nil {
return err
}
mcid, err := m.precommiter.AddPreCommit(ctx.Context(), sector, deposit, params)
if err != nil {
return ctx.Send(SectorCommitFailed{xerrors.Errorf("queuing precommit batch failed: %w", err)})
}
return ctx.Send(SectorPreCommitBatchSent{mcid})
}
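The PreCommitBatcher behind m.precommiter.AddPreCommit is not part of this excerpt; as a rough sketch under that assumption, it follows the same collect-and-flush shape as the TerminateBatcher below: queue entries, then flush when the batch is full or a timer fires. All names and thresholds here are illustrative, not lotus code:

package main

import (
	"fmt"
	"sync"
	"time"
)

// batcher is a toy stand-in for the batching goroutine.
type batcher struct {
	mu      sync.Mutex
	max     int
	pending []string
	notify  chan struct{}
}

func newBatcher(max int, wait time.Duration) *batcher {
	b := &batcher{max: max, notify: make(chan struct{}, 1)}
	go func() {
		for {
			select {
			case <-b.notify: // batch crossed the max threshold
			case <-time.After(wait): // or the wait timer fired
			}
			b.mu.Lock()
			if n := len(b.pending); n > 0 {
				fmt.Printf("flushing batch of %d entries\n", n)
				b.pending = b.pending[:0]
			}
			b.mu.Unlock()
		}
	}()
	return b
}

// add queues one entry and pokes the flush loop once the batch is full.
func (b *batcher) add(entry string) {
	b.mu.Lock()
	b.pending = append(b.pending, entry)
	full := len(b.pending) >= b.max
	b.mu.Unlock()
	if full {
		select {
		case b.notify <- struct{}{}:
		default:
		}
	}
}

func main() {
	b := newBatcher(2, time.Second)
	b.add("sector-1")
	b.add("sector-2") // hits max, triggers an immediate flush
	time.Sleep(100 * time.Millisecond)
}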
func (m *Sealing) handlePreCommitWait(ctx statemachine.Context, sector SectorInfo) error {
if sector.PreCommitMessage == nil {
return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("precommit message was nil")})
@ -452,6 +498,22 @@ func (m *Sealing) handleCommitting(ctx statemachine.Context, sector SectorInfo)
}

func (m *Sealing) handleSubmitCommit(ctx statemachine.Context, sector SectorInfo) error {
cfg, err := m.getConfig()
if err != nil {
return xerrors.Errorf("getting config: %w", err)
}
if cfg.AggregateCommits {
nv, err := m.api.StateNetworkVersion(ctx.Context(), nil)
if err != nil {
return xerrors.Errorf("getting network version: %w", err)
}
if nv >= network.Version13 {
return ctx.Send(SectorSubmitCommitAggregate{})
}
}
tok, _, err := m.api.ChainHead(ctx.Context())
if err != nil {
log.Errorf("handleCommitting: api error, not proceeding: %+v", err)
@ -514,6 +576,29 @@ func (m *Sealing) handleSubmitCommit(ctx statemachine.Context, sector SectorInfo
})
}
func (m *Sealing) handleSubmitCommitAggregate(ctx statemachine.Context, sector SectorInfo) error {
if sector.CommD == nil || sector.CommR == nil {
return ctx.Send(SectorCommitFailed{xerrors.Errorf("sector had nil commR or commD")})
}
mcid, err := m.commiter.AddCommit(ctx.Context(), sector, AggregateInput{
info: proof.AggregateSealVerifyInfo{
Number: sector.SectorNumber,
Randomness: sector.TicketValue,
InteractiveRandomness: sector.SeedValue,
SealedCID: *sector.CommR,
UnsealedCID: *sector.CommD,
},
proof: sector.Proof, // TODO: is this correct?
spt: sector.SectorType,
})
if err != nil {
return ctx.Send(SectorCommitFailed{xerrors.Errorf("queuing commit for aggregation failed: %w", err)})
}
return ctx.Send(SectorCommitAggregateSent{mcid})
}
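For reference, the AggregateInput entries queued here are what the commit batcher eventually folds into specs-actors v5's aggregate verification envelope. A hedged sketch of that final assembly (buildAggregateInfos is illustrative, not a lotus function; the SnarkPack registered proof is an assumption based on FIP-0013):

package main

import (
	"github.com/filecoin-project/go-state-types/abi"
	proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
)

// buildAggregateInfos folds per-sector seal infos plus the aggregated proof
// bytes into the struct that VerifyAggregateSeals operates on.
func buildAggregateInfos(mid abi.ActorID, spt abi.RegisteredSealProof, infos []proof5.AggregateSealVerifyInfo, aggregate []byte) proof5.AggregateSealVerifyProofAndInfos {
	return proof5.AggregateSealVerifyProofAndInfos{
		Miner:          mid,
		SealProof:      spt,
		AggregateProof: abi.RegisteredAggregationProof_SnarkPackV1, // assumption: FIP-0013 aggregation scheme
		Proof:          aggregate,
		Infos:          infos,
	}
}

func main() {}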
func (m *Sealing) handleCommitWait(ctx statemachine.Context, sector SectorInfo) error {
if sector.CommitMessage == nil {
log.Errorf("sector %d entered commit wait state without a message cid", sector.SectorNumber)

View File

@ -21,14 +21,6 @@ import (
"github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
) )
var (
// TODO: config
TerminateBatchMax uint64 = 100 // adjust based on real-world gas numbers, actors limit at 10k
TerminateBatchMin uint64 = 1
TerminateBatchWait = 5 * time.Minute
)
type TerminateBatcherApi interface {
StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok TipSetToken) (*SectorLocation, error)
SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error)
@ -38,11 +30,12 @@ type TerminateBatcherApi interface {
}

type TerminateBatcher struct {
api     TerminateBatcherApi
maddr   address.Address
mctx    context.Context
addrSel AddrSel
feeCfg  FeeConfig
getConfig GetSealingConfigFunc

todo map[SectorLocation]*bitfield.BitField // MinerSectorLocation -> BitField
@ -53,13 +46,14 @@ type TerminateBatcher struct {
lk sync.Mutex
}

func NewTerminationBatcher(mctx context.Context, maddr address.Address, api TerminateBatcherApi, addrSel AddrSel, feeCfg FeeConfig, getConfig GetSealingConfigFunc) *TerminateBatcher {
b := &TerminateBatcher{
api:       api,
maddr:     maddr,
mctx:      mctx,
addrSel:   addrSel,
feeCfg:    feeCfg,
getConfig: getConfig,

todo:    map[SectorLocation]*bitfield.BitField{},
waiting: map[abi.SectorNumber][]chan cid.Cid{},
@ -86,6 +80,11 @@ func (b *TerminateBatcher) run() {
}
lastMsg = nil
cfg, err := b.getConfig()
if err != nil {
log.Warnw("TerminateBatcher getconfig error", "error", err)
}
var sendAboveMax, sendAboveMin bool
select {
case <-b.stop:
@ -93,13 +92,12 @@ func (b *TerminateBatcher) run() {
return
case <-b.notify:
sendAboveMax = true
case <-time.After(cfg.TerminateBatchWait):
sendAboveMin = true
case fr := <-b.force: // user triggered
forceRes = fr
}

lastMsg, err = b.processBatch(sendAboveMax, sendAboveMin)
if err != nil {
log.Warnw("TerminateBatcher processBatch error", "error", err)
@ -113,6 +111,11 @@ func (b *TerminateBatcher) processBatch(notif, after bool) (*cid.Cid, error) {
return nil, xerrors.Errorf("getting proving deadline info failed: %w", err) return nil, xerrors.Errorf("getting proving deadline info failed: %w", err)
} }
cfg, err := b.getConfig()
if err != nil {
return nil, xerrors.Errorf("getting sealing config: %W", err)
}
b.lk.Lock()
defer b.lk.Unlock()

params := miner2.TerminateSectorsParams{}
@ -180,7 +183,7 @@ func (b *TerminateBatcher) processBatch(notif, after bool) (*cid.Cid, error) {
Sectors: toTerminate,
})

if total >= uint64(miner.AddressedSectorsMax) || total >= cfg.TerminateBatchMax {
break
}
@ -193,11 +196,11 @@ func (b *TerminateBatcher) processBatch(notif, after bool) (*cid.Cid, error) {
return nil, nil // nothing to do
}

if notif && total < cfg.TerminateBatchMax {
return nil, nil
}

if after && total < cfg.TerminateBatchMin {
return nil, nil
}
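Read together, the two guards above define the batcher's policy: a notify (queue growth) only flushes a full batch, a timer tick flushes anything at or above the minimum, and a user-forced flush skips both checks. Restated compactly (shouldSend is illustrative, not lotus code):

package main

import "fmt"

// shouldSend mirrors processBatch's gating: notif fires on queue growth,
// after fires on the wait timer, and a forced flush has neither set.
func shouldSend(notif, after bool, total, min, max uint64) bool {
	if notif && total < max {
		return false // queue grew, but the batch is not full yet
	}
	if after && total < min {
		return false // timer fired, but below the minimum worth sending
	}
	return true // full batch, mature batch, or user-forced flush
}

func main() {
	fmt.Println(shouldSend(true, false, 40, 1, 100)) // false: keep accumulating
	fmt.Println(shouldSend(false, true, 40, 1, 100)) // true: timer expired
}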

go.mod
View File

@ -38,7 +38,7 @@ require (
github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec
github.com/filecoin-project/go-multistore v0.0.3
github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20
github.com/filecoin-project/go-paramfetch v0.0.2-0.20210330140417-936748d3f5ec
github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48
github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe
github.com/filecoin-project/go-statestore v0.1.1
@ -47,7 +47,7 @@ require (
github.com/filecoin-project/specs-actors/v2 v2.3.5-0.20210114162132-5b58b773f4fb
github.com/filecoin-project/specs-actors/v3 v3.1.0
github.com/filecoin-project/specs-actors/v4 v4.0.0
github.com/filecoin-project/specs-actors/v5 v5.0.0-20210517165532-c7cff61d07fb
github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506
github.com/filecoin-project/test-vectors/schema v0.0.5
github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1

go.sum
View File

@ -287,13 +287,12 @@ github.com/filecoin-project/go-multistore v0.0.3 h1:vaRBY4YiA2UZFPK57RNuewypB8u0
github.com/filecoin-project/go-multistore v0.0.3/go.mod h1:kaNqCC4IhU4B1uyr7YWFHd23TL4KM32aChS0jNkyUvQ=
github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20 h1:+/4aUeUoKr6AKfPE3mBhXA5spIV6UcKdTYDPNU2Tdmg=
github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20/go.mod h1:mPn+LRRd5gEKNAtc+r3ScpW2JRU/pj4NBKdADYWHiak=
github.com/filecoin-project/go-paramfetch v0.0.2-0.20210330140417-936748d3f5ec h1:gExwWUiT1TcARkxGneS4nvp9C+wBsKU0bFdg7qFpNco=
github.com/filecoin-project/go-paramfetch v0.0.2-0.20210330140417-936748d3f5ec/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc=
github.com/filecoin-project/go-state-types v0.0.0-20200903145444-247639ffa6ad/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I=
github.com/filecoin-project/go-state-types v0.0.0-20200904021452-1883f36ca2f4/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I=
github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48 h1:Jc4OprDp3bRDxbsrXNHPwJabZJM3iDy+ri8/1e0ZnX4=
github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
@ -316,8 +315,9 @@ github.com/filecoin-project/specs-actors/v3 v3.1.0 h1:s4qiPw8pgypqBGAy853u/zdZJ7
github.com/filecoin-project/specs-actors/v3 v3.1.0/go.mod h1:mpynccOLlIRy0QnR008BwYBwT9fen+sPR13MA1VmMww=
github.com/filecoin-project/specs-actors/v4 v4.0.0 h1:vMALksY5G3J5rj3q9rbcyB+f4Tk1xrLqSgdB3jOok4s=
github.com/filecoin-project/specs-actors/v4 v4.0.0/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng=
github.com/filecoin-project/specs-actors/v5 v5.0.0-20210512015452-4fe3889fff57/go.mod h1:283yBMMUSDB2abcjP/hhrwTkhb9h3sfM6KGrep/ZlBI=
github.com/filecoin-project/specs-actors/v5 v5.0.0-20210517165532-c7cff61d07fb h1:818gGdeEC+7aHGl2X7ptdtYuqoEgRsY3jwz+DvUYUFk=
github.com/filecoin-project/specs-actors/v5 v5.0.0-20210517165532-c7cff61d07fb/go.mod h1:283yBMMUSDB2abcjP/hhrwTkhb9h3sfM6KGrep/ZlBI=
github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 h1:Ur/l2+6qN+lQiqjozWWc5p9UDaAMDZKTlDS98oRnlIw=
github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g=
github.com/filecoin-project/test-vectors/schema v0.0.5 h1:w3zHQhzM4pYxJDl21avXjOKBLF8egrvwUwjpT8TquDg=

View File

@ -489,7 +489,9 @@
"ConfirmUpdateWorkerKey", "ConfirmUpdateWorkerKey",
"RepayDebt", "RepayDebt",
"ChangeOwnerAddress", "ChangeOwnerAddress",
"DisputeWindowedPoSt" "DisputeWindowedPoSt",
"PreCommitSectorBatch",
"ProveCommitAggregate"
], ],
"fil/5/storagepower": [ "fil/5/storagepower": [
"Send", "Send",

View File

@ -12,6 +12,7 @@ import (
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
"github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/builtin/market" "github.com/filecoin-project/lotus/chain/actors/builtin/market"
@ -109,7 +110,7 @@ func (mgr *SectorCommittedManager) OnDealSectorPreCommitted(ctx context.Context,
// Watch for a pre-commit message to the provider.
matchEvent := func(msg *types.Message) (bool, error) {
matched := msg.To == provider && (msg.Method == miner.Methods.PreCommitSector || msg.Method == miner.Methods.PreCommitSectorBatch)
return matched, nil
}
@ -137,12 +138,6 @@ func (mgr *SectorCommittedManager) OnDealSectorPreCommitted(ctx context.Context,
return true, nil
}

// When there is a reorg, the deal ID may change, so get the
// current deal ID from the publish message CID
res, err := mgr.dealInfo.GetCurrentDealInfo(ctx, ts.Key().Bytes(), &proposal, publishCid)
@ -150,13 +145,14 @@ func (mgr *SectorCommittedManager) OnDealSectorPreCommitted(ctx context.Context,
return false, err
}

// Extract the message parameters
sn, err := dealSectorInPreCommitMsg(msg, res)
if err != nil {
return false, err
}

if sn != nil {
cb(*sn, false, nil)
}

// Didn't find the deal ID in this message, so keep looking
@ -207,16 +203,11 @@ func (mgr *SectorCommittedManager) OnDealSectorCommitted(ctx context.Context, pr
// Match a prove-commit sent to the provider with the given sector number
matchEvent := func(msg *types.Message) (matched bool, err error) {
if msg.To != provider {
return false, nil
}

return sectorInCommitMsg(msg, sectorNumber)
}
// The deal must be accepted by the deal proposal start epoch, so timeout // The deal must be accepted by the deal proposal start epoch, so timeout
@ -273,6 +264,73 @@ func (mgr *SectorCommittedManager) OnDealSectorCommitted(ctx context.Context, pr
return nil
}
// dealSectorInPreCommitMsg tries to find a sector containing the specified deal
func dealSectorInPreCommitMsg(msg *types.Message, res sealing.CurrentDealInfo) (*abi.SectorNumber, error) {
switch msg.Method {
case miner.Methods.PreCommitSector:
var params miner.SectorPreCommitInfo
if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil {
return nil, xerrors.Errorf("unmarshal pre commit: %w", err)
}
// Check through the deal IDs associated with this message
for _, did := range params.DealIDs {
if did == res.DealID {
// Found the deal ID in this message. Callback with the sector ID.
return &params.SectorNumber, nil
}
}
case miner.Methods.PreCommitSectorBatch:
var params miner5.PreCommitSectorBatchParams
if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil {
return nil, xerrors.Errorf("unmarshal pre commit: %w", err)
}
for _, precommit := range params.Sectors {
// Check through the deal IDs associated with this message
for _, did := range precommit.DealIDs {
if did == res.DealID {
// Found the deal ID in this message. Callback with the sector ID.
return &precommit.SectorNumber, nil
}
}
}
default:
return nil, xerrors.Errorf("unexpected method %d", msg.Method)
}
return nil, nil
}
// sectorInCommitMsg checks if the provided message commits specified sector
func sectorInCommitMsg(msg *types.Message, sectorNumber abi.SectorNumber) (bool, error) {
switch msg.Method {
case miner.Methods.ProveCommitSector:
var params miner.ProveCommitSectorParams
if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil {
return false, xerrors.Errorf("failed to unmarshal prove commit sector params: %w", err)
}
return params.SectorNumber == sectorNumber, nil
case miner.Methods.ProveCommitAggregate:
var params miner5.ProveCommitAggregateParams
if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil {
return false, xerrors.Errorf("failed to unmarshal prove commit sector params: %w", err)
}
set, err := params.SectorNumbers.IsSet(uint64(sectorNumber))
if err != nil {
return false, xerrors.Errorf("checking if sectorNumber is set in commit aggregate message: %w", err)
}
return set, nil
default:
return false, nil
}
}
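The aggregate branch leans on go-bitfield's set semantics; a quick standalone check of the IsSet call used above, with hypothetical sector numbers:

package main

import (
	"fmt"

	"github.com/filecoin-project/go-bitfield"
)

func main() {
	// Hypothetical aggregate covering sectors 2, 5 and 9, as a
	// ProveCommitAggregateParams.SectorNumbers bitfield would encode them.
	sectors := bitfield.NewFromSet([]uint64{2, 5, 9})

	set, err := sectors.IsSet(5)
	if err != nil {
		panic(err)
	}
	fmt.Println(set) // true: sector 5 is part of the aggregate
}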
func (mgr *SectorCommittedManager) checkIfDealAlreadyActive(ctx context.Context, ts *types.TipSet, proposal *market.DealProposal, publishCid cid.Cid) (sealing.CurrentDealInfo, bool, error) {
res, err := mgr.dealInfo.GetCurrentDealInfo(ctx, ts.Key().Bytes(), proposal, publishCid)
if err != nil {

View File

@ -379,6 +379,7 @@ var MinerNode = Options(
// Sector storage: Proofs
Override(new(ffiwrapper.Verifier), ffiwrapper.ProofVerifier),
Override(new(ffiwrapper.Prover), ffiwrapper.ProofProver),
Override(new(storage2.Prover), From(new(sectorstorage.SectorManager))),

// Sealing

View File

@ -6,6 +6,8 @@ import (
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
) )
@ -82,6 +84,30 @@ type SealingConfig struct {
AlwaysKeepUnsealedCopy bool
// enable / disable precommit batching (takes effect after nv13)
BatchPreCommits bool
// maximum precommit batch size - batches will be sent immediately above this size
MaxPreCommitBatch int
MinPreCommitBatch int
// how long to wait before submitting a batch after crossing the minimum batch size
PreCommitBatchWait Duration
// time buffer for forceful batch submission before sectors in batch would start expiring
PreCommitBatchSlack Duration
// enable / disable commit aggregation (takes effect after nv13)
AggregateCommits bool
// maximum batched commit size - batches will be sent immediately above this size
MinCommitBatch int
MaxCommitBatch int
// how long to wait before submitting a batch after crossing the minimum batch size
CommitBatchWait Duration
// time buffer for forceful batch submission before sectors in batch would start expiring
CommitBatchSlack Duration
TerminateBatchMax uint64
TerminateBatchMin uint64
TerminateBatchWait Duration
// Keep this many sectors in sealing pipeline, start CC if needed
// todo TargetSealingSectors uint64
@ -237,6 +263,22 @@ func DefaultStorageMiner() *StorageMiner {
MaxSealingSectorsForDeals: 0,
WaitDealsDelay:            Duration(time.Hour * 6),
AlwaysKeepUnsealedCopy:    true,
BatchPreCommits: true,
MinPreCommitBatch: 1, // we must have at least one proof to aggregate
MaxPreCommitBatch: miner5.PreCommitSectorBatchMaxSize, //
PreCommitBatchWait: Duration(24 * time.Hour), // this can be up to 6 days
PreCommitBatchSlack: Duration(3 * time.Hour),
AggregateCommits: true,
MinCommitBatch: 1, // we must have at least one proof to aggregate
MaxCommitBatch: miner5.MaxAggregatedSectors, // this is the maximum aggregation per FIP13
CommitBatchWait: Duration(24 * time.Hour), // this can be up to 6 days
CommitBatchSlack: Duration(1 * time.Hour),
TerminateBatchMin: 1,
TerminateBatchMax: 100,
TerminateBatchWait: Duration(5 * time.Minute),
},
Storage: sectorstorage.SealerConfig{

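Assuming these fields surface one-to-one in the [Sealing] section of the miner's config.toml (which the NewGetSealConfigFunc/NewSetSealConfigFunc plumbing below suggests), an operator override could look like the following; values are illustrative, not recommendations:

[Sealing]
  BatchPreCommits = true
  MaxPreCommitBatch = 256
  PreCommitBatchWait = "24h0m0s"
  PreCommitBatchSlack = "3h0m0s"

  AggregateCommits = true
  MaxCommitBatch = 819
  CommitBatchWait = "24h0m0s"
  CommitBatchSlack = "1h0m0s"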
View File

@ -374,10 +374,26 @@ func (sm *StorageMinerAPI) SectorTerminatePending(ctx context.Context) ([]abi.Se
return sm.Miner.TerminatePending(ctx)
}
func (sm *StorageMinerAPI) SectorPreCommitFlush(ctx context.Context) (*cid.Cid, error) {
return sm.Miner.SectorPreCommitFlush(ctx)
}
func (sm *StorageMinerAPI) SectorPreCommitPending(ctx context.Context) ([]abi.SectorID, error) {
return sm.Miner.SectorPreCommitPending(ctx)
}
func (sm *StorageMinerAPI) SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber) error {
return sm.Miner.MarkForUpgrade(id)
}
func (sm *StorageMinerAPI) SectorCommitFlush(ctx context.Context) (*cid.Cid, error) {
return sm.Miner.CommitFlush(ctx)
}
func (sm *StorageMinerAPI) SectorCommitPending(ctx context.Context) ([]abi.SectorID, error) {
return sm.Miner.CommitPending(ctx)
}
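Together with the Miner passthroughs further down, these handlers let an operator tool force both queues out on demand. A minimal sketch against a hypothetical client exposing the same two methods (in practice this would be the generated StorageMiner RPC client):

package main

import (
	"context"
	"fmt"

	"github.com/ipfs/go-cid"
)

// flusher captures just the two new flush endpoints shown above.
type flusher interface {
	SectorPreCommitFlush(ctx context.Context) (*cid.Cid, error)
	SectorCommitFlush(ctx context.Context) (*cid.Cid, error)
}

// flushAll forces out any batched PreCommit and aggregated Commit messages.
// Assumption in this sketch: a nil CID means nothing was queued.
func flushAll(ctx context.Context, f flusher) error {
	pre, err := f.SectorPreCommitFlush(ctx)
	if err != nil {
		return err
	}
	fmt.Println("precommit batch message:", pre)

	com, err := f.SectorCommitFlush(ctx)
	if err != nil {
		return err
	}
	fmt.Println("commit aggregate message:", com)
	return nil
}

func main() {}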
func (sm *StorageMinerAPI) WorkerConnect(ctx context.Context, url string) error {
w, err := connectRemoteWorker(ctx, sm, url)
if err != nil {

View File

@ -99,7 +99,7 @@ func GetParams(spt abi.RegisteredSealProof) error {
}

// TODO: We should fetch the params for the actual proof type, not just based on the size.
if err := paramfetch.GetParams(context.TODO(), build.ParametersJSON(), build.SrsJSON(), uint64(ssize)); err != nil {
return xerrors.Errorf("fetching proof parameters: %w", err)
}
@ -202,6 +202,7 @@ type StorageMinerParams struct {
Sealer             sectorstorage.SectorManager
SectorIDCounter    sealing.SectorIDCounter
Verifier           ffiwrapper.Verifier
Prover             ffiwrapper.Prover
GetSealingConfigFn dtypes.GetSealingConfigFunc
Journal            journal.Journal
AddrSel            *storage.AddressSelector
@ -218,6 +219,7 @@ func StorageMiner(fc config.MinerFeeConfig) func(params StorageMinerParams) (*st
h      = params.Host
sc     = params.SectorIDCounter
verif  = params.Verifier
prover = params.Prover
gsd    = params.GetSealingConfigFn
j      = params.Journal
as     = params.AddrSel
@ -235,7 +237,7 @@ func StorageMiner(fc config.MinerFeeConfig) func(params StorageMinerParams) (*st
return nil, err
}

sm, err := storage.NewMiner(api, maddr, h, ds, sealer, sc, verif, prover, gsd, fc, j, as)
if err != nil {
return nil, err
}
@ -824,6 +826,22 @@ func NewSetSealConfigFunc(r repo.LockedRepo) (dtypes.SetSealingConfigFunc, error
MaxSealingSectorsForDeals: cfg.MaxSealingSectorsForDeals,
WaitDealsDelay:            config.Duration(cfg.WaitDealsDelay),
AlwaysKeepUnsealedCopy:    cfg.AlwaysKeepUnsealedCopy,
BatchPreCommits: cfg.BatchPreCommits,
MinPreCommitBatch: cfg.MinPreCommitBatch,
MaxPreCommitBatch: cfg.MaxPreCommitBatch,
PreCommitBatchWait: config.Duration(cfg.PreCommitBatchWait),
PreCommitBatchSlack: config.Duration(cfg.PreCommitBatchSlack),
AggregateCommits: cfg.AggregateCommits,
MinCommitBatch: cfg.MinCommitBatch,
MaxCommitBatch: cfg.MaxCommitBatch,
CommitBatchWait: config.Duration(cfg.CommitBatchWait),
CommitBatchSlack: config.Duration(cfg.CommitBatchSlack),
TerminateBatchMax: cfg.TerminateBatchMax,
TerminateBatchMin: cfg.TerminateBatchMin,
TerminateBatchWait: config.Duration(cfg.TerminateBatchWait),
}
})
return
@ -839,6 +857,22 @@ func NewGetSealConfigFunc(r repo.LockedRepo) (dtypes.GetSealingConfigFunc, error
MaxSealingSectorsForDeals: cfg.Sealing.MaxSealingSectorsForDeals,
WaitDealsDelay:            time.Duration(cfg.Sealing.WaitDealsDelay),
AlwaysKeepUnsealedCopy:    cfg.Sealing.AlwaysKeepUnsealedCopy,
BatchPreCommits: cfg.Sealing.BatchPreCommits,
MinPreCommitBatch: cfg.Sealing.MinPreCommitBatch,
MaxPreCommitBatch: cfg.Sealing.MaxPreCommitBatch,
PreCommitBatchWait: time.Duration(cfg.Sealing.PreCommitBatchWait),
PreCommitBatchSlack: time.Duration(cfg.Sealing.PreCommitBatchSlack),
AggregateCommits: cfg.Sealing.AggregateCommits,
MinCommitBatch: cfg.Sealing.MinCommitBatch,
MaxCommitBatch: cfg.Sealing.MaxCommitBatch,
CommitBatchWait: time.Duration(cfg.Sealing.CommitBatchWait),
CommitBatchSlack: time.Duration(cfg.Sealing.CommitBatchSlack),
TerminateBatchMax: cfg.Sealing.TerminateBatchMax,
TerminateBatchMin: cfg.Sealing.TerminateBatchMin,
TerminateBatchWait: time.Duration(cfg.Sealing.TerminateBatchWait),
}
})
return

View File

@ -148,6 +148,15 @@ func TestPledgeSectors(t *testing.T) {
})
}
func TestPledgeBatching(t *testing.T) {
t.Run("100", func(t *testing.T) {
test.TestPledgeBatching(t, builder.MockSbBuilder, 50*time.Millisecond, 100)
})
t.Run("100-before-nv13", func(t *testing.T) {
test.TestPledgeBeforeNv13(t, builder.MockSbBuilder, 50*time.Millisecond, 100)
})
}
func TestTapeFix(t *testing.T) {
logging.SetLogLevel("miner", "ERROR")
logging.SetLogLevel("chainstore", "ERROR")
@ -164,6 +173,7 @@ func TestWindowedPost(t *testing.T) {
}
logging.SetLogLevel("miner", "ERROR")
logging.SetLogLevel("gen", "ERROR")
logging.SetLogLevel("chainstore", "ERROR")
logging.SetLogLevel("chain", "ERROR")
logging.SetLogLevel("sub", "ERROR")
@ -212,6 +222,7 @@ func TestWindowPostDispute(t *testing.T) {
t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run") t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
} }
logging.SetLogLevel("miner", "ERROR") logging.SetLogLevel("miner", "ERROR")
logging.SetLogLevel("gen", "ERROR")
logging.SetLogLevel("chainstore", "ERROR") logging.SetLogLevel("chainstore", "ERROR")
logging.SetLogLevel("chain", "ERROR") logging.SetLogLevel("chain", "ERROR")
logging.SetLogLevel("sub", "ERROR") logging.SetLogLevel("sub", "ERROR")
@ -225,6 +236,7 @@ func TestWindowPostDisputeFails(t *testing.T) {
t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
}
logging.SetLogLevel("miner", "ERROR")
logging.SetLogLevel("gen", "ERROR")
logging.SetLogLevel("chainstore", "ERROR")
logging.SetLogLevel("chain", "ERROR")
logging.SetLogLevel("sub", "ERROR")
@ -238,6 +250,7 @@ func TestDeadlineToggling(t *testing.T) {
t.Skip("this takes a few minutes, set LOTUS_TEST_DEADLINE_TOGGLING=1 to run")
}
logging.SetLogLevel("miner", "ERROR")
logging.SetLogLevel("gen", "ERROR")
logging.SetLogLevel("chainstore", "ERROR")
logging.SetLogLevel("chain", "ERROR")
logging.SetLogLevel("sub", "ERROR")

View File

@ -463,6 +463,7 @@ func mockSbBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []tes
node.Test(),

node.Override(new(ffiwrapper.Verifier), mock.MockVerifier),
node.Override(new(ffiwrapper.Prover), mock.MockProver),

// so that we subscribe to pubsub topics immediately
node.Override(new(dtypes.Bootstrapper), dtypes.Bootstrapper(true)),
@ -486,6 +487,7 @@ func mockSbBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []tes
return mock.NewMockSectorMgr(nil), nil
}),
node.Override(new(ffiwrapper.Verifier), mock.MockVerifier),
node.Override(new(ffiwrapper.Prover), mock.MockProver),
node.Unset(new(*sectorstorage.Manager)),
))
}
@ -524,6 +526,7 @@ func mockSbBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []tes
return mock.NewMockSectorMgr(sectors), nil
}),
node.Override(new(ffiwrapper.Verifier), mock.MockVerifier),
node.Override(new(ffiwrapper.Prover), mock.MockProver),
node.Unset(new(*sectorstorage.Manager)),
opts,
))

View File

@ -16,6 +16,7 @@ import (
"github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/go-state-types/network"
market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
market5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/market"
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/blockstore"
@ -146,10 +147,36 @@ func (s SealingAPIAdapter) StateComputeDataCommitment(ctx context.Context, maddr
return cid.Undef, xerrors.Errorf("failed to unmarshal TipSetToken to TipSetKey: %w", err) return cid.Undef, xerrors.Errorf("failed to unmarshal TipSetToken to TipSetKey: %w", err)
} }
ccparams, err := actors.SerializeParams(&market2.ComputeDataCommitmentParams{ ts, err := s.delegate.ChainGetTipSet(ctx, tsk)
DealIDs: deals, if err != nil {
SectorType: sectorType, return cid.Cid{}, err
}) }
// using parent ts because the migration won't be run on the first nv13
// tipset we apply StateCall to (because we don't run migrations in StateCall
// and just apply to parent state)
nv, err := s.delegate.StateNetworkVersion(ctx, ts.Parents())
if err != nil {
return cid.Cid{}, err
}
var ccparams []byte
if nv < network.Version13 {
ccparams, err = actors.SerializeParams(&market2.ComputeDataCommitmentParams{
DealIDs: deals,
SectorType: sectorType,
})
} else {
ccparams, err = actors.SerializeParams(&market5.ComputeDataCommitmentParams{
Inputs: []*market5.SectorDataSpec{
{
DealIDs: deals,
SectorType: sectorType,
},
},
})
}
if err != nil {
return cid.Undef, xerrors.Errorf("computing params for ComputeDataCommitment: %w", err)
}
@ -169,12 +196,25 @@ func (s SealingAPIAdapter) StateComputeDataCommitment(ctx context.Context, maddr
return cid.Undef, xerrors.Errorf("receipt for ComputeDataCommitment had exit code %d", r.MsgRct.ExitCode) return cid.Undef, xerrors.Errorf("receipt for ComputeDataCommitment had exit code %d", r.MsgRct.ExitCode)
} }
var c cbg.CborCid if nv < network.Version13 {
if err := c.UnmarshalCBOR(bytes.NewReader(r.MsgRct.Return)); err != nil { var c cbg.CborCid
if err := c.UnmarshalCBOR(bytes.NewReader(r.MsgRct.Return)); err != nil {
return cid.Undef, xerrors.Errorf("failed to unmarshal CBOR to CborCid: %w", err)
}
return cid.Cid(c), nil
}
var cr market5.ComputeDataCommitmentReturn
if err := cr.UnmarshalCBOR(bytes.NewReader(r.MsgRct.Return)); err != nil {
return cid.Undef, xerrors.Errorf("failed to unmarshal CBOR to CborCid: %w", err) return cid.Undef, xerrors.Errorf("failed to unmarshal CBOR to CborCid: %w", err)
} }
return cid.Cid(c), nil if len(cr.CommDs) != 1 {
return cid.Undef, xerrors.Errorf("CommD output must have 1 entry")
}
return cid.Cid(cr.CommDs[0]), nil
} }
func (s SealingAPIAdapter) StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok sealing.TipSetToken) (*miner.SectorPreCommitOnChainInfo, error) {

View File

@ -48,6 +48,7 @@ type Miner struct {
ds     datastore.Batching
sc     sealing.SectorIDCounter
verif  ffiwrapper.Verifier
prover ffiwrapper.Prover
addrSel *AddressSelector

maddr address.Address
@ -116,7 +117,7 @@ type storageMinerApi interface {
WalletHas(context.Context, address.Address) (bool, error)
}

func NewMiner(api storageMinerApi, maddr address.Address, h host.Host, ds datastore.Batching, sealer sectorstorage.SectorManager, sc sealing.SectorIDCounter, verif ffiwrapper.Verifier, prover ffiwrapper.Prover, gsd dtypes.GetSealingConfigFunc, feeCfg config.MinerFeeConfig, journal journal.Journal, as *AddressSelector) (*Miner, error) {
m := &Miner{
api:    api,
feeCfg: feeCfg,
@ -125,6 +126,7 @@ func NewMiner(api storageMinerApi, maddr address.Address, h host.Host, ds datast
ds:      ds,
sc:      sc,
verif:   verif,
prover:  prover,
addrSel: as,

maddr: maddr,
@ -161,7 +163,7 @@ func (m *Miner) Run(ctx context.Context) error {
return m.addrSel.AddressFor(ctx, m.api, mi, use, goodFunds, minFunds)
}

m.sealing = sealing.New(adaptedAPI, fc, NewEventsAdapter(evts), m.maddr, m.ds, m.sealer, m.sc, m.verif, m.prover, &pcp, sealing.GetSealingConfigFunc(m.getSealConfig), m.handleSealingNotifications, as)

go m.sealing.Run(ctx) //nolint:errcheck // logged inside the function

View File

@ -59,6 +59,22 @@ func (m *Miner) TerminatePending(ctx context.Context) ([]abi.SectorID, error) {
return m.sealing.TerminatePending(ctx)
}
func (m *Miner) SectorPreCommitFlush(ctx context.Context) (*cid.Cid, error) {
return m.sealing.SectorPreCommitFlush(ctx)
}
func (m *Miner) SectorPreCommitPending(ctx context.Context) ([]abi.SectorID, error) {
return m.sealing.SectorPreCommitPending(ctx)
}
func (m *Miner) CommitFlush(ctx context.Context) (*cid.Cid, error) {
return m.sealing.CommitFlush(ctx)
}
func (m *Miner) CommitPending(ctx context.Context) ([]abi.SectorID, error) {
return m.sealing.CommitPending(ctx)
}
func (m *Miner) MarkForUpgrade(id abi.SectorNumber) error {
return m.sealing.MarkForUpgrade(id)
}

View File

@ -23,6 +23,7 @@ import (
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
tutils "github.com/filecoin-project/specs-actors/v2/support/testing" tutils "github.com/filecoin-project/specs-actors/v2/support/testing"
proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/build"
@ -144,6 +145,10 @@ func (m mockVerif) VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStV
return true, nil
}
func (m mockVerif) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) {
panic("implement me")
}
func (m mockVerif) VerifySeal(proof2.SealVerifyInfo) (bool, error) {
panic("implement me")
}