ffiwrapper: Separate Prover interface

Łukasz Magiera 2021-05-19 15:20:23 +02:00
parent eafaf6d236
commit a5677d1b7a
12 changed files with 51 additions and 31 deletions

View File

@@ -693,10 +693,6 @@ func (m genFakeVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVeri
panic("not supported")
}
func (m genFakeVerifier) AggregateSealProofs(ai proof5.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) {
panic("not supported")
}
func (m genFakeVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) {
panic("not supported")
}

View File

@@ -101,8 +101,4 @@ func (cv cachingVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVer
return cv.backend.VerifyAggregateSeals(aggregate)
}
func (cv cachingVerifier) AggregateSealProofs(proofType abi.RegisteredSealProof, rap abi.RegisteredAggregationProof, proofs [][]byte) ([]byte, error) {
return cv.backend.AggregateSealProofs(proofType, rap, proofs)
}
var _ ffiwrapper.Verifier = (*cachingVerifier)(nil)

View File

@@ -0,0 +1,18 @@
//+build cgo
package ffiwrapper
import (
ffi "github.com/filecoin-project/filecoin-ffi"
proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
)
var ProofProver = proofProver{}
var _ Prover = ProofProver
type proofProver struct{}
func (v proofProver) AggregateSealProofs(aggregateInfo proof5.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) {
return ffi.AggregateSealProofs(aggregateInfo, proofs)
}
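Editor's note: ProofProver is the cgo-backed implementation of the new interface, so it only exists in builds where filecoin-ffi is compiled in. Below is a minimal, hedged sketch (not part of this commit) of picking it up directly; the import path and the old-style build tag are assumed to match this commit's tree layout.

//+build cgo

package main

import (
	"fmt"

	"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
)

func main() {
	// ProofProver is only compiled under cgo, hence the build tag above;
	// non-cgo builds have to bind some other Prover implementation.
	var p ffiwrapper.Prover = ffiwrapper.ProofProver
	fmt.Printf("%T satisfies ffiwrapper.Prover\n", p)
}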

View File

@@ -40,8 +40,12 @@ type Verifier interface {
VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error)
GenerateWinningPoStSectorChallenge(context.Context, abi.RegisteredPoStProof, abi.ActorID, abi.PoStRandomness, uint64) ([]uint64, error)
}
// Prover contains cheap proving-related methods
type Prover interface {
// TODO: move GenerateWinningPoStSectorChallenge from the Verifier interface to here
// cheap, makes no sense to put this on the storage interface
AggregateSealProofs(aggregateInfo proof5.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error)
}
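Editor's note: because Prover is a single-method interface, alternative implementations are cheap to supply. The nullProver below is a hypothetical stand-in (not in this commit) showing what a no-op prover would look like if it lived alongside the interface in package ffiwrapper, e.g. for builds or tests where aggregation is never exercised.

package ffiwrapper

import (
	"golang.org/x/xerrors"

	proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
)

// nullProver is a hypothetical stand-in that satisfies Prover without
// calling into filecoin-ffi.
type nullProver struct{}

func (nullProver) AggregateSealProofs(aggregateInfo proof5.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) {
	return nil, xerrors.Errorf("proof aggregation is not supported by this prover")
}

var _ Prover = nullProver{}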

View File

@@ -139,7 +139,3 @@ func (proofVerifier) GenerateWinningPoStSectorChallenge(ctx context.Context, pro
randomness[31] &= 0x3f
return ffi.GenerateWinningPoStSectorChallenge(proofType, minerID, randomness, eligibleSectorCount)
}
func (v proofVerifier) AggregateSealProofs(aggregateInfo proof5.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) {
return ffi.AggregateSealProofs(aggregateInfo, proofs)
}

View File

@@ -34,7 +34,7 @@ type SectorMgr struct {
lk sync.Mutex
}
type mockVerif struct{}
type mockVerifProver struct{}
func NewMockSectorMgr(genesisSectors []abi.SectorID) *SectorMgr {
sectors := make(map[abi.SectorID]*sectorState)
@@ -490,7 +490,7 @@ func (mgr *SectorMgr) ReturnFetch(ctx context.Context, callID storiface.CallID,
panic("not supported")
}
func (m mockVerif) VerifySeal(svi proof5.SealVerifyInfo) (bool, error) {
func (m mockVerifProver) VerifySeal(svi proof5.SealVerifyInfo) (bool, error) {
plen, err := svi.SealProof.ProofSize()
if err != nil {
return false, err
@@ -511,7 +511,7 @@ func (m mockVerif) VerifySeal(svi proof5.SealVerifyInfo) (bool, error) {
return true, nil
}
func (m mockVerif) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) {
func (m mockVerifProver) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) {
out := make([]byte, 200)
for pi, svi := range aggregate.Infos {
for i := 0; i < 32; i++ {
@@ -531,7 +531,7 @@ func (m mockVerif) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProo
return bytes.Equal(aggregate.Proof, out), nil
}
func (m mockVerif) AggregateSealProofs(aggregateInfo proof5.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) {
func (m mockVerifProver) AggregateSealProofs(aggregateInfo proof5.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) {
out := make([]byte, 200) // todo: figure out a more realistic length
for pi, proof := range proofs {
for i := range proof[:32] {
@@ -548,12 +548,12 @@ func (m mockVerif) AggregateSealProofs(aggregateInfo proof5.AggregateSealVerifyP
return out, nil
}
func (m mockVerif) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) {
func (m mockVerifProver) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) {
info.Randomness[31] &= 0x3f
return true, nil
}
func (m mockVerif) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) {
func (m mockVerifProver) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) {
if len(info.Proofs) != 1 {
return false, xerrors.Errorf("expected 1 proof entry")
}
@@ -567,15 +567,17 @@ func (m mockVerif) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStV
return true, nil
}
func (m mockVerif) GenerateDataCommitment(pt abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) {
func (m mockVerifProver) GenerateDataCommitment(pt abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) {
return ffiwrapper.GenerateUnsealedCID(pt, pieces)
}
func (m mockVerif) GenerateWinningPoStSectorChallenge(ctx context.Context, proofType abi.RegisteredPoStProof, minerID abi.ActorID, randomness abi.PoStRandomness, eligibleSectorCount uint64) ([]uint64, error) {
func (m mockVerifProver) GenerateWinningPoStSectorChallenge(ctx context.Context, proofType abi.RegisteredPoStProof, minerID abi.ActorID, randomness abi.PoStRandomness, eligibleSectorCount uint64) ([]uint64, error) {
return []uint64{0}, nil
}
var MockVerifier = mockVerif{}
var MockVerifier = mockVerifProver{}
var MockProver = mockVerifProver{}
var _ storage.Sealer = &SectorMgr{}
var _ ffiwrapper.Verifier = MockVerifier
var _ ffiwrapper.Prover = MockProver

View File

@@ -46,7 +46,7 @@ type CommitBatcher struct {
addrSel AddrSel
feeCfg FeeConfig
getConfig GetSealingConfigFunc
verif ffiwrapper.Verifier
prover ffiwrapper.Prover
deadlines map[abi.SectorNumber]time.Time
todo map[abi.SectorNumber]AggregateInput
@@ -57,7 +57,7 @@ type CommitBatcher struct {
lk sync.Mutex
}
func NewCommitBatcher(mctx context.Context, maddr address.Address, api CommitBatcherApi, addrSel AddrSel, feeCfg FeeConfig, getConfig GetSealingConfigFunc, verif ffiwrapper.Verifier) *CommitBatcher {
func NewCommitBatcher(mctx context.Context, maddr address.Address, api CommitBatcherApi, addrSel AddrSel, feeCfg FeeConfig, getConfig GetSealingConfigFunc, prov ffiwrapper.Prover) *CommitBatcher {
b := &CommitBatcher{
api: api,
maddr: maddr,
@@ -65,7 +65,7 @@ func NewCommitBatcher(mctx context.Context, maddr address.Address, api CommitBat
addrSel: addrSel,
feeCfg: feeCfg,
getConfig: getConfig,
verif: verif,
prover: prov,
deadlines: map[abi.SectorNumber]time.Time{},
todo: map[abi.SectorNumber]AggregateInput{},
@@ -202,7 +202,7 @@ func (b *CommitBatcher) processBatch(notif, after bool) (*cid.Cid, error) {
return nil, xerrors.Errorf("getting miner id: %w", err)
}
params.AggregateProof, err = b.verif.AggregateSealProofs(proof5.AggregateSealVerifyProofAndInfos{
params.AggregateProof, err = b.prover.AggregateSealProofs(proof5.AggregateSealVerifyProofAndInfos{
Miner: abi.ActorID(mid),
SealProof: b.todo[infos[0].Number].spt,
AggregateProof: arp,

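Editor's note: the hunk above is truncated by the diff context, so for orientation: the batcher hands the prover one AggregateSealVerifyInfo plus one porep proof per queued sector and gets a single aggregate proof back, which becomes params.AggregateProof. Below is a self-contained, hedged sketch of that shape; the package name, queuedCommit, aggregateQueued, and the info/proof field names are illustrative stand-ins (only spt appears in the hunk), not the batcher's actual AggregateInput.

package sealingsketch

import (
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-state-types/abi"
	proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"

	"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
)

// queuedCommit pairs one sector's seal-verify info with its individual proof;
// it is a hypothetical stand-in for the batcher's AggregateInput.
type queuedCommit struct {
	spt   abi.RegisteredSealProof
	info  proof5.AggregateSealVerifyInfo
	proof []byte
}

// aggregateQueued shows the shape of the Prover call: n infos and n proofs in,
// one aggregate proof out.
func aggregateQueued(prover ffiwrapper.Prover, miner abi.ActorID, arp abi.RegisteredAggregationProof, queued []queuedCommit) ([]byte, error) {
	if len(queued) == 0 {
		return nil, xerrors.Errorf("nothing to aggregate")
	}

	infos := make([]proof5.AggregateSealVerifyInfo, 0, len(queued))
	proofs := make([][]byte, 0, len(queued))
	for _, q := range queued {
		infos = append(infos, q.info)
		proofs = append(proofs, q.proof)
	}

	return prover.AggregateSealProofs(proof5.AggregateSealVerifyProofAndInfos{
		Miner:          miner,
		SealProof:      queued[0].spt,
		AggregateProof: arp,
		Infos:          infos,
	}, proofs)
}

In the production wiring further down in this diff, the value injected for ffiwrapper.Prover is ffiwrapper.ProofProver, while the test builders swap in mock.MockProver.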
View File

@@ -132,7 +132,7 @@ type pendingPiece struct {
accepted func(abi.SectorNumber, abi.UnpaddedPieceSize, error)
}
func New(api SealingAPI, fc FeeConfig, events Events, maddr address.Address, ds datastore.Batching, sealer sectorstorage.SectorManager, sc SectorIDCounter, verif ffiwrapper.Verifier, pcp PreCommitPolicy, gc GetSealingConfigFunc, notifee SectorStateNotifee, as AddrSel) *Sealing {
func New(api SealingAPI, fc FeeConfig, events Events, maddr address.Address, ds datastore.Batching, sealer sectorstorage.SectorManager, sc SectorIDCounter, verif ffiwrapper.Verifier, prov ffiwrapper.Prover, pcp PreCommitPolicy, gc GetSealingConfigFunc, notifee SectorStateNotifee, as AddrSel) *Sealing {
s := &Sealing{
api: api,
feeCfg: fc,
@@ -155,7 +155,7 @@ func New(api SealingAPI, fc FeeConfig, events Events, maddr address.Address, ds
terminator: NewTerminationBatcher(context.TODO(), maddr, api, as, fc, gc),
precommiter: NewPreCommitBatcher(context.TODO(), maddr, api, as, fc, gc),
commiter: NewCommitBatcher(context.TODO(), maddr, api, as, fc, gc, verif),
commiter: NewCommitBatcher(context.TODO(), maddr, api, as, fc, gc, prov),
getConfig: gc,
dealInfo: &CurrentDealInfoManager{api},

View File

@@ -379,6 +379,7 @@ var MinerNode = Options(
// Sector storage: Proofs
Override(new(ffiwrapper.Verifier), ffiwrapper.ProofVerifier),
Override(new(ffiwrapper.Prover), ffiwrapper.ProofProver),
Override(new(storage2.Prover), From(new(sectorstorage.SectorManager))),
// Sealing

View File

@@ -202,6 +202,7 @@ type StorageMinerParams struct {
Sealer sectorstorage.SectorManager
SectorIDCounter sealing.SectorIDCounter
Verifier ffiwrapper.Verifier
Prover ffiwrapper.Prover
GetSealingConfigFn dtypes.GetSealingConfigFunc
Journal journal.Journal
AddrSel *storage.AddressSelector
@@ -218,6 +219,7 @@ func StorageMiner(fc config.MinerFeeConfig) func(params StorageMinerParams) (*st
h = params.Host
sc = params.SectorIDCounter
verif = params.Verifier
prover = params.Prover
gsd = params.GetSealingConfigFn
j = params.Journal
as = params.AddrSel
@@ -235,7 +237,7 @@ func StorageMiner(fc config.MinerFeeConfig) func(params StorageMinerParams) (*st
return nil, err
}
sm, err := storage.NewMiner(api, maddr, h, ds, sealer, sc, verif, gsd, fc, j, as)
sm, err := storage.NewMiner(api, maddr, h, ds, sealer, sc, verif, prover, gsd, fc, j, as)
if err != nil {
return nil, err
}

View File

@@ -463,6 +463,7 @@ func mockSbBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []tes
node.Test(),
node.Override(new(ffiwrapper.Verifier), mock.MockVerifier),
node.Override(new(ffiwrapper.Prover), mock.MockProver),
// so that we subscribe to pubsub topics immediately
node.Override(new(dtypes.Bootstrapper), dtypes.Bootstrapper(true)),
@@ -486,6 +487,7 @@ func mockSbBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []tes
return mock.NewMockSectorMgr(nil), nil
}),
node.Override(new(ffiwrapper.Verifier), mock.MockVerifier),
node.Override(new(ffiwrapper.Prover), mock.MockProver),
node.Unset(new(*sectorstorage.Manager)),
))
}
@@ -524,6 +526,7 @@ func mockSbBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []tes
return mock.NewMockSectorMgr(sectors), nil
}),
node.Override(new(ffiwrapper.Verifier), mock.MockVerifier),
node.Override(new(ffiwrapper.Prover), mock.MockProver),
node.Unset(new(*sectorstorage.Manager)),
opts,
))

View File

@@ -48,6 +48,7 @@ type Miner struct {
ds datastore.Batching
sc sealing.SectorIDCounter
verif ffiwrapper.Verifier
prover ffiwrapper.Prover
addrSel *AddressSelector
maddr address.Address
@@ -116,7 +117,7 @@ type storageMinerApi interface {
WalletHas(context.Context, address.Address) (bool, error)
}
func NewMiner(api storageMinerApi, maddr address.Address, h host.Host, ds datastore.Batching, sealer sectorstorage.SectorManager, sc sealing.SectorIDCounter, verif ffiwrapper.Verifier, gsd dtypes.GetSealingConfigFunc, feeCfg config.MinerFeeConfig, journal journal.Journal, as *AddressSelector) (*Miner, error) {
func NewMiner(api storageMinerApi, maddr address.Address, h host.Host, ds datastore.Batching, sealer sectorstorage.SectorManager, sc sealing.SectorIDCounter, verif ffiwrapper.Verifier, prover ffiwrapper.Prover, gsd dtypes.GetSealingConfigFunc, feeCfg config.MinerFeeConfig, journal journal.Journal, as *AddressSelector) (*Miner, error) {
m := &Miner{
api: api,
feeCfg: feeCfg,
@@ -125,6 +126,7 @@ func (m *Miner) Run(ctx context.Context) error {
ds: ds,
sc: sc,
verif: verif,
prover: prover,
addrSel: as,
maddr: maddr,
@@ -161,7 +163,7 @@ func (m *Miner) Run(ctx context.Context) error {
return m.addrSel.AddressFor(ctx, m.api, mi, use, goodFunds, minFunds)
}
m.sealing = sealing.New(adaptedAPI, fc, NewEventsAdapter(evts), m.maddr, m.ds, m.sealer, m.sc, m.verif, &pcp, sealing.GetSealingConfigFunc(m.getSealConfig), m.handleSealingNotifications, as)
m.sealing = sealing.New(adaptedAPI, fc, NewEventsAdapter(evts), m.maddr, m.ds, m.sealer, m.sc, m.verif, m.prover, &pcp, sealing.GetSealingConfigFunc(m.getSealConfig), m.handleSealingNotifications, as)
go m.sealing.Run(ctx) //nolint:errcheck // logged inside the function