WIP Interactive PoRep

Author: Łukasz Magiera
Date:   2019-10-30 19:10:29 +01:00
Parent: dd9d6c4ef0
Commit: 7fa3e2db7a

7 changed files with 38 additions and 28 deletions

@@ -304,8 +304,11 @@ func (sma StorageMinerActor) ProveCommitSector(act *types.Actor, vmctx types.VMC
 	// TODO: ensure normalization to ID address
 	maddr := vmctx.Message().To
 
+	var pieces []sectorbuilder.PublicPieceInfo // TODO: GET ME FROM DEALS IN STORAGEMARKET
+	var seed []byte // TODO: GET ME FROM SOMEWHERE
+
 	rand, err := vmctx.GetRandomness(us.SubmitHeight + build.InteractivePoRepDelay)
-	if ok, err := ValidatePoRep(maddr, mi.SectorSize, us.CommD, us.CommR, rand, params.Proof, params.SectorID); err != nil {
+	if ok, err := ValidatePoRep(maddr, mi.SectorSize, us.CommD, us.CommR, rand, seed, params.Proof, params.SectorID, pieces); err != nil {
 		return nil, err
 	} else if !ok {
 		return nil, aerrors.New(2, "bad proof!")
@@ -579,8 +582,8 @@ func GetFromSectorSet(ctx context.Context, s types.Storage, ss cid.Cid, sectorID
 	return true, comms[0], comms[1], nil
 }
 
-func ValidatePoRep(maddr address.Address, ssize uint64, commD, commR, ticket, proof []byte, sectorID uint64) (bool, ActorError) {
-	ok, err := sectorbuilder.VerifySeal(ssize, commR, commD, maddr, ticket, sectorID, proof)
+func ValidatePoRep(maddr address.Address, ssize uint64, commD, commR, ticket, seed, proof []byte, sectorID uint64, pieces []sectorbuilder.PublicPieceInfo) (bool, ActorError) {
+	ok, err := sectorbuilder.VerifySeal(ssize, commR, commD, maddr, ticket, seed, sectorID, proof, pieces)
 	if err != nil {
 		return false, aerrors.Absorb(err, 25, "verify seal failed")
 	}
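
Interactive PoRep splits sector commitment into two phases: the ticket is randomness the miner committed to at pre-commit time, while the seed is chain randomness that only becomes known InteractivePoRepDelay epochs after the pre-commit lands (hence the GetRandomness call at us.SubmitHeight + build.InteractivePoRepDelay). Forcing the prover to respond to late randomness is what makes the proof interactive: the sealed replica must still exist between the two messages. The TODOs above leave seed and piece sourcing open; the sketch below shows the shape the check seems to be converging on. Only the names visible in the diff are real; the parameter plumbing and GetRandomness returning an ActorError are assumptions.

// Sketch only, not part of the commit.
func checkProveCommit(vmctx types.VMContext, maddr address.Address, ssize uint64,
	commD, commR, ticket, proof []byte, sectorID, submitHeight uint64,
	pieces []sectorbuilder.PublicPieceInfo) ActorError {

	// Interactive seed: chain randomness drawn a fixed delay after the
	// pre-commit message was included on chain.
	seed, err := vmctx.GetRandomness(submitHeight + build.InteractivePoRepDelay)
	if err != nil {
		return err
	}

	ok, aerr := ValidatePoRep(maddr, ssize, commD, commR, ticket, seed, proof, sectorID, pieces)
	if aerr != nil {
		return aerr
	}
	if !ok {
		return aerrors.New(2, "bad proof!")
	}
	return nil
}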

go.sum

@@ -715,6 +715,7 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 h1:fHDIZ2oxGnUZRN6WgWFCbYBjH9uqVPRCUVUDhs0wnbA=
 golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g=
 golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -751,6 +752,7 @@ golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190904154756-749cb33beabd h1:DBH9mDw0zluJT/R+nGuV3jWFWLFaHyYZWD4tOT+cjn0=
 golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190922100055-0a153f010e69 h1:rOhMmluY6kLMhdnrivzec6lLgaVbMHMn2ISQXJeJ5EM=
 golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/text v0.0.0-20170915090833-1cbadb444a80/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=

@@ -26,7 +26,11 @@ type SectorInfo = sectorbuilder.SectorInfo
 type SealTicket = sectorbuilder.SealTicket
 type SealedSectorMetadata = sectorbuilder.SealedSectorMetadata
+type SealSeed = sectorbuilder.SealSeed
+type SealCommitOutput = sectorbuilder.SealCommitOutput
+type PublicPieceInfo = sectorbuilder.PublicPieceInfo
 
 const CommLen = sectorbuilder.CommitmentBytesLen
@@ -37,6 +41,7 @@ type SectorBuilder struct {
 type SectorBuilderConfig struct {
 	SectorSize uint64
 	Miner address.Address
 
+	CacheDir string
 	SealedDir string
 	StagedDir string
 	MetadataDir string
@@ -44,8 +49,9 @@ type SectorBuilderConfig struct {
 func New(cfg *SectorBuilderConfig) (*SectorBuilder, error) {
 	proverId := addressToProverID(cfg.Miner)
+	numWorkerThreads := uint8(5) // TODO: from config
 
-	sbp, err := sectorbuilder.InitSectorBuilder(cfg.SectorSize, 2, 1, 0, cfg.MetadataDir, proverId, cfg.SealedDir, cfg.StagedDir, 16)
+	sbp, err := sectorbuilder.InitSectorBuilder(cfg.SectorSize, 2, 0, cfg.MetadataDir, proverId, cfg.SealedDir, cfg.StagedDir, cfg.CacheDir, 16, numWorkerThreads)
 	if err != nil {
 		return nil, err
 	}
@@ -84,12 +90,12 @@ func (sb *SectorBuilder) ReadPieceFromSealedSector(pieceKey string) ([]byte, err
 	return sectorbuilder.ReadPieceFromSealedSector(sb.handle, pieceKey)
 }
 
-func (sb *SectorBuilder) SealSector(sectorID uint64, ticket SealTicket) (SealedSectorMetadata, error) {
-	return sectorbuilder.SealSector(sb.handle, sectorID, ticket)
+func (sb *SectorBuilder) SealSector(sectorID uint64, seed SealSeed) (SealCommitOutput, error) {
+	return sectorbuilder.SealCommit(sb.handle, sectorID, seed)
 }
 
-func (sb *SectorBuilder) ResumeSealSector(sectorID uint64) (SealedSectorMetadata, error) {
-	return sectorbuilder.ResumeSealSector(sb.handle, sectorID)
+func (sb *SectorBuilder) ResumeSealCommit(sectorID uint64) (SealCommitOutput, error) {
+	return sectorbuilder.ResumeSealCommit(sb.handle, sectorID)
 }
 
 func (sb *SectorBuilder) SealStatus(sector uint64) (SectorSealingStatus, error) {
@@ -122,22 +128,15 @@ func (sb *SectorBuilder) GeneratePoSt(sectorInfo SortedSectorInfo, challengeSeed
 var UserBytesForSectorSize = sectorbuilder.GetMaxUserBytesPerStagedSector
 
-func VerifySeal(sectorSize uint64, commR, commD []byte, proverID address.Address, ticket []byte, sectorID uint64, proof []byte) (bool, error) {
-	var commRa, commDa, ticketa [32]byte
+func VerifySeal(sectorSize uint64, commR, commD []byte, proverID address.Address, ticket []byte, seed []byte, sectorID uint64, proof []byte, pieces []PublicPieceInfo) (bool, error) {
+	var commRa, commDa, ticketa, seeda [32]byte
 	copy(commRa[:], commR)
 	copy(commDa[:], commD)
 	copy(ticketa[:], ticket)
+	copy(seeda[:], seed)
 	proverIDa := addressToProverID(proverID)
 
-	return sectorbuilder.VerifySeal(sectorSize, commRa, commDa, proverIDa, ticketa, sectorID, proof)
-}
-
-func VerifyPieceInclusionProof(sectorSize uint64, pieceSize uint64, commP []byte, commD []byte, proof []byte) (bool, error) {
-	var commPa, commDa [32]byte
-	copy(commPa[:], commP)
-	copy(commDa[:], commD)
-
-	return sectorbuilder.VerifyPieceInclusionProof(sectorSize, pieceSize, commPa, commDa, proof)
+	return sectorbuilder.VerifySeal(sectorSize, commRa, commDa, proverIDa, ticketa, seeda, sectorID, proof, pieces)
 }
 
 func NewSortedSectorInfo(sectors []SectorInfo) SortedSectorInfo {
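
The verifier-side wrapper now threads the interactive seed and the sector's public piece info through to rust-sectorbuilder, copying each 32-byte commitment into a fixed-size array. A usage sketch, assuming PublicPieceInfo exposes the piece's padded size and CommP (field names are an assumption about the Go bindings; the call order matches the new signature above):

// Usage sketch, not part of the commit.
func verifySealSketch(ssize uint64, commR, commD, ticket, seed, proof []byte,
	maddr address.Address, sectorID uint64,
	pieceSize uint64, commP [32]byte) (bool, error) {

	pieces := []sectorbuilder.PublicPieceInfo{
		// One entry per piece sealed into the sector. Field names assumed.
		{Size: pieceSize, CommP: commP},
	}
	return sectorbuilder.VerifySeal(ssize, commR, commD, maddr, ticket, seed, sectorID, proof, pieces)
}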

@@ -36,12 +36,14 @@ func TestSealAndVerify(t *testing.T) {
 		t.Fatal(err)
 	}
 
+	cache := filepath.Join(dir, "cache")
 	metadata := filepath.Join(dir, "meta")
 	sealed := filepath.Join(dir, "sealed")
 	staging := filepath.Join(dir, "staging")
 
 	sb, err := sectorbuilder.New(&sectorbuilder.SectorBuilderConfig{
 		SectorSize: sectorSize,
+		CacheDir: cache,
 		SealedDir: sealed,
 		StagedDir: staging,
 		MetadataDir: metadata,
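
Put together, standing up a SectorBuilder now takes four directories. Assembled from the fragments above into one setup sketch (the tempdir scaffolding and the addr value are assumptions; the config fields are the commit's):

// Assembled test-setup sketch; assumes io/ioutil and path/filepath imports.
dir, err := ioutil.TempDir("", "sbtest")
if err != nil {
	t.Fatal(err)
}
sb, err := sectorbuilder.New(&sectorbuilder.SectorBuilderConfig{
	SectorSize:  sectorSize,
	Miner:       addr, // test miner address, assumed
	CacheDir:    filepath.Join(dir, "cache"), // new in this commit
	SealedDir:   filepath.Join(dir, "sealed"),
	StagedDir:   filepath.Join(dir, "staging"),
	MetadataDir: filepath.Join(dir, "meta"),
})
if err != nil {
	t.Fatal(err)
}
_ = sb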

@@ -55,6 +55,7 @@ func SectorBuilderConfig(storagePath string) func(dtypes.MetadataDS, api.FullNod
 		return nil, err
 	}
 
+	cache := filepath.Join(sp, "cache")
 	metadata := filepath.Join(sp, "meta")
 	sealed := filepath.Join(sp, "sealed")
 	staging := filepath.Join(sp, "staging")
@@ -62,6 +63,7 @@ func SectorBuilderConfig(storagePath string) func(dtypes.MetadataDS, api.FullNod
 	sb := &sectorbuilder.SectorBuilderConfig{
 		Miner: minerAddr,
 		SectorSize: ssize,
+		CacheDir: cache,
 		MetadataDir: metadata,
 		SealedDir: sealed,
 		StagedDir: staging,

@@ -92,7 +92,7 @@ func (m *Miner) Run(ctx context.Context) error {
 }
 
 func (m *Miner) commitUntrackedSectors(ctx context.Context) error {
-	sealed, err := m.secst.Sealed()
+	sealed, err := m.secst.Committed()
 	if err != nil {
 		return err
 	}
@@ -159,6 +159,8 @@ func (m *Miner) commitSector(ctx context.Context, sinfo sectorbuilder.SectorSeal
 		return xerrors.Errorf("failed to check out own sector size: %w", err)
 	}
 
+	// TODO: Interactive porep
 	ok, err := sectorbuilder.VerifySeal(ssize, sinfo.CommR[:], sinfo.CommD[:], m.maddr, sinfo.Ticket.TicketBytes[:], sinfo.SectorID, sinfo.Proof)
 	if err != nil {
 		log.Error("failed to verify seal we just created: ", err)
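
The miner-side self-check still calls the old seven-argument VerifySeal and is flagged with the interactive-porep TODO, so this file does not yet line up with the widened signature. A sketch of where that TODO appears to be headed, mirroring the widened on-chain inputs (the Seed and Pieces fields on the sealing status are hypothetical names, not the commit's API):

// Hypothetical follow-up, not part of the commit.
func (m *Miner) selfCheckSeal(ssize uint64, sinfo sectorbuilder.SectorSealingStatus) bool {
	ok, err := sectorbuilder.VerifySeal(ssize, sinfo.CommR[:], sinfo.CommD[:], m.maddr,
		sinfo.Ticket.TicketBytes[:], sinfo.Seed.TicketBytes[:], // Seed field assumed
		sinfo.SectorID, sinfo.Proof, sinfo.Pieces) // Pieces field assumed
	if err != nil {
		log.Error("failed to verify seal we just created: ", err)
		return false
	}
	return ok
}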

@@ -32,7 +32,7 @@ type dealMapping struct {
 	Committed bool
 }
 
-type TicketFn func(context.Context) (*sectorbuilder.SealTicket, error)
+type TicketFn func(context.Context) (*sectorbuilder.SealSeed, error)
 
 // TODO: eventually handle sector storage here instead of in rust-sectorbuilder
 type Store struct {
@@ -65,7 +65,7 @@ func (s *Store) Service() {
 	go s.service()
 }
 
-func (s *Store) poll() {
+func (s *Store) poll() { // TODO: REMOVE ME (and just use the fact that sectorbuilder methods are now blocking)
 	log.Debug("polling for sealed sectors...")
 
 	// get a list of sectors to poll
@@ -87,7 +87,7 @@ func (s *Store) poll() {
 			continue
 		}
 
-		if status.State == sealing_state.Sealed {
+		if status.State == sealing_state.Committed {
 			done = append(done, status)
 		}
 	}
@@ -119,7 +119,7 @@ func (s *Store) restartSealing() {
 			return
 		}
 
-		if status.State != sealing_state.Paused {
+		if status.State != sealing_state.CommittingPaused { // TODO: Also handle PreCommit!
 			continue
 		}
@@ -127,7 +127,7 @@ func (s *Store) restartSealing() {
 		go func() {
 			// TODO: when we refactor wait-for-seal below, care about this output too
 			// (see SealSector below)
-			_, err := s.sb.ResumeSealSector(sid)
+			_, err := s.sb.ResumeSealCommit(sid)
 			if err != nil {
 				return
 			}
@@ -293,7 +293,7 @@ func (s *Store) WaitSeal(ctx context.Context, sector uint64) (sectorbuilder.Sect
 	return s.sb.SealStatus(sector)
 }
 
-func (s *Store) Sealed() ([]sectorbuilder.SectorSealingStatus, error) {
+func (s *Store) Committed() ([]sectorbuilder.SectorSealingStatus, error) {
 	l, err := s.sb.GetAllStagedSectors()
 	if err != nil {
 		return nil, err
@@ -306,7 +306,7 @@ func (s *Store) Sealed() ([]sectorbuilder.SectorSealingStatus, error) {
 			return nil, err
 		}
 
-		if status.State != sealing_state.Sealed {
+		if status.State != sealing_state.Committed {
 			continue
 		}
 		out = append(out, status)
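
With TicketFn now producing a SealSeed, the node has to wire the interactive seed in from chain randomness at the right height. An illustrative implementation (the getRand helper is injected precisely because the commit does not show how randomness is fetched; SealSeed's field names are assumed to mirror SealTicket's):

// Illustrative only, not part of the commit.
func seedFn(height uint64, getRand func(context.Context, uint64) ([]byte, error)) TicketFn {
	return func(ctx context.Context) (*sectorbuilder.SealSeed, error) {
		rand, err := getRand(ctx, height) // chain randomness at `height`
		if err != nil {
			return nil, err
		}
		var tb [32]byte
		copy(tb[:], rand)
		return &sectorbuilder.SealSeed{
			BlockHeight: height, // field names assumed
			TicketBytes: tb,
		}, nil
	}
}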