lotus/storage/sealer/ffiwrapper/sealer_test.go

package ffiwrapper

import (
	"bytes"
	"context"
	crand "crypto/rand"
	"fmt"
	"io"
	"io/fs"
	"math/rand"
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/ipfs/go-cid"
	logging "github.com/ipfs/go-log/v2"
	"github.com/stretchr/testify/require"
	"golang.org/x/xerrors"

	ffi "github.com/filecoin-project/filecoin-ffi"
	"github.com/filecoin-project/filecoin-ffi/cgo"
	commpffi "github.com/filecoin-project/go-commp-utils/ffiwrapper"
	commcid "github.com/filecoin-project/go-fil-commcid"
	"github.com/filecoin-project/go-paramfetch"
	"github.com/filecoin-project/go-state-types/abi"
	prooftypes "github.com/filecoin-project/go-state-types/proof"

	"github.com/filecoin-project/lotus/chain/actors/policy"
	"github.com/filecoin-project/lotus/storage/pipeline/lib/nullreader"
	"github.com/filecoin-project/lotus/storage/sealer/commitment"
	"github.com/filecoin-project/lotus/storage/sealer/ffiwrapper/basicfs"
	"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
func init() {
logging.SetLogLevel("*", "DEBUG") //nolint: errcheck
}
var sealProofType = abi.RegisteredSealProof_StackedDrg2KiBV1
var sectorSize, _ = sealProofType.SectorSize()
var sealRand = abi.SealRandomness{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2}
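
// seal tracks a single test sector through the sealing pipeline: its sector
// reference, the piece added to it, the precommit ticket, and the resulting
// sealed/unsealed CIDs.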
type seal struct {
ref storiface.SectorRef
cids storiface.SectorCids
pi abi.PieceInfo
ticket abi.SealRandomness
}
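
// data returns a deterministic pseudo-random reader for a sector's piece
// contents, seeded by the sector number so the same bytes can be regenerated
// later when checking unsealed reads.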
func data(sn abi.SectorNumber, dlen abi.UnpaddedPieceSize) io.Reader {
return io.MultiReader(
io.LimitReader(rand.New(rand.NewSource(42+int64(sn))), int64(123)),
io.LimitReader(rand.New(rand.NewSource(42+int64(sn))), int64(dlen-123)),
)
}
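
// precommit adds a deterministic piece to the sector and runs the
// SealPreCommit1/SealPreCommit2 phases, recording the piece info and the
// resulting sector CIDs.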
func (s *seal) precommit(t *testing.T, sb *Sealer, id storiface.SectorRef, done func()) {
defer done()
dlen := abi.PaddedPieceSize(sectorSize).Unpadded()
var err error
r := data(id.ID.Number, dlen)
s.pi, err = sb.AddPiece(context.TODO(), id, []abi.UnpaddedPieceSize{}, dlen, r)
if err != nil {
t.Errorf("%+v", err)
return
}
s.ticket = sealRand
p1, err := sb.SealPreCommit1(context.TODO(), id, s.ticket, []abi.PieceInfo{s.pi})
if err != nil {
t.Errorf("%+v", err)
return
}
cids, err := sb.SealPreCommit2(context.TODO(), id, p1)
if err != nil {
t.Errorf("%+v", err)
return
}
s.cids = cids
}
var seed = abi.InteractiveSealRandomness{0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9, 8, 7, 6, 45, 3, 2, 1, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9}
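
// commit runs SealCommit1/SealCommit2 for the sector and verifies the
// resulting proof with ProofVerifier, returning the proof bytes (nil on
// failure).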
func (s *seal) commit(t *testing.T, sb *Sealer, done func()) storiface.Proof {
defer done()
pc1, err := sb.SealCommit1(context.TODO(), s.ref, s.ticket, seed, []abi.PieceInfo{s.pi}, s.cids)
if err != nil {
t.Errorf("%+v", err)
return nil
}
proof, err := sb.SealCommit2(context.TODO(), s.ref, pc1)
if err != nil {
t.Errorf("%+v", err)
return nil
}
ok, err := ProofVerifier.VerifySeal(prooftypes.SealVerifyInfo{
SectorID: s.ref.ID,
SealedCID: s.cids.Sealed,
SealProof: s.ref.ProofType,
Proof: proof,
Randomness: s.ticket,
InteractiveRandomness: seed,
UnsealedCID: s.cids.Unsealed,
})
if err != nil {
t.Errorf("%+v", err)
return nil
}
if !ok {
t.Errorf("proof failed to validate")
return nil
}
return proof
}
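
// unseal reads the piece back, deletes the unsealed copy, confirms that
// reading then fails, unseals the piece again from the sealed sector, and
// checks the recovered bytes against the deterministic test data.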
func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si storiface.SectorRef, done func()) {
defer done()
var b bytes.Buffer
_, err := sb.ReadPiece(context.TODO(), &b, si, 0, 1016)
if err != nil {
t.Fatal(err)
}
expect, _ := io.ReadAll(data(si.ID.Number, 1016))
if !bytes.Equal(b.Bytes(), expect) {
t.Fatal("read wrong bytes")
}
p, sd, err := sp.AcquireSector(context.TODO(), si, storiface.FTUnsealed, storiface.FTNone, storiface.PathStorage)
if err != nil {
t.Fatal(err)
}
if err := os.Remove(p.Unsealed); err != nil {
t.Fatal(err)
}
sd()
_, err = sb.ReadPiece(context.TODO(), &b, si, 0, 1016)
if err == nil {
t.Fatal("HOW?!")
}
log.Info("this is what we expect: ", err)
if err := sb.UnsealPiece(context.TODO(), si, 0, 1016, sealRand, s.cids.Unsealed); err != nil {
t.Fatal(err)
}
b.Reset()
_, err = sb.ReadPiece(context.TODO(), &b, si, 0, 1016)
if err != nil {
t.Fatal(err)
}
expect, _ = io.ReadAll(data(si.ID.Number, 1016))
require.Equal(t, expect, b.Bytes())
b.Reset()
have, err := sb.ReadPiece(context.TODO(), &b, si, 0, 2032)
if err != nil {
t.Fatal(err)
}
if have {
t.Errorf("didn't expect to read things")
}
if b.Len() != 0 {
t.Fatal("read bytes")
}
}
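
// post generates a Window PoSt over the given sealed sectors and verifies it.
// When skipped sectors are expected, it instead asserts that proof generation
// fails and reports exactly those sectors.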
func post(t *testing.T, sealer *Sealer, skipped []abi.SectorID, seals ...seal) {
randomness := abi.PoStRandomness{0, 9, 2, 7, 6, 5, 4, 3, 2, 1, 0, 9, 8, 7, 6, 45, 3, 2, 1, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9, 7}
xsis := make([]prooftypes.ExtendedSectorInfo, len(seals))
for i, s := range seals {
xsis[i] = prooftypes.ExtendedSectorInfo{
SealProof: s.ref.ProofType,
SectorNumber: s.ref.ID.Number,
SealedCID: s.cids.Sealed,
}
}
ppt, err := xsis[0].SealProof.RegisteredWindowPoStProof()
if err != nil {
t.Fatalf("%+v", err)
}
ppt, err = ppt.ToV1_1PostProof()
if err != nil {
t.Fatalf("%+v", err)
}
proofs, skp, err := sealer.GenerateWindowPoSt(context.TODO(), seals[0].ref.ID.Miner, ppt, xsis, randomness)
if len(skipped) > 0 {
require.Error(t, err)
require.EqualValues(t, skipped, skp)
return
2020-03-26 02:50:56 +00:00
}
if err != nil {
t.Fatalf("%+v", err)
}
sis := make([]prooftypes.SectorInfo, len(seals))
for i, xsi := range xsis {
sis[i] = prooftypes.SectorInfo{
SealProof: xsi.SealProof,
SectorNumber: xsi.SectorNumber,
SealedCID: xsi.SealedCID,
}
}
ok, err := ProofVerifier.VerifyWindowPoSt(context.TODO(), prooftypes.WindowPoStVerifyInfo{
Randomness: randomness,
Proofs: proofs,
ChallengedSectors: sis,
Prover: seals[0].ref.ID.Miner,
})
if err != nil {
t.Fatalf("%+v", err)
}
if !ok {
t.Fatal("bad post")
}
}
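
// corrupt overwrites the first 2KiB of a sector's sealed file so that
// subsequent PoSt generation reports the sector as skipped.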
func corrupt(t *testing.T, sealer *Sealer, id storiface.SectorRef) {
paths, done, err := sealer.sectors.AcquireSector(context.Background(), id, storiface.FTSealed, 0, storiface.PathStorage)
require.NoError(t, err)
defer done()
log.Infof("corrupt %s", paths.Sealed)
f, err := os.OpenFile(paths.Sealed, os.O_RDWR, 0664)
require.NoError(t, err)
_, err = f.WriteAt(bytes.Repeat([]byte{'d'}, 2048), 0)
require.NoError(t, err)
require.NoError(t, f.Close())
}
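
// getGrothParamFileAndVerifyingKeys fetches (or verifies already-cached) Groth
// parameters and verifying keys for the given sector size, using the parameter
// manifests checked into the build directory.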
func getGrothParamFileAndVerifyingKeys(s abi.SectorSize) {
dat, err := os.ReadFile("../../../build/proof-params/parameters.json")
if err != nil {
panic(err)
}
datSrs, err := os.ReadFile("../../../build/proof-params/srs-inner-product.json")
if err != nil {
panic(err)
}
err = paramfetch.GetParams(context.TODO(), dat, datSrs, uint64(s))
if err != nil {
panic(xerrors.Errorf("failed to acquire Groth parameters for 2KiB sectors: %w", err))
}
}
// TestDownloadParams exists only so that developers and CI can pre-download
// Groth parameters and verifying keys before running the tests which rely on
// those parameters and keys. To do this, run the following command:
//
// go test -run=^TestDownloadParams
func TestDownloadParams(t *testing.T) {
	// defer requireFDsClosed(t, openFDs(t)) // flaky, likely because of how go-embed works with param files
getGrothParamFileAndVerifyingKeys(sectorSize)
}
func TestSealAndVerify(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
defer requireFDsClosed(t, openFDs(t))
if runtime.NumCPU() < 10 && os.Getenv("CI") == "" { // don't bother on slow hardware
t.Skip("this is slow")
}
_ = os.Setenv("RUST_LOG", "info")
getGrothParamFileAndVerifyingKeys(sectorSize)
cdir, err := os.MkdirTemp("", "sbtest-c-")
if err != nil {
t.Fatal(err)
}
miner := abi.ActorID(123)
sp := &basicfs.Provider{
Root: cdir,
}
sb, err := New(sp)
if err != nil {
t.Fatalf("%+v", err)
}
t.Cleanup(func() {
if t.Failed() {
fmt.Printf("not removing %s\n", cdir)
return
}
if err := os.RemoveAll(cdir); err != nil {
t.Error(err)
}
})
si := storiface.SectorRef{
ID: abi.SectorID{Miner: miner, Number: 1},
ProofType: sealProofType,
}
s := seal{ref: si}
start := time.Now()
s.precommit(t, sb, si, func() {})
precommit := time.Now()
s.commit(t, sb, func() {})
commit := time.Now()
post(t, sb, nil, s)
epost := time.Now()
post(t, sb, nil, s)
if err := sb.FinalizeSector(context.TODO(), si); err != nil {
t.Fatalf("%+v", err)
}
s.unseal(t, sb, sp, si, func() {})
fmt.Printf("PreCommit: %s\n", precommit.Sub(start).String())
fmt.Printf("Commit: %s\n", commit.Sub(precommit).String())
fmt.Printf("EPoSt: %s\n", epost.Sub(commit).String())
}
func TestSealPoStNoCommit(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
defer requireFDsClosed(t, openFDs(t))
if runtime.NumCPU() < 10 && os.Getenv("CI") == "" { // don't bother on slow hardware
t.Skip("this is slow")
}
_ = os.Setenv("RUST_LOG", "info")
getGrothParamFileAndVerifyingKeys(sectorSize)
dir, err := os.MkdirTemp("", "sbtest")
if err != nil {
t.Fatal(err)
}
miner := abi.ActorID(123)
sp := &basicfs.Provider{
Root: dir,
}
sb, err := New(sp)
if err != nil {
t.Fatalf("%+v", err)
}
t.Cleanup(func() {
if t.Failed() {
fmt.Printf("not removing %s\n", dir)
return
}
if err := os.RemoveAll(dir); err != nil {
t.Error(err)
}
})
si := storiface.SectorRef{
ID: abi.SectorID{Miner: miner, Number: 1},
ProofType: sealProofType,
}
s := seal{ref: si}
start := time.Now()
s.precommit(t, sb, si, func() {})
precommit := time.Now()
if err := sb.FinalizeSector(context.TODO(), si); err != nil {
t.Fatal(err)
}
post(t, sb, nil, s)
epost := time.Now()
fmt.Printf("PreCommit: %s\n", precommit.Sub(start).String())
fmt.Printf("EPoSt: %s\n", epost.Sub(precommit).String())
}
func TestMain(m *testing.M) {
//setup()
	// Fetching params here means it is no longer bound to a 30s limit but has 1m30s for the whole suite.
getGrothParamFileAndVerifyingKeys(sectorSize)
code := m.Run()
//shutdown()
os.Exit(code)
}
func TestSealAndVerify3(t *testing.T) {
t.Skip("i flake on CI, re-enable me when you have a fix pls")
if testing.Short() {
t.Skip("skipping test in short mode")
}
defer requireFDsClosed(t, openFDs(t))
if runtime.NumCPU() < 10 && os.Getenv("CI") == "" { // don't bother on slow hardware
t.Skip("this is slow")
}
_ = os.Setenv("RUST_LOG", "trace")
dir, err := os.MkdirTemp("", "sbtest")
if err != nil {
t.Fatal(err)
}
miner := abi.ActorID(123)
sp := &basicfs.Provider{
Root: dir,
}
sb, err := New(sp)
if err != nil {
t.Fatalf("%+v", err)
}
t.Cleanup(func() {
if err := os.RemoveAll(dir); err != nil {
t.Error(err)
}
})
var wg sync.WaitGroup
si1 := storiface.SectorRef{
ID: abi.SectorID{Miner: miner, Number: 1},
ProofType: sealProofType,
}
si2 := storiface.SectorRef{
ID: abi.SectorID{Miner: miner, Number: 2},
ProofType: sealProofType,
}
si3 := storiface.SectorRef{
ID: abi.SectorID{Miner: miner, Number: 3},
ProofType: sealProofType,
}
s1 := seal{ref: si1}
s2 := seal{ref: si2}
s3 := seal{ref: si3}
wg.Add(3)
go s1.precommit(t, sb, si1, wg.Done)
time.Sleep(100 * time.Millisecond)
go s2.precommit(t, sb, si2, wg.Done)
time.Sleep(100 * time.Millisecond)
go s3.precommit(t, sb, si3, wg.Done)
wg.Wait()
wg.Add(3)
go s1.commit(t, sb, wg.Done)
go s2.commit(t, sb, wg.Done)
go s3.commit(t, sb, wg.Done)
wg.Wait()
post(t, sb, nil, s1, s2, s3)
corrupt(t, sb, si1)
corrupt(t, sb, si2)
post(t, sb, []abi.SectorID{si1.ID, si2.ID}, s1, s2, s3)
}
func TestSealAndVerifyAggregate(t *testing.T) {
numAgg := 5
if testing.Short() {
t.Skip("skipping test in short mode")
}
defer requireFDsClosed(t, openFDs(t))
if runtime.NumCPU() < 10 && os.Getenv("CI") == "" { // don't bother on slow hardware
t.Skip("this is slow")
}
_ = os.Setenv("RUST_LOG", "info")
getGrothParamFileAndVerifyingKeys(sectorSize)
cdir, err := os.MkdirTemp("", "sbtest-c-")
if err != nil {
t.Fatal(err)
}
miner := abi.ActorID(123)
sp := &basicfs.Provider{
Root: cdir,
}
sb, err := New(sp)
if err != nil {
t.Fatalf("%+v", err)
}
t.Cleanup(func() {
if t.Failed() {
fmt.Printf("not removing %s\n", cdir)
return
}
if err := os.RemoveAll(cdir); err != nil {
t.Error(err)
}
})
avi := prooftypes.AggregateSealVerifyProofAndInfos{
Miner: miner,
SealProof: sealProofType,
AggregateProof: policy.GetDefaultAggregationProof(),
Proof: nil,
Infos: make([]prooftypes.AggregateSealVerifyInfo, numAgg),
}
toAggregate := make([][]byte, numAgg)
for i := 0; i < numAgg; i++ {
si := storiface.SectorRef{
2021-03-10 15:16:44 +00:00
ID: abi.SectorID{Miner: miner, Number: abi.SectorNumber(i + 1)},
ProofType: sealProofType,
}
s := seal{ref: si}
s.precommit(t, sb, si, func() {})
toAggregate[i] = s.commit(t, sb, func() {})
avi.Infos[i] = prooftypes.AggregateSealVerifyInfo{
Number: abi.SectorNumber(i + 1),
Randomness: s.ticket,
InteractiveRandomness: seed,
SealedCID: s.cids.Sealed,
UnsealedCID: s.cids.Unsealed,
}
}
aggStart := time.Now()
avi.Proof, err = ProofProver.AggregateSealProofs(avi, toAggregate)
require.NoError(t, err)
require.Len(t, avi.Proof, 11188)
aggDone := time.Now()
_, err = ProofProver.AggregateSealProofs(avi, toAggregate)
require.NoError(t, err)
aggHot := time.Now()
ok, err := ProofVerifier.VerifyAggregateSeals(avi)
require.NoError(t, err)
require.True(t, ok)
verifDone := time.Now()
fmt.Printf("Aggregate: %s\n", aggDone.Sub(aggStart).String())
fmt.Printf("Hot: %s\n", aggHot.Sub(aggDone).String())
fmt.Printf("Verify: %s\n", verifDone.Sub(aggHot).String())
}
func BenchmarkWriteWithAlignment(b *testing.B) {
bt := abi.UnpaddedPieceSize(2 * 127 * 1024 * 1024)
b.SetBytes(int64(bt))
for i := 0; i < b.N; i++ {
b.StopTimer()
rf, w, _ := commpffi.ToReadableFile(bytes.NewReader(bytes.Repeat([]byte{0xff, 0}, int(bt/2))), int64(bt))
tf, _ := os.CreateTemp("/tmp/", "scrb-")
b.StartTimer()
ffi.WriteWithAlignment(abi.RegisteredSealProof_StackedDrg2KiBV1, rf, bt, tf, nil) // nolint:errcheck
_ = w()
}
}
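
// openFDs counts the file descriptors currently open in this process,
// ignoring GPU device handles and memory-mapped proof-parameter files.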
func openFDs(t *testing.T) int {
path := "/proc/self/fd"
if runtime.GOOS == "darwin" {
path = "/dev/fd"
}
dent, err := os.ReadDir(path)
if err != nil && !strings.Contains(err.Error(), "/dev/fd/3: bad file descriptor") {
require.NoError(t, err)
}
var skip int
for _, info := range dent {
l, err := os.Readlink(filepath.Join(path, info.Name()))
if err != nil {
continue
}
if strings.HasPrefix(l, "/dev/nvidia") {
skip++
}
if strings.HasPrefix(l, "/var/tmp/filecoin-proof-parameters/") {
skip++
}
}
return len(dent) - skip
}
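
// requireFDsClosed compares the current FD count against the one taken at the
// start of the test and dumps the open descriptors if they differ; the strict
// equality check is disabled until it can be made to work with CUDA.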
func requireFDsClosed(t *testing.T, start int) {
openNow := openFDs(t)
if start != openNow {
path := "/proc/self/fd"
if runtime.GOOS == "darwin" {
path = "/dev/fd"
}
dent, err := os.ReadDir(path)
require.NoError(t, err)
for _, info := range dent {
l, err := os.Readlink(filepath.Join(path, info.Name()))
if err != nil {
fmt.Printf("FD err %s\n", err)
continue
}
fmt.Printf("FD %s -> %s\n", info.Name(), l)
}
}
log.Infow("open FDs", "start", start, "now", openNow)
// todo make work with cuda somehow
// require.Equal(t, start, openNow, "FDs shouldn't leak")
}
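
// TestGenerateUnsealedCID checks that GenerateUnsealedCID zero-pads partial
// pieces the same way as computing CommP over the explicitly zero-padded
// sector contents.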
func TestGenerateUnsealedCID(t *testing.T) {
pt := abi.RegisteredSealProof_StackedDrg2KiBV1
ups := int(abi.PaddedPieceSize(2048).Unpadded())
commP := func(b []byte) cid.Cid {
pf, werr, err := commpffi.ToReadableFile(bytes.NewReader(b), int64(len(b)))
require.NoError(t, err)
c, err := ffi.GeneratePieceCIDFromFile(pt, pf, abi.UnpaddedPieceSize(len(b)))
require.NoError(t, err)
require.NoError(t, werr())
return c
}
testCommEq := func(name string, in [][]byte, expect [][]byte) {
t.Run(name, func(t *testing.T) {
upi := make([]abi.PieceInfo, len(in))
for i, b := range in {
upi[i] = abi.PieceInfo{
Size: abi.UnpaddedPieceSize(len(b)).Padded(),
PieceCID: commP(b),
}
}
sectorPi := []abi.PieceInfo{
{
Size: 2048,
PieceCID: commP(bytes.Join(expect, nil)),
},
}
expectCid, err := GenerateUnsealedCID(pt, sectorPi)
require.NoError(t, err)
actualCid, err := GenerateUnsealedCID(pt, upi)
require.NoError(t, err)
require.Equal(t, expectCid, actualCid)
})
}
barr := func(b byte, den int) []byte {
return bytes.Repeat([]byte{b}, ups/den)
}
// 0000
testCommEq("zero",
nil,
[][]byte{barr(0, 1)},
)
// 1111
testCommEq("one",
[][]byte{barr(1, 1)},
[][]byte{barr(1, 1)},
)
// 11 00
testCommEq("one|2",
[][]byte{barr(1, 2)},
[][]byte{barr(1, 2), barr(0, 2)},
)
// 1 0 00
testCommEq("one|4",
[][]byte{barr(1, 4)},
[][]byte{barr(1, 4), barr(0, 4), barr(0, 2)},
)
// 11 2 0
testCommEq("one|2-two|4",
[][]byte{barr(1, 2), barr(2, 4)},
[][]byte{barr(1, 2), barr(2, 4), barr(0, 4)},
)
// 1 0 22
testCommEq("one|4-two|2",
[][]byte{barr(1, 4), barr(2, 2)},
[][]byte{barr(1, 4), barr(0, 4), barr(2, 2)},
)
// 1 0 22 0000
testCommEq("one|8-two|4",
[][]byte{barr(1, 8), barr(2, 4)},
[][]byte{barr(1, 8), barr(0, 8), barr(2, 4), barr(0, 2)},
)
// 11 2 0 0000
testCommEq("one|4-two|8",
[][]byte{barr(1, 4), barr(2, 8)},
[][]byte{barr(1, 4), barr(2, 8), barr(0, 8), barr(0, 2)},
)
// 1 0 22 3 0 00 4444 5 0 00
testCommEq("one|16-two|8-three|16-four|4-five|16",
[][]byte{barr(1, 16), barr(2, 8), barr(3, 16), barr(4, 4), barr(5, 16)},
[][]byte{barr(1, 16), barr(0, 16), barr(2, 8), barr(3, 16), barr(0, 16), barr(0, 8), barr(4, 4), barr(5, 16), barr(0, 16), barr(0, 8)},
)
}
func TestAddPiece512M(t *testing.T) {
sz := abi.PaddedPieceSize(512 << 20).Unpadded()
cdir, err := os.MkdirTemp("", "sbtest-c-")
if err != nil {
t.Fatal(err)
}
miner := abi.ActorID(123)
sp := &basicfs.Provider{
Root: cdir,
}
sb, err := New(sp)
if err != nil {
t.Fatalf("%+v", err)
}
cleanup := func() {
if t.Failed() {
fmt.Printf("not removing %s\n", cdir)
return
}
if err := os.RemoveAll(cdir); err != nil {
t.Error(err)
}
}
t.Cleanup(cleanup)
r := rand.New(rand.NewSource(0x7e5))
c, err := sb.AddPiece(context.TODO(), storiface.SectorRef{
ID: abi.SectorID{
Miner: miner,
Number: 0,
},
ProofType: abi.RegisteredSealProof_StackedDrg512MiBV1_1,
}, nil, sz, io.LimitReader(r, int64(sz)))
if err != nil {
t.Fatal(err)
}
require.Equal(t, "baga6ea4seaqhyticusemlcrjhvulpfng4nint6bu3wpe5s3x4bnuj2rs47hfacy", c.PieceCID.String())
}
func BenchmarkAddPiece512M(b *testing.B) {
sz := abi.PaddedPieceSize(512 << 20).Unpadded()
b.SetBytes(int64(sz))
cdir, err := os.MkdirTemp("", "sbtest-c-")
if err != nil {
b.Fatal(err)
}
miner := abi.ActorID(123)
sp := &basicfs.Provider{
Root: cdir,
}
sb, err := New(sp)
if err != nil {
b.Fatalf("%+v", err)
}
cleanup := func() {
if b.Failed() {
fmt.Printf("not removing %s\n", cdir)
return
}
if err := os.RemoveAll(cdir); err != nil {
b.Error(err)
}
}
b.Cleanup(cleanup)
for i := 0; i < b.N; i++ {
c, err := sb.AddPiece(context.TODO(), storiface.SectorRef{
ID: abi.SectorID{
Miner: miner,
Number: abi.SectorNumber(i),
},
ProofType: abi.RegisteredSealProof_StackedDrg512MiBV1_1,
}, nil, sz, io.LimitReader(&nullreader.Reader{}, int64(sz)))
if err != nil {
b.Fatal(err)
}
fmt.Println(c)
}
}
func TestAddPiece512MPadded(t *testing.T) {
sz := abi.PaddedPieceSize(512 << 20).Unpadded()
cdir, err := os.MkdirTemp("", "sbtest-c-")
if err != nil {
t.Fatal(err)
}
miner := abi.ActorID(123)
sp := &basicfs.Provider{
Root: cdir,
}
sb, err := New(sp)
if err != nil {
t.Fatalf("%+v", err)
}
cleanup := func() {
if t.Failed() {
fmt.Printf("not removing %s\n", cdir)
return
}
if err := os.RemoveAll(cdir); err != nil {
t.Error(err)
}
}
t.Cleanup(cleanup)
r := rand.New(rand.NewSource(0x7e5))
c, err := sb.AddPiece(context.TODO(), storiface.SectorRef{
ID: abi.SectorID{
Miner: miner,
Number: 0,
},
ProofType: abi.RegisteredSealProof_StackedDrg512MiBV1_1,
}, nil, sz, io.LimitReader(r, int64(sz/4)))
if err != nil {
t.Fatalf("add piece failed: %s", err)
}
require.Equal(t, "baga6ea4seaqonenxyku4o7hr5xkzbqsceipf6xgli3on54beqbk6k246sbooobq", c.PieceCID.String())
}
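
// setupLogger pipes rust-fil-proofs log output into an in-memory buffer via
// cgo.InitLogFd so tests can assert on proofs log lines.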
func setupLogger(t *testing.T) *bytes.Buffer {
_ = os.Setenv("RUST_LOG", "info")
var bb bytes.Buffer
r, w, err := os.Pipe()
if err != nil {
t.Fatal(err)
}
go func() {
_, _ = io.Copy(&bb, r)
runtime.KeepAlive(w)
}()
err = cgo.InitLogFd(int32(w.Fd()))
require.NoError(t, err)
return &bb
}
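
// TestMulticoreSDR seals a sector with FIL_PROOFS_USE_MULTICORE_SDR=1 and
// asserts, from the captured rust log output, that the multicore label
// generation path was used.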
func TestMulticoreSDR(t *testing.T) {
if os.Getenv("TEST_RUSTPROOFS_LOGS") != "1" {
t.Skip("skipping test without TEST_RUSTPROOFS_LOGS=1")
}
rustLogger := setupLogger(t)
getGrothParamFileAndVerifyingKeys(sectorSize)
dir, err := os.MkdirTemp("", "sbtest")
if err != nil {
t.Fatal(err)
}
miner := abi.ActorID(123)
sp := &basicfs.Provider{
Root: dir,
}
sb, err := New(sp)
if err != nil {
t.Fatalf("%+v", err)
}
t.Cleanup(func() {
if t.Failed() {
fmt.Printf("not removing %s\n", dir)
return
}
if err := os.RemoveAll(dir); err != nil {
t.Error(err)
}
})
si := storiface.SectorRef{
ID: abi.SectorID{Miner: miner, Number: 1},
ProofType: sealProofType,
}
s := seal{ref: si}
// check multicore
_ = os.Setenv("FIL_PROOFS_USE_MULTICORE_SDR", "1")
rustLogger.Reset()
s.precommit(t, sb, si, func() {})
ok := false
for _, s := range strings.Split(rustLogger.String(), "\n") {
if strings.Contains(s, "create_label::multi") {
ok = true
break
}
}
require.True(t, ok)
}
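
// TestPoStChallengeAssumptions documents properties of
// GeneratePoStFallbackSectorChallenges: it is deterministic, a sector's
// challenges don't depend on the other sectors requested, and duplicate
// sector numbers are deduplicated.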
func TestPoStChallengeAssumptions(t *testing.T) {
var r [32]byte
if _, err := crand.Read(r[:]); err != nil {
panic(err)
}
r[31] &= 0x3f
// behaves like a pure function
{
c1, err := ffi.GeneratePoStFallbackSectorChallenges(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, 1000, r[:], []abi.SectorNumber{1, 2, 3, 4})
require.NoError(t, err)
c2, err := ffi.GeneratePoStFallbackSectorChallenges(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, 1000, r[:], []abi.SectorNumber{1, 2, 3, 4})
require.NoError(t, err)
require.Equal(t, c1, c2)
}
	// doesn't sort; challenges are position-dependent
{
c1, err := ffi.GeneratePoStFallbackSectorChallenges(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, 1000, r[:], []abi.SectorNumber{1, 2, 3, 4})
require.NoError(t, err)
c2, err := ffi.GeneratePoStFallbackSectorChallenges(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, 1000, r[:], []abi.SectorNumber{4, 2, 3, 1})
require.NoError(t, err)
require.NotEqual(t, c1, c2)
require.Equal(t, c1.Challenges[2], c2.Challenges[2])
require.Equal(t, c1.Challenges[3], c2.Challenges[3])
require.NotEqual(t, c1.Challenges[1], c2.Challenges[1])
require.NotEqual(t, c1.Challenges[4], c2.Challenges[4])
}
	// a sector's challenges don't depend on how many sectors are requested
{
c1, err := ffi.GeneratePoStFallbackSectorChallenges(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, 1000, r[:], []abi.SectorNumber{1})
require.NoError(t, err)
c2, err := ffi.GeneratePoStFallbackSectorChallenges(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, 1000, r[:], []abi.SectorNumber{1, 2})
require.NoError(t, err)
require.NotEqual(t, c1, c2)
require.Equal(t, c1.Challenges[1], c2.Challenges[1])
}
	// generation dedupes duplicate sector numbers
{
c1, err := ffi.GeneratePoStFallbackSectorChallenges(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, 1000, r[:], []abi.SectorNumber{1, 2, 1, 4})
require.NoError(t, err)
require.Len(t, c1.Sectors, 3)
require.Len(t, c1.Challenges, 3)
}
}
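
// TestDCAPCloses verifies that DataCid and AddPiece close the reader they are
// given when it implements io.Closer.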
func TestDCAPCloses(t *testing.T) {
sz := abi.PaddedPieceSize(2 << 10).Unpadded()
cdir, err := os.MkdirTemp("", "sbtest-c-")
if err != nil {
t.Fatal(err)
}
miner := abi.ActorID(123)
sp := &basicfs.Provider{
Root: cdir,
}
sb, err := New(sp)
if err != nil {
t.Fatalf("%+v", err)
}
cleanup := func() {
if t.Failed() {
fmt.Printf("not removing %s\n", cdir)
return
}
if err := os.RemoveAll(cdir); err != nil {
t.Error(err)
}
}
t.Cleanup(cleanup)
t.Run("DataCid", func(t *testing.T) {
r := rand.New(rand.NewSource(0x7e5))
clr := &closeAssertReader{
2023-10-02 22:08:42 +00:00
Reader: io.LimitReader(r, int64(sz)),
}
c, err := sb.DataCid(context.TODO(), sz, clr)
if err != nil {
t.Fatal(err)
}
require.Equal(t, "baga6ea4seaqeje7jy4hufnybpo7ckxzujaigqbcxhdjq7ojb4b6xzgqdugkyciq", c.PieceCID.String())
require.True(t, clr.closed)
})
t.Run("AddPiece", func(t *testing.T) {
r := rand.New(rand.NewSource(0x7e5))
clr := &closeAssertReader{
2023-10-02 22:08:42 +00:00
Reader: io.LimitReader(r, int64(sz)),
}
c, err := sb.AddPiece(context.TODO(), storiface.SectorRef{
ID: abi.SectorID{
Miner: miner,
Number: 0,
},
ProofType: abi.RegisteredSealProof_StackedDrg2KiBV1_1,
}, nil, sz, clr)
if err != nil {
t.Fatal(err)
}
require.Equal(t, "baga6ea4seaqeje7jy4hufnybpo7ckxzujaigqbcxhdjq7ojb4b6xzgqdugkyciq", c.PieceCID.String())
require.True(t, clr.closed)
})
}
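
// TestSealAndVerifySynth runs the full seal/PoSt/unseal flow with the
// synthetic PoRep proof type and checks that the synthetic vanilla proofs
// file exists during sealing and is removed by FinalizeSector.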
func TestSealAndVerifySynth(t *testing.T) {
origSealProofType := sealProofType
sealProofType = abi.RegisteredSealProof_StackedDrg2KiBV1_1_Feat_SyntheticPoRep
t.Cleanup(func() {
sealProofType = origSealProofType
})
if testing.Short() {
t.Skip("skipping test in short mode")
}
defer requireFDsClosed(t, openFDs(t))
if runtime.NumCPU() < 10 && os.Getenv("CI") == "" { // don't bother on slow hardware
t.Skip("this is slow")
}
_ = os.Setenv("RUST_LOG", "info")
getGrothParamFileAndVerifyingKeys(sectorSize)
cdir, err := os.MkdirTemp("", "sbtest-c-")
if err != nil {
t.Fatal(err)
}
miner := abi.ActorID(123)
synthPorRepVProofsName := "syn-porep-vanilla-proofs.dat"
printFileList := func(stage string, expectSynthPorep bool) {
var hasSynthPorep bool
fmt.Println("----file list:", stage)
err := filepath.Walk(cdir, func(path string, info os.FileInfo, err error) error {
if strings.Contains(path, synthPorRepVProofsName) {
hasSynthPorep = true
}
fmt.Println(path)
return nil
})
if err != nil {
t.Fatal(err)
}
require.Equal(t, expectSynthPorep, hasSynthPorep)
fmt.Println("----")
}
sp := &basicfs.Provider{
Root: cdir,
}
sb, err := New(sp)
if err != nil {
t.Fatalf("%+v", err)
}
t.Cleanup(func() {
if t.Failed() {
fmt.Printf("not removing %s\n", cdir)
return
}
if err := os.RemoveAll(cdir); err != nil {
t.Error(err)
}
})
si := storiface.SectorRef{
ID: abi.SectorID{Miner: miner, Number: 1},
ProofType: sealProofType,
}
s := seal{ref: si}
start := time.Now()
s.precommit(t, sb, si, func() {})
printFileList("precommit", true)
precommit := time.Now()
s.commit(t, sb, func() {})
printFileList("commit", true)
commit := time.Now()
post(t, sb, nil, s)
printFileList("post", true)
epost := time.Now()
post(t, sb, nil, s)
if err := sb.FinalizeSector(context.TODO(), si); err != nil {
t.Fatalf("%+v", err)
}
printFileList("finalize", false)
s.unseal(t, sb, sp, si, func() {})
printFileList("unseal", false)
fmt.Printf("PreCommit: %s\n", precommit.Sub(start).String())
fmt.Printf("Commit: %s\n", commit.Sub(precommit).String())
fmt.Printf("EPoSt: %s\n", epost.Sub(commit).String())
}
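
// closeAssertReader wraps a reader and records whether Close was called,
// letting tests assert that the sealer closes readers handed to it.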
type closeAssertReader struct {
io.Reader
closed bool
}
func (c *closeAssertReader) Close() error {
if c.closed {
panic("double close")
}
c.closed = true
return nil
}
var _ io.Closer = &closeAssertReader{}
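
// TestSealCommDRInGo recomputes CommR and CommD in Go from the sector cache
// (p_aux and tree-d files) and checks they match the CIDs returned by the
// sealing pipeline.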
func TestSealCommDRInGo(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
defer requireFDsClosed(t, openFDs(t))
cdir, err := os.MkdirTemp("", "sbtest-c-")
require.NoError(t, err)
miner := abi.ActorID(123)
sp := &basicfs.Provider{
Root: cdir,
}
sb, err := New(sp)
require.NoError(t, err)
t.Cleanup(func() {
if t.Failed() {
fmt.Printf("not removing %s\n", cdir)
return
}
if err := os.RemoveAll(cdir); err != nil {
t.Error(err)
}
})
si := storiface.SectorRef{
ID: abi.SectorID{Miner: miner, Number: 1},
ProofType: sealProofType,
}
s := seal{ref: si}
s.precommit(t, sb, si, func() {})
p, _, err := sp.AcquireSector(context.Background(), si, storiface.FTCache, storiface.FTNone, storiface.PathStorage)
require.NoError(t, err)
commr, err := commitment.PAuxCommR(p.Cache)
require.NoError(t, err)
commd, err := commitment.TreeDCommD(p.Cache)
require.NoError(t, err)
sealCid, err := commcid.ReplicaCommitmentV1ToCID(commr[:])
require.NoError(t, err)
unsealedCid, err := commcid.DataCommitmentV1ToCID(commd[:])
require.NoError(t, err)
require.Equal(t, s.cids.Sealed, sealCid)
require.Equal(t, s.cids.Unsealed, unsealedCid)
}
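
// TestGenerateSDR checks that ffi.GenerateSDR produces the same final SDR
// layer file as SealPreCommit1 does for the same replica ID.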
func TestGenerateSDR(t *testing.T) {
d := t.TempDir()
miner := abi.ActorID(123)
sp := &basicfs.Provider{
Root: d,
}
sb, err := New(sp)
require.NoError(t, err)
si := storiface.SectorRef{
ID: abi.SectorID{Miner: miner, Number: 1},
ProofType: sealProofType,
}
s := seal{ref: si}
sz := abi.PaddedPieceSize(sectorSize).Unpadded()
s.pi, err = sb.AddPiece(context.TODO(), si, []abi.UnpaddedPieceSize{}, sz, nullreader.NewNullReader(sz))
require.NoError(t, err)
s.ticket = sealRand
_, err = sb.SealPreCommit1(context.TODO(), si, s.ticket, []abi.PieceInfo{s.pi})
require.NoError(t, err)
// sdr for comparison
sdrCache := filepath.Join(d, "sdrcache")
commd, err := commcid.CIDToDataCommitmentV1(s.pi.PieceCID)
require.NoError(t, err)
replicaID, err := sealProofType.ReplicaId(si.ID.Miner, si.ID.Number, s.ticket, commd)
require.NoError(t, err)
err = ffi.GenerateSDR(sealProofType, sdrCache, replicaID)
require.NoError(t, err)
// list files in d recursively, for debug
require.NoError(t, filepath.Walk(d, func(path string, info fs.FileInfo, err error) error {
fmt.Println(path)
return nil
}))
// compare
lastLayerFile := "sc-02-data-layer-2.dat"
sdrFile := filepath.Join(sdrCache, lastLayerFile)
pc1File := filepath.Join(d, "cache/s-t0123-1/", lastLayerFile)
sdrData, err := os.ReadFile(sdrFile)
require.NoError(t, err)
pc1Data, err := os.ReadFile(pc1File)
require.NoError(t, err)
require.Equal(t, sdrData, pc1Data)
}