Fix unsealing, sector based data refs

parent c92b9d5a0d
commit a59d0f0f8c

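The change drops string piece refs in favour of sector-based refs: api.SealedRef now carries a SectorID, Offset, and Size; AllocatePiece/SealPiece place deal data into a known sector; and reads go through SectorBuilder.ReadPieceFromSealedSector, which unseals the sector into the new UnsealedDir on demand. A minimal sketch of the resulting read path, assembled from the hunks below (the readRef helper is hypothetical and not part of this commit; the api, storage, sectorbuilder, and ioutil imports are assumed):

    // Hypothetical helper: resolve one sealed ref back to its bytes.
    func readRef(sb *sectorbuilder.SectorBuilder, m *storage.Miner, ref api.SealedRef) ([]byte, error) {
        si, err := m.GetSectorInfo(ref.SectorID) // ticket and CommD are needed to unseal
        if err != nil {
            return nil, err
        }
        r, err := sb.ReadPieceFromSealedSector(ref.SectorID, ref.Offset, ref.Size,
            si.Ticket.TicketBytes, si.CommD) // unseals into UnsealedDir on first access
        if err != nil {
            return nil, err
        }
        defer r.Close()
        return ioutil.ReadAll(r) // only this ref's Offset..Offset+Size range
    }
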
@@ -86,7 +86,7 @@ type SectorInfo struct {
 }
 
 type SealedRef struct {
-    Piece    string
+    SectorID uint64
     Offset   uint64
     Size     uint64
 }
@@ -137,11 +137,8 @@ func (t *SealedRef) MarshalCBOR(w io.Writer) error {
         return err
     }
 
-    // t.t.Piece (string) (string)
-    if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len(t.Piece)))); err != nil {
-        return err
-    }
-    if _, err := w.Write([]byte(t.Piece)); err != nil {
+    // t.t.SectorID (uint64) (uint64)
+    if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.SectorID))); err != nil {
         return err
     }
 
@@ -172,16 +169,16 @@ func (t *SealedRef) UnmarshalCBOR(r io.Reader) error {
         return fmt.Errorf("cbor input had wrong number of fields")
     }
 
-    // t.t.Piece (string) (string)
+    // t.t.SectorID (uint64) (uint64)
 
-    {
-        sval, err := cbg.ReadString(br)
-        if err != nil {
-            return err
-        }
-
-        t.Piece = string(sval)
-    }
+    maj, extra, err = cbg.CborReadHeader(br)
+    if err != nil {
+        return err
+    }
+    if maj != cbg.MajUnsignedInt {
+        return fmt.Errorf("wrong type for uint64 field")
+    }
+    t.SectorID = uint64(extra)
     // t.t.Offset (uint64) (uint64)
 
     maj, extra, err = cbg.CborReadHeader(br)
@@ -5,7 +5,7 @@ import (
     "io"
 
     "github.com/filecoin-project/lotus/chain/types"
-    "github.com/ipfs/go-cid"
+    cid "github.com/ipfs/go-cid"
     cbg "github.com/whyrusleeping/cbor-gen"
     xerrors "golang.org/x/xerrors"
 )
@@ -204,7 +204,7 @@ func (p *Provider) staged(ctx context.Context, deal MinerDeal) (func(*MinerDeal)
         return nil, xerrors.Errorf("deal.Proposal.PieceSize didn't match padded unixfs file size")
     }
 
-    sectorID, err := p.secb.AddUnixfsPiece(ctx, deal.Ref, uf, deal.DealID)
+    sectorID, err := p.secb.AddUnixfsPiece(ctx, uf, deal.DealID)
     if err != nil {
         return nil, xerrors.Errorf("AddPiece failed: %s", err)
     }
@@ -5,7 +5,7 @@ import (
     "io"
     "math"
 
-    "github.com/ipfs/go-cid"
+    cid "github.com/ipfs/go-cid"
     cbg "github.com/whyrusleeping/cbor-gen"
     xerrors "golang.org/x/xerrors"
 )
@@ -33,11 +33,11 @@ func PreSeal(maddr address.Address, ssize uint64, sectors int, sbroot string, pr
         CacheDir:      filepath.Join(sbroot, "cache"),
         SealedDir:     filepath.Join(sbroot, "sealed"),
         StagedDir:     filepath.Join(sbroot, "staging"),
-        MetadataDir:   filepath.Join(sbroot, "meta"),
+        UnsealedDir:   filepath.Join(sbroot, "unsealed"),
         WorkerThreads: 2,
     }
 
-    for _, d := range []string{cfg.CacheDir, cfg.SealedDir, cfg.StagedDir, cfg.MetadataDir} {
+    for _, d := range []string{cfg.CacheDir, cfg.SealedDir, cfg.StagedDir, cfg.UnsealedDir} {
         if err := os.MkdirAll(d, 0775); err != nil {
             return nil, err
         }
@@ -144,7 +144,7 @@ var initCmd = &cli.Command{
         SealedDir:   filepath.Join(pssb, "sealed"),
         CacheDir:    filepath.Join(pssb, "cache"),
         StagedDir:   filepath.Join(pssb, "staging"),
-        MetadataDir: filepath.Join(pssb, "meta"),
+        UnsealedDir: filepath.Join(pssb, "unsealed"),
     }, oldmds)
     if err != nil {
         return xerrors.Errorf("failed to open up preseal sectorbuilder: %w", err)
@@ -156,7 +156,7 @@ var initCmd = &cli.Command{
         SealedDir:   filepath.Join(lr.Path(), "sealed"),
         CacheDir:    filepath.Join(lr.Path(), "cache"),
         StagedDir:   filepath.Join(lr.Path(), "staging"),
-        MetadataDir: filepath.Join(lr.Path(), "meta"),
+        UnsealedDir: filepath.Join(lr.Path(), "unsealed"),
     }, mds)
     if err != nil {
         return xerrors.Errorf("failed to open up sectorbuilder: %w", err)
@@ -221,7 +221,6 @@ func migratePreSealMeta(ctx context.Context, api lapi.FullNode, presealDir strin
         Pieces: []storage.Piece{
             {
                 DealID: dealID,
-                Ref:    fmt.Sprintf("preseal-%d", sector.SectorID),
                 Size:   meta.SectorSize,
                 CommP:  sector.CommD[:],
             },
@@ -163,7 +163,7 @@ var sectorsRefsCmd = &cli.Command{
         for name, refs := range refs {
             fmt.Printf("Block %s:\n", name)
             for _, ref := range refs {
-                fmt.Printf("\t%s+%d %d bytes\n", ref.Piece, ref.Offset, ref.Size)
+                fmt.Printf("\t%d+%d %d bytes\n", ref.SectorID, ref.Offset, ref.Size)
             }
         }
         return nil
@@ -18,6 +18,10 @@ func (sb *SectorBuilder) stagedSectorPath(sectorID uint64) string {
     return filepath.Join(sb.stagedDir, sb.sectorName(sectorID))
 }
 
+func (sb *SectorBuilder) unsealedSectorPath(sectorID uint64) string {
+    return filepath.Join(sb.unsealedDir, sb.sectorName(sectorID))
+}
+
 func (sb *SectorBuilder) stagedSectorFile(sectorID uint64) (*os.File, error) {
     return os.OpenFile(sb.stagedSectorPath(sectorID), os.O_RDWR|os.O_CREATE, 0644)
 }
@@ -29,7 +29,7 @@ func TempSectorbuilderDir(dir string, sectorSize uint64, ds dtypes.MetadataDS) (
         return nil, err
     }
 
-    metadata := filepath.Join(dir, "meta")
+    unsealed := filepath.Join(dir, "unsealed")
     sealed := filepath.Join(dir, "sealed")
     staging := filepath.Join(dir, "staging")
     cache := filepath.Join(dir, "cache")
@@ -39,7 +39,7 @@ func TempSectorbuilderDir(dir string, sectorSize uint64, ds dtypes.MetadataDS) (
 
         SealedDir:   sealed,
         StagedDir:   staging,
-        MetadataDir: metadata,
+        UnsealedDir: unsealed,
         CacheDir:    cache,
 
         WorkerThreads: 2,
@@ -58,6 +58,9 @@ type SectorBuilder struct {
     stagedDir string
     sealedDir string
     cacheDir  string
+    unsealedDir string
 
+    unsealLk sync.Mutex
+
     rateLimit chan struct{}
 }
@@ -71,7 +74,7 @@ type Config struct {
     CacheDir    string
     SealedDir   string
     StagedDir   string
-    MetadataDir string
+    UnsealedDir string
 }
 
 func New(cfg *Config, ds dtypes.MetadataDS) (*SectorBuilder, error) {
@@ -79,7 +82,7 @@ func New(cfg *Config, ds dtypes.MetadataDS) (*SectorBuilder, error) {
         return nil, xerrors.Errorf("minimum worker threads is %d, specified %d", PoStReservedWorkers+1, cfg.WorkerThreads)
     }
 
-    for _, dir := range []string{cfg.StagedDir, cfg.SealedDir, cfg.CacheDir, cfg.MetadataDir} {
+    for _, dir := range []string{cfg.StagedDir, cfg.SealedDir, cfg.CacheDir, cfg.UnsealedDir} {
         if err := os.Mkdir(dir, 0755); err != nil {
             if os.IsExist(err) {
                 continue
@@ -187,13 +190,72 @@ func (sb *SectorBuilder) AddPiece(pieceSize uint64, sectorId uint64, file io.Rea
     }, werr()
 }
 
-// TODO: should *really really* return an io.ReadCloser
-func (sb *SectorBuilder) ReadPieceFromSealedSector(pieceKey string) ([]byte, error) {
-    ret := sb.RateLimit()
+func (sb *SectorBuilder) ReadPieceFromSealedSector(sectorID uint64, offset uint64, size uint64, ticket []byte, commD []byte) (io.ReadCloser, error) {
+    ret := sb.RateLimit() // TODO: check perf, consider remote unseal worker
     defer ret()
 
-    panic("fixme")
-    //return sectorbuilder.Unseal(sb.handle, pieceKey)
+    sb.unsealLk.Lock() // TODO: allow unsealing unrelated sectors in parallel
+    defer sb.unsealLk.Lock()
+
+    cacheDir, err := sb.sectorCacheDir(sectorID)
+    if err != nil {
+        return nil, err
+    }
+
+    sealedPath, err := sb.sealedSectorPath(sectorID)
+    if err != nil {
+        return nil, err
+    }
+
+    unsealedPath := sb.unsealedSectorPath(sectorID)
+
+    // TODO: GC for those
+    // (Probably configurable count of sectors to be kept unsealed, and just
+    // remove last used one (or use whatever other cache policy makes sense))
+    f, err := os.OpenFile(unsealedPath, os.O_RDONLY, 0644)
+    if err != nil {
+        if !os.IsNotExist(err) {
+            return nil, err
+        }
+
+        var commd [CommLen]byte
+        copy(commd[:], commD)
+
+        var tkt [CommLen]byte
+        copy(tkt[:], ticket)
+
+        err = sectorbuilder.Unseal(sb.ssize,
+            PoRepProofPartitions,
+            cacheDir,
+            sealedPath,
+            unsealedPath,
+            sectorID,
+            addressToProverID(sb.Miner),
+            tkt,
+            commd)
+        if err != nil {
+            return nil, xerrors.Errorf("unseal failed: %w", err)
+        }
+
+        f, err = os.OpenFile(unsealedPath, os.O_RDONLY, 0644)
+        if err != nil {
+            return nil, err
+        }
+    }
+
+    if _, err := f.Seek(int64(offset), io.SeekStart); err != nil {
+        return nil, xerrors.Errorf("seek: %w", err)
+    }
+
+    lr := io.LimitReader(f, int64(size))
+
+    return &struct {
+        io.Reader
+        io.Closer
+    }{
+        Reader: lr,
+        Closer: f,
+    }, nil
 }
 
 func (sb *SectorBuilder) SealPreCommit(sectorID uint64, ticket SealTicket, pieces []PublicPieceInfo) (RawSealPreCommitOutput, error) {
@@ -66,7 +66,7 @@ func SectorBuilderConfig(storagePath string, threads uint) func(dtypes.MetadataD
         }
 
         cache := filepath.Join(sp, "cache")
-        metadata := filepath.Join(sp, "meta")
+        unsealed := filepath.Join(sp, "unsealed")
         sealed := filepath.Join(sp, "sealed")
         staging := filepath.Join(sp, "staging")
 
@@ -76,7 +76,7 @@ func SectorBuilderConfig(storagePath string, threads uint) func(dtypes.MetadataD
             WorkerThreads: uint8(threads),
 
             CacheDir:    cache,
-            MetadataDir: metadata,
+            UnsealedDir: unsealed,
             SealedDir:   sealed,
             StagedDir:   staging,
         }
@@ -155,7 +155,7 @@ func (t *Piece) MarshalCBOR(w io.Writer) error {
         _, err := w.Write(cbg.CborNull)
         return err
     }
-    if _, err := w.Write([]byte{132}); err != nil {
+    if _, err := w.Write([]byte{131}); err != nil {
         return err
     }
 
@@ -164,14 +164,6 @@ func (t *Piece) MarshalCBOR(w io.Writer) error {
         return err
     }
 
-    // t.t.Ref (string) (string)
-    if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len(t.Ref)))); err != nil {
-        return err
-    }
-    if _, err := w.Write([]byte(t.Ref)); err != nil {
-        return err
-    }
-
     // t.t.Size (uint64) (uint64)
     if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Size))); err != nil {
         return err
@@ -198,7 +190,7 @@ func (t *Piece) UnmarshalCBOR(r io.Reader) error {
         return fmt.Errorf("cbor input should be of type array")
     }
 
-    if extra != 4 {
+    if extra != 3 {
         return fmt.Errorf("cbor input had wrong number of fields")
     }
 
@@ -212,16 +204,6 @@ func (t *Piece) UnmarshalCBOR(r io.Reader) error {
         return fmt.Errorf("wrong type for uint64 field")
     }
     t.DealID = uint64(extra)
-    // t.t.Ref (string) (string)
-
-    {
-        sval, err := cbg.ReadString(br)
-        if err != nil {
-            return err
-        }
-
-        t.Ref = string(sval)
-    }
     // t.t.Size (uint64) (uint64)
 
     maj, extra, err = cbg.CborReadHeader(br)
@@ -3,7 +3,6 @@ package storage
 import (
     "bytes"
     "context"
-    "fmt"
     "io"
     "math"
     "math/rand"
@@ -95,7 +94,6 @@ func (m *Miner) storeGarbage(ctx context.Context, sectorID uint64, existingPiece
     out := make([]Piece, len(sizes))
 
     for i, size := range sizes {
-        name := fmt.Sprintf("fake-file-%d", rand.Intn(100000000))
         ppi, err := m.sb.AddPiece(size, sectorID, io.LimitReader(rand.New(rand.NewSource(42)), int64(size)), existingPieceSizes)
         if err != nil {
             return nil, err
@@ -105,7 +103,6 @@ func (m *Miner) storeGarbage(ctx context.Context, sectorID uint64, existingPiece
 
         out[i] = Piece{
             DealID: resp.DealIDs[i],
-            Ref:    name,
             Size:   ppi.Size,
             CommP:  ppi.CommP[:],
         }
@@ -134,7 +131,7 @@ func (m *Miner) StoreGarbageData() error {
             return
         }
 
-        if err := m.newSector(context.TODO(), sid, pieces[0].DealID, pieces[0].Ref, pieces[0].ppi()); err != nil {
+        if err := m.newSector(context.TODO(), sid, pieces[0].DealID, pieces[0].ppi()); err != nil {
             log.Errorf("%+v", err)
             return
         }
@@ -2,6 +2,7 @@ package storage
 
 import (
     "context"
+    "github.com/filecoin-project/lotus/lib/padreader"
     "io"
 
     cid "github.com/ipfs/go-cid"
@@ -37,7 +38,6 @@ func (t *SealSeed) SB() sectorbuilder.SealSeed {
 
 type Piece struct {
     DealID uint64
-    Ref    string
 
     Size  uint64
     CommP []byte
@@ -97,14 +97,6 @@ func (t *SectorInfo) deals() []uint64 {
     return out
 }
 
-func (t *SectorInfo) refs() []string {
-    out := make([]string, len(t.Pieces))
-    for i, piece := range t.Pieces {
-        out[i] = piece.Ref
-    }
-    return out
-}
-
 func (t *SectorInfo) existingPieces() []uint64 {
     out := make([]uint64, len(t.Pieces))
     for i, piece := range t.Pieces {
@@ -265,30 +257,38 @@ func (m *Miner) failSector(id uint64, err error) {
     log.Errorf("sector %d error: %+v", id, err)
 }
 
-func (m *Miner) SealPiece(ctx context.Context, ref string, size uint64, r io.Reader, dealID uint64) (uint64, error) {
-    log.Infof("Seal piece for deal %d", dealID)
+func (m *Miner) AllocatePiece(size uint64) (sectorID uint64, offset uint64, err error) {
+    if padreader.PaddedSize(size) != size {
+        return 0, 0, xerrors.Errorf("cannot allocate unpadded piece")
+    }
 
     sid, err := m.sb.AcquireSectorId() // TODO: Put more than one thing in a sector
     if err != nil {
-        return 0, xerrors.Errorf("acquiring sector ID: %w", err)
+        return 0, 0, xerrors.Errorf("acquiring sector ID: %w", err)
     }
 
-    ppi, err := m.sb.AddPiece(size, sid, r, []uint64{})
+    // offset hard-coded to 0 since we only put one thing in a sector for now
+    return sid, 0, nil
+}
+
+func (m *Miner) SealPiece(ctx context.Context, size uint64, r io.Reader, sectorID uint64, dealID uint64) error {
+    log.Infof("Seal piece for deal %d", dealID)
+
+    ppi, err := m.sb.AddPiece(size, sectorID, r, []uint64{})
     if err != nil {
-        return 0, xerrors.Errorf("adding piece to sector: %w", err)
+        return xerrors.Errorf("adding piece to sector: %w", err)
     }
 
-    return sid, m.newSector(ctx, sid, dealID, ref, ppi)
+    return m.newSector(ctx, sectorID, dealID, ppi)
 }
 
-func (m *Miner) newSector(ctx context.Context, sid uint64, dealID uint64, ref string, ppi sectorbuilder.PublicPieceInfo) error {
+func (m *Miner) newSector(ctx context.Context, sid uint64, dealID uint64, ppi sectorbuilder.PublicPieceInfo) error {
     si := &SectorInfo{
         SectorID: sid,
 
         Pieces: []Piece{
             {
                 DealID: dealID,
-                Ref:    ref,
 
                 Size:  ppi.Size,
                 CommP: ppi.CommP[:],
@@ -39,10 +39,10 @@ var ErrNotFound = errors.New("not found")
 
 type SectorBlocks struct {
     *storage.Miner
+    sb *sectorbuilder.SectorBuilder
 
     intermediate blockstore.Blockstore // holds intermediate nodes TODO: consider combining with the staging blockstore
 
-    unsealed *unsealedBlocks
     keys  datastore.Batching
     keyLk sync.Mutex
 }
@@ -50,20 +50,13 @@ type SectorBlocks struct {
 func NewSectorBlocks(miner *storage.Miner, ds dtypes.MetadataDS, sb *sectorbuilder.SectorBuilder) *SectorBlocks {
     sbc := &SectorBlocks{
         Miner: miner,
+        sb:    sb,
 
         intermediate: blockstore.NewBlockstore(namespace.Wrap(ds, imBlocksPrefix)),
 
         keys: namespace.Wrap(ds, dsPrefix),
     }
 
-    unsealed := &unsealedBlocks{ // TODO: untangle this
-        sb: sb,
-
-        unsealed:  map[string][]byte{},
-        unsealing: map[string]chan struct{}{},
-    }
-
-    sbc.unsealed = unsealed
     return sbc
 }
 
@@ -77,14 +70,13 @@ type UnixfsReader interface {
 
 type refStorer struct {
     blockReader  UnixfsReader
-    writeRef     func(cid cid.Cid, pieceRef string, offset uint64, size uint64) error
+    writeRef     func(cid cid.Cid, offset uint64, size uint64) error
     intermediate blockstore.Blockstore
 
-    pieceRef  string
     remaining []byte
 }
 
-func (st *SectorBlocks) writeRef(cid cid.Cid, pieceRef string, offset uint64, size uint64) error {
+func (st *SectorBlocks) writeRef(cid cid.Cid, sectorID uint64, offset uint64, size uint64) error {
     st.keyLk.Lock() // TODO: make this multithreaded
     defer st.keyLk.Unlock()
 
@@ -104,7 +96,7 @@ func (st *SectorBlocks) writeRef(cid cid.Cid, pieceRef string, offset uint64, si
     }
 
     refs.Refs = append(refs.Refs, api.SealedRef{
-        Piece:    pieceRef,
+        SectorID: sectorID,
         Offset:   offset,
         Size:     size,
     })
@@ -147,7 +139,7 @@ func (r *refStorer) Read(p []byte) (n int, err error) {
             continue
         }
 
-        if err := r.writeRef(nd.Cid(), r.pieceRef, offset, uint64(len(data))); err != nil {
+        if err := r.writeRef(nd.Cid(), offset, uint64(len(data))); err != nil {
             return 0, xerrors.Errorf("writing ref: %w", err)
         }
 
@@ -160,22 +152,30 @@ func (r *refStorer) Read(p []byte) (n int, err error) {
     }
 }
 
-func (st *SectorBlocks) AddUnixfsPiece(ctx context.Context, ref cid.Cid, r UnixfsReader, dealID uint64) (sectorID uint64, err error) {
+func (st *SectorBlocks) AddUnixfsPiece(ctx context.Context, r UnixfsReader, dealID uint64) (sectorID uint64, err error) {
     size, err := r.Size()
     if err != nil {
         return 0, err
     }
 
+    sectorID, pieceOffset, err := st.Miner.AllocatePiece(padreader.PaddedSize(uint64(size)))
+    if err != nil {
+        return 0, err
+    }
+
     refst := &refStorer{
         blockReader: r,
-        pieceRef:    string(SerializationUnixfs0) + ref.String(),
-        writeRef:    st.writeRef,
+        writeRef: func(cid cid.Cid, offset uint64, size uint64) error {
+            offset += pieceOffset
+
+            return st.writeRef(cid, sectorID, offset, size)
+        },
         intermediate: st.intermediate,
     }
 
-    pr, psize := padreader.New(r, uint64(size))
+    pr, psize := padreader.New(refst, uint64(size))
 
-    return st.Miner.SealPiece(ctx, refst.pieceRef, psize, pr, dealID)
+    return sectorID, st.Miner.SealPiece(ctx, psize, pr, sectorID, dealID)
 }
 
 func (st *SectorBlocks) List() (map[cid.Cid][]api.SealedRef, error) {
@@ -2,6 +2,8 @@ package sectorblocks
 
 import (
     "context"
+    "golang.org/x/xerrors"
+    "io/ioutil"
 
     blocks "github.com/ipfs/go-block-format"
     "github.com/ipfs/go-cid"
@@ -67,9 +69,28 @@ func (s *SectorBlockStore) Get(c cid.Cid) (blocks.Block, error) {
         return nil, blockstore.ErrNotFound
     }
 
-    data, err := s.sectorBlocks.unsealed.getRef(context.TODO(), refs, s.approveUnseal)
+    best := refs[0] // TODO: better strategy (e.g. look for already unsealed)
+
+    si, err := s.sectorBlocks.Miner.GetSectorInfo(best.SectorID)
     if err != nil {
-        return nil, err
+        return nil, xerrors.Errorf("getting sector info: %w", err)
+    }
+
+    r, err := s.sectorBlocks.sb.ReadPieceFromSealedSector(
+        best.SectorID,
+        best.Offset,
+        best.Size,
+        si.Ticket.TicketBytes,
+        si.CommD,
+    )
+    if err != nil {
+        return nil, xerrors.Errorf("unsealing block: %w", err)
+    }
+    defer r.Close()
+
+    data, err := ioutil.ReadAll(r)
+    if err != nil {
+        return nil, xerrors.Errorf("reading block data: %w", err)
     }
 
     return blocks.NewBlockWithCid(data, c)
@@ -1,99 +0,0 @@
-package sectorblocks
-
-import (
-    "context"
-    "sync"
-
-    logging "github.com/ipfs/go-log"
-
-    "github.com/filecoin-project/lotus/api"
-    "github.com/filecoin-project/lotus/lib/sectorbuilder"
-)
-
-var log = logging.Logger("sectorblocks")
-
-type unsealedBlocks struct {
-    lk sync.Mutex
-    sb *sectorbuilder.SectorBuilder
-
-    // TODO: Treat this as some sort of cache, one with rather aggressive GC
-    // TODO: This REALLY, REALLY needs to be on-disk
-    unsealed map[string][]byte
-
-    unsealing map[string]chan struct{}
-}
-
-func (ub *unsealedBlocks) getRef(ctx context.Context, refs []api.SealedRef, approveUnseal func() error) ([]byte, error) {
-    var best api.SealedRef
-
-    ub.lk.Lock()
-    for _, ref := range refs {
-        b, ok := ub.unsealed[ref.Piece]
-        if ok {
-            ub.lk.Unlock()
-            return b[ref.Offset : ref.Offset+uint64(ref.Size)], nil
-        }
-        // TODO: pick unsealing based on how long it's running (or just select all relevant, usually it'll be just one)
-        _, ok = ub.unsealing[ref.Piece]
-        if ok {
-            best = ref
-            break
-        }
-        best = ref
-    }
-    ub.lk.Unlock()
-
-    b, err := ub.maybeUnseal(ctx, best.Piece, approveUnseal)
-    if err != nil {
-        return nil, err
-    }
-
-    return b[best.Offset : best.Offset+uint64(best.Size)], nil
-}
-
-func (ub *unsealedBlocks) maybeUnseal(ctx context.Context, pieceKey string, approveUnseal func() error) ([]byte, error) {
-    ub.lk.Lock()
-    defer ub.lk.Unlock()
-
-    out, ok := ub.unsealed[pieceKey]
-    if ok {
-        return out, nil
-    }
-
-    wait, ok := ub.unsealing[pieceKey]
-    if ok {
-        ub.lk.Unlock()
-        select {
-        case <-wait:
-            ub.lk.Lock()
-            // TODO: make sure this is not racy with gc when it's implemented
-            return ub.unsealed[pieceKey], nil
-        case <-ctx.Done():
-            ub.lk.Lock()
-            return nil, ctx.Err()
-        }
-    }
-
-    // TODO: doing this under a lock is suboptimal.. but simpler
-    err := approveUnseal()
-    if err != nil {
-        return nil, err
-    }
-
-    ub.unsealing[pieceKey] = make(chan struct{})
-    ub.lk.Unlock()
-
-    log.Infof("Unsealing piece '%s'", pieceKey)
-    data, err := ub.sb.ReadPieceFromSealedSector(pieceKey)
-    ub.lk.Lock()
-
-    if err != nil {
-        // TODO: tell subs
-        log.Error(err)
-        return nil, err
-    }
-
-    ub.unsealed[pieceKey] = data
-    close(ub.unsealing[pieceKey])
-    return data, nil
-}