Merge remote-tracking branch 'origin/next' into feat/fetch-reserve-space

commit deec7a2658

manager.go (50 changes)
@@ -213,12 +213,9 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect
 	var selector WorkerSelector
 	if len(best) == 0 { // new
-		selector = newAllocSelector(ctx, m.index, stores.FTUnsealed, stores.PathSealing)
+		selector = newAllocSelector(m.index, stores.FTUnsealed, stores.PathSealing)
 	} else { // append to existing
-		selector, err = newExistingSelector(ctx, m.index, sector, stores.FTUnsealed, false)
-	}
-	if err != nil {
-		return xerrors.Errorf("creating unsealPiece selector: %w", err)
+		selector = newExistingSelector(m.index, sector, stores.FTUnsealed, false)
 	}

 	var readOk bool
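
This hunk and the ones below all make the same two call-site changes: the selector constructors lose their context.Context argument and stop returning an error, so the surrounding "if err != nil" blocks disappear. A minimal, self-contained sketch of the resulting call-site shape; WorkerSelector, allocSelector and existingSelector here are simplified stand-ins, not the real sector-storage types:

package main

import "fmt"

// WorkerSelector is a stand-in for the scheduler's selector interface.
type WorkerSelector interface {
	Name() string
}

type allocSelector struct{}

func (allocSelector) Name() string { return "alloc" }

type existingSelector struct{ sector int }

func (s existingSelector) Name() string { return fmt.Sprintf("existing(%d)", s.sector) }

// pickSelector mirrors the ReadPiece/AddPiece branch: allocate a fresh path when
// nothing is stored yet, otherwise stay on stores that already hold the sector.
func pickSelector(existingCopies int, sector int) WorkerSelector {
	var selector WorkerSelector
	if existingCopies == 0 { // new
		selector = allocSelector{}
	} else { // append to existing
		selector = existingSelector{sector: sector}
	}
	return selector
}

func main() {
	fmt.Println(pickSelector(0, 7).Name()) // alloc
	fmt.Println(pickSelector(2, 7).Name()) // existing(7)
}

The remaining manager.go hunks repeat this shape for the rest of ReadPiece, AddPiece, the precommit/commit steps, FinalizeSector, and Remove.
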
@@ -226,10 +223,7 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect
 	if len(best) > 0 {
 		// There is unsealed sector, see if we can read from it

-		selector, err = newExistingSelector(ctx, m.index, sector, stores.FTUnsealed, false)
-		if err != nil {
-			return xerrors.Errorf("creating readPiece selector: %w", err)
-		}
+		selector = newExistingSelector(m.index, sector, stores.FTUnsealed, false)

 		err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, schedFetch(sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error {
 			readOk, err = w.ReadPiece(ctx, sink, sector, offset, size)
@@ -264,10 +258,7 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect
 		return err
 	}

-	selector, err = newExistingSelector(ctx, m.index, sector, stores.FTUnsealed, false)
-	if err != nil {
-		return xerrors.Errorf("creating readPiece selector: %w", err)
-	}
+	selector = newExistingSelector(m.index, sector, stores.FTUnsealed, false)

 	err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, schedFetch(sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error {
 		readOk, err = w.ReadPiece(ctx, sink, sector, offset, size)
@@ -300,12 +291,9 @@ func (m *Manager) AddPiece(ctx context.Context, sector abi.SectorID, existingPie
 	var selector WorkerSelector
 	var err error
 	if len(existingPieces) == 0 { // new
-		selector = newAllocSelector(ctx, m.index, stores.FTUnsealed, stores.PathSealing)
+		selector = newAllocSelector(m.index, stores.FTUnsealed, stores.PathSealing)
 	} else { // use existing
-		selector, err = newExistingSelector(ctx, m.index, sector, stores.FTUnsealed, false)
-	}
-	if err != nil {
-		return abi.PieceInfo{}, xerrors.Errorf("creating path selector: %w", err)
+		selector = newExistingSelector(m.index, sector, stores.FTUnsealed, false)
 	}

 	var out abi.PieceInfo
@@ -331,7 +319,7 @@ func (m *Manager) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke

 	// TODO: also consider where the unsealed data sits

-	selector := newAllocSelector(ctx, m.index, stores.FTCache|stores.FTSealed, stores.PathSealing)
+	selector := newAllocSelector(m.index, stores.FTCache|stores.FTSealed, stores.PathSealing)

 	err = m.sched.Schedule(ctx, sector, sealtasks.TTPreCommit1, selector, schedFetch(sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error {
 		p, err := w.SealPreCommit1(ctx, sector, ticket, pieces)
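
Only the ctx argument changes in this hunk; the detail worth noting is the stores.FTCache|stores.FTSealed argument, which combines sector file types as bit flags so one selector covers both the cache and the sealed file. A small, self-contained sketch of that flag style; the constant values below are illustrative assumptions, not read from the stores package:

package main

import "fmt"

// SectorFileType-style bit flags. The names mirror the stores package, but the
// concrete values here are assumptions for illustration.
type SectorFileType int

const (
	FTUnsealed SectorFileType = 1 << iota // 1
	FTSealed                              // 2
	FTCache                               // 4
)

// Has reports whether every file type in want is set in t.
func (t SectorFileType) Has(want SectorFileType) bool {
	return t&want == want
}

func main() {
	alloc := FTCache | FTSealed // what the SealPreCommit1 selector asks to allocate
	fmt.Println(alloc.Has(FTCache))    // true
	fmt.Println(alloc.Has(FTUnsealed)) // false
}
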
@@ -353,10 +341,7 @@ func (m *Manager) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase
 		return storage.SectorCids{}, xerrors.Errorf("acquiring sector lock: %w", err)
 	}

-	selector, err := newExistingSelector(ctx, m.index, sector, stores.FTCache|stores.FTSealed, true)
-	if err != nil {
-		return storage.SectorCids{}, xerrors.Errorf("creating path selector: %w", err)
-	}
+	selector := newExistingSelector(m.index, sector, stores.FTCache|stores.FTSealed, true)

 	err = m.sched.Schedule(ctx, sector, sealtasks.TTPreCommit2, selector, schedFetch(sector, stores.FTCache|stores.FTSealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error {
 		p, err := w.SealPreCommit2(ctx, sector, phase1Out)
@@ -380,10 +365,7 @@ func (m *Manager) SealCommit1(ctx context.Context, sector abi.SectorID, ticket a
 	// NOTE: We set allowFetch to false in so that we always execute on a worker
 	// with direct access to the data. We want to do that because this step is
 	// generally very cheap / fast, and transferring data is not worth the effort
-	selector, err := newExistingSelector(ctx, m.index, sector, stores.FTCache|stores.FTSealed, false)
-	if err != nil {
-		return storage.Commit1Out{}, xerrors.Errorf("creating path selector: %w", err)
-	}
+	selector := newExistingSelector(m.index, sector, stores.FTCache|stores.FTSealed, false)

 	err = m.sched.Schedule(ctx, sector, sealtasks.TTCommit1, selector, schedFetch(sector, stores.FTCache|stores.FTSealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error {
 		p, err := w.SealCommit1(ctx, sector, ticket, seed, pieces, cids)
@@ -431,12 +413,9 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID, keepU
 		}
 	}

-	selector, err := newExistingSelector(ctx, m.index, sector, stores.FTCache|stores.FTSealed, false)
-	if err != nil {
-		return xerrors.Errorf("creating path selector: %w", err)
-	}
+	selector := newExistingSelector(m.index, sector, stores.FTCache|stores.FTSealed, false)

-	err = m.sched.Schedule(ctx, sector, sealtasks.TTFinalize, selector,
+	err := m.sched.Schedule(ctx, sector, sealtasks.TTFinalize, selector,
 		schedFetch(sector, stores.FTCache|stores.FTSealed|unsealed, stores.PathSealing, stores.AcquireMove),
 		func(ctx context.Context, w Worker) error {
 			return w.FinalizeSector(ctx, sector, keepUnsealed)
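
Besides dropping the selector's error handling, this hunk turns "err = m.sched.Schedule(...)" into "err := ...": with "selector, err := ..." gone, nothing declares err earlier in FinalizeSector, so its first use must be a short variable declaration. A tiny standalone illustration of that Go scoping rule; the schedule function is made up for the example:

package main

import (
	"errors"
	"fmt"
)

// schedule stands in for any call that returns only an error.
func schedule() error { return errors.New("no workers") }

func main() {
	// "err = schedule()" would not compile here: err has not been declared yet.
	// Once the selector constructor no longer returns an error, the first use
	// of err in the function has to declare it.
	err := schedule()
	fmt.Println(err)
}
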
@@ -445,7 +424,7 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID, keepU
 		return err
 	}

-	fetchSel := newAllocSelector(ctx, m.index, stores.FTCache|stores.FTSealed, stores.PathStorage)
+	fetchSel := newAllocSelector(m.index, stores.FTCache|stores.FTSealed, stores.PathStorage)
 	moveUnsealed := unsealed
 	{
 		if len(keepUnsealed) == 0 {
@@ -490,10 +469,7 @@ func (m *Manager) Remove(ctx context.Context, sector abi.SectorID) error {
 		}
 	}

-	selector, err := newExistingSelector(ctx, m.index, sector, stores.FTCache|stores.FTSealed, false)
-	if err != nil {
-		return xerrors.Errorf("creating selector: %w", err)
-	}
+	selector := newExistingSelector(m.index, sector, stores.FTCache|stores.FTSealed, false)

 	return m.sched.Schedule(ctx, sector, sealtasks.TTFinalize, selector,
 		schedFetch(sector, stores.FTCache|stores.FTSealed|unsealed, stores.PathStorage, stores.AcquireMove),
@@ -207,7 +207,7 @@ func TestSched(t *testing.T) {
 		done := make(chan struct{})
 		rm.done[taskName] = done

-		sel := newAllocSelector(ctx, index, stores.FTCache, stores.PathSealing)
+		sel := newAllocSelector(index, stores.FTCache, stores.PathSealing)

 		rm.wg.Add(1)
 		go func() {
@@ -17,7 +17,7 @@ type allocSelector struct {
 	ptype stores.PathType
 }

-func newAllocSelector(ctx context.Context, index stores.SectorIndex, alloc stores.SectorFileType, ptype stores.PathType) *allocSelector {
+func newAllocSelector(index stores.SectorIndex, alloc stores.SectorFileType, ptype stores.PathType) *allocSelector {
 	return &allocSelector{
 		index: index,
 		alloc: alloc,
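
newAllocSelector drops a context.Context parameter that the constructor does not appear to use; the context keeps flowing only into the methods that actually query the index. A minimal sketch of that convention, with an invented picker type standing in for the selector:

package main

import (
	"context"
	"fmt"
	"time"
)

// picker is an invented stand-in for a selector: construction just records
// configuration, so it takes no context and cannot fail.
type picker struct {
	wantFast bool
}

func newPicker(wantFast bool) *picker {
	return &picker{wantFast: wantFast}
}

// Ok does the actual work, so it is the method that accepts a context and
// honours cancellation.
func (p *picker) Ok(ctx context.Context) (bool, error) {
	select {
	case <-ctx.Done():
		return false, ctx.Err()
	case <-time.After(10 * time.Millisecond): // pretend to consult an index
		return p.wantFast, nil
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	ok, err := newPicker(true).Ok(ctx)
	fmt.Println(ok, err) // true <nil>
}
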
@@ -12,18 +12,19 @@ import (
 )

 type existingSelector struct {
-	best []stores.SectorStorageInfo
+	index      stores.SectorIndex
+	sector     abi.SectorID
+	alloc      stores.SectorFileType
+	allowFetch bool
 }

-func newExistingSelector(ctx context.Context, index stores.SectorIndex, sector abi.SectorID, alloc stores.SectorFileType, allowFetch bool) (*existingSelector, error) {
-	best, err := index.StorageFindSector(ctx, sector, alloc, allowFetch)
-	if err != nil {
-		return nil, err
-	}
-
+func newExistingSelector(index stores.SectorIndex, sector abi.SectorID, alloc stores.SectorFileType, allowFetch bool) *existingSelector {
 	return &existingSelector{
-		best: best,
-	}, nil
+		index:      index,
+		sector:     sector,
+		alloc:      alloc,
+		allowFetch: allowFetch,
+	}
 }

 func (s *existingSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredSealProof, whnd *workerHandle) (bool, error) {
@@ -45,7 +46,12 @@ func (s *existingSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt
 		have[path.ID] = struct{}{}
 	}

-	for _, info := range s.best {
+	best, err := s.index.StorageFindSector(ctx, s.sector, s.alloc, s.allowFetch)
+	if err != nil {
+		return false, xerrors.Errorf("finding best storage: %w", err)
+	}
+
+	for _, info := range best {
 		if _, ok := have[info.ID]; ok {
 			return true, nil
 		}
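
Taken together, the last two hunks move the StorageFindSector call out of the constructor and into Ok: existingSelector now stores only its query parameters, so building it cannot fail and needs no context, and each Ok call queries the index at decision time instead of reusing a snapshot taken when the selector was built. A compact sketch of that lazy shape; sectorIndex, FindSector and the string IDs below are simplified assumptions, not the real stores API:

package main

import (
	"context"
	"fmt"
)

// sectorIndex is a simplified stand-in for stores.SectorIndex.
type sectorIndex interface {
	FindSector(ctx context.Context, sector string) ([]string, error) // returns storage IDs
}

// existingSelector (lazy form): only the query parameters are stored, so the
// constructor cannot fail and needs no context.
type existingSelector struct {
	index  sectorIndex
	sector string
}

func newExistingSelector(index sectorIndex, sector string) *existingSelector {
	return &existingSelector{index: index, sector: sector}
}

// Ok runs the lookup at decision time and checks whether the candidate worker
// can reach any store that currently holds the sector.
func (s *existingSelector) Ok(ctx context.Context, workerStores []string) (bool, error) {
	have := map[string]struct{}{}
	for _, id := range workerStores {
		have[id] = struct{}{}
	}

	best, err := s.index.FindSector(ctx, s.sector)
	if err != nil {
		return false, fmt.Errorf("finding best storage: %w", err)
	}

	for _, id := range best {
		if _, ok := have[id]; ok {
			return true, nil
		}
	}
	return false, nil
}

type mapIndex map[string][]string

func (m mapIndex) FindSector(_ context.Context, sector string) ([]string, error) {
	return m[sector], nil
}

func main() {
	idx := mapIndex{"s-2": {"store-a", "store-b"}}
	sel := newExistingSelector(idx, "s-2")

	ok, _ := sel.Ok(context.Background(), []string{"store-b"})
	fmt.Println(ok) // true: the worker can reach a store that holds the sector
}

The trade-off is that the index lookup now runs on every Ok call, roughly once per candidate worker, rather than once per selector, in exchange for always seeing the index's current contents.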