Merge pull request #27 from filecoin-project/fix/unseal

Fix unseal; some improvements

commit 8c47b13d9e
@@ -108,33 +108,48 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie
 	}, werr()
 }
 
+type closerFunc func() error
+
+func (cf closerFunc) Close() error {
+	return cf()
+}
+
 func (sb *Sealer) ReadPieceFromSealedSector(ctx context.Context, sector abi.SectorID, offset UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealedCID cid.Cid) (io.ReadCloser, error) {
-	path, doneUnsealed, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTUnsealed, false)
-	if err != nil {
-		return nil, xerrors.Errorf("acquire unsealed sector path: %w", err)
-	}
-	defer doneUnsealed()
-	f, err := os.OpenFile(path.Unsealed, os.O_RDONLY, 0644)
-	if err == nil {
-		if _, err := f.Seek(int64(offset), io.SeekStart); err != nil {
-			return nil, xerrors.Errorf("seek: %w", err)
-		}
-
-		lr := io.LimitReader(f, int64(size))
-
-		return &struct {
-			io.Reader
-			io.Closer
-		}{
-			Reader: lr,
-			Closer: f,
-		}, nil
-	}
-
-	if !os.IsNotExist(err) {
-		return nil, err
-	}
+	{
+		path, doneUnsealed, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTNone, false)
+		if err != nil {
+			return nil, xerrors.Errorf("acquire unsealed sector path: %w", err)
+		}
+
+		f, err := os.OpenFile(path.Unsealed, os.O_RDONLY, 0644)
+		if err == nil {
+			if _, err := f.Seek(int64(offset), io.SeekStart); err != nil {
+				doneUnsealed()
+				return nil, xerrors.Errorf("seek: %w", err)
+			}
+
+			lr := io.LimitReader(f, int64(size))
+
+			return &struct {
+				io.Reader
+				io.Closer
+			}{
+				Reader: lr,
+				Closer: closerFunc(func() error {
+					doneUnsealed()
+					return f.Close()
+				}),
+			}, nil
+		}
+
+		doneUnsealed()
+
+		if !os.IsNotExist(err) {
+			return nil, err
+		}
+	}
 
-	sealed, doneSealed, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed|stores.FTCache, 0, false)
+	paths, doneSealed, err := sb.sectors.AcquireSector(ctx, sector, stores.FTSealed|stores.FTCache, stores.FTUnsealed, false)
 	if err != nil {
 		return nil, xerrors.Errorf("acquire sealed/cache sector path: %w", err)
 	}
@@ -145,9 +160,9 @@ func (sb *Sealer) ReadPieceFromSealedSector(ctx context.Context, sector abi.Sect
 	// remove last used one (or use whatever other cache policy makes sense))
 	err = ffi.Unseal(
 		sb.sealProofType,
-		sealed.Cache,
-		sealed.Sealed,
-		path.Unsealed,
+		paths.Cache,
+		paths.Sealed,
+		paths.Unsealed,
 		sector.Number,
 		sector.Miner,
 		ticket,
@@ -157,7 +172,7 @@ func (sb *Sealer) ReadPieceFromSealedSector(ctx context.Context, sector abi.Sect
 		return nil, xerrors.Errorf("unseal failed: %w", err)
 	}
 
-	f, err = os.OpenFile(string(path.Unsealed), os.O_RDONLY, 0644)
+	f, err := os.OpenFile(paths.Unsealed, os.O_RDONLY, 0644)
 	if err != nil {
 		return nil, err
 	}
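
Note on the fix above: the old code released the unsealed-file reservation with defer doneUnsealed() before the caller ever read from the returned reader, and it acquired the sealed/cache paths as stores.FTUnsealed|stores.FTCache rather than stores.FTSealed|stores.FTCache. The new code scopes the first acquire to a block and ties the release to Close() via the closerFunc adapter. A minimal, self-contained sketch of that adapter pattern (the acquire helper here is hypothetical, standing in for AcquireSector):

    package main

    import (
    	"fmt"
    	"io"
    	"strings"
    )

    // closerFunc adapts a plain func to io.Closer, as in this commit.
    type closerFunc func() error

    func (cf closerFunc) Close() error { return cf() }

    // acquire is a hypothetical stand-in for AcquireSector: it returns
    // a resource plus a release callback that must run after reading.
    func acquire() (io.Reader, func()) {
    	return strings.NewReader("sector bytes"), func() { fmt.Println("released") }
    }

    func main() {
    	r, release := acquire()

    	// Bundle the reader and the deferred release into one io.ReadCloser;
    	// the reservation is held until the caller calls Close.
    	rc := struct {
    		io.Reader
    		io.Closer
    	}{
    		Reader: io.LimitReader(r, 6),
    		Closer: closerFunc(func() error {
    			release()
    			return nil
    		}),
    	}

    	b, _ := io.ReadAll(rc)
    	fmt.Printf("%s\n", b) // "sector"
    	_ = rc.Close()        // prints "released"
    }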

request_queue.go (new file)

@@ -0,0 +1,36 @@
+package sectorstorage
+
+import "container/heap"
+
+type requestQueue []*workerRequest
+
+func (q requestQueue) Len() int { return len(q) }
+
+func (q requestQueue) Less(i, j int) bool {
+	return q[i].taskType.Less(q[j].taskType)
+}
+
+func (q requestQueue) Swap(i, j int) {
+	q[i], q[j] = q[j], q[i]
+	q[i].index = i
+	q[j].index = j
+}
+
+func (q *requestQueue) Push(x interface{}) {
+	n := len(*q)
+	item := x.(*workerRequest)
+	item.index = n
+	*q = append(*q, item)
+}
+
+func (q *requestQueue) Pop() interface{} {
+	old := *q
+	n := len(old)
+	item := old[n-1]
+	old[n-1] = nil  // avoid memory leak
+	item.index = -1 // for safety
+	*q = old[0 : n-1]
+	return item
+}
+
+var _ heap.Interface = &requestQueue{}
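
requestQueue is a standard container/heap implementation, and container/heap is a min-heap: heap.Pop returns the element for which Less is smallest, here the request whose taskType has the lowest order value (the order table is added in sealtasks at the end of this diff). Within the package it is used roughly like this, per the sched.go hunks below:

    rq := &requestQueue{}
    heap.Push(rq, &workerRequest{taskType: sealtasks.TTAddPiece})
    heap.Push(rq, &workerRequest{taskType: sealtasks.TTFinalize})

    // Pops TTFinalize first: order[TTFinalize] == 1 < order[TTAddPiece] == 7.
    req := heap.Pop(rq).(*workerRequest)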

request_queue_test.go (new file)

@@ -0,0 +1,30 @@
+package sectorstorage
+
+import (
+	"container/heap"
+	"testing"
+
+	"github.com/filecoin-project/sector-storage/sealtasks"
+)
+
+func TestRequestQueue(t *testing.T) {
+	rq := &requestQueue{}
+
+	heap.Push(rq, &workerRequest{taskType: sealtasks.TTAddPiece})
+	heap.Push(rq, &workerRequest{taskType: sealtasks.TTPreCommit1})
+	heap.Push(rq, &workerRequest{taskType: sealtasks.TTPreCommit2})
+	heap.Push(rq, &workerRequest{taskType: sealtasks.TTPreCommit1})
+	heap.Push(rq, &workerRequest{taskType: sealtasks.TTAddPiece})
+
+	pt := heap.Pop(rq).(*workerRequest)
+
+	if pt.taskType != sealtasks.TTPreCommit2 {
+		t.Error("expected precommit2, got", pt.taskType)
+	}
+
+	pt = heap.Pop(rq).(*workerRequest)
+
+	if pt.taskType != sealtasks.TTPreCommit1 {
+		t.Error("expected precommit1, got", pt.taskType)
+	}
+}
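
The expected pop order follows from the order table: TTPreCommit2 (order 5) ranks below TTPreCommit1 (order 6), so it comes out first, and the TTAddPiece requests (order 7) would come out last. The test runs on its own with:

    go test -run TestRequestQueue ./...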

resources.go

@@ -73,7 +73,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{
 	sealtasks.TTPreCommit1: {
 		abi.RegisteredProof_StackedDRG32GiBSeal: Resources{
 			MaxMemory: 64 << 30,
-			MinMemory: 32 << 30,
+			MinMemory: 48 << 30,
 
 			Threads: 1,
 
@@ -106,10 +106,11 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{
 	},
 	sealtasks.TTPreCommit2: {
 		abi.RegisteredProof_StackedDRG32GiBSeal: Resources{
-			MaxMemory: 96 << 30,
-			MinMemory: 64 << 30,
+			MaxMemory: 32 << 30,
+			MinMemory: 32 << 30,
 
 			Threads: -1,
+			CanGPU:  true,
 
 			BaseMinMemory: 30 << 30,
 		},
@@ -172,9 +173,9 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{
 			BaseMinMemory: 8 << 20,
 		},
 	},
-	sealtasks.TTCommit2: { // TODO: Measure more accurately
+	sealtasks.TTCommit2: {
 		abi.RegisteredProof_StackedDRG32GiBSeal: Resources{
-			MaxMemory: 110 << 30,
+			MaxMemory: 130 << 30,
 			MinMemory: 60 << 30,
 
 			Threads: -1,
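
The memory figures use shift notation: n << 30 is n·2³⁰ bytes, i.e. n GiB, and 8 << 20 is 8 MiB. So the changes above raise PreCommit1's minimum from 32 GiB to 48 GiB, drop PreCommit2 to 32 GiB max and min while allowing it a GPU, and raise Commit2's maximum from 110 GiB to 130 GiB. For example:

    fmt.Println(48 << 30) // 51539607552 bytes = 48 GiB
    fmt.Println(8 << 20)  // 8388608 bytes = 8 MiB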

sched.go

@@ -1,7 +1,7 @@
 package sectorstorage
 
 import (
-	"container/list"
+	"container/heap"
 	"context"
 	"sort"
 	"sync"
@@ -41,7 +41,7 @@ type scheduler struct {
 	workerFree chan WorkerID
 	closing    chan struct{}
 
-	schedQueue *list.List // List[*workerRequest]
+	schedQueue *requestQueue
 }
 
 func newScheduler(spt abi.RegisteredProof) *scheduler {
@@ -60,7 +60,7 @@ func newScheduler(spt abi.RegisteredProof) *scheduler {
 		workerFree: make(chan WorkerID),
 		closing:    make(chan struct{}),
 
-		schedQueue: list.New(),
+		schedQueue: &requestQueue{},
 	}
 }
 
@@ -101,6 +101,8 @@ type workerRequest struct {
 	prepare WorkerAction
 	work    WorkerAction
 
+	index int // The index of the item in the heap.
+
 	ret chan<- workerResponse
 	ctx context.Context
 }
@@ -154,7 +156,7 @@ func (sh *scheduler) runSched() {
 				continue
 			}
 
-			sh.schedQueue.PushBack(req)
+			heap.Push(sh.schedQueue, req)
 		case wid := <-sh.workerFree:
 			sh.onWorkerFreed(wid)
 		case <-sh.closing:
@@ -173,8 +175,8 @@ func (sh *scheduler) onWorkerFreed(wid WorkerID) {
 		return
 	}
 
-	for e := sh.schedQueue.Front(); e != nil; e = e.Next() {
-		req := e.Value.(*workerRequest)
+	for i := 0; i < sh.schedQueue.Len(); i++ {
+		req := (*sh.schedQueue)[i]
 
 		ok, err := req.sel.Ok(req.ctx, req.taskType, w)
 		if err != nil {
@@ -193,15 +195,8 @@ func (sh *scheduler) onWorkerFreed(wid WorkerID) {
 		}
 
 		if scheduled {
-			pe := e.Prev()
-			sh.schedQueue.Remove(e)
-			if pe == nil {
-				pe = sh.schedQueue.Front()
-			}
-			if pe == nil {
-				break
-			}
-			e = pe
+			heap.Remove(sh.schedQueue, i)
+			i--
 			continue
 		}
 	}
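
Swapping the *list.List for a heap also simplifies removal during the scan above: heap.Remove(q, i) deletes the i-th element and restores the heap invariant, and the i-- re-examines whichever element was moved into slot i. A self-contained sketch of that pattern (a plain int heap stands in for the request queue):

    package main

    import (
    	"container/heap"
    	"fmt"
    )

    type intHeap []int

    func (h intHeap) Len() int           { return len(h) }
    func (h intHeap) Less(i, j int) bool { return h[i] < h[j] }
    func (h intHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }

    func (h *intHeap) Push(x interface{}) { *h = append(*h, x.(int)) }

    func (h *intHeap) Pop() interface{} {
    	old := *h
    	n := len(old)
    	x := old[n-1]
    	*h = old[:n-1]
    	return x
    }

    func main() {
    	h := &intHeap{}
    	for _, v := range []int{5, 2, 8, 3, 6} {
    		heap.Push(h, v)
    	}

    	// Remove every even element in place, mirroring the scheduler's
    	// "heap.Remove + i--" scan: after a removal, slot i holds a new
    	// element that must be re-checked before advancing.
    	for i := 0; i < h.Len(); i++ {
    		if (*h)[i]%2 == 0 {
    			heap.Remove(h, i)
    			i--
    		}
    	}

    	for h.Len() > 0 {
    		fmt.Println(heap.Pop(h)) // prints 3, then 5
    	}
    }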

@@ -13,3 +13,17 @@ const (
 
 	TTFetch TaskType = "seal/v0/fetch"
 )
+
+var order = map[TaskType]int{
+	TTAddPiece:   7,
+	TTPreCommit1: 6,
+	TTPreCommit2: 5,
+	TTCommit2:    4,
+	TTCommit1:    3,
+	TTFetch:      2,
+	TTFinalize:   1,
+}
+
+func (a TaskType) Less(b TaskType) bool {
+	return order[a] < order[b]
+}
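
This final hunk (in the sealtasks package; its file header is missing from this view) supplies the ordering that requestQueue sorts by. Since Less compares the mapped integers, sorting task types with it arranges them from lowest to highest order value, which is the sequence the scheduler's min-heap pops them in. A quick sketch (assuming the sealtasks import path used in the test above):

    package main

    import (
    	"fmt"
    	"sort"

    	"github.com/filecoin-project/sector-storage/sealtasks"
    )

    func main() {
    	tasks := []sealtasks.TaskType{
    		sealtasks.TTAddPiece,   // order 7
    		sealtasks.TTFinalize,   // order 1
    		sealtasks.TTPreCommit2, // order 5
    	}

    	// TaskType.Less sorts ascending by order value: the result is
    	// TTFinalize, TTPreCommit2, TTAddPiece.
    	sort.Slice(tasks, func(i, j int) bool { return tasks[i].Less(tasks[j]) })
    	fmt.Println(tasks)
    }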