// Package sectorstorage schedules sealing tasks across local and remote workers
// and tracks where sector data is stored.
package sectorstorage
2020-03-03 22:19:22 +00:00
import (
2020-03-20 22:30:17 +00:00
"container/list"
2020-03-03 22:19:22 +00:00
"context"
"io"
2020-03-11 05:49:17 +00:00
"net/http"
2020-03-18 23:23:28 +00:00
"sync"
2020-03-03 22:19:22 +00:00
"github.com/ipfs/go-cid"
2020-03-11 01:57:52 +00:00
logging "github.com/ipfs/go-log/v2"
"github.com/mitchellh/go-homedir"
"golang.org/x/xerrors"
2020-03-03 22:19:22 +00:00
"github.com/filecoin-project/go-sectorbuilder"
"github.com/filecoin-project/specs-actors/actors/abi"
2020-03-23 11:40:02 +00:00
"github.com/filecoin-project/specs-storage/storage"
2020-03-03 22:19:22 +00:00
2020-03-19 02:23:21 +00:00
"github.com/filecoin-project/lotus/api"
2020-03-03 22:19:22 +00:00
"github.com/filecoin-project/lotus/node/config"
2020-03-23 11:40:02 +00:00
"github.com/filecoin-project/lotus/storage/sectorstorage/sealtasks"
"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
2020-03-03 22:19:22 +00:00
)
// log is the package-level logger for the advanced sector manager.
var log = logging.Logger("advmgr")
// URLs is a list of URL strings pointing at storage endpoints reachable
// from this node.
type URLs []string
// Worker abstracts a sealing worker: it can execute sealing operations
// (via the embedded sectorbuilder.Sealer) and report which task types
// and storage paths it supports.
type Worker interface {
	sectorbuilder.Sealer

	// TaskTypes returns the set of sealing task types this worker can run.
	TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error)

	// Returns paths accessible to the worker
	Paths(context.Context) ([]stores.StoragePath, error)

	// Info returns metadata describing the worker.
	Info(context.Context) (api.WorkerInfo, error)
}
// SectorManager is the top-level interface used to seal sectors, read
// sealed data back, and produce proofs.
type SectorManager interface {
	// SectorSize reports the sector size for the configured seal proof type.
	SectorSize() abi.SectorSize

	// ReadPieceFromSealedSector reads a piece back out of a sealed sector.
	ReadPieceFromSealedSector(context.Context, abi.SectorID, sectorbuilder.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (io.ReadCloser, error)

	sectorbuilder.Sealer
	storage.Prover
}
// WorkerID uniquely identifies a worker registered with the Manager.
type WorkerID uint64
2020-03-20 22:30:17 +00:00
2020-03-03 22:19:22 +00:00
// Manager implements SectorManager by dispatching sealing tasks to a
// dynamic set of workers through an internal scheduler (runSched).
type Manager struct {
	scfg *sectorbuilder.Config

	ls         stores.LocalStorage  // persisted storage-path configuration
	storage    *stores.Remote       // store capable of fetching sectors from remote nodes
	localStore *stores.Local        // locally attached storage paths
	remoteHnd  *stores.FetchHandler // HTTP handler serving local sector data to peers
	index      stores.SectorIndex   // index of which sectors live in which storage paths

	storage.Prover // proving is delegated to a locally constructed prover

	// workersLk guards nextWorker and workers.
	workersLk  sync.Mutex
	nextWorker WorkerID
	workers    map[WorkerID]*workerHandle

	// Scheduler channels, consumed by the runSched goroutine.
	newWorkers chan *workerHandle
	schedule   chan *workerRequest
	workerFree chan WorkerID
	closing    chan struct{}

	schedQueue *list.List // List[*workerRequest]
}
2020-03-19 15:10:19 +00:00
func New(ls stores.LocalStorage, si stores.SectorIndex, cfg *sectorbuilder.Config, urls URLs, ca api.Common) (*Manager, error) {
ctx := context.TODO()
lstor, err := stores.NewLocal(ctx, ls, si, urls)
if err != nil {
return nil, err
}
2020-03-19 02:23:21 +00:00
prover, err := sectorbuilder.New(&readonlyProvider{stor: lstor}, cfg)
if err != nil {
return nil, xerrors.Errorf("creating prover instance: %w", err)
}
2020-03-19 02:23:21 +00:00
token, err := ca.AuthNew(context.TODO(), []api.Permission{"admin"})
headers := http.Header{}
headers.Add("Authorization", "Bearer "+string(token))
stor := stores.NewRemote(lstor, si, headers)
m := &Manager{
scfg: cfg,
2020-03-19 02:23:21 +00:00
ls: ls,
storage: stor,
localStore: lstor,
remoteHnd: &stores.FetchHandler{Local: lstor},
2020-03-19 15:10:19 +00:00
index: si,
2020-03-20 22:30:17 +00:00
nextWorker: 0,
2020-03-23 14:56:22 +00:00
workers: map[WorkerID]*workerHandle{},
2020-03-20 22:30:17 +00:00
newWorkers: make(chan *workerHandle),
schedule: make(chan *workerRequest),
2020-03-23 14:56:22 +00:00
workerFree: make(chan WorkerID),
2020-03-20 22:30:17 +00:00
closing: make(chan struct{}),
2020-03-22 20:45:12 +00:00
schedQueue: list.New(),
Prover: prover,
}
2020-03-20 22:30:17 +00:00
go m.runSched()
err = m.AddWorker(ctx, NewLocalWorker(WorkerConfig{
SealProof: cfg.SealProofType,
2020-03-23 11:40:02 +00:00
TaskTypes: []sealtasks.TaskType{sealtasks.TTAddPiece, sealtasks.TTCommit1, sealtasks.TTFinalize},
2020-03-20 22:30:17 +00:00
}, stor, lstor, si))
if err != nil {
return nil, xerrors.Errorf("adding local worker: %w", err)
}
return m, nil
2020-03-03 22:19:22 +00:00
}
2020-03-19 15:10:19 +00:00
func (m *Manager) AddLocalStorage(ctx context.Context, path string) error {
path, err := homedir.Expand(path)
if err != nil {
return xerrors.Errorf("expanding local path: %w", err)
}
2020-03-19 15:10:19 +00:00
if err := m.localStore.OpenPath(ctx, path); err != nil {
return xerrors.Errorf("opening local path: %w", err)
}
if err := m.ls.SetStorage(func(sc *config.StorageConfig) {
2020-03-09 19:22:30 +00:00
sc.StoragePaths = append(sc.StoragePaths, config.LocalPath{Path: path})
}); err != nil {
return xerrors.Errorf("get storage config: %w", err)
}
return nil
}
2020-03-20 22:30:17 +00:00
func (m *Manager) AddWorker(ctx context.Context, w Worker) error {
info, err := w.Info(ctx)
if err != nil {
return xerrors.Errorf("getting worker info: %w", err)
}
2020-03-18 23:23:28 +00:00
2020-03-20 22:30:17 +00:00
m.newWorkers <- &workerHandle{
w: w,
info: info,
}
2020-03-18 23:23:28 +00:00
return nil
}
2020-03-11 05:49:17 +00:00
// ServeHTTP delegates to the fetch handler, which serves local sector
// data to remote nodes.
func (m *Manager) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	m.remoteHnd.ServeHTTP(w, r)
}
// SectorSize returns the sector size for the configured seal proof type.
func (m *Manager) SectorSize() abi.SectorSize {
	// error deliberately ignored — presumably the proof type was validated
	// when the config was built; TODO confirm against config construction.
	sz, _ := m.scfg.SealProofType.SectorSize()
	return sz
}
2020-03-17 20:19:52 +00:00
// ReadPieceFromSealedSector is not implemented yet; calling it panics.
func (m *Manager) ReadPieceFromSealedSector(context.Context, abi.SectorID, sectorbuilder.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (io.ReadCloser, error) {
	panic("implement me")
}
2020-03-23 14:56:22 +00:00
// getWorkersByPaths returns the IDs of workers that both support the given
// task type and have access to at least one of inPaths, together with the
// storage path selected for each such worker.
func (m *Manager) getWorkersByPaths(task sealtasks.TaskType, inPaths []stores.StorageInfo) ([]WorkerID, map[WorkerID]stores.StorageInfo) {
	m.workersLk.Lock()
	defer m.workersLk.Unlock()

	var workers []WorkerID
	paths := map[WorkerID]stores.StorageInfo{}

	for i, worker := range m.workers {
		tt, err := worker.w.TaskTypes(context.TODO())
		if err != nil {
			log.Errorf("error getting supported worker task types: %+v", err)
			continue
		}
		if _, ok := tt[task]; !ok {
			log.Debugf("dropping worker %d; task %s not supported (supports %v)", i, task, tt)
			continue
		}

		phs, err := worker.w.Paths(context.TODO())
		if err != nil {
			log.Errorf("error getting worker paths: %+v", err)
			continue
		}

		// check if the worker has access to the path we selected
		var st *stores.StorageInfo
		for _, p := range phs {
			for _, meta := range inPaths {
				if p.ID == meta.ID {
					// NOTE(review): this compares the previously selected
					// entry's weight against the worker-local path weight,
					// then copies the index metadata (meta) rather than the
					// worker path — the asymmetry looks suspicious; confirm
					// intended weight semantics before changing.
					if st != nil && st.Weight > p.Weight {
						continue
					}

					p := meta // copy
					st = &p
				}
			}
		}
		if st == nil {
			log.Debugf("skipping worker %d; doesn't have any of %v", i, inPaths)
			log.Debugf("skipping worker %d; only has %v", i, phs)
			continue
		}

		paths[i] = *st
		workers = append(workers, i)
	}

	return workers, paths
}
2020-03-23 14:56:22 +00:00
func (m *Manager) getWorker(ctx context.Context, taskType sealtasks.TaskType, accept []WorkerID) (Worker, func(), error) {
2020-03-20 22:30:17 +00:00
ret := make(chan workerResponse)
select {
case m.schedule <- &workerRequest{
taskType: taskType,
accept: accept,
cancel: ctx.Done(),
ret: ret,
}:
case <-m.closing:
return nil, nil, xerrors.New("closing")
case <-ctx.Done():
return nil, nil, ctx.Err()
}
select {
case resp := <-ret:
return resp.worker, resp.done, resp.err
case <-m.closing:
return nil, nil, xerrors.New("closing")
case <-ctx.Done():
return nil, nil, ctx.Err()
}
}
2020-03-17 20:19:52 +00:00
// NewSector is currently a stub; it logs a warning and does nothing.
func (m *Manager) NewSector(ctx context.Context, sector abi.SectorID) error {
	log.Warnf("stub NewSector")
	return nil
}
func (m *Manager) AddPiece(ctx context.Context, sector abi.SectorID, existingPieces []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) {
2020-03-05 18:18:33 +00:00
// TODO: consider multiple paths vs workers when initially allocating
2020-03-19 15:10:19 +00:00
var best []stores.StorageInfo
2020-03-05 18:18:33 +00:00
var err error
if len(existingPieces) == 0 { // new
2020-03-19 15:10:19 +00:00
best, err = m.index.StorageBestAlloc(ctx, sectorbuilder.FTUnsealed, true)
2020-03-05 18:18:33 +00:00
} else { // append to existing
best, err = m.index.StorageFindSector(ctx, sector, sectorbuilder.FTUnsealed, false)
2020-03-05 18:18:33 +00:00
}
if err != nil {
return abi.PieceInfo{}, xerrors.Errorf("finding sector path: %w", err)
}
2020-03-18 23:23:28 +00:00
log.Debugf("find workers for %v", best)
2020-03-23 11:40:02 +00:00
candidateWorkers, _ := m.getWorkersByPaths(sealtasks.TTAddPiece, best)
2020-03-05 18:18:33 +00:00
if len(candidateWorkers) == 0 {
return abi.PieceInfo{}, xerrors.New("no worker found")
}
2020-03-23 11:40:02 +00:00
worker, done, err := m.getWorker(ctx, sealtasks.TTAddPiece, candidateWorkers)
2020-03-20 22:30:17 +00:00
if err != nil {
return abi.PieceInfo{}, xerrors.Errorf("scheduling worker: %w", err)
}
defer done()
2020-03-05 18:18:33 +00:00
// TODO: select(candidateWorkers, ...)
// TODO: remove the sectorbuilder abstraction, pass path directly
2020-03-20 22:30:17 +00:00
return worker.AddPiece(ctx, sector, existingPieces, sz, r)
2020-03-03 22:19:22 +00:00
}
2020-03-23 11:40:02 +00:00
func (m *Manager) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) {
2020-03-05 18:18:33 +00:00
// TODO: also consider where the unsealed data sits
2020-03-19 15:10:19 +00:00
best, err := m.index.StorageBestAlloc(ctx, sectorbuilder.FTCache|sectorbuilder.FTSealed, true)
2020-03-05 18:18:33 +00:00
if err != nil {
return nil, xerrors.Errorf("finding path for sector sealing: %w", err)
}
2020-03-23 11:40:02 +00:00
candidateWorkers, _ := m.getWorkersByPaths(sealtasks.TTPreCommit1, best)
if len(candidateWorkers) == 0 {
2020-03-20 22:30:17 +00:00
return nil, xerrors.New("no suitable workers found")
}
2020-03-05 18:18:33 +00:00
2020-03-23 11:40:02 +00:00
worker, done, err := m.getWorker(ctx, sealtasks.TTPreCommit1, candidateWorkers)
2020-03-20 22:30:17 +00:00
if err != nil {
return nil, xerrors.Errorf("scheduling worker: %w", err)
}
defer done()
2020-03-05 18:18:33 +00:00
// TODO: select(candidateWorkers, ...)
// TODO: remove the sectorbuilder abstraction, pass path directly
2020-03-20 22:30:17 +00:00
return worker.SealPreCommit1(ctx, sector, ticket, pieces)
2020-03-03 22:19:22 +00:00
}
2020-03-23 11:40:02 +00:00
func (m *Manager) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.PreCommit1Out) (cids storage.SectorCids, err error) {
2020-03-05 18:18:33 +00:00
// TODO: allow workers to fetch the sectors
best, err := m.index.StorageFindSector(ctx, sector, sectorbuilder.FTCache|sectorbuilder.FTSealed, true)
2020-03-05 18:18:33 +00:00
if err != nil {
2020-03-23 11:40:02 +00:00
return storage.SectorCids{}, xerrors.Errorf("finding path for sector sealing: %w", err)
2020-03-05 18:18:33 +00:00
}
2020-03-23 11:40:02 +00:00
candidateWorkers, _ := m.getWorkersByPaths(sealtasks.TTPreCommit2, best)
if len(candidateWorkers) == 0 {
2020-03-23 11:40:02 +00:00
return storage.SectorCids{}, xerrors.New("no suitable workers found")
2020-03-20 22:30:17 +00:00
}
2020-03-23 11:40:02 +00:00
worker, done, err := m.getWorker(ctx, sealtasks.TTPreCommit2, candidateWorkers)
2020-03-20 22:30:17 +00:00
if err != nil {
2020-03-23 11:40:02 +00:00
return storage.SectorCids{}, xerrors.Errorf("scheduling worker: %w", err)
}
2020-03-20 22:30:17 +00:00
defer done()
2020-03-05 18:18:33 +00:00
// TODO: select(candidateWorkers, ...)
// TODO: remove the sectorbuilder abstraction, pass path directly
2020-03-20 22:30:17 +00:00
return worker.SealPreCommit2(ctx, sector, phase1Out)
2020-03-03 22:19:22 +00:00
}
2020-03-23 11:40:02 +00:00
func (m *Manager) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (output storage.Commit1Out, err error) {
best, err := m.index.StorageFindSector(ctx, sector, sectorbuilder.FTCache|sectorbuilder.FTSealed, true)
2020-03-05 18:18:33 +00:00
if err != nil {
return nil, xerrors.Errorf("finding path for sector sealing: %w", err)
}
2020-03-23 11:40:02 +00:00
candidateWorkers, _ := m.getWorkersByPaths(sealtasks.TTCommit1, best)
if len(candidateWorkers) == 0 {
return nil, xerrors.New("no suitable workers found") // TODO: wait?
}
2020-03-05 18:18:33 +00:00
2020-03-20 22:30:17 +00:00
// TODO: Try very hard to execute on worker with access to the sectors
2020-03-23 11:40:02 +00:00
worker, done, err := m.getWorker(ctx, sealtasks.TTCommit1, candidateWorkers)
2020-03-20 22:30:17 +00:00
if err != nil {
return nil, xerrors.Errorf("scheduling worker: %w", err)
}
defer done()
2020-03-05 18:18:33 +00:00
// TODO: select(candidateWorkers, ...)
// TODO: remove the sectorbuilder abstraction, pass path directly
2020-03-20 22:30:17 +00:00
return worker.SealCommit1(ctx, sector, ticket, seed, pieces, cids)
2020-03-03 22:19:22 +00:00
}
2020-03-23 11:40:02 +00:00
func (m *Manager) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.Commit1Out) (proof storage.Proof, err error) {
2020-03-23 14:56:22 +00:00
var candidateWorkers []WorkerID
2020-03-20 22:30:17 +00:00
m.workersLk.Lock()
for id, worker := range m.workers {
tt, err := worker.w.TaskTypes(ctx)
2020-03-11 01:57:52 +00:00
if err != nil {
log.Errorf("error getting supported worker task types: %+v", err)
continue
}
2020-03-23 11:40:02 +00:00
if _, ok := tt[sealtasks.TTCommit2]; !ok {
2020-03-05 18:18:33 +00:00
continue
}
2020-03-20 22:30:17 +00:00
candidateWorkers = append(candidateWorkers, id)
}
m.workersLk.Unlock()
2020-03-05 18:18:33 +00:00
2020-03-23 11:40:02 +00:00
worker, done, err := m.getWorker(ctx, sealtasks.TTCommit2, candidateWorkers)
2020-03-20 22:30:17 +00:00
if err != nil {
return nil, xerrors.Errorf("scheduling worker: %w", err)
2020-03-05 18:18:33 +00:00
}
2020-03-20 22:30:17 +00:00
defer done()
2020-03-05 18:18:33 +00:00
2020-03-20 22:30:17 +00:00
return worker.SealCommit2(ctx, sector, phase1Out)
2020-03-03 22:19:22 +00:00
}
2020-03-17 20:19:52 +00:00
func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID) error {
best, err := m.index.StorageFindSector(ctx, sector, sectorbuilder.FTCache|sectorbuilder.FTSealed|sectorbuilder.FTUnsealed, true)
2020-03-05 18:18:33 +00:00
if err != nil {
return xerrors.Errorf("finding sealed sector: %w", err)
}
2020-03-23 11:40:02 +00:00
candidateWorkers, _ := m.getWorkersByPaths(sealtasks.TTFinalize, best)
2020-03-05 18:18:33 +00:00
// TODO: Remove sector from sealing stores
2020-03-05 18:18:33 +00:00
// TODO: Move the sector to long-term storage
2020-03-20 22:30:17 +00:00
return m.workers[candidateWorkers[0]].w.FinalizeSector(ctx, sector)
}
2020-03-19 19:51:33 +00:00
// StorageLocal maps each locally attached storage ID to its filesystem path.
func (m *Manager) StorageLocal(ctx context.Context) (map[stores.ID]string, error) {
	local, err := m.localStore.Local(ctx)
	if err != nil {
		return nil, err
	}

	out := make(map[stores.ID]string, len(local))
	for _, p := range local {
		out[p.ID] = p.LocalPath
	}
	return out, nil
}
2020-03-23 11:40:02 +00:00
// Compile-time check that Manager satisfies SectorManager.
var _ SectorManager = &Manager{}