From e40c5d441ddcc13892607e7aeab4e94eb2ea7fb9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 23 Mar 2020 12:40:02 +0100 Subject: [PATCH 001/199] Rename agvmgr+sealmgr to sectorstorage --- manager.go | 410 +++++++++++++++++++++++++++++++++++++++++ mock/mock.go | 371 +++++++++++++++++++++++++++++++++++++ mock/mock_test.go | 45 +++++ mock/preseal.go | 63 +++++++ mock/util.go | 23 +++ resources.go | 135 ++++++++++++++ roprov.go | 25 +++ sched.go | 242 ++++++++++++++++++++++++ sealtasks/task.go | 13 ++ sectorutil/utils.go | 56 ++++++ stores/http_handler.go | 125 +++++++++++++ stores/index.go | 312 +++++++++++++++++++++++++++++++ stores/interface.go | 32 ++++ stores/local.go | 314 +++++++++++++++++++++++++++++++ stores/remote.go | 204 ++++++++++++++++++++ worker_local.go | 198 ++++++++++++++++++++ worker_remote.go | 45 +++++ 17 files changed, 2613 insertions(+) create mode 100644 manager.go create mode 100644 mock/mock.go create mode 100644 mock/mock_test.go create mode 100644 mock/preseal.go create mode 100644 mock/util.go create mode 100644 resources.go create mode 100644 roprov.go create mode 100644 sched.go create mode 100644 sealtasks/task.go create mode 100644 sectorutil/utils.go create mode 100644 stores/http_handler.go create mode 100644 stores/index.go create mode 100644 stores/interface.go create mode 100644 stores/local.go create mode 100644 stores/remote.go create mode 100644 worker_local.go create mode 100644 worker_remote.go diff --git a/manager.go b/manager.go new file mode 100644 index 000000000..08597bd31 --- /dev/null +++ b/manager.go @@ -0,0 +1,410 @@ +package sectorstorage + +import ( + "container/list" + "context" + "io" + "net/http" + "sync" + + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" + "github.com/mitchellh/go-homedir" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-sectorbuilder" + "github.com/filecoin-project/specs-actors/actors/abi" + "github.com/filecoin-project/specs-storage/storage" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/storage/sectorstorage/sealtasks" + "github.com/filecoin-project/lotus/storage/sectorstorage/stores" +) + +var log = logging.Logger("advmgr") + +type URLs []string + +type Worker interface { + sectorbuilder.Sealer + + TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) + + // Returns paths accessible to the worker + Paths(context.Context) ([]stores.StoragePath, error) + + Info(context.Context) (api.WorkerInfo, error) +} + +type SectorManager interface { + SectorSize() abi.SectorSize + + ReadPieceFromSealedSector(context.Context, abi.SectorID, sectorbuilder.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (io.ReadCloser, error) + + sectorbuilder.Sealer + storage.Prover +} + +type workerID uint64 + +type Manager struct { + scfg *sectorbuilder.Config + + ls stores.LocalStorage + storage *stores.Remote + localStore *stores.Local + remoteHnd *stores.FetchHandler + index stores.SectorIndex + + storage.Prover + + workersLk sync.Mutex + nextWorker workerID + workers map[workerID]*workerHandle + + newWorkers chan *workerHandle + schedule chan *workerRequest + workerFree chan workerID + closing chan struct{} + + schedQueue *list.List // List[*workerRequest] +} + +func New(ls stores.LocalStorage, si stores.SectorIndex, cfg *sectorbuilder.Config, urls URLs, ca api.Common) (*Manager, error) { + ctx := context.TODO() + + lstor, err := stores.NewLocal(ctx, ls, si, urls) + if err != nil { + return nil, err + } + + prover, err := sectorbuilder.New(&readonlyProvider{stor: lstor}, cfg) + if err != nil { + return nil, xerrors.Errorf("creating prover instance: %w", err) + } + + token, err := ca.AuthNew(context.TODO(), []api.Permission{"admin"}) + headers := http.Header{} + headers.Add("Authorization", "Bearer "+string(token)) + stor := stores.NewRemote(lstor, si, headers) + + m := &Manager{ + scfg: cfg, + + ls: ls, + storage: stor, + localStore: lstor, + remoteHnd: &stores.FetchHandler{Local: lstor}, + index: si, + + nextWorker: 0, + workers: map[workerID]*workerHandle{}, + + newWorkers: make(chan *workerHandle), + schedule: make(chan *workerRequest), + workerFree: make(chan workerID), + closing: make(chan struct{}), + + schedQueue: list.New(), + + Prover: prover, + } + + go m.runSched() + + err = m.AddWorker(ctx, NewLocalWorker(WorkerConfig{ + SealProof: cfg.SealProofType, + TaskTypes: []sealtasks.TaskType{sealtasks.TTAddPiece, sealtasks.TTCommit1, sealtasks.TTFinalize}, + }, stor, lstor, si)) + if err != nil { + return nil, xerrors.Errorf("adding local worker: %w", err) + } + + return m, nil +} + +func (m *Manager) AddLocalStorage(ctx context.Context, path string) error { + path, err := homedir.Expand(path) + if err != nil { + return xerrors.Errorf("expanding local path: %w", err) + } + + if err := m.localStore.OpenPath(ctx, path); err != nil { + return xerrors.Errorf("opening local path: %w", err) + } + + if err := m.ls.SetStorage(func(sc *config.StorageConfig) { + sc.StoragePaths = append(sc.StoragePaths, config.LocalPath{Path: path}) + }); err != nil { + return xerrors.Errorf("get storage config: %w", err) + } + return nil +} + +func (m *Manager) AddWorker(ctx context.Context, w Worker) error { + info, err := w.Info(ctx) + if err != nil { + return xerrors.Errorf("getting worker info: %w", err) + } + + m.newWorkers <- &workerHandle{ + w: w, + info: info, + } + return nil +} + +func (m *Manager) ServeHTTP(w http.ResponseWriter, r *http.Request) { + m.remoteHnd.ServeHTTP(w, r) +} + +func (m *Manager) SectorSize() abi.SectorSize { + sz, _ := m.scfg.SealProofType.SectorSize() + return sz +} + +func (m *Manager) ReadPieceFromSealedSector(context.Context, abi.SectorID, sectorbuilder.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (io.ReadCloser, error) { + panic("implement me") +} + +func (m *Manager) getWorkersByPaths(task sealtasks.TaskType, inPaths []stores.StorageInfo) ([]workerID, map[workerID]stores.StorageInfo) { + m.workersLk.Lock() + defer m.workersLk.Unlock() + + var workers []workerID + paths := map[workerID]stores.StorageInfo{} + + for i, worker := range m.workers { + tt, err := worker.w.TaskTypes(context.TODO()) + if err != nil { + log.Errorf("error getting supported worker task types: %+v", err) + continue + } + if _, ok := tt[task]; !ok { + log.Debugf("dropping worker %d; task %s not supported (supports %v)", i, task, tt) + continue + } + + phs, err := worker.w.Paths(context.TODO()) + if err != nil { + log.Errorf("error getting worker paths: %+v", err) + continue + } + + // check if the worker has access to the path we selected + var st *stores.StorageInfo + for _, p := range phs { + for _, meta := range inPaths { + if p.ID == meta.ID { + if st != nil && st.Weight > p.Weight { + continue + } + + p := meta // copy + st = &p + } + } + } + if st == nil { + log.Debugf("skipping worker %d; doesn't have any of %v", i, inPaths) + log.Debugf("skipping worker %d; only has %v", i, phs) + continue + } + + paths[i] = *st + workers = append(workers, i) + } + + return workers, paths +} + +func (m *Manager) getWorker(ctx context.Context, taskType sealtasks.TaskType, accept []workerID) (Worker, func(), error) { + ret := make(chan workerResponse) + + select { + case m.schedule <- &workerRequest{ + taskType: taskType, + accept: accept, + + cancel: ctx.Done(), + ret: ret, + }: + case <-m.closing: + return nil, nil, xerrors.New("closing") + case <-ctx.Done(): + return nil, nil, ctx.Err() + } + + select { + case resp := <-ret: + return resp.worker, resp.done, resp.err + case <-m.closing: + return nil, nil, xerrors.New("closing") + case <-ctx.Done(): + return nil, nil, ctx.Err() + } +} + +func (m *Manager) NewSector(ctx context.Context, sector abi.SectorID) error { + log.Warnf("stub NewSector") + return nil +} + +func (m *Manager) AddPiece(ctx context.Context, sector abi.SectorID, existingPieces []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) { + // TODO: consider multiple paths vs workers when initially allocating + + var best []stores.StorageInfo + var err error + if len(existingPieces) == 0 { // new + best, err = m.index.StorageBestAlloc(ctx, sectorbuilder.FTUnsealed, true) + } else { // append to existing + best, err = m.index.StorageFindSector(ctx, sector, sectorbuilder.FTUnsealed, false) + } + if err != nil { + return abi.PieceInfo{}, xerrors.Errorf("finding sector path: %w", err) + } + + log.Debugf("find workers for %v", best) + candidateWorkers, _ := m.getWorkersByPaths(sealtasks.TTAddPiece, best) + + if len(candidateWorkers) == 0 { + return abi.PieceInfo{}, xerrors.New("no worker found") + } + + worker, done, err := m.getWorker(ctx, sealtasks.TTAddPiece, candidateWorkers) + if err != nil { + return abi.PieceInfo{}, xerrors.Errorf("scheduling worker: %w", err) + } + defer done() + + // TODO: select(candidateWorkers, ...) + // TODO: remove the sectorbuilder abstraction, pass path directly + return worker.AddPiece(ctx, sector, existingPieces, sz, r) +} + +func (m *Manager) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) { + // TODO: also consider where the unsealed data sits + + best, err := m.index.StorageBestAlloc(ctx, sectorbuilder.FTCache|sectorbuilder.FTSealed, true) + if err != nil { + return nil, xerrors.Errorf("finding path for sector sealing: %w", err) + } + + candidateWorkers, _ := m.getWorkersByPaths(sealtasks.TTPreCommit1, best) + if len(candidateWorkers) == 0 { + return nil, xerrors.New("no suitable workers found") + } + + worker, done, err := m.getWorker(ctx, sealtasks.TTPreCommit1, candidateWorkers) + if err != nil { + return nil, xerrors.Errorf("scheduling worker: %w", err) + } + defer done() + + // TODO: select(candidateWorkers, ...) + // TODO: remove the sectorbuilder abstraction, pass path directly + return worker.SealPreCommit1(ctx, sector, ticket, pieces) +} + +func (m *Manager) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.PreCommit1Out) (cids storage.SectorCids, err error) { + // TODO: allow workers to fetch the sectors + + best, err := m.index.StorageFindSector(ctx, sector, sectorbuilder.FTCache|sectorbuilder.FTSealed, true) + if err != nil { + return storage.SectorCids{}, xerrors.Errorf("finding path for sector sealing: %w", err) + } + + candidateWorkers, _ := m.getWorkersByPaths(sealtasks.TTPreCommit2, best) + if len(candidateWorkers) == 0 { + return storage.SectorCids{}, xerrors.New("no suitable workers found") + } + + worker, done, err := m.getWorker(ctx, sealtasks.TTPreCommit2, candidateWorkers) + if err != nil { + return storage.SectorCids{}, xerrors.Errorf("scheduling worker: %w", err) + } + defer done() + + // TODO: select(candidateWorkers, ...) + // TODO: remove the sectorbuilder abstraction, pass path directly + return worker.SealPreCommit2(ctx, sector, phase1Out) +} + +func (m *Manager) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (output storage.Commit1Out, err error) { + best, err := m.index.StorageFindSector(ctx, sector, sectorbuilder.FTCache|sectorbuilder.FTSealed, true) + if err != nil { + return nil, xerrors.Errorf("finding path for sector sealing: %w", err) + } + + candidateWorkers, _ := m.getWorkersByPaths(sealtasks.TTCommit1, best) + if len(candidateWorkers) == 0 { + return nil, xerrors.New("no suitable workers found") // TODO: wait? + } + + // TODO: Try very hard to execute on worker with access to the sectors + worker, done, err := m.getWorker(ctx, sealtasks.TTCommit1, candidateWorkers) + if err != nil { + return nil, xerrors.Errorf("scheduling worker: %w", err) + } + defer done() + + // TODO: select(candidateWorkers, ...) + // TODO: remove the sectorbuilder abstraction, pass path directly + return worker.SealCommit1(ctx, sector, ticket, seed, pieces, cids) +} + +func (m *Manager) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.Commit1Out) (proof storage.Proof, err error) { + var candidateWorkers []workerID + + m.workersLk.Lock() + for id, worker := range m.workers { + tt, err := worker.w.TaskTypes(ctx) + if err != nil { + log.Errorf("error getting supported worker task types: %+v", err) + continue + } + if _, ok := tt[sealtasks.TTCommit2]; !ok { + continue + } + candidateWorkers = append(candidateWorkers, id) + } + m.workersLk.Unlock() + + worker, done, err := m.getWorker(ctx, sealtasks.TTCommit2, candidateWorkers) + if err != nil { + return nil, xerrors.Errorf("scheduling worker: %w", err) + } + defer done() + + return worker.SealCommit2(ctx, sector, phase1Out) +} + +func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID) error { + best, err := m.index.StorageFindSector(ctx, sector, sectorbuilder.FTCache|sectorbuilder.FTSealed|sectorbuilder.FTUnsealed, true) + if err != nil { + return xerrors.Errorf("finding sealed sector: %w", err) + } + + candidateWorkers, _ := m.getWorkersByPaths(sealtasks.TTFinalize, best) + + // TODO: Remove sector from sealing stores + // TODO: Move the sector to long-term storage + return m.workers[candidateWorkers[0]].w.FinalizeSector(ctx, sector) +} + +func (m *Manager) StorageLocal(ctx context.Context) (map[stores.ID]string, error) { + l, err := m.localStore.Local(ctx) + if err != nil { + return nil, err + } + + out := map[stores.ID]string{} + for _, st := range l { + out[st.ID] = st.LocalPath + } + + return out, nil +} + +var _ SectorManager = &Manager{} diff --git a/mock/mock.go b/mock/mock.go new file mode 100644 index 000000000..fdaae7f80 --- /dev/null +++ b/mock/mock.go @@ -0,0 +1,371 @@ +package mock + +import ( + "bytes" + "context" + "fmt" + "github.com/filecoin-project/lotus/storage/sectorstorage" + "io" + "io/ioutil" + "math/big" + "math/rand" + "sync" + + commcid "github.com/filecoin-project/go-fil-commcid" + "github.com/filecoin-project/go-sectorbuilder" + "github.com/filecoin-project/specs-actors/actors/abi" + "github.com/filecoin-project/specs-storage/storage" + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/api" +) + +var log = logging.Logger("sbmock") + +type SectorMgr struct { + sectors map[abi.SectorID]*sectorState + sectorSize abi.SectorSize + nextSectorID abi.SectorNumber + rateLimit chan struct{} + proofType abi.RegisteredProof + + lk sync.Mutex +} + +type mockVerif struct{} + +func NewMockSectorMgr(threads int, ssize abi.SectorSize) *SectorMgr { + rt, _, err := api.ProofTypeFromSectorSize(ssize) + if err != nil { + panic(err) + } + + return &SectorMgr{ + sectors: make(map[abi.SectorID]*sectorState), + sectorSize: ssize, + nextSectorID: 5, + rateLimit: make(chan struct{}, threads), + proofType: rt, + } +} + +const ( + statePacking = iota + statePreCommit + stateCommit +) + +type sectorState struct { + pieces []cid.Cid + failed bool + + state int + + lk sync.Mutex +} + +func (sb *SectorMgr) RateLimit() func() { + sb.rateLimit <- struct{}{} + + // TODO: probably want to copy over rate limit code + return func() { + <-sb.rateLimit + } +} + +func (sb *SectorMgr) NewSector(ctx context.Context, sector abi.SectorID) error { + return nil +} + +func (sb *SectorMgr) AddPiece(ctx context.Context, sectorId abi.SectorID, existingPieces []abi.UnpaddedPieceSize, size abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) { + log.Warn("Add piece: ", sectorId, size, sb.proofType) + sb.lk.Lock() + ss, ok := sb.sectors[sectorId] + if !ok { + ss = §orState{ + state: statePacking, + } + sb.sectors[sectorId] = ss + } + sb.lk.Unlock() + ss.lk.Lock() + defer ss.lk.Unlock() + + c, err := sectorbuilder.GeneratePieceCIDFromFile(sb.proofType, r, size) + if err != nil { + return abi.PieceInfo{}, xerrors.Errorf("failed to generate piece cid: %w", err) + } + + log.Warn("Generated Piece CID: ", c) + + ss.pieces = append(ss.pieces, c) + return abi.PieceInfo{ + Size: size.Padded(), + PieceCID: c, + }, nil +} + +func (sb *SectorMgr) SectorSize() abi.SectorSize { + return sb.sectorSize +} + +func (sb *SectorMgr) AcquireSectorNumber() (abi.SectorNumber, error) { + sb.lk.Lock() + defer sb.lk.Unlock() + id := sb.nextSectorID + sb.nextSectorID++ + return id, nil +} + +func (sb *SectorMgr) SealPreCommit1(ctx context.Context, sid abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) { + sb.lk.Lock() + ss, ok := sb.sectors[sid] + sb.lk.Unlock() + if !ok { + return nil, xerrors.Errorf("no sector with id %d in sectorbuilder", sid) + } + + ss.lk.Lock() + defer ss.lk.Unlock() + + ussize := abi.PaddedPieceSize(sb.sectorSize).Unpadded() + + // TODO: verify pieces in sinfo.pieces match passed in pieces + + var sum abi.UnpaddedPieceSize + for _, p := range pieces { + sum += p.Size.Unpadded() + } + + if sum != ussize { + return nil, xerrors.Errorf("aggregated piece sizes don't match up: %d != %d", sum, ussize) + } + + if ss.state != statePacking { + return nil, xerrors.Errorf("cannot call pre-seal on sector not in 'packing' state") + } + + opFinishWait(ctx) + + ss.state = statePreCommit + + pis := make([]abi.PieceInfo, len(ss.pieces)) + for i, piece := range ss.pieces { + pis[i] = abi.PieceInfo{ + Size: pieces[i].Size, + PieceCID: piece, + } + } + + commd, err := MockVerifier.GenerateDataCommitment(abi.PaddedPieceSize(sb.sectorSize), pis) + if err != nil { + return nil, err + } + + cc, _, err := commcid.CIDToCommitment(commd) + if err != nil { + panic(err) + } + + cc[0] ^= 'd' + + return cc, nil +} + +func (sb *SectorMgr) SealPreCommit2(ctx context.Context, sid abi.SectorID, phase1Out storage.PreCommit1Out) (cids storage.SectorCids, err error) { + db := []byte(string(phase1Out)) + db[0] ^= 'd' + + d := commcid.DataCommitmentV1ToCID(db) + + commr := make([]byte, 32) + for i := range db { + commr[32-(i+1)] = db[i] + } + + commR := commcid.DataCommitmentV1ToCID(commr) + + return storage.SectorCids{ + Unsealed: d, + Sealed: commR, + }, nil +} + +func (sb *SectorMgr) SealCommit1(ctx context.Context, sid abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (output storage.Commit1Out, err error) { + sb.lk.Lock() + ss, ok := sb.sectors[sid] + sb.lk.Unlock() + if !ok { + return nil, xerrors.Errorf("no such sector %d", sid) + } + ss.lk.Lock() + defer ss.lk.Unlock() + + if ss.failed { + return nil, xerrors.Errorf("[mock] cannot commit failed sector %d", sid) + } + + if ss.state != statePreCommit { + return nil, xerrors.Errorf("cannot commit sector that has not been precommitted") + } + + opFinishWait(ctx) + + var out [32]byte + for i := range out { + out[i] = cids.Unsealed.Bytes()[i] + cids.Sealed.Bytes()[31-i] - ticket[i]*seed[i] ^ byte(sid.Number&0xff) + } + + return out[:], nil +} + +func (sb *SectorMgr) SealCommit2(ctx context.Context, sid abi.SectorID, phase1Out storage.Commit1Out) (proof storage.Proof, err error) { + var out [32]byte + for i := range out { + out[i] = phase1Out[i] ^ byte(sid.Number&0xff) + } + + return out[:], nil +} + +// Test Instrumentation Methods + +func (sb *SectorMgr) FailSector(sid abi.SectorID) error { + sb.lk.Lock() + defer sb.lk.Unlock() + ss, ok := sb.sectors[sid] + if !ok { + return fmt.Errorf("no such sector in sectorbuilder") + } + + ss.failed = true + return nil +} + +func opFinishWait(ctx context.Context) { + val, ok := ctx.Value("opfinish").(chan struct{}) + if !ok { + return + } + <-val +} + +func AddOpFinish(ctx context.Context) (context.Context, func()) { + done := make(chan struct{}) + + return context.WithValue(ctx, "opfinish", done), func() { + close(done) + } +} + +func (sb *SectorMgr) GenerateFallbackPoSt(context.Context, abi.ActorID, []abi.SectorInfo, abi.PoStRandomness, []abi.SectorNumber) (storage.FallbackPostOut, error) { + panic("implement me") +} + +func (sb *SectorMgr) ComputeElectionPoSt(ctx context.Context, mid abi.ActorID, sectorInfo []abi.SectorInfo, challengeSeed abi.PoStRandomness, winners []abi.PoStCandidate) ([]abi.PoStProof, error) { + panic("implement me") +} + +func (sb *SectorMgr) GenerateEPostCandidates(ctx context.Context, mid abi.ActorID, sectorInfo []abi.SectorInfo, challengeSeed abi.PoStRandomness, faults []abi.SectorNumber) ([]storage.PoStCandidateWithTicket, error) { + if len(faults) > 0 { + panic("todo") + } + + n := sectorbuilder.ElectionPostChallengeCount(uint64(len(sectorInfo)), uint64(len(faults))) + if n > uint64(len(sectorInfo)) { + n = uint64(len(sectorInfo)) + } + + out := make([]storage.PoStCandidateWithTicket, n) + + seed := big.NewInt(0).SetBytes(challengeSeed[:]) + start := seed.Mod(seed, big.NewInt(int64(len(sectorInfo)))).Int64() + + for i := range out { + out[i] = storage.PoStCandidateWithTicket{ + Candidate: abi.PoStCandidate{ + SectorID: abi.SectorID{ + Number: abi.SectorNumber((int(start) + i) % len(sectorInfo)), + Miner: mid, + }, + PartialTicket: abi.PartialTicket(challengeSeed), + }, + } + } + + return out, nil +} + +func (sb *SectorMgr) ReadPieceFromSealedSector(ctx context.Context, sectorID abi.SectorID, offset sectorbuilder.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, commD cid.Cid) (io.ReadCloser, error) { + if len(sb.sectors[sectorID].pieces) > 1 { + panic("implme") + } + return ioutil.NopCloser(io.LimitReader(bytes.NewReader(sb.sectors[sectorID].pieces[0].Bytes()[offset:]), int64(size))), nil +} + +func (sb *SectorMgr) StageFakeData(mid abi.ActorID) (abi.SectorID, []abi.PieceInfo, error) { + usize := abi.PaddedPieceSize(sb.sectorSize).Unpadded() + sid, err := sb.AcquireSectorNumber() + if err != nil { + return abi.SectorID{}, nil, err + } + + buf := make([]byte, usize) + rand.Read(buf) + + id := abi.SectorID{ + Miner: mid, + Number: sid, + } + + pi, err := sb.AddPiece(context.TODO(), id, nil, usize, bytes.NewReader(buf)) + if err != nil { + return abi.SectorID{}, nil, err + } + + return id, []abi.PieceInfo{pi}, nil +} + +func (sb *SectorMgr) FinalizeSector(context.Context, abi.SectorID) error { + return nil +} + +func (m mockVerif) VerifyElectionPost(ctx context.Context, pvi abi.PoStVerifyInfo) (bool, error) { + panic("implement me") +} + +func (m mockVerif) VerifyFallbackPost(ctx context.Context, pvi abi.PoStVerifyInfo) (bool, error) { + panic("implement me") +} + +func (m mockVerif) VerifySeal(svi abi.SealVerifyInfo) (bool, error) { + if len(svi.OnChain.Proof) != 32 { // Real ones are longer, but this should be fine + return false, nil + } + + for i, b := range svi.OnChain.Proof { + if b != svi.UnsealedCID.Bytes()[i]+svi.OnChain.SealedCID.Bytes()[31-i]-svi.InteractiveRandomness[i]*svi.Randomness[i] { + return false, nil + } + } + + return true, nil +} + +func (m mockVerif) GenerateDataCommitment(ssize abi.PaddedPieceSize, pieces []abi.PieceInfo) (cid.Cid, error) { + if len(pieces) != 1 { + panic("todo") + } + if pieces[0].Size != ssize { + fmt.Println("wrong sizes? ", pieces[0].Size, ssize) + panic("todo") + } + return pieces[0].PieceCID, nil +} + +var MockVerifier = mockVerif{} + +var _ sectorbuilder.Verifier = MockVerifier +var _ sectorstorage.SectorManager = &SectorMgr{} diff --git a/mock/mock_test.go b/mock/mock_test.go new file mode 100644 index 000000000..524e8d615 --- /dev/null +++ b/mock/mock_test.go @@ -0,0 +1,45 @@ +package mock + +import ( + "context" + "testing" + "time" + + "github.com/filecoin-project/specs-actors/actors/abi" +) + +func TestOpFinish(t *testing.T) { + sb := NewMockSectorMgr(1, 2048) + + sid, pieces, err := sb.StageFakeData(123) + if err != nil { + t.Fatal(err) + } + + ctx, done := AddOpFinish(context.TODO()) + + finished := make(chan struct{}) + go func() { + _, err := sb.SealPreCommit1(ctx, sid, abi.SealRandomness{}, pieces) + if err != nil { + t.Error(err) + return + } + + close(finished) + }() + + select { + case <-finished: + t.Fatal("should not finish until we tell it to") + case <-time.After(time.Second / 2): + } + + done() + + select { + case <-finished: + case <-time.After(time.Second / 2): + t.Fatal("should finish after we tell it to") + } +} diff --git a/mock/preseal.go b/mock/preseal.go new file mode 100644 index 000000000..6bac0aaea --- /dev/null +++ b/mock/preseal.go @@ -0,0 +1,63 @@ +package mock + +import ( + "github.com/filecoin-project/go-address" + commcid "github.com/filecoin-project/go-fil-commcid" + "github.com/filecoin-project/go-sectorbuilder" + "github.com/filecoin-project/specs-actors/actors/abi" + "github.com/filecoin-project/specs-actors/actors/abi/big" + "github.com/filecoin-project/specs-actors/actors/builtin/market" + "github.com/filecoin-project/specs-actors/actors/crypto" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/wallet" + "github.com/filecoin-project/lotus/genesis" +) + +func PreSeal(ssize abi.SectorSize, maddr address.Address, sectors int) (*genesis.Miner, *types.KeyInfo, error) { + k, err := wallet.GenerateKey(crypto.SigTypeBLS) + if err != nil { + return nil, nil, err + } + + genm := &genesis.Miner{ + Owner: k.Address, + Worker: k.Address, + MarketBalance: big.NewInt(0), + PowerBalance: big.NewInt(0), + SectorSize: ssize, + Sectors: make([]*genesis.PreSeal, sectors), + } + + _, st, err := api.ProofTypeFromSectorSize(ssize) + if err != nil { + return nil, nil, err + } + + for i := range genm.Sectors { + preseal := &genesis.PreSeal{} + + preseal.ProofType = st + preseal.CommD = sectorbuilder.ZeroPieceCommitment(abi.PaddedPieceSize(ssize).Unpadded()) + d, _ := commcid.CIDToPieceCommitmentV1(preseal.CommD) + r := commDR(d) + preseal.CommR = commcid.ReplicaCommitmentV1ToCID(r[:]) + preseal.SectorID = abi.SectorNumber(i + 1) + preseal.Deal = market.DealProposal{ + PieceCID: preseal.CommD, + PieceSize: abi.PaddedPieceSize(ssize), + Client: maddr, + Provider: maddr, + StartEpoch: 1, + EndEpoch: 10000, + StoragePricePerEpoch: big.Zero(), + ProviderCollateral: big.Zero(), + ClientCollateral: big.Zero(), + } + + genm.Sectors[i] = preseal + } + + return genm, &k.KeyInfo, nil +} diff --git a/mock/util.go b/mock/util.go new file mode 100644 index 000000000..e37cf3552 --- /dev/null +++ b/mock/util.go @@ -0,0 +1,23 @@ +package mock + +import ( + "crypto/rand" + "io" + "io/ioutil" +) + +func randB(n uint64) []byte { + b, err := ioutil.ReadAll(io.LimitReader(rand.Reader, int64(n))) + if err != nil { + panic(err) + } + return b +} + +func commDR(in []byte) (out [32]byte) { + for i, b := range in { + out[i] = ^b + } + + return out +} diff --git a/resources.go b/resources.go new file mode 100644 index 000000000..3587b41ea --- /dev/null +++ b/resources.go @@ -0,0 +1,135 @@ +package sectorstorage + +import ( + "github.com/filecoin-project/go-sectorbuilder" + "github.com/filecoin-project/lotus/storage/sectorstorage/sealtasks" + "github.com/filecoin-project/specs-actors/actors/abi" +) + +var FSOverheadSeal = map[sectorbuilder.SectorFileType]int{ // 10x overheads + sectorbuilder.FTUnsealed: 10, + sectorbuilder.FTSealed: 10, + sectorbuilder.FTCache: 70, // TODO: confirm for 32G +} + +var FsOverheadFinalized = map[sectorbuilder.SectorFileType]int{ + sectorbuilder.FTUnsealed: 10, + sectorbuilder.FTSealed: 10, + sectorbuilder.FTCache: 2, +} + +type Resources struct { + MinMemory uint64 // What Must be in RAM for decent perf + MaxMemory uint64 // Mamory required (swap + ram) + + MultiThread bool + CanGPU bool + + BaseMinMemory uint64 // What Must be in RAM for decent perf (shared between threads) +} + +const MaxCachingOverhead = 32 << 30 + +var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ + sealtasks.TTAddPiece: { + abi.RegisteredProof_StackedDRG32GiBSeal: Resources{ // This is probably a bit conservative + MaxMemory: 32 << 30, + MinMemory: 32 << 30, + + MultiThread: false, + + BaseMinMemory: 1 << 30, + }, + abi.RegisteredProof_StackedDRG512MiBSeal: Resources{ + MaxMemory: 1 << 30, + MinMemory: 1 << 30, + + MultiThread: false, + + BaseMinMemory: 1 << 30, + }, + }, + sealtasks.TTPreCommit1: { + abi.RegisteredProof_StackedDRG32GiBSeal: Resources{ + MaxMemory: 64 << 30, + MinMemory: 32 << 30, + + MultiThread: false, + + BaseMinMemory: 30 << 30, + }, + abi.RegisteredProof_StackedDRG512MiBSeal: Resources{ + MaxMemory: 3 << 29, // 1.5G + MinMemory: 1 << 30, + + MultiThread: false, + + BaseMinMemory: 1 << 30, + }, + }, + sealtasks.TTPreCommit2: { + abi.RegisteredProof_StackedDRG32GiBSeal: Resources{ + MaxMemory: 96 << 30, + MinMemory: 64 << 30, + + MultiThread: true, + + BaseMinMemory: 30 << 30, + }, + abi.RegisteredProof_StackedDRG512MiBSeal: Resources{ + MaxMemory: 3 << 29, // 1.5G + MinMemory: 1 << 30, + + MultiThread: true, + + BaseMinMemory: 1 << 30, + }, + }, + sealtasks.TTCommit1: { // Very short (~100ms), so params are very light + abi.RegisteredProof_StackedDRG32GiBSeal: Resources{ + MaxMemory: 1 << 30, + MinMemory: 1 << 30, + + MultiThread: false, + + BaseMinMemory: 1 << 30, + }, + abi.RegisteredProof_StackedDRG512MiBSeal: Resources{ + MaxMemory: 1 << 30, + MinMemory: 1 << 30, + + MultiThread: false, + + BaseMinMemory: 1 << 30, + }, + }, + sealtasks.TTCommit2: { // TODO: Measure more accurately + abi.RegisteredProof_StackedDRG32GiBSeal: Resources{ + MaxMemory: 110 << 30, + MinMemory: 60 << 30, + + MultiThread: true, + CanGPU: true, + + BaseMinMemory: 64 << 30, // params + }, + abi.RegisteredProof_StackedDRG512MiBSeal: Resources{ + MaxMemory: 3 << 29, // 1.5G + MinMemory: 1 << 30, + + MultiThread: false, // This is fine + CanGPU: true, + + BaseMinMemory: 10 << 30, + }, + }, +} + +func init() { + // for now we just reuse params for 2kib and 8mib from 512mib + + for taskType := range ResourceTable { + ResourceTable[taskType][abi.RegisteredProof_StackedDRG8MiBSeal] = ResourceTable[taskType][abi.RegisteredProof_StackedDRG512MiBSeal] + ResourceTable[taskType][abi.RegisteredProof_StackedDRG2KiBSeal] = ResourceTable[taskType][abi.RegisteredProof_StackedDRG512MiBSeal] + } +} diff --git a/roprov.go b/roprov.go new file mode 100644 index 000000000..99723e181 --- /dev/null +++ b/roprov.go @@ -0,0 +1,25 @@ +package sectorstorage + +import ( + "context" + + "github.com/filecoin-project/go-sectorbuilder" + "github.com/filecoin-project/lotus/storage/sectorstorage/stores" + + "github.com/filecoin-project/specs-actors/actors/abi" + "golang.org/x/xerrors" +) + +type readonlyProvider struct { + stor *stores.Local +} + +func (l *readonlyProvider) AcquireSector(ctx context.Context, id abi.SectorID, existing sectorbuilder.SectorFileType, allocate sectorbuilder.SectorFileType, sealing bool) (sectorbuilder.SectorPaths, func(), error) { + if allocate != 0 { + return sectorbuilder.SectorPaths{}, nil, xerrors.New("read-only storage") + } + + p, _, done, err := l.stor.AcquireSector(ctx, id, existing, allocate, sealing) + + return p, done, err +} diff --git a/sched.go b/sched.go new file mode 100644 index 000000000..adf2e6cd3 --- /dev/null +++ b/sched.go @@ -0,0 +1,242 @@ +package sectorstorage + +import ( + "github.com/filecoin-project/lotus/storage/sectorstorage/sealtasks" + "github.com/filecoin-project/specs-actors/actors/abi" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/api" +) + +const mib = 1 << 20 + +type workerRequest struct { + taskType sealtasks.TaskType + accept []workerID // ordered by preference + + ret chan<- workerResponse + cancel <-chan struct{} +} + +type workerResponse struct { + err error + + worker Worker + done func() +} + +func (r *workerRequest) respond(resp workerResponse) { + select { + case r.ret <- resp: + case <-r.cancel: + log.Warnf("request got cancelled before we could respond") + if resp.done != nil { + resp.done() + } + } +} + +type workerHandle struct { + w Worker + + info api.WorkerInfo + + memUsedMin uint64 + memUsedMax uint64 + gpuUsed bool + cpuUse int // -1 - multicore thing; 0 - free; 1+ - singlecore things +} + +func (m *Manager) runSched() { + for { + select { + case w := <-m.newWorkers: + m.schedNewWorker(w) + case req := <-m.schedule: + resp, err := m.maybeSchedRequest(req) + if err != nil { + req.respond(workerResponse{err: err}) + continue + } + + if resp != nil { + req.respond(*resp) + continue + } + + m.schedQueue.PushBack(req) + case wid := <-m.workerFree: + m.onWorkerFreed(wid) + } + } +} + +func (m *Manager) onWorkerFreed(wid workerID) { + for e := m.schedQueue.Front(); e != nil; e = e.Next() { + req := e.Value.(*workerRequest) + var ok bool + for _, id := range req.accept { + if id == wid { + ok = true + break + } + } + if !ok { + continue + } + + resp, err := m.maybeSchedRequest(req) + if err != nil { + req.respond(workerResponse{err: err}) + continue + } + + if resp != nil { + req.respond(*resp) + + pe := e.Prev() + m.schedQueue.Remove(e) + if pe == nil { + pe = m.schedQueue.Front() + } + if pe == nil { + break + } + e = pe + continue + } + } +} + +func (m *Manager) maybeSchedRequest(req *workerRequest) (*workerResponse, error) { + m.workersLk.Lock() + defer m.workersLk.Unlock() + + tried := 0 + + for _, id := range req.accept { + w, ok := m.workers[id] + if !ok { + log.Warnf("requested worker %d is not in scheduler", id) + } + tried++ + + canDo, err := m.canHandleRequest(id, w, req) + if err != nil { + return nil, err + } + + if !canDo { + continue + } + + return m.makeResponse(id, w, req), nil + } + + if tried == 0 { + return nil, xerrors.New("maybeSchedRequest didn't find any good workers") + } + + return nil, nil // put in waiting queue +} + +func (m *Manager) makeResponse(wid workerID, w *workerHandle, req *workerRequest) *workerResponse { + needRes := ResourceTable[req.taskType][m.scfg.SealProofType] + + w.gpuUsed = needRes.CanGPU + if needRes.MultiThread { + w.cpuUse = -1 + } else { + if w.cpuUse != -1 { + w.cpuUse++ + } else { + log.Warnf("sched: makeResponse for worker %d: worker cpu is in multicore use, but a single core task was scheduled", wid) + } + } + + w.memUsedMin += needRes.MinMemory + w.memUsedMax += needRes.MaxMemory + + return &workerResponse{ + err: nil, + worker: w.w, + done: func() { + m.workersLk.Lock() + + if needRes.CanGPU { + w.gpuUsed = false + } + + if needRes.MultiThread { + w.cpuUse = 0 + } else if w.cpuUse != -1 { + w.cpuUse-- + } + + w.memUsedMin -= needRes.MinMemory + w.memUsedMax -= needRes.MaxMemory + + m.workersLk.Unlock() + + select { + case m.workerFree <- wid: + case <-m.closing: + } + }, + } +} + +func (m *Manager) canHandleRequest(wid workerID, w *workerHandle, req *workerRequest) (bool, error) { + needRes, ok := ResourceTable[req.taskType][m.scfg.SealProofType] + if !ok { + return false, xerrors.Errorf("canHandleRequest: missing ResourceTable entry for %s/%d", req.taskType, m.scfg.SealProofType) + } + + res := w.info.Resources + + // TODO: dedupe needRes.BaseMinMemory per task type (don't add if that task is already running) + minNeedMem := res.MemReserved + w.memUsedMin + needRes.MinMemory + needRes.BaseMinMemory + if minNeedMem > res.MemPhysical { + log.Debugf("sched: not scheduling on worker %d; not enough physical memory - need: %dM, have %dM", wid, minNeedMem/mib, res.MemPhysical/mib) + return false, nil + } + + maxNeedMem := res.MemReserved + w.memUsedMax + needRes.MaxMemory + needRes.BaseMinMemory + if m.scfg.SealProofType == abi.RegisteredProof_StackedDRG32GiBSeal { + maxNeedMem += MaxCachingOverhead + } + if maxNeedMem > res.MemSwap+res.MemPhysical { + log.Debugf("sched: not scheduling on worker %d; not enough virtual memory - need: %dM, have %dM", wid, maxNeedMem/mib, (res.MemSwap+res.MemPhysical)/mib) + return false, nil + } + + if needRes.MultiThread { + if w.cpuUse != 0 { + log.Debugf("sched: not scheduling on worker %d; multicore process needs free CPU", wid) + return false, nil + } + } else { + if w.cpuUse == -1 { + log.Debugf("sched: not scheduling on worker %d; CPU in use by a multicore process", wid) + return false, nil + } + } + + if len(res.GPUs) > 0 && needRes.CanGPU { + if w.gpuUsed { + log.Debugf("sched: not scheduling on worker %d; GPU in use", wid) + return false, nil + } + } + + return true, nil +} + +func (m *Manager) schedNewWorker(w *workerHandle) { + m.workersLk.Lock() + defer m.workersLk.Unlock() + + id := m.nextWorker + m.workers[id] = w + m.nextWorker++ +} diff --git a/sealtasks/task.go b/sealtasks/task.go new file mode 100644 index 000000000..8eefa14fa --- /dev/null +++ b/sealtasks/task.go @@ -0,0 +1,13 @@ +package sealtasks + +type TaskType string + +const ( + TTAddPiece TaskType = "seal/v0/addpiece" + TTPreCommit1 TaskType = "seal/v0/precommit/1" + TTPreCommit2 TaskType = "seal/v0/precommit/2" + TTCommit1 TaskType = "seal/v0/commit/1" // NOTE: We use this to transfer the sector into miner-local storage for now; Don't use on workers! + TTCommit2 TaskType = "seal/v0/commit/2" + + TTFinalize TaskType = "seal/v0/finalize" +) diff --git a/sectorutil/utils.go b/sectorutil/utils.go new file mode 100644 index 000000000..01862b7b4 --- /dev/null +++ b/sectorutil/utils.go @@ -0,0 +1,56 @@ +package sectorutil + +import ( + "fmt" + "github.com/filecoin-project/go-sectorbuilder" + + "golang.org/x/xerrors" + + "github.com/filecoin-project/specs-actors/actors/abi" +) + +func ParseSectorID(baseName string) (abi.SectorID, error) { + var n abi.SectorNumber + var mid abi.ActorID + read, err := fmt.Sscanf(baseName, "s-t0%d-%d", &mid, &n) + if err != nil { + return abi.SectorID{}, xerrors.Errorf(": %w", err) + } + + if read != 2 { + return abi.SectorID{}, xerrors.Errorf("parseSectorID expected to scan 2 values, got %d", read) + } + + return abi.SectorID{ + Miner: mid, + Number: n, + }, nil +} + +func SectorName(sid abi.SectorID) string { + return fmt.Sprintf("s-t0%d-%d", sid.Miner, sid.Number) +} + +func PathByType(sps sectorbuilder.SectorPaths, fileType sectorbuilder.SectorFileType) string { + switch fileType { + case sectorbuilder.FTUnsealed: + return sps.Unsealed + case sectorbuilder.FTSealed: + return sps.Sealed + case sectorbuilder.FTCache: + return sps.Cache + } + + panic("requested unknown path type") +} + +func SetPathByType(sps *sectorbuilder.SectorPaths, fileType sectorbuilder.SectorFileType, p string) { + switch fileType { + case sectorbuilder.FTUnsealed: + sps.Unsealed = p + case sectorbuilder.FTSealed: + sps.Sealed = p + case sectorbuilder.FTCache: + sps.Cache = p + } +} diff --git a/stores/http_handler.go b/stores/http_handler.go new file mode 100644 index 000000000..daa81061e --- /dev/null +++ b/stores/http_handler.go @@ -0,0 +1,125 @@ +package stores + +import ( + "io" + "net/http" + "os" + + "github.com/gorilla/mux" + logging "github.com/ipfs/go-log/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-sectorbuilder" + "github.com/filecoin-project/lotus/lib/tarutil" + "github.com/filecoin-project/lotus/storage/sectorstorage/sectorutil" +) + +var log = logging.Logger("stores") + +type FetchHandler struct { + *Local +} + +func (handler *FetchHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // /remote/ + mux := mux.NewRouter() + + mux.HandleFunc("/remote/{type}/{id}", handler.remoteGetSector).Methods("GET") + mux.HandleFunc("/remote/{type}/{id}", handler.remoteDeleteSector).Methods("DELETE") + + mux.ServeHTTP(w, r) +} + +func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Request) { + log.Infof("SERVE GET %s", r.URL) + vars := mux.Vars(r) + + id, err := sectorutil.ParseSectorID(vars["id"]) + if err != nil { + log.Error("%+v", err) + w.WriteHeader(500) + return + } + + ft, err := ftFromString(vars["type"]) + if err != nil { + log.Error("%+v", err) + return + } + paths, _, done, err := handler.Local.AcquireSector(r.Context(), id, ft, 0, false) + if err != nil { + log.Error("%+v", err) + return + } + defer done() + + path := sectorutil.PathByType(paths, ft) + if path == "" { + log.Error("acquired path was empty") + w.WriteHeader(500) + return + } + + stat, err := os.Stat(path) + if err != nil { + log.Error("%+v", err) + w.WriteHeader(500) + return + } + + var rd io.Reader + if stat.IsDir() { + rd, err = tarutil.TarDirectory(path) + w.Header().Set("Content-Type", "application/x-tar") + } else { + rd, err = os.OpenFile(path, os.O_RDONLY, 0644) + w.Header().Set("Content-Type", "application/octet-stream") + } + if err != nil { + log.Error("%+v", err) + w.WriteHeader(500) + return + } + + w.WriteHeader(200) + if _, err := io.Copy(w, rd); err != nil { // TODO: default 32k buf may be too small + log.Error("%+v", err) + return + } +} + +func (handler *FetchHandler) remoteDeleteSector(w http.ResponseWriter, r *http.Request) { + log.Infof("SERVE DELETE %s", r.URL) + vars := mux.Vars(r) + + id, err := sectorutil.ParseSectorID(vars["id"]) + if err != nil { + log.Error("%+v", err) + w.WriteHeader(500) + return + } + + ft, err := ftFromString(vars["type"]) + if err != nil { + log.Error("%+v", err) + return + } + + if err := handler.delete(r.Context(), id, ft); err != nil { + log.Error("%+v", err) + w.WriteHeader(500) + return + } +} + +func ftFromString(t string) (sectorbuilder.SectorFileType, error) { + switch t { + case sectorbuilder.FTUnsealed.String(): + return sectorbuilder.FTUnsealed, nil + case sectorbuilder.FTSealed.String(): + return sectorbuilder.FTSealed, nil + case sectorbuilder.FTCache.String(): + return sectorbuilder.FTCache, nil + default: + return 0, xerrors.Errorf("unknown sector file type: '%s'", t) + } +} diff --git a/stores/index.go b/stores/index.go new file mode 100644 index 000000000..e508171b7 --- /dev/null +++ b/stores/index.go @@ -0,0 +1,312 @@ +package stores + +import ( + "context" + "net/url" + gopath "path" + "sort" + "sync" + + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-sectorbuilder" + "github.com/filecoin-project/specs-actors/actors/abi" + "github.com/filecoin-project/specs-actors/actors/abi/big" + + "github.com/filecoin-project/lotus/storage/sectorstorage/sectorutil" +) + +// ID identifies sector storage by UUID. One sector storage should map to one +// filesystem, local or networked / shared by multiple machines +type ID string + +type StorageInfo struct { + ID ID + URLs []string // TODO: Support non-http transports + Weight uint64 + + CanSeal bool + CanStore bool +} + +type SectorIndex interface { // part of storage-miner api + StorageAttach(context.Context, StorageInfo, FsStat) error + StorageInfo(context.Context, ID) (StorageInfo, error) + // TODO: StorageUpdateStats(FsStat) + + StorageDeclareSector(ctx context.Context, storageId ID, s abi.SectorID, ft sectorbuilder.SectorFileType) error + StorageDropSector(ctx context.Context, storageId ID, s abi.SectorID, ft sectorbuilder.SectorFileType) error + StorageFindSector(ctx context.Context, sector abi.SectorID, ft sectorbuilder.SectorFileType, allowFetch bool) ([]StorageInfo, error) + + StorageBestAlloc(ctx context.Context, allocate sectorbuilder.SectorFileType, sealing bool) ([]StorageInfo, error) +} + +type Decl struct { + abi.SectorID + sectorbuilder.SectorFileType +} + +type storageEntry struct { + info *StorageInfo + fsi FsStat +} + +type Index struct { + lk sync.RWMutex + + sectors map[Decl][]ID + stores map[ID]*storageEntry +} + +func NewIndex() *Index { + return &Index{ + sectors: map[Decl][]ID{}, + stores: map[ID]*storageEntry{}, + } +} + +func (i *Index) StorageList(ctx context.Context) (map[ID][]Decl, error) { + byID := map[ID]map[abi.SectorID]sectorbuilder.SectorFileType{} + + for id := range i.stores { + byID[id] = map[abi.SectorID]sectorbuilder.SectorFileType{} + } + for decl, ids := range i.sectors { + for _, id := range ids { + byID[id][decl.SectorID] |= decl.SectorFileType + } + } + + out := map[ID][]Decl{} + for id, m := range byID { + out[id] = []Decl{} + for sectorID, fileType := range m { + out[id] = append(out[id], Decl{ + SectorID: sectorID, + SectorFileType: fileType, + }) + } + } + + return out, nil +} + +func (i *Index) StorageAttach(ctx context.Context, si StorageInfo, st FsStat) error { + i.lk.Lock() + defer i.lk.Unlock() + + log.Infof("New sector storage: %s", si.ID) + + if _, ok := i.stores[si.ID]; ok { + for _, u := range si.URLs { + if _, err := url.Parse(u); err != nil { + return xerrors.Errorf("failed to parse url %s: %w", si.URLs, err) + } + } + + i.stores[si.ID].info.URLs = append(i.stores[si.ID].info.URLs, si.URLs...) + return nil + } + i.stores[si.ID] = &storageEntry{ + info: &si, + fsi: st, + } + return nil +} + +func (i *Index) StorageDeclareSector(ctx context.Context, storageId ID, s abi.SectorID, ft sectorbuilder.SectorFileType) error { + i.lk.Lock() + defer i.lk.Unlock() + + for _, fileType := range pathTypes { + if fileType&ft == 0 { + continue + } + + d := Decl{s, fileType} + + for _, sid := range i.sectors[d] { + if sid == storageId { + log.Warnf("sector %v redeclared in %s", storageId) + return nil + } + } + + i.sectors[d] = append(i.sectors[d], storageId) + } + + return nil +} + +func (i *Index) StorageDropSector(ctx context.Context, storageId ID, s abi.SectorID, ft sectorbuilder.SectorFileType) error { + i.lk.Lock() + defer i.lk.Unlock() + + for _, fileType := range pathTypes { + if fileType&ft == 0 { + continue + } + + d := Decl{s, fileType} + + if len(i.sectors[d]) == 0 { + return nil + } + + rewritten := make([]ID, 0, len(i.sectors[d])-1) + for _, sid := range i.sectors[d] { + if sid == storageId { + continue + } + + rewritten = append(rewritten, sid) + } + if len(rewritten) == 0 { + delete(i.sectors, d) + return nil + } + + i.sectors[d] = rewritten + } + + return nil +} + +func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft sectorbuilder.SectorFileType, allowFetch bool) ([]StorageInfo, error) { + i.lk.RLock() + defer i.lk.RUnlock() + + storageIDs := map[ID]uint64{} + + for _, pathType := range pathTypes { + if ft&pathType == 0 { + continue + } + + for _, id := range i.sectors[Decl{s, pathType}] { + storageIDs[id]++ + } + } + + out := make([]StorageInfo, 0, len(storageIDs)) + + for id, n := range storageIDs { + st, ok := i.stores[id] + if !ok { + log.Warnf("storage %s is not present in sector index (referenced by sector %v)", id, s) + continue + } + + urls := make([]string, len(st.info.URLs)) + for k, u := range st.info.URLs { + rl, err := url.Parse(u) + if err != nil { + return nil, xerrors.Errorf("failed to parse url: %w", err) + } + + rl.Path = gopath.Join(rl.Path, ft.String(), sectorutil.SectorName(s)) + urls[k] = rl.String() + } + + out = append(out, StorageInfo{ + ID: id, + URLs: urls, + Weight: st.info.Weight * n, // storage with more sector types is better + CanSeal: st.info.CanSeal, + CanStore: st.info.CanStore, + }) + } + + if allowFetch { + for id, st := range i.stores { + if _, ok := storageIDs[id]; ok { + continue + } + + urls := make([]string, len(st.info.URLs)) + for k, u := range st.info.URLs { + rl, err := url.Parse(u) + if err != nil { + return nil, xerrors.Errorf("failed to parse url: %w", err) + } + + rl.Path = gopath.Join(rl.Path, ft.String(), sectorutil.SectorName(s)) + urls[k] = rl.String() + } + + out = append(out, StorageInfo{ + ID: id, + URLs: urls, + Weight: st.info.Weight * 0, // TODO: something better than just '0' + CanSeal: st.info.CanSeal, + CanStore: st.info.CanStore, + }) + } + } + + return out, nil +} + +func (i *Index) StorageInfo(ctx context.Context, id ID) (StorageInfo, error) { + i.lk.RLock() + defer i.lk.RUnlock() + + si, found := i.stores[id] + if !found { + return StorageInfo{}, xerrors.Errorf("sector store not found") + } + + return *si.info, nil +} + +func (i *Index) StorageBestAlloc(ctx context.Context, allocate sectorbuilder.SectorFileType, sealing bool) ([]StorageInfo, error) { + i.lk.RLock() + defer i.lk.RUnlock() + + var candidates []storageEntry + + for _, p := range i.stores { + if sealing && !p.info.CanSeal { + log.Debugf("alloc: not considering %s; can't seal", p.info.ID) + continue + } + if !sealing && !p.info.CanStore { + log.Debugf("alloc: not considering %s; can't store", p.info.ID) + continue + } + + // TODO: filter out of space + + candidates = append(candidates, *p) + } + + if len(candidates) == 0 { + return nil, xerrors.New("no good path found") + } + + sort.Slice(candidates, func(i, j int) bool { + iw := big.Mul(big.NewInt(int64(candidates[i].fsi.Free)), big.NewInt(int64(candidates[i].info.Weight))) + jw := big.Mul(big.NewInt(int64(candidates[j].fsi.Free)), big.NewInt(int64(candidates[j].info.Weight))) + + return iw.GreaterThan(jw) + }) + + out := make([]StorageInfo, len(candidates)) + for i, candidate := range candidates { + out[i] = *candidate.info + } + + return out, nil +} + +func (i *Index) FindSector(id abi.SectorID, typ sectorbuilder.SectorFileType) ([]ID, error) { + i.lk.RLock() + defer i.lk.RUnlock() + + return i.sectors[Decl{ + SectorID: id, + SectorFileType: typ, + }], nil +} + +var _ SectorIndex = &Index{} diff --git a/stores/interface.go b/stores/interface.go new file mode 100644 index 000000000..67c18b16e --- /dev/null +++ b/stores/interface.go @@ -0,0 +1,32 @@ +package stores + +import ( + "context" + "syscall" + + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-sectorbuilder" + "github.com/filecoin-project/specs-actors/actors/abi" +) + +type Store interface { + AcquireSector(ctx context.Context, s abi.SectorID, existing sectorbuilder.SectorFileType, allocate sectorbuilder.SectorFileType, sealing bool) (paths sectorbuilder.SectorPaths, stores sectorbuilder.SectorPaths, done func(), err error) +} + +type FsStat struct { + Capacity uint64 + Free uint64 // Free to use for sector storage +} + +func Stat(path string) (FsStat, error) { + var stat syscall.Statfs_t + if err := syscall.Statfs(path, &stat); err != nil { + return FsStat{}, xerrors.Errorf("statfs: %w", err) + } + + return FsStat{ + Capacity: stat.Blocks * uint64(stat.Bsize), + Free: stat.Bavail * uint64(stat.Bsize), + }, nil +} diff --git a/stores/local.go b/stores/local.go new file mode 100644 index 000000000..a8eb53ee8 --- /dev/null +++ b/stores/local.go @@ -0,0 +1,314 @@ +package stores + +import ( + "context" + "encoding/json" + "io/ioutil" + "math/bits" + "os" + "path/filepath" + "sync" + + "github.com/filecoin-project/specs-actors/actors/abi" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-sectorbuilder" + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/storage/sectorstorage/sectorutil" +) + +type StoragePath struct { + ID ID + Weight uint64 + + LocalPath string + + CanSeal bool + CanStore bool +} + +// [path]/sectorstore.json +type LocalStorageMeta struct { + ID ID + Weight uint64 // 0 = readonly + + CanSeal bool + CanStore bool +} + +type LocalStorage interface { + GetStorage() (config.StorageConfig, error) + SetStorage(func(*config.StorageConfig)) error +} + +const MetaFile = "sectorstore.json" + +var pathTypes = []sectorbuilder.SectorFileType{sectorbuilder.FTUnsealed, sectorbuilder.FTSealed, sectorbuilder.FTCache} + +type Local struct { + localStorage LocalStorage + index SectorIndex + urls []string + + paths map[ID]*path + + localLk sync.RWMutex +} + +type path struct { + local string // absolute local path +} + +func NewLocal(ctx context.Context, ls LocalStorage, index SectorIndex, urls []string) (*Local, error) { + l := &Local{ + localStorage: ls, + index: index, + urls: urls, + + paths: map[ID]*path{}, + } + return l, l.open(ctx) +} + +func (st *Local) OpenPath(ctx context.Context, p string) error { + st.localLk.Lock() + defer st.localLk.Unlock() + + mb, err := ioutil.ReadFile(filepath.Join(p, MetaFile)) + if err != nil { + return xerrors.Errorf("reading storage metadata for %s: %w", p, err) + } + + var meta LocalStorageMeta + if err := json.Unmarshal(mb, &meta); err != nil { + return xerrors.Errorf("unmarshalling storage metadata for %s: %w", p, err) + } + + // TODO: Check existing / dedupe + + out := &path{ + local: p, + } + + fst, err := Stat(p) + if err != nil { + return err + } + + err = st.index.StorageAttach(ctx, StorageInfo{ + ID: meta.ID, + URLs: st.urls, + Weight: meta.Weight, + CanSeal: meta.CanSeal, + CanStore: meta.CanStore, + }, fst) + if err != nil { + return xerrors.Errorf("declaring storage in index: %w", err) + } + + for _, t := range pathTypes { + ents, err := ioutil.ReadDir(filepath.Join(p, t.String())) + if err != nil { + if os.IsNotExist(err) { + if err := os.MkdirAll(filepath.Join(p, t.String()), 0755); err != nil { + return xerrors.Errorf("openPath mkdir '%s': %w", filepath.Join(p, t.String()), err) + } + + continue + } + return xerrors.Errorf("listing %s: %w", filepath.Join(p, t.String()), err) + } + + for _, ent := range ents { + sid, err := sectorutil.ParseSectorID(ent.Name()) + if err != nil { + return xerrors.Errorf("parse sector id %s: %w", ent.Name(), err) + } + + if err := st.index.StorageDeclareSector(ctx, meta.ID, sid, t); err != nil { + return xerrors.Errorf("declare sector %d(t:%d) -> %s: %w", sid, t, meta.ID, err) + } + } + } + + st.paths[meta.ID] = out + + return nil +} + +func (st *Local) open(ctx context.Context) error { + cfg, err := st.localStorage.GetStorage() + if err != nil { + return xerrors.Errorf("getting local storage config: %w", err) + } + + for _, path := range cfg.StoragePaths { + err := st.OpenPath(ctx, path.Path) + if err != nil { + return xerrors.Errorf("opening path %s: %w", path.Path, err) + } + } + + return nil +} + +func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, existing sectorbuilder.SectorFileType, allocate sectorbuilder.SectorFileType, sealing bool) (sectorbuilder.SectorPaths, sectorbuilder.SectorPaths, func(), error) { + if existing|allocate != existing^allocate { + return sectorbuilder.SectorPaths{}, sectorbuilder.SectorPaths{}, nil, xerrors.New("can't both find and allocate a sector") + } + + st.localLk.RLock() + + var out sectorbuilder.SectorPaths + var storageIDs sectorbuilder.SectorPaths + + for _, fileType := range pathTypes { + if fileType&existing == 0 { + continue + } + + si, err := st.index.StorageFindSector(ctx, sid, fileType, false) + if err != nil { + log.Warnf("finding existing sector %d(t:%d) failed: %+v", sid, fileType, err) + continue + } + + for _, info := range si { + p, ok := st.paths[info.ID] + if !ok { + continue + } + + if p.local == "" { // TODO: can that even be the case? + continue + } + + spath := filepath.Join(p.local, fileType.String(), sectorutil.SectorName(sid)) + sectorutil.SetPathByType(&out, fileType, spath) + sectorutil.SetPathByType(&storageIDs, fileType, string(info.ID)) + + existing ^= fileType + break + } + } + + for _, fileType := range pathTypes { + if fileType&allocate == 0 { + continue + } + + sis, err := st.index.StorageBestAlloc(ctx, fileType, sealing) + if err != nil { + st.localLk.RUnlock() + return sectorbuilder.SectorPaths{}, sectorbuilder.SectorPaths{}, nil, xerrors.Errorf("finding best storage for allocating : %w", err) + } + + var best string + var bestID ID + + for _, si := range sis { + p, ok := st.paths[si.ID] + if !ok { + continue + } + + if p.local == "" { // TODO: can that even be the case? + continue + } + + if sealing && !si.CanSeal { + continue + } + + if !sealing && !si.CanStore { + continue + } + + // TODO: Check free space + + best = filepath.Join(p.local, fileType.String(), sectorutil.SectorName(sid)) + bestID = si.ID + } + + if best == "" { + st.localLk.RUnlock() + return sectorbuilder.SectorPaths{}, sectorbuilder.SectorPaths{}, nil, xerrors.Errorf("couldn't find a suitable path for a sector") + } + + sectorutil.SetPathByType(&out, fileType, best) + sectorutil.SetPathByType(&storageIDs, fileType, string(bestID)) + allocate ^= fileType + } + + return out, storageIDs, st.localLk.RUnlock, nil +} + +func (st *Local) Local(ctx context.Context) ([]StoragePath, error) { + st.localLk.RLock() + defer st.localLk.RUnlock() + + var out []StoragePath + for id, p := range st.paths { + if p.local == "" { + continue + } + + si, err := st.index.StorageInfo(ctx, id) + if err != nil { + return nil, xerrors.Errorf("get storage info for %s: %w", id, err) + } + + out = append(out, StoragePath{ + ID: id, + Weight: si.Weight, + LocalPath: p.local, + CanSeal: si.CanSeal, + CanStore: si.CanStore, + }) + } + + return out, nil +} + +func (st *Local) delete(ctx context.Context, sid abi.SectorID, typ sectorbuilder.SectorFileType) error { + if bits.OnesCount(uint(typ)) != 1 { + return xerrors.New("delete expects one file type") + } + + si, err := st.index.StorageFindSector(ctx, sid, typ, false) + if err != nil { + return xerrors.Errorf("finding existing sector %d(t:%d) failed: %w", sid, typ, err) + } + + for _, info := range si { + p, ok := st.paths[info.ID] + if !ok { + continue + } + + if p.local == "" { // TODO: can that even be the case? + continue + } + + spath := filepath.Join(p.local, typ.String(), sectorutil.SectorName(sid)) + log.Infof("remove %s", spath) + + if err := os.RemoveAll(spath); err != nil { + log.Errorf("removing sector (%v) from %s: %+v", sid, spath, err) + } + } + + return nil +} + +func (st *Local) FsStat(id ID) (FsStat, error) { + st.localLk.RLock() + defer st.localLk.RUnlock() + + p, ok := st.paths[id] + if !ok { + return FsStat{}, xerrors.Errorf("fsstat: path not found") + } + + return Stat(p.local) +} diff --git a/stores/remote.go b/stores/remote.go new file mode 100644 index 000000000..7bbd6d225 --- /dev/null +++ b/stores/remote.go @@ -0,0 +1,204 @@ +package stores + +import ( + "context" + "mime" + "net/http" + "os" + "sort" + "sync" + + "github.com/hashicorp/go-multierror" + files "github.com/ipfs/go-ipfs-files" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-sectorbuilder" + "github.com/filecoin-project/specs-actors/actors/abi" + + "github.com/filecoin-project/lotus/lib/tarutil" + "github.com/filecoin-project/lotus/storage/sectorstorage/sectorutil" +) + +type Remote struct { + local *Local + index SectorIndex + auth http.Header + + fetchLk sync.Mutex // TODO: this can be much smarter + // TODO: allow multiple parallel fetches + // (make sure to not fetch the same sector data twice) +} + +func NewRemote(local *Local, index SectorIndex, auth http.Header) *Remote { + return &Remote{ + local: local, + index: index, + auth: auth, + } +} + +func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, existing sectorbuilder.SectorFileType, allocate sectorbuilder.SectorFileType, sealing bool) (sectorbuilder.SectorPaths, sectorbuilder.SectorPaths, func(), error) { + if existing|allocate != existing^allocate { + return sectorbuilder.SectorPaths{}, sectorbuilder.SectorPaths{}, nil, xerrors.New("can't both find and allocate a sector") + } + + r.fetchLk.Lock() + defer r.fetchLk.Unlock() + + paths, stores, done, err := r.local.AcquireSector(ctx, s, existing, allocate, sealing) + if err != nil { + return sectorbuilder.SectorPaths{}, sectorbuilder.SectorPaths{}, nil, xerrors.Errorf("local acquire error: %w", err) + } + + for _, fileType := range pathTypes { + if fileType&existing == 0 { + continue + } + + if sectorutil.PathByType(paths, fileType) != "" { + continue + } + + ap, storageID, url, foundIn, rdone, err := r.acquireFromRemote(ctx, s, fileType, sealing) + if err != nil { + done() + return sectorbuilder.SectorPaths{}, sectorbuilder.SectorPaths{}, nil, err + } + + done = mergeDone(done, rdone) + sectorutil.SetPathByType(&paths, fileType, ap) + sectorutil.SetPathByType(&stores, fileType, string(storageID)) + + if err := r.index.StorageDeclareSector(ctx, storageID, s, fileType); err != nil { + log.Warnf("declaring sector %v in %s failed: %+v", s, storageID, err) + continue + } + + // TODO: some way to allow having duplicated sectors in the system for perf + if err := r.index.StorageDropSector(ctx, foundIn, s, fileType); err != nil { + log.Warnf("dropping sector %v from %s from sector index failed: %+v", s, storageID, err) + } + + if err := r.deleteFromRemote(url); err != nil { + log.Warnf("deleting sector %v from %s (delete %s): %+v", s, storageID, url, err) + } + } + + return paths, stores, done, nil +} + +func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, fileType sectorbuilder.SectorFileType, sealing bool) (string, ID, string, ID, func(), error) { + si, err := r.index.StorageFindSector(ctx, s, fileType, false) + if err != nil { + return "", "", "", "", nil, err + } + + sort.Slice(si, func(i, j int) bool { + return si[i].Weight < si[j].Weight + }) + + apaths, ids, done, err := r.local.AcquireSector(ctx, s, 0, fileType, sealing) + if err != nil { + return "", "", "", "", nil, xerrors.Errorf("allocate local sector for fetching: %w", err) + } + dest := sectorutil.PathByType(apaths, fileType) + storageID := sectorutil.PathByType(ids, fileType) + + var merr error + for _, info := range si { + for _, url := range info.URLs { + err := r.fetch(url, dest) + if err != nil { + merr = multierror.Append(merr, xerrors.Errorf("fetch error %s (storage %s) -> %s: %w", url, info.ID, dest, err)) + continue + } + + if merr != nil { + log.Warnw("acquireFromRemote encountered errors when fetching sector from remote", "errors", merr) + } + return dest, ID(storageID), url, info.ID, done, nil + } + } + + done() + return "", "", "", "", nil, xerrors.Errorf("failed to acquire sector %v from remote (tried %v): %w", s, si, merr) +} + +func (r *Remote) fetch(url, outname string) error { + log.Infof("Fetch %s -> %s", url, outname) + + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return xerrors.Errorf("request: %w", err) + } + req.Header = r.auth + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return xerrors.Errorf("do request: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + return xerrors.Errorf("non-200 code: %d", resp.StatusCode) + } + + /*bar := pb.New64(w.sizeForType(typ)) + bar.ShowPercent = true + bar.ShowSpeed = true + bar.Units = pb.U_BYTES + + barreader := bar.NewProxyReader(resp.Body) + + bar.Start() + defer bar.Finish()*/ + + mediatype, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type")) + if err != nil { + return xerrors.Errorf("parse media type: %w", err) + } + + if err := os.RemoveAll(outname); err != nil { + return xerrors.Errorf("removing dest: %w", err) + } + + switch mediatype { + case "application/x-tar": + return tarutil.ExtractTar(resp.Body, outname) + case "application/octet-stream": + return files.WriteTo(files.NewReaderFile(resp.Body), outname) + default: + return xerrors.Errorf("unknown content type: '%s'", mediatype) + } +} + +func (r *Remote) deleteFromRemote(url string) error { + log.Infof("Delete %s", url) + + req, err := http.NewRequest("DELETE", url, nil) + if err != nil { + return xerrors.Errorf("request: %w", err) + } + req.Header = r.auth + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return xerrors.Errorf("do request: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + return xerrors.Errorf("non-200 code: %d", resp.StatusCode) + } + + return nil +} + +func mergeDone(a func(), b func()) func() { + return func() { + a() + b() + } +} + +var _ Store = &Remote{} diff --git a/worker_local.go b/worker_local.go new file mode 100644 index 000000000..d691f150e --- /dev/null +++ b/worker_local.go @@ -0,0 +1,198 @@ +package sectorstorage + +import ( + "context" + "io" + "os" + + "github.com/elastic/go-sysinfo" + "golang.org/x/xerrors" + + ffi "github.com/filecoin-project/filecoin-ffi" + "github.com/filecoin-project/go-sectorbuilder" + "github.com/filecoin-project/specs-actors/actors/abi" + storage2 "github.com/filecoin-project/specs-storage/storage" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/storage/sectorstorage/sealtasks" + "github.com/filecoin-project/lotus/storage/sectorstorage/sectorutil" + "github.com/filecoin-project/lotus/storage/sectorstorage/stores" +) + +var pathTypes = []sectorbuilder.SectorFileType{sectorbuilder.FTUnsealed, sectorbuilder.FTSealed, sectorbuilder.FTCache} + +type WorkerConfig struct { + SealProof abi.RegisteredProof + TaskTypes []sealtasks.TaskType +} + +type LocalWorker struct { + scfg *sectorbuilder.Config + storage stores.Store + localStore *stores.Local + sindex stores.SectorIndex + + acceptTasks map[sealtasks.TaskType]struct{} +} + +func NewLocalWorker(wcfg WorkerConfig, store stores.Store, local *stores.Local, sindex stores.SectorIndex) *LocalWorker { + ppt, err := wcfg.SealProof.RegisteredPoStProof() + if err != nil { + panic(err) + } + + acceptTasks := map[sealtasks.TaskType]struct{}{} + for _, taskType := range wcfg.TaskTypes { + acceptTasks[taskType] = struct{}{} + } + + return &LocalWorker{ + scfg: §orbuilder.Config{ + SealProofType: wcfg.SealProof, + PoStProofType: ppt, + }, + storage: store, + localStore: local, + sindex: sindex, + + acceptTasks: acceptTasks, + } +} + +type localWorkerPathProvider struct { + w *LocalWorker +} + +func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector abi.SectorID, existing sectorbuilder.SectorFileType, allocate sectorbuilder.SectorFileType, sealing bool) (sectorbuilder.SectorPaths, func(), error) { + paths, storageIDs, done, err := l.w.storage.AcquireSector(ctx, sector, existing, allocate, sealing) + if err != nil { + return sectorbuilder.SectorPaths{}, nil, err + } + + log.Debugf("acquired sector %d (e:%d; a:%d): %v", sector, existing, allocate, paths) + + return paths, func() { + done() + + for _, fileType := range pathTypes { + if fileType&allocate == 0 { + continue + } + + sid := sectorutil.PathByType(storageIDs, fileType) + + if err := l.w.sindex.StorageDeclareSector(ctx, stores.ID(sid), sector, fileType); err != nil { + log.Errorf("declare sector error: %+v", err) + } + } + }, nil +} + +func (l *LocalWorker) sb() (sectorbuilder.Basic, error) { + return sectorbuilder.New(&localWorkerPathProvider{w: l}, l.scfg) +} + +func (l *LocalWorker) NewSector(ctx context.Context, sector abi.SectorID) error { + sb, err := l.sb() + if err != nil { + return err + } + + return sb.NewSector(ctx, sector) +} + +func (l *LocalWorker) AddPiece(ctx context.Context, sector abi.SectorID, epcs []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) { + sb, err := l.sb() + if err != nil { + return abi.PieceInfo{}, err + } + + return sb.AddPiece(ctx, sector, epcs, sz, r) +} + +func (l *LocalWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage2.PreCommit1Out, err error) { + sb, err := l.sb() + if err != nil { + return nil, err + } + + return sb.SealPreCommit1(ctx, sector, ticket, pieces) +} + +func (l *LocalWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage2.PreCommit1Out) (cids storage2.SectorCids, err error) { + sb, err := l.sb() + if err != nil { + return storage2.SectorCids{}, err + } + + return sb.SealPreCommit2(ctx, sector, phase1Out) +} + +func (l *LocalWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage2.SectorCids) (output storage2.Commit1Out, err error) { + sb, err := l.sb() + if err != nil { + return nil, err + } + + return sb.SealCommit1(ctx, sector, ticket, seed, pieces, cids) +} + +func (l *LocalWorker) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage2.Commit1Out) (proof storage2.Proof, err error) { + sb, err := l.sb() + if err != nil { + return nil, err + } + + return sb.SealCommit2(ctx, sector, phase1Out) +} + +func (l *LocalWorker) FinalizeSector(ctx context.Context, sector abi.SectorID) error { + sb, err := l.sb() + if err != nil { + return err + } + + return sb.FinalizeSector(ctx, sector) +} + +func (l *LocalWorker) TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) { + return l.acceptTasks, nil +} + +func (l *LocalWorker) Paths(ctx context.Context) ([]stores.StoragePath, error) { + return l.localStore.Local(ctx) +} + +func (l *LocalWorker) Info(context.Context) (api.WorkerInfo, error) { + hostname, err := os.Hostname() // TODO: allow overriding from config + if err != nil { + panic(err) + } + + gpus, err := ffi.GetGPUDevices() + if err != nil { + log.Errorf("getting gpu devices failed: %+v", err) + } + + h, err := sysinfo.Host() + if err != nil { + return api.WorkerInfo{}, xerrors.Errorf("getting host info: %w", err) + } + + mem, err := h.Memory() + if err != nil { + return api.WorkerInfo{}, xerrors.Errorf("getting memory info: %w", err) + } + + return api.WorkerInfo{ + Hostname: hostname, + Resources: api.WorkerResources{ + MemPhysical: mem.Total, + MemSwap: mem.VirtualTotal, + MemReserved: mem.VirtualUsed + mem.Total - mem.Available, // TODO: sub this process + GPUs: gpus, + }, + }, nil +} + +var _ Worker = &LocalWorker{} diff --git a/worker_remote.go b/worker_remote.go new file mode 100644 index 000000000..f49ea4dc6 --- /dev/null +++ b/worker_remote.go @@ -0,0 +1,45 @@ +package sectorstorage + +import ( + "context" + "net/http" + + "github.com/filecoin-project/specs-actors/actors/abi" + storage2 "github.com/filecoin-project/specs-storage/storage" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/client" +) + +type remote struct { + api.WorkerApi +} + +func (r *remote) NewSector(ctx context.Context, sector abi.SectorID) error { + return xerrors.New("unsupported") +} + +func (r *remote) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage2.Data) (abi.PieceInfo, error) { + return abi.PieceInfo{}, xerrors.New("unsupported") +} + +func ConnectRemote(ctx context.Context, fa api.Common, url string) (*remote, error) { + token, err := fa.AuthNew(ctx, []api.Permission{"admin"}) + if err != nil { + return nil, xerrors.Errorf("creating auth token for remote connection: %w", err) + } + + headers := http.Header{} + headers.Add("Authorization", "Bearer "+string(token)) + + wapi, close, err := client.NewWorkerRPC(url, headers) + if err != nil { + return nil, xerrors.Errorf("creating jsonrpc client: %w", err) + } + _ = close // TODO + + return &remote{wapi}, nil +} + +var _ Worker = &remote{} From 15e9d6ba5a829e874552a7dcdd0576e5576f7667 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 23 Mar 2020 13:12:43 +0100 Subject: [PATCH 002/199] sector index: Don't add duplicate storage URLs --- stores/index.go | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/stores/index.go b/stores/index.go index e508171b7..da7c8212e 100644 --- a/stores/index.go +++ b/stores/index.go @@ -104,7 +104,17 @@ func (i *Index) StorageAttach(ctx context.Context, si StorageInfo, st FsStat) er } } - i.stores[si.ID].info.URLs = append(i.stores[si.ID].info.URLs, si.URLs...) + uloop: + for _, u := range si.URLs { + for _, l := range i.stores[si.ID].info.URLs { + if u == l { + continue uloop + } + } + + i.stores[si.ID].info.URLs = append(i.stores[si.ID].info.URLs, u) + } + return nil } i.stores[si.ID] = &storageEntry{ @@ -127,7 +137,7 @@ func (i *Index) StorageDeclareSector(ctx context.Context, storageId ID, s abi.Se for _, sid := range i.sectors[d] { if sid == storageId { - log.Warnf("sector %v redeclared in %s", storageId) + log.Warnf("sector %v redeclared in %s", s, storageId) return nil } } From 68e279b3997a0ca7db196ee929c23b0497a67826 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 23 Mar 2020 15:56:22 +0100 Subject: [PATCH 003/199] workers: Basic monitoring tools --- manager.go | 22 +++++++++++----------- sched.go | 8 ++++---- stats.go | 22 ++++++++++++++++++++++ 3 files changed, 37 insertions(+), 15 deletions(-) create mode 100644 stats.go diff --git a/manager.go b/manager.go index 08597bd31..d465948b2 100644 --- a/manager.go +++ b/manager.go @@ -46,7 +46,7 @@ type SectorManager interface { storage.Prover } -type workerID uint64 +type WorkerID uint64 type Manager struct { scfg *sectorbuilder.Config @@ -60,12 +60,12 @@ type Manager struct { storage.Prover workersLk sync.Mutex - nextWorker workerID - workers map[workerID]*workerHandle + nextWorker WorkerID + workers map[WorkerID]*workerHandle newWorkers chan *workerHandle schedule chan *workerRequest - workerFree chan workerID + workerFree chan WorkerID closing chan struct{} schedQueue *list.List // List[*workerRequest] @@ -99,11 +99,11 @@ func New(ls stores.LocalStorage, si stores.SectorIndex, cfg *sectorbuilder.Confi index: si, nextWorker: 0, - workers: map[workerID]*workerHandle{}, + workers: map[WorkerID]*workerHandle{}, newWorkers: make(chan *workerHandle), schedule: make(chan *workerRequest), - workerFree: make(chan workerID), + workerFree: make(chan WorkerID), closing: make(chan struct{}), schedQueue: list.New(), @@ -168,12 +168,12 @@ func (m *Manager) ReadPieceFromSealedSector(context.Context, abi.SectorID, secto panic("implement me") } -func (m *Manager) getWorkersByPaths(task sealtasks.TaskType, inPaths []stores.StorageInfo) ([]workerID, map[workerID]stores.StorageInfo) { +func (m *Manager) getWorkersByPaths(task sealtasks.TaskType, inPaths []stores.StorageInfo) ([]WorkerID, map[WorkerID]stores.StorageInfo) { m.workersLk.Lock() defer m.workersLk.Unlock() - var workers []workerID - paths := map[workerID]stores.StorageInfo{} + var workers []WorkerID + paths := map[WorkerID]stores.StorageInfo{} for i, worker := range m.workers { tt, err := worker.w.TaskTypes(context.TODO()) @@ -219,7 +219,7 @@ func (m *Manager) getWorkersByPaths(task sealtasks.TaskType, inPaths []stores.St return workers, paths } -func (m *Manager) getWorker(ctx context.Context, taskType sealtasks.TaskType, accept []workerID) (Worker, func(), error) { +func (m *Manager) getWorker(ctx context.Context, taskType sealtasks.TaskType, accept []WorkerID) (Worker, func(), error) { ret := make(chan workerResponse) select { @@ -355,7 +355,7 @@ func (m *Manager) SealCommit1(ctx context.Context, sector abi.SectorID, ticket a } func (m *Manager) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.Commit1Out) (proof storage.Proof, err error) { - var candidateWorkers []workerID + var candidateWorkers []WorkerID m.workersLk.Lock() for id, worker := range m.workers { diff --git a/sched.go b/sched.go index adf2e6cd3..a38707f74 100644 --- a/sched.go +++ b/sched.go @@ -12,7 +12,7 @@ const mib = 1 << 20 type workerRequest struct { taskType sealtasks.TaskType - accept []workerID // ordered by preference + accept []WorkerID // ordered by preference ret chan<- workerResponse cancel <-chan struct{} @@ -71,7 +71,7 @@ func (m *Manager) runSched() { } } -func (m *Manager) onWorkerFreed(wid workerID) { +func (m *Manager) onWorkerFreed(wid WorkerID) { for e := m.schedQueue.Front(); e != nil; e = e.Next() { req := e.Value.(*workerRequest) var ok bool @@ -140,7 +140,7 @@ func (m *Manager) maybeSchedRequest(req *workerRequest) (*workerResponse, error) return nil, nil // put in waiting queue } -func (m *Manager) makeResponse(wid workerID, w *workerHandle, req *workerRequest) *workerResponse { +func (m *Manager) makeResponse(wid WorkerID, w *workerHandle, req *workerRequest) *workerResponse { needRes := ResourceTable[req.taskType][m.scfg.SealProofType] w.gpuUsed = needRes.CanGPU @@ -186,7 +186,7 @@ func (m *Manager) makeResponse(wid workerID, w *workerHandle, req *workerRequest } } -func (m *Manager) canHandleRequest(wid workerID, w *workerHandle, req *workerRequest) (bool, error) { +func (m *Manager) canHandleRequest(wid WorkerID, w *workerHandle, req *workerRequest) (bool, error) { needRes, ok := ResourceTable[req.taskType][m.scfg.SealProofType] if !ok { return false, xerrors.Errorf("canHandleRequest: missing ResourceTable entry for %s/%d", req.taskType, m.scfg.SealProofType) diff --git a/stats.go b/stats.go new file mode 100644 index 000000000..2cae1decb --- /dev/null +++ b/stats.go @@ -0,0 +1,22 @@ +package sectorstorage + +import "github.com/filecoin-project/lotus/api" + +func (m *Manager) WorkerStats() map[uint64]api.WorkerStats { + m.workersLk.Lock() + defer m.workersLk.Unlock() + + out := map[uint64]api.WorkerStats{} + + for id, handle := range m.workers { + out[uint64(id)] = api.WorkerStats{ + Info: handle.info, + MemUsedMin: handle.memUsedMin, + MemUsedMax: handle.memUsedMax, + GpuUsed: handle.gpuUsed, + CpuUse: handle.cpuUse, + } + } + + return out +} From 40060e3525e6d15a35908b197e2661279df00bb2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 23 Mar 2020 16:40:36 +0100 Subject: [PATCH 004/199] sectorstorage: Fix remote sector deletion --- stores/local.go | 8 ++++++++ stores/remote.go | 16 ++++++---------- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/stores/local.go b/stores/local.go index a8eb53ee8..64924ad00 100644 --- a/stores/local.go +++ b/stores/local.go @@ -280,6 +280,10 @@ func (st *Local) delete(ctx context.Context, sid abi.SectorID, typ sectorbuilder return xerrors.Errorf("finding existing sector %d(t:%d) failed: %w", sid, typ, err) } + if len(si) == 0 { + return xerrors.Errorf("can't delete sector %v(%d), not found", sid, typ) + } + for _, info := range si { p, ok := st.paths[info.ID] if !ok { @@ -290,6 +294,10 @@ func (st *Local) delete(ctx context.Context, sid abi.SectorID, typ sectorbuilder continue } + if err := st.index.StorageDropSector(ctx, info.ID, sid, typ); err != nil { + return xerrors.Errorf("dropping sector from index: %w", err) + } + spath := filepath.Join(p.local, typ.String(), sectorutil.SectorName(sid)) log.Infof("remove %s", spath) diff --git a/stores/remote.go b/stores/remote.go index 7bbd6d225..d2ce1626a 100644 --- a/stores/remote.go +++ b/stores/remote.go @@ -59,7 +59,7 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, existing sec continue } - ap, storageID, url, foundIn, rdone, err := r.acquireFromRemote(ctx, s, fileType, sealing) + ap, storageID, url, rdone, err := r.acquireFromRemote(ctx, s, fileType, sealing) if err != nil { done() return sectorbuilder.SectorPaths{}, sectorbuilder.SectorPaths{}, nil, err @@ -75,10 +75,6 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, existing sec } // TODO: some way to allow having duplicated sectors in the system for perf - if err := r.index.StorageDropSector(ctx, foundIn, s, fileType); err != nil { - log.Warnf("dropping sector %v from %s from sector index failed: %+v", s, storageID, err) - } - if err := r.deleteFromRemote(url); err != nil { log.Warnf("deleting sector %v from %s (delete %s): %+v", s, storageID, url, err) } @@ -87,10 +83,10 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, existing sec return paths, stores, done, nil } -func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, fileType sectorbuilder.SectorFileType, sealing bool) (string, ID, string, ID, func(), error) { +func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, fileType sectorbuilder.SectorFileType, sealing bool) (string, ID, string, func(), error) { si, err := r.index.StorageFindSector(ctx, s, fileType, false) if err != nil { - return "", "", "", "", nil, err + return "", "", "", nil, err } sort.Slice(si, func(i, j int) bool { @@ -99,7 +95,7 @@ func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, fileType apaths, ids, done, err := r.local.AcquireSector(ctx, s, 0, fileType, sealing) if err != nil { - return "", "", "", "", nil, xerrors.Errorf("allocate local sector for fetching: %w", err) + return "", "", "", nil, xerrors.Errorf("allocate local sector for fetching: %w", err) } dest := sectorutil.PathByType(apaths, fileType) storageID := sectorutil.PathByType(ids, fileType) @@ -116,12 +112,12 @@ func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, fileType if merr != nil { log.Warnw("acquireFromRemote encountered errors when fetching sector from remote", "errors", merr) } - return dest, ID(storageID), url, info.ID, done, nil + return dest, ID(storageID), url, done, nil } } done() - return "", "", "", "", nil, xerrors.Errorf("failed to acquire sector %v from remote (tried %v): %w", s, si, merr) + return "", "", "", nil, xerrors.Errorf("failed to acquire sector %v from remote (tried %v): %w", s, si, merr) } func (r *Remote) fetch(url, outname string) error { From 6842b4cb46ab4c0e506f25928e08a0a6df6a7907 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 23 Mar 2020 17:28:40 +0100 Subject: [PATCH 005/199] sectorstorage: Enable sealing on the local worker by default --- manager.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/manager.go b/manager.go index d465948b2..6a38c3f4d 100644 --- a/manager.go +++ b/manager.go @@ -115,7 +115,10 @@ func New(ls stores.LocalStorage, si stores.SectorIndex, cfg *sectorbuilder.Confi err = m.AddWorker(ctx, NewLocalWorker(WorkerConfig{ SealProof: cfg.SealProofType, - TaskTypes: []sealtasks.TaskType{sealtasks.TTAddPiece, sealtasks.TTCommit1, sealtasks.TTFinalize}, + TaskTypes: []sealtasks.TaskType{ + sealtasks.TTAddPiece, sealtasks.TTCommit1, sealtasks.TTFinalize, + sealtasks.TTPreCommit1, sealtasks.TTPreCommit2, sealtasks.TTCommit2, // TODO: Config + }, }, stor, lstor, si)) if err != nil { return nil, xerrors.Errorf("adding local worker: %w", err) From 290b7ebd26eb77ca164300d9810e10267e86c442 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 23 Mar 2020 23:43:38 +0100 Subject: [PATCH 006/199] storageminer: More storage stats in storage list --- manager.go | 4 +++ stores/http_handler.go | 25 ++++++++++++++ stores/index.go | 4 +-- stores/interface.go | 16 +++++---- stores/local.go | 4 ++- stores/remote.go | 76 +++++++++++++++++++++++++++++++++++++++--- 6 files changed, 115 insertions(+), 14 deletions(-) diff --git a/manager.go b/manager.go index 6a38c3f4d..f3aa90964 100644 --- a/manager.go +++ b/manager.go @@ -410,4 +410,8 @@ func (m *Manager) StorageLocal(ctx context.Context) (map[stores.ID]string, error return out, nil } +func (m *Manager) FsStat(ctx context.Context, id stores.ID) (stores.FsStat, error) { + return m.storage.FsStat(ctx, id) +} + var _ SectorManager = &Manager{} diff --git a/stores/http_handler.go b/stores/http_handler.go index daa81061e..97c6c34f6 100644 --- a/stores/http_handler.go +++ b/stores/http_handler.go @@ -1,6 +1,7 @@ package stores import ( + "encoding/json" "io" "net/http" "os" @@ -23,12 +24,36 @@ type FetchHandler struct { func (handler *FetchHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // /remote/ mux := mux.NewRouter() + mux.HandleFunc("/remote/stat/{id}", handler.remoteStatFs).Methods("GET") mux.HandleFunc("/remote/{type}/{id}", handler.remoteGetSector).Methods("GET") mux.HandleFunc("/remote/{type}/{id}", handler.remoteDeleteSector).Methods("DELETE") mux.ServeHTTP(w, r) } +func (handler *FetchHandler) remoteStatFs(w http.ResponseWriter, r *http.Request) { + log.Debugf("SERVE STAT %s", r.URL) + vars := mux.Vars(r) + id := ID(vars["id"]) + + st, err := handler.Local.FsStat(id) + switch err { + case errPathNotFound: + w.WriteHeader(404) + return + case nil: + break + default: + w.WriteHeader(500) + log.Errorf("%+v", err) + return + } + + if err := json.NewEncoder(w).Encode(&st); err != nil { + log.Warnf("error writing stat response: %+v", err) + } +} + func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Request) { log.Infof("SERVE GET %s", r.URL) vars := mux.Vars(r) diff --git a/stores/index.go b/stores/index.go index da7c8212e..ccad8ba7f 100644 --- a/stores/index.go +++ b/stores/index.go @@ -295,8 +295,8 @@ func (i *Index) StorageBestAlloc(ctx context.Context, allocate sectorbuilder.Sec } sort.Slice(candidates, func(i, j int) bool { - iw := big.Mul(big.NewInt(int64(candidates[i].fsi.Free)), big.NewInt(int64(candidates[i].info.Weight))) - jw := big.Mul(big.NewInt(int64(candidates[j].fsi.Free)), big.NewInt(int64(candidates[j].info.Weight))) + iw := big.Mul(big.NewInt(int64(candidates[i].fsi.Available)), big.NewInt(int64(candidates[i].info.Weight))) + jw := big.Mul(big.NewInt(int64(candidates[j].fsi.Available)), big.NewInt(int64(candidates[j].info.Weight))) return iw.GreaterThan(jw) }) diff --git a/stores/interface.go b/stores/interface.go index 67c18b16e..149cb9e5f 100644 --- a/stores/interface.go +++ b/stores/interface.go @@ -12,11 +12,7 @@ import ( type Store interface { AcquireSector(ctx context.Context, s abi.SectorID, existing sectorbuilder.SectorFileType, allocate sectorbuilder.SectorFileType, sealing bool) (paths sectorbuilder.SectorPaths, stores sectorbuilder.SectorPaths, done func(), err error) -} - -type FsStat struct { - Capacity uint64 - Free uint64 // Free to use for sector storage + FsStat(ctx context.Context, id ID) (FsStat, error) } func Stat(path string) (FsStat, error) { @@ -26,7 +22,13 @@ func Stat(path string) (FsStat, error) { } return FsStat{ - Capacity: stat.Blocks * uint64(stat.Bsize), - Free: stat.Bavail * uint64(stat.Bsize), + Capacity: stat.Blocks * uint64(stat.Bsize), + Available: stat.Bavail * uint64(stat.Bsize), }, nil } + +type FsStat struct { + Capacity uint64 + Available uint64 // Available to use for sector storage + Used uint64 +} diff --git a/stores/local.go b/stores/local.go index 64924ad00..581afb13f 100644 --- a/stores/local.go +++ b/stores/local.go @@ -309,13 +309,15 @@ func (st *Local) delete(ctx context.Context, sid abi.SectorID, typ sectorbuilder return nil } +var errPathNotFound = xerrors.Errorf("fsstat: path not found") + func (st *Local) FsStat(id ID) (FsStat, error) { st.localLk.RLock() defer st.localLk.RUnlock() p, ok := st.paths[id] if !ok { - return FsStat{}, xerrors.Errorf("fsstat: path not found") + return FsStat{}, errPathNotFound } return Stat(p.local) diff --git a/stores/remote.go b/stores/remote.go index d2ce1626a..a0648d972 100644 --- a/stores/remote.go +++ b/stores/remote.go @@ -2,9 +2,13 @@ package stores import ( "context" + "encoding/json" + "io/ioutil" "mime" "net/http" + "net/url" "os" + gopath "path" "sort" "sync" @@ -75,7 +79,7 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, existing sec } // TODO: some way to allow having duplicated sectors in the system for perf - if err := r.deleteFromRemote(url); err != nil { + if err := r.deleteFromRemote(ctx, url); err != nil { log.Warnf("deleting sector %v from %s (delete %s): %+v", s, storageID, url, err) } } @@ -103,7 +107,7 @@ func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, fileType var merr error for _, info := range si { for _, url := range info.URLs { - err := r.fetch(url, dest) + err := r.fetch(ctx, url, dest) if err != nil { merr = multierror.Append(merr, xerrors.Errorf("fetch error %s (storage %s) -> %s: %w", url, info.ID, dest, err)) continue @@ -120,7 +124,7 @@ func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, fileType return "", "", "", nil, xerrors.Errorf("failed to acquire sector %v from remote (tried %v): %w", s, si, merr) } -func (r *Remote) fetch(url, outname string) error { +func (r *Remote) fetch(ctx context.Context, url, outname string) error { log.Infof("Fetch %s -> %s", url, outname) req, err := http.NewRequest("GET", url, nil) @@ -128,6 +132,7 @@ func (r *Remote) fetch(url, outname string) error { return xerrors.Errorf("request: %w", err) } req.Header = r.auth + req = req.WithContext(ctx) resp, err := http.DefaultClient.Do(req) if err != nil { @@ -168,7 +173,7 @@ func (r *Remote) fetch(url, outname string) error { } } -func (r *Remote) deleteFromRemote(url string) error { +func (r *Remote) deleteFromRemote(ctx context.Context, url string) error { log.Infof("Delete %s", url) req, err := http.NewRequest("DELETE", url, nil) @@ -176,6 +181,7 @@ func (r *Remote) deleteFromRemote(url string) error { return xerrors.Errorf("request: %w", err) } req.Header = r.auth + req = req.WithContext(ctx) resp, err := http.DefaultClient.Do(req) if err != nil { @@ -190,6 +196,68 @@ func (r *Remote) deleteFromRemote(url string) error { return nil } +func (r *Remote) FsStat(ctx context.Context, id ID) (FsStat, error) { + st, err := r.local.FsStat(id) + switch err { + case nil: + return st, nil + case errPathNotFound: + break + default: + return FsStat{}, xerrors.Errorf("local stat: %w", err) + } + + si, err := r.index.StorageInfo(ctx, id) + if err != nil { + return FsStat{}, xerrors.Errorf("getting remote storage info: %w", err) + } + + if len(si.URLs) == 0 { + return FsStat{}, xerrors.Errorf("no known URLs for remote storage %s", id) + } + + rl, err := url.Parse(si.URLs[0]) + if err != nil { + return FsStat{}, xerrors.Errorf("failed to parse url: %w", err) + } + + rl.Path = gopath.Join(rl.Path, "stat", string(id)) + + req, err := http.NewRequest("GET", rl.String(), nil) + if err != nil { + return FsStat{}, xerrors.Errorf("request: %w", err) + } + req.Header = r.auth + req = req.WithContext(ctx) + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return FsStat{}, xerrors.Errorf("do request: %w", err) + } + switch resp.StatusCode { + case 200: + break + case 404: + return FsStat{}, errPathNotFound + case 500: + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + return FsStat{}, xerrors.Errorf("fsstat: got http 500, then failed to read the error: %w", err) + } + + return FsStat{}, xerrors.New(string(b)) + } + + var out FsStat + if err := json.NewDecoder(resp.Body).Decode(&out); err != nil { + return FsStat{}, xerrors.Errorf("decoding fsstat: %w", err) + } + + defer resp.Body.Close() + + return out, nil +} + func mergeDone(a func(), b func()) func() { return func() { a() From dce6d8e4aedaf310c853024da5dbdb60bcd43831 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Tue, 24 Mar 2020 19:00:08 +0100 Subject: [PATCH 007/199] workers: Address review --- manager.go | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/manager.go b/manager.go index f3aa90964..d198c88ec 100644 --- a/manager.go +++ b/manager.go @@ -3,6 +3,7 @@ package sectorstorage import ( "container/list" "context" + "errors" "io" "net/http" "sync" @@ -24,6 +25,8 @@ import ( var log = logging.Logger("advmgr") +var ErrNoWorkers = errors.New("no suitable workers found") + type URLs []string type Worker interface { @@ -71,9 +74,7 @@ type Manager struct { schedQueue *list.List // List[*workerRequest] } -func New(ls stores.LocalStorage, si stores.SectorIndex, cfg *sectorbuilder.Config, urls URLs, ca api.Common) (*Manager, error) { - ctx := context.TODO() - +func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg *sectorbuilder.Config, urls URLs, ca api.Common) (*Manager, error) { lstor, err := stores.NewLocal(ctx, ls, si, urls) if err != nil { return nil, err @@ -84,7 +85,7 @@ func New(ls stores.LocalStorage, si stores.SectorIndex, cfg *sectorbuilder.Confi return nil, xerrors.Errorf("creating prover instance: %w", err) } - token, err := ca.AuthNew(context.TODO(), []api.Permission{"admin"}) + token, err := ca.AuthNew(ctx, []api.Permission{"admin"}) headers := http.Header{} headers.Add("Authorization", "Bearer "+string(token)) stor := stores.NewRemote(lstor, si, headers) @@ -272,7 +273,7 @@ func (m *Manager) AddPiece(ctx context.Context, sector abi.SectorID, existingPie candidateWorkers, _ := m.getWorkersByPaths(sealtasks.TTAddPiece, best) if len(candidateWorkers) == 0 { - return abi.PieceInfo{}, xerrors.New("no worker found") + return abi.PieceInfo{}, ErrNoWorkers } worker, done, err := m.getWorker(ctx, sealtasks.TTAddPiece, candidateWorkers) @@ -296,7 +297,7 @@ func (m *Manager) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke candidateWorkers, _ := m.getWorkersByPaths(sealtasks.TTPreCommit1, best) if len(candidateWorkers) == 0 { - return nil, xerrors.New("no suitable workers found") + return nil, ErrNoWorkers } worker, done, err := m.getWorker(ctx, sealtasks.TTPreCommit1, candidateWorkers) @@ -320,7 +321,7 @@ func (m *Manager) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase candidateWorkers, _ := m.getWorkersByPaths(sealtasks.TTPreCommit2, best) if len(candidateWorkers) == 0 { - return storage.SectorCids{}, xerrors.New("no suitable workers found") + return storage.SectorCids{}, ErrNoWorkers } worker, done, err := m.getWorker(ctx, sealtasks.TTPreCommit2, candidateWorkers) @@ -342,7 +343,7 @@ func (m *Manager) SealCommit1(ctx context.Context, sector abi.SectorID, ticket a candidateWorkers, _ := m.getWorkersByPaths(sealtasks.TTCommit1, best) if len(candidateWorkers) == 0 { - return nil, xerrors.New("no suitable workers found") // TODO: wait? + return nil, ErrNoWorkers } // TODO: Try very hard to execute on worker with access to the sectors @@ -373,6 +374,9 @@ func (m *Manager) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Ou candidateWorkers = append(candidateWorkers, id) } m.workersLk.Unlock() + if len(candidateWorkers) == 0 { + return nil, ErrNoWorkers + } worker, done, err := m.getWorker(ctx, sealtasks.TTCommit2, candidateWorkers) if err != nil { @@ -390,6 +394,9 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID) error } candidateWorkers, _ := m.getWorkersByPaths(sealtasks.TTFinalize, best) + if len(candidateWorkers) == 0 { + return ErrNoWorkers + } // TODO: Remove sector from sealing stores // TODO: Move the sector to long-term storage From 2900bb099f1d183889df578a66fda195d9c01c07 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Tue, 24 Mar 2020 20:38:00 +0100 Subject: [PATCH 008/199] storageminer: Config for local worker task types --- manager.go | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/manager.go b/manager.go index d198c88ec..843d841cb 100644 --- a/manager.go +++ b/manager.go @@ -74,7 +74,7 @@ type Manager struct { schedQueue *list.List // List[*workerRequest] } -func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg *sectorbuilder.Config, urls URLs, ca api.Common) (*Manager, error) { +func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg *sectorbuilder.Config, sc config.Storage, urls URLs, ca api.Common) (*Manager, error) { lstor, err := stores.NewLocal(ctx, ls, si, urls) if err != nil { return nil, err @@ -114,12 +114,16 @@ func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg go m.runSched() + localTasks := []sealtasks.TaskType{ + sealtasks.TTAddPiece, sealtasks.TTCommit1, sealtasks.TTFinalize, + } + if sc.AllowPreCommit1 { localTasks = append(localTasks, sealtasks.TTPreCommit1)} + if sc.AllowPreCommit2 { localTasks = append(localTasks, sealtasks.TTPreCommit2)} + if sc.AllowCommit { localTasks = append(localTasks, sealtasks.TTCommit2)} + err = m.AddWorker(ctx, NewLocalWorker(WorkerConfig{ SealProof: cfg.SealProofType, - TaskTypes: []sealtasks.TaskType{ - sealtasks.TTAddPiece, sealtasks.TTCommit1, sealtasks.TTFinalize, - sealtasks.TTPreCommit1, sealtasks.TTPreCommit2, sealtasks.TTCommit2, // TODO: Config - }, + TaskTypes: localTasks, }, stor, lstor, si)) if err != nil { return nil, xerrors.Errorf("adding local worker: %w", err) From cd892105f6a64d14496dfcccc6af33450111e8e1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Tue, 24 Mar 2020 21:28:07 +0100 Subject: [PATCH 009/199] sectorstorage: Remove unsealed sector in FinalizeSector --- stores/http_handler.go | 4 ++-- stores/interface.go | 1 + stores/local.go | 6 ++++-- stores/remote.go | 30 +++++++++++++++++++++++++++++- worker_local.go | 10 +++++++++- 5 files changed, 45 insertions(+), 6 deletions(-) diff --git a/stores/http_handler.go b/stores/http_handler.go index 97c6c34f6..657da0f9b 100644 --- a/stores/http_handler.go +++ b/stores/http_handler.go @@ -36,7 +36,7 @@ func (handler *FetchHandler) remoteStatFs(w http.ResponseWriter, r *http.Request vars := mux.Vars(r) id := ID(vars["id"]) - st, err := handler.Local.FsStat(id) + st, err := handler.Local.FsStat(r.Context(), id) switch err { case errPathNotFound: w.WriteHeader(404) @@ -129,7 +129,7 @@ func (handler *FetchHandler) remoteDeleteSector(w http.ResponseWriter, r *http.R return } - if err := handler.delete(r.Context(), id, ft); err != nil { + if err := handler.Remove(r.Context(), id, ft); err != nil { log.Error("%+v", err) w.WriteHeader(500) return diff --git a/stores/interface.go b/stores/interface.go index 149cb9e5f..e56a6d74a 100644 --- a/stores/interface.go +++ b/stores/interface.go @@ -12,6 +12,7 @@ import ( type Store interface { AcquireSector(ctx context.Context, s abi.SectorID, existing sectorbuilder.SectorFileType, allocate sectorbuilder.SectorFileType, sealing bool) (paths sectorbuilder.SectorPaths, stores sectorbuilder.SectorPaths, done func(), err error) + Remove(ctx context.Context, s abi.SectorID, types sectorbuilder.SectorFileType) error FsStat(ctx context.Context, id ID) (FsStat, error) } diff --git a/stores/local.go b/stores/local.go index 581afb13f..d891f86fa 100644 --- a/stores/local.go +++ b/stores/local.go @@ -270,7 +270,7 @@ func (st *Local) Local(ctx context.Context) ([]StoragePath, error) { return out, nil } -func (st *Local) delete(ctx context.Context, sid abi.SectorID, typ sectorbuilder.SectorFileType) error { +func (st *Local) Remove(ctx context.Context, sid abi.SectorID, typ sectorbuilder.SectorFileType) error { if bits.OnesCount(uint(typ)) != 1 { return xerrors.New("delete expects one file type") } @@ -311,7 +311,7 @@ func (st *Local) delete(ctx context.Context, sid abi.SectorID, typ sectorbuilder var errPathNotFound = xerrors.Errorf("fsstat: path not found") -func (st *Local) FsStat(id ID) (FsStat, error) { +func (st *Local) FsStat(ctx context.Context, id ID) (FsStat, error) { st.localLk.RLock() defer st.localLk.RUnlock() @@ -322,3 +322,5 @@ func (st *Local) FsStat(id ID) (FsStat, error) { return Stat(p.local) } + +var _ Store = &Local{} diff --git a/stores/remote.go b/stores/remote.go index a0648d972..ab7d465b3 100644 --- a/stores/remote.go +++ b/stores/remote.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "io/ioutil" + "math/bits" "mime" "net/http" "net/url" @@ -173,6 +174,33 @@ func (r *Remote) fetch(ctx context.Context, url, outname string) error { } } +func (r *Remote) Remove(ctx context.Context, sid abi.SectorID, typ sectorbuilder.SectorFileType) error { + if bits.OnesCount(uint(typ)) != 1 { + return xerrors.New("delete expects one file type") + } + + if err := r.local.Remove(ctx, sid, typ); err != nil { + return xerrors.Errorf("remove from local: %w", err) + } + + si, err := r.index.StorageFindSector(ctx, sid, typ, false) + if err != nil { + return xerrors.Errorf("finding existing sector %d(t:%d) failed: %w", sid, typ, err) + } + + for _, info := range si { + for _, url := range info.URLs { + if err := r.deleteFromRemote(ctx, url); err != nil { + log.Warnf("remove %s: %+v", url, err) + continue + } + break + } + } + + return nil +} + func (r *Remote) deleteFromRemote(ctx context.Context, url string) error { log.Infof("Delete %s", url) @@ -197,7 +225,7 @@ func (r *Remote) deleteFromRemote(ctx context.Context, url string) error { } func (r *Remote) FsStat(ctx context.Context, id ID) (FsStat, error) { - st, err := r.local.FsStat(id) + st, err := r.local.FsStat(ctx, id) switch err { case nil: return st, nil diff --git a/worker_local.go b/worker_local.go index d691f150e..18a0305fd 100644 --- a/worker_local.go +++ b/worker_local.go @@ -152,7 +152,15 @@ func (l *LocalWorker) FinalizeSector(ctx context.Context, sector abi.SectorID) e return err } - return sb.FinalizeSector(ctx, sector) + if err := sb.FinalizeSector(ctx, sector); err != nil { + return xerrors.Errorf("finalizing sector: %w", err) + } + + if err := l.storage.Remove(ctx, sector, sectorbuilder.FTUnsealed); err != nil { + return xerrors.Errorf("removing unsealed data: %w", err) + } + + return nil } func (l *LocalWorker) TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) { From d8f357b01c40fe137f29c9feeac137eb96a320eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Wed, 25 Mar 2020 00:37:40 +0100 Subject: [PATCH 010/199] workers: More review related fixes --- resources.go | 2 +- roprov.go | 2 +- sectorutil/utils.go | 2 +- stores/http_handler.go | 3 +++ stores/remote.go | 2 +- 5 files changed, 7 insertions(+), 4 deletions(-) diff --git a/resources.go b/resources.go index 3587b41ea..ebb32f165 100644 --- a/resources.go +++ b/resources.go @@ -20,7 +20,7 @@ var FsOverheadFinalized = map[sectorbuilder.SectorFileType]int{ type Resources struct { MinMemory uint64 // What Must be in RAM for decent perf - MaxMemory uint64 // Mamory required (swap + ram) + MaxMemory uint64 // Memory required (swap + ram) MultiThread bool CanGPU bool diff --git a/roprov.go b/roprov.go index 99723e181..4b0dfbe2b 100644 --- a/roprov.go +++ b/roprov.go @@ -15,7 +15,7 @@ type readonlyProvider struct { } func (l *readonlyProvider) AcquireSector(ctx context.Context, id abi.SectorID, existing sectorbuilder.SectorFileType, allocate sectorbuilder.SectorFileType, sealing bool) (sectorbuilder.SectorPaths, func(), error) { - if allocate != 0 { + if allocate != 0 { // 0 - don't allocate anything return sectorbuilder.SectorPaths{}, nil, xerrors.New("read-only storage") } diff --git a/sectorutil/utils.go b/sectorutil/utils.go index 01862b7b4..ede59410b 100644 --- a/sectorutil/utils.go +++ b/sectorutil/utils.go @@ -14,7 +14,7 @@ func ParseSectorID(baseName string) (abi.SectorID, error) { var mid abi.ActorID read, err := fmt.Sscanf(baseName, "s-t0%d-%d", &mid, &n) if err != nil { - return abi.SectorID{}, xerrors.Errorf(": %w", err) + return abi.SectorID{}, xerrors.Errorf("sscanf sector name ('%s'): %w", baseName, err) } if read != 2 { diff --git a/stores/http_handler.go b/stores/http_handler.go index 657da0f9b..b242d1159 100644 --- a/stores/http_handler.go +++ b/stores/http_handler.go @@ -68,11 +68,13 @@ func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Requ ft, err := ftFromString(vars["type"]) if err != nil { log.Error("%+v", err) + w.WriteHeader(500) return } paths, _, done, err := handler.Local.AcquireSector(r.Context(), id, ft, 0, false) if err != nil { log.Error("%+v", err) + w.WriteHeader(500) return } defer done() @@ -126,6 +128,7 @@ func (handler *FetchHandler) remoteDeleteSector(w http.ResponseWriter, r *http.R ft, err := ftFromString(vars["type"]) if err != nil { log.Error("%+v", err) + w.WriteHeader(500) return } diff --git a/stores/remote.go b/stores/remote.go index ab7d465b3..66b818434 100644 --- a/stores/remote.go +++ b/stores/remote.go @@ -273,7 +273,7 @@ func (r *Remote) FsStat(ctx context.Context, id ID) (FsStat, error) { return FsStat{}, xerrors.Errorf("fsstat: got http 500, then failed to read the error: %w", err) } - return FsStat{}, xerrors.New(string(b)) + return FsStat{}, xerrors.Errorf("fsstat: got http 500: %s", string(b)) } var out FsStat From 817e699738b62152090800acf0b901caf5477c99 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Wed, 25 Mar 2020 00:49:45 +0100 Subject: [PATCH 011/199] sectorstorage: wire up closing logic --- manager.go | 7 +++++++ sched.go | 14 ++++++++++++++ worker_local.go | 4 ++++ worker_remote.go | 12 +++++++++--- 4 files changed, 34 insertions(+), 3 deletions(-) diff --git a/manager.go b/manager.go index 843d841cb..3ef8e30c6 100644 --- a/manager.go +++ b/manager.go @@ -38,6 +38,8 @@ type Worker interface { Paths(context.Context) ([]stores.StoragePath, error) Info(context.Context) (api.WorkerInfo, error) + + Close() error } type SectorManager interface { @@ -425,4 +427,9 @@ func (m *Manager) FsStat(ctx context.Context, id stores.ID) (stores.FsStat, erro return m.storage.FsStat(ctx, id) } +func (m *Manager) Close() error { + close(m.closing) + return nil +} + var _ SectorManager = &Manager{} diff --git a/sched.go b/sched.go index a38707f74..d8e3d35a0 100644 --- a/sched.go +++ b/sched.go @@ -67,6 +67,9 @@ func (m *Manager) runSched() { m.schedQueue.PushBack(req) case wid := <-m.workerFree: m.onWorkerFreed(wid) + case <-m.closing: + m.schedClose() + return } } } @@ -240,3 +243,14 @@ func (m *Manager) schedNewWorker(w *workerHandle) { m.workers[id] = w m.nextWorker++ } + +func (m *Manager) schedClose() { + m.workersLk.Lock() + defer m.workersLk.Unlock() + + for i, w := range m.workers { + if err := w.w.Close(); err != nil { + log.Errorf("closing worker %d: %+v", i, err) + } + } +} diff --git a/worker_local.go b/worker_local.go index 18a0305fd..175106bad 100644 --- a/worker_local.go +++ b/worker_local.go @@ -203,4 +203,8 @@ func (l *LocalWorker) Info(context.Context) (api.WorkerInfo, error) { }, nil } +func (l *LocalWorker) Close() error { + return nil +} + var _ Worker = &LocalWorker{} diff --git a/worker_remote.go b/worker_remote.go index f49ea4dc6..ffd96f188 100644 --- a/worker_remote.go +++ b/worker_remote.go @@ -10,10 +10,12 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api/client" + "github.com/filecoin-project/lotus/lib/jsonrpc" ) type remote struct { api.WorkerApi + closer jsonrpc.ClientCloser } func (r *remote) NewSector(ctx context.Context, sector abi.SectorID) error { @@ -33,13 +35,17 @@ func ConnectRemote(ctx context.Context, fa api.Common, url string) (*remote, err headers := http.Header{} headers.Add("Authorization", "Bearer "+string(token)) - wapi, close, err := client.NewWorkerRPC(url, headers) + wapi, closer, err := client.NewWorkerRPC(url, headers) if err != nil { return nil, xerrors.Errorf("creating jsonrpc client: %w", err) } - _ = close // TODO - return &remote{wapi}, nil + return &remote{wapi, closer}, nil +} + +func (r *remote) Close() error { + r.closer() + return nil } var _ Worker = &remote{} From f22942b3c8ee326d86a0c254015d9fba31b0754f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Wed, 25 Mar 2020 19:21:53 +0100 Subject: [PATCH 012/199] sectorstorage: Move sealed sectors to storage in FinalizeSector --- stores/interface.go | 4 ++++ stores/local.go | 58 +++++++++++++++++++++++++++++++++++++++++++++ stores/remote.go | 13 ++++++++++ stores/util_unix.go | 43 +++++++++++++++++++++++++++++++++ worker_local.go | 4 ++++ 5 files changed, 122 insertions(+) create mode 100644 stores/util_unix.go diff --git a/stores/interface.go b/stores/interface.go index e56a6d74a..45e371fb7 100644 --- a/stores/interface.go +++ b/stores/interface.go @@ -13,6 +13,10 @@ import ( type Store interface { AcquireSector(ctx context.Context, s abi.SectorID, existing sectorbuilder.SectorFileType, allocate sectorbuilder.SectorFileType, sealing bool) (paths sectorbuilder.SectorPaths, stores sectorbuilder.SectorPaths, done func(), err error) Remove(ctx context.Context, s abi.SectorID, types sectorbuilder.SectorFileType) error + + // move sectors into storage + MoveStorage(ctx context.Context, s abi.SectorID, types sectorbuilder.SectorFileType) error + FsStat(ctx context.Context, id ID) (FsStat, error) } diff --git a/stores/local.go b/stores/local.go index d891f86fa..0d1cf2654 100644 --- a/stores/local.go +++ b/stores/local.go @@ -309,6 +309,64 @@ func (st *Local) Remove(ctx context.Context, sid abi.SectorID, typ sectorbuilder return nil } +func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, types sectorbuilder.SectorFileType) error { + dest, destIds, sdone, err := st.AcquireSector(ctx, s, 0, types, false) + if err != nil { + return xerrors.Errorf("acquire dest storage: %w", err) + } + defer sdone() + + src, srcIds, ddone, err := st.AcquireSector(ctx, s, types, 0, false) + if err != nil { + return xerrors.Errorf("acquire src storage: %w", err) + } + defer ddone() + + for _, fileType := range pathTypes { + if fileType&types == 0 { + continue + } + + sst, err := st.index.StorageInfo(ctx, ID(sectorutil.PathByType(srcIds, fileType))) + if err != nil { + return xerrors.Errorf("failed to get source storage info: %w", err) + } + + dst, err := st.index.StorageInfo(ctx, ID(sectorutil.PathByType(destIds, fileType))) + if err != nil { + return xerrors.Errorf("failed to get source storage info: %w", err) + } + + + if sst.ID == dst.ID { + log.Debugf("not moving %v(%d); src and dest are the same", s, fileType) + continue + } + + if sst.CanStore { + log.Debugf("not moving %v(%d); source supports storage", s, fileType) + continue + } + + log.Debugf("moving %v(%d) to storage: %s(se:%t; st:%t) -> %s(se:%t; st:%t)", s, fileType, sst.ID, sst.CanSeal, sst.CanStore, dst.ID, dst.CanSeal, dst.CanStore) + + if err := st.index.StorageDropSector(ctx, ID(sectorutil.PathByType(srcIds, fileType)), s, fileType); err != nil { + return xerrors.Errorf("dropping source sector from index: %w", err) + } + + if err := move(sectorutil.PathByType(src, fileType), sectorutil.PathByType(dest, fileType)); err != nil { + // TODO: attempt some recovery (check if src is still there, re-declare) + return xerrors.Errorf("moving sector %v(%d): %w", s, fileType, err) + } + + if err := st.index.StorageDeclareSector(ctx, ID(sectorutil.PathByType(destIds, fileType)), s, fileType); err != nil { + return xerrors.Errorf("declare sector %d(t:%d) -> %s: %w", s, fileType, ID(sectorutil.PathByType(destIds, fileType)), err) + } + } + + return nil +} + var errPathNotFound = xerrors.Errorf("fsstat: path not found") func (st *Local) FsStat(ctx context.Context, id ID) (FsStat, error) { diff --git a/stores/remote.go b/stores/remote.go index 66b818434..6131ca78b 100644 --- a/stores/remote.go +++ b/stores/remote.go @@ -107,6 +107,8 @@ func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, fileType var merr error for _, info := range si { + // TODO: see what we have local, prefer that + for _, url := range info.URLs { err := r.fetch(ctx, url, dest) if err != nil { @@ -174,6 +176,17 @@ func (r *Remote) fetch(ctx context.Context, url, outname string) error { } } +func (r *Remote) MoveStorage(ctx context.Context, s abi.SectorID, types sectorbuilder.SectorFileType) error { + // Make sure we have the data local + _, _, ddone, err := r.AcquireSector(ctx, s, types, 0, false) + if err != nil { + return xerrors.Errorf("acquire src storage (remote): %w", err) + } + ddone() + + return r.local.MoveStorage(ctx, s, types) +} + func (r *Remote) Remove(ctx context.Context, sid abi.SectorID, typ sectorbuilder.SectorFileType) error { if bits.OnesCount(uint(typ)) != 1 { return xerrors.New("delete expects one file type") diff --git a/stores/util_unix.go b/stores/util_unix.go new file mode 100644 index 000000000..eeb691ddf --- /dev/null +++ b/stores/util_unix.go @@ -0,0 +1,43 @@ +package stores + +import ( + "bytes" + "os/exec" + "path/filepath" + "strings" + + "github.com/mitchellh/go-homedir" + "golang.org/x/xerrors" +) + +func move(from, to string) error { + from, err := homedir.Expand(from) + if err != nil { + return xerrors.Errorf("move: expanding from: %w", err) + } + + to, err = homedir.Expand(to) + if err != nil { + return xerrors.Errorf("move: expanding to: %w", err) + } + + if filepath.Base(from) != filepath.Base(to) { + return xerrors.Errorf("move: base names must match ('%s' != '%s')", filepath.Base(from), filepath.Base(to)) + } + + log.Debugw("move sector data", "from", from, "to", to) + + toDir := filepath.Dir(to) + + // `mv` has decades of experience in moving files quickly; don't pretend we + // can do better + + var errOut bytes.Buffer + cmd := exec.Command("/usr/bin/env", "mv", "-t", toDir, from) + cmd.Stderr = &errOut + if err := cmd.Run(); err != nil { + return xerrors.Errorf("exec mv (stderr: %s): %w", strings.TrimSpace(errOut.String()), err) + } + + return nil +} diff --git a/worker_local.go b/worker_local.go index 175106bad..de3f19c89 100644 --- a/worker_local.go +++ b/worker_local.go @@ -160,6 +160,10 @@ func (l *LocalWorker) FinalizeSector(ctx context.Context, sector abi.SectorID) e return xerrors.Errorf("removing unsealed data: %w", err) } + if err := l.storage.MoveStorage(ctx, sector, sectorbuilder.FTSealed|sectorbuilder.FTCache); err != nil { + return xerrors.Errorf("moving sealed data to storage: %w", err) + } + return nil } From 8c63b2570482070aa316afb12a142af48df07837 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Wed, 25 Mar 2020 21:19:58 +0100 Subject: [PATCH 013/199] stores: 'None' file type --- roprov.go | 2 +- stores/filetype.go | 8 ++++++++ stores/http_handler.go | 2 +- stores/local.go | 4 ++-- stores/remote.go | 4 ++-- 5 files changed, 14 insertions(+), 6 deletions(-) create mode 100644 stores/filetype.go diff --git a/roprov.go b/roprov.go index 4b0dfbe2b..8355500b5 100644 --- a/roprov.go +++ b/roprov.go @@ -15,7 +15,7 @@ type readonlyProvider struct { } func (l *readonlyProvider) AcquireSector(ctx context.Context, id abi.SectorID, existing sectorbuilder.SectorFileType, allocate sectorbuilder.SectorFileType, sealing bool) (sectorbuilder.SectorPaths, func(), error) { - if allocate != 0 { // 0 - don't allocate anything + if allocate != stores.FTNone { return sectorbuilder.SectorPaths{}, nil, xerrors.New("read-only storage") } diff --git a/stores/filetype.go b/stores/filetype.go new file mode 100644 index 000000000..e85db1e53 --- /dev/null +++ b/stores/filetype.go @@ -0,0 +1,8 @@ +package stores + +import "github.com/filecoin-project/go-sectorbuilder" + +const ( + // TODO: move the other types here after we drop go-sectorbuilder + FTNone sectorbuilder.SectorFileType = 0 +) diff --git a/stores/http_handler.go b/stores/http_handler.go index b242d1159..a49349901 100644 --- a/stores/http_handler.go +++ b/stores/http_handler.go @@ -71,7 +71,7 @@ func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Requ w.WriteHeader(500) return } - paths, _, done, err := handler.Local.AcquireSector(r.Context(), id, ft, 0, false) + paths, _, done, err := handler.Local.AcquireSector(r.Context(), id, ft, FTNone, false) if err != nil { log.Error("%+v", err) w.WriteHeader(500) diff --git a/stores/local.go b/stores/local.go index 0d1cf2654..f853de9dc 100644 --- a/stores/local.go +++ b/stores/local.go @@ -310,13 +310,13 @@ func (st *Local) Remove(ctx context.Context, sid abi.SectorID, typ sectorbuilder } func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, types sectorbuilder.SectorFileType) error { - dest, destIds, sdone, err := st.AcquireSector(ctx, s, 0, types, false) + dest, destIds, sdone, err := st.AcquireSector(ctx, s, FTNone, types, false) if err != nil { return xerrors.Errorf("acquire dest storage: %w", err) } defer sdone() - src, srcIds, ddone, err := st.AcquireSector(ctx, s, types, 0, false) + src, srcIds, ddone, err := st.AcquireSector(ctx, s, types, FTNone, false) if err != nil { return xerrors.Errorf("acquire src storage: %w", err) } diff --git a/stores/remote.go b/stores/remote.go index 6131ca78b..e44b8cfec 100644 --- a/stores/remote.go +++ b/stores/remote.go @@ -98,7 +98,7 @@ func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, fileType return si[i].Weight < si[j].Weight }) - apaths, ids, done, err := r.local.AcquireSector(ctx, s, 0, fileType, sealing) + apaths, ids, done, err := r.local.AcquireSector(ctx, s, FTNone, fileType, sealing) if err != nil { return "", "", "", nil, xerrors.Errorf("allocate local sector for fetching: %w", err) } @@ -178,7 +178,7 @@ func (r *Remote) fetch(ctx context.Context, url, outname string) error { func (r *Remote) MoveStorage(ctx context.Context, s abi.SectorID, types sectorbuilder.SectorFileType) error { // Make sure we have the data local - _, _, ddone, err := r.AcquireSector(ctx, s, types, 0, false) + _, _, ddone, err := r.AcquireSector(ctx, s, types, FTNone, false) if err != nil { return xerrors.Errorf("acquire src storage (remote): %w", err) } From db93e784ab3f6dee3e58c0fa304b0665314443fc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Wed, 25 Mar 2020 21:20:24 +0100 Subject: [PATCH 014/199] workers: gofmt --- manager.go | 12 +++++++++--- stores/local.go | 1 - 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/manager.go b/manager.go index 3ef8e30c6..6f4bd58e2 100644 --- a/manager.go +++ b/manager.go @@ -119,9 +119,15 @@ func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg localTasks := []sealtasks.TaskType{ sealtasks.TTAddPiece, sealtasks.TTCommit1, sealtasks.TTFinalize, } - if sc.AllowPreCommit1 { localTasks = append(localTasks, sealtasks.TTPreCommit1)} - if sc.AllowPreCommit2 { localTasks = append(localTasks, sealtasks.TTPreCommit2)} - if sc.AllowCommit { localTasks = append(localTasks, sealtasks.TTCommit2)} + if sc.AllowPreCommit1 { + localTasks = append(localTasks, sealtasks.TTPreCommit1) + } + if sc.AllowPreCommit2 { + localTasks = append(localTasks, sealtasks.TTPreCommit2) + } + if sc.AllowCommit { + localTasks = append(localTasks, sealtasks.TTCommit2) + } err = m.AddWorker(ctx, NewLocalWorker(WorkerConfig{ SealProof: cfg.SealProofType, diff --git a/stores/local.go b/stores/local.go index f853de9dc..bc2e56a69 100644 --- a/stores/local.go +++ b/stores/local.go @@ -337,7 +337,6 @@ func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, types sectorbu return xerrors.Errorf("failed to get source storage info: %w", err) } - if sst.ID == dst.ID { log.Debugf("not moving %v(%d); src and dest are the same", s, fileType) continue From 0ce42d520adee189bf856abd0b4d1066136cff63 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Wed, 25 Mar 2020 22:23:01 +0100 Subject: [PATCH 015/199] stores: Mute serve-stat debug log --- stores/http_handler.go | 1 - 1 file changed, 1 deletion(-) diff --git a/stores/http_handler.go b/stores/http_handler.go index a49349901..bbc9b2b04 100644 --- a/stores/http_handler.go +++ b/stores/http_handler.go @@ -32,7 +32,6 @@ func (handler *FetchHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } func (handler *FetchHandler) remoteStatFs(w http.ResponseWriter, r *http.Request) { - log.Debugf("SERVE STAT %s", r.URL) vars := mux.Vars(r) id := ID(vars["id"]) From 3081a1b9c7e303254552810acada2d31123ed105 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 26 Mar 2020 00:04:40 +0100 Subject: [PATCH 016/199] Fix sealing sectors with multiple pieces --- mock/mock.go | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/mock/mock.go b/mock/mock.go index fdaae7f80..ca50ed189 100644 --- a/mock/mock.go +++ b/mock/mock.go @@ -159,7 +159,7 @@ func (sb *SectorMgr) SealPreCommit1(ctx context.Context, sid abi.SectorID, ticke } } - commd, err := MockVerifier.GenerateDataCommitment(abi.PaddedPieceSize(sb.sectorSize), pis) + commd, err := MockVerifier.GenerateDataCommitment(sb.proofType, pis) if err != nil { return nil, err } @@ -354,15 +354,8 @@ func (m mockVerif) VerifySeal(svi abi.SealVerifyInfo) (bool, error) { return true, nil } -func (m mockVerif) GenerateDataCommitment(ssize abi.PaddedPieceSize, pieces []abi.PieceInfo) (cid.Cid, error) { - if len(pieces) != 1 { - panic("todo") - } - if pieces[0].Size != ssize { - fmt.Println("wrong sizes? ", pieces[0].Size, ssize) - panic("todo") - } - return pieces[0].PieceCID, nil +func (m mockVerif) GenerateDataCommitment(pt abi.RegisteredProof, pieces []abi.PieceInfo) (cid.Cid, error) { + return sectorbuilder.GenerateUnsealedCID(pt, pieces) } var MockVerifier = mockVerif{} From e8e90300f028b03b8f7bf3de1ff433c2b0e888f2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 26 Mar 2020 03:50:56 +0100 Subject: [PATCH 017/199] Merge sectorbuilder into sectorstorage --- ffiwrapper/basicfs/fs.go | 70 +++++++ ffiwrapper/config.go | 79 +++++++ ffiwrapper/files.go | 53 +++++ ffiwrapper/params_shared.go | 18 ++ ffiwrapper/sealer.go | 41 ++++ ffiwrapper/sealer_cgo.go | 400 ++++++++++++++++++++++++++++++++++++ ffiwrapper/sealer_test.go | 356 ++++++++++++++++++++++++++++++++ ffiwrapper/types.go | 49 +++++ ffiwrapper/varifier_cgo.go | 80 ++++++++ manager.go | 28 +-- mock/mock.go | 12 +- mock/preseal.go | 4 +- resources.go | 18 +- roprov.go | 5 +- sectorutil/utils.go | 56 ----- stores/filetype.go | 85 +++++++- stores/http_handler.go | 22 +- stores/index.go | 31 ++- stores/interface.go | 7 +- stores/local.go | 48 +++-- stores/remote.go | 26 ++- worker_local.go | 23 +-- zerocomm/zerocomm.go | 55 +++++ zerocomm/zerocomm_test.go | 115 +++++++++++ 24 files changed, 1504 insertions(+), 177 deletions(-) create mode 100644 ffiwrapper/basicfs/fs.go create mode 100644 ffiwrapper/config.go create mode 100644 ffiwrapper/files.go create mode 100644 ffiwrapper/params_shared.go create mode 100644 ffiwrapper/sealer.go create mode 100644 ffiwrapper/sealer_cgo.go create mode 100644 ffiwrapper/sealer_test.go create mode 100644 ffiwrapper/types.go create mode 100644 ffiwrapper/varifier_cgo.go delete mode 100644 sectorutil/utils.go create mode 100644 zerocomm/zerocomm.go create mode 100644 zerocomm/zerocomm_test.go diff --git a/ffiwrapper/basicfs/fs.go b/ffiwrapper/basicfs/fs.go new file mode 100644 index 000000000..cd85a1a0b --- /dev/null +++ b/ffiwrapper/basicfs/fs.go @@ -0,0 +1,70 @@ +package basicfs + +import ( + "context" + "fmt" + "os" + "path/filepath" + "sync" + + "github.com/filecoin-project/specs-actors/actors/abi" + + "github.com/filecoin-project/lotus/storage/sectorstorage/stores" +) + +type sectorFile struct { + abi.SectorID + stores.SectorFileType +} + +type Provider struct { + Root string + + lk sync.Mutex + waitSector map[sectorFile]chan struct{} +} + +func (b *Provider) AcquireSector(ctx context.Context, id abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, sealing bool) (stores.SectorPaths, func(), error) { + os.Mkdir(filepath.Join(b.Root, stores.FTUnsealed.String()), 0755) + os.Mkdir(filepath.Join(b.Root, stores.FTSealed.String()), 0755) + os.Mkdir(filepath.Join(b.Root, stores.FTCache.String()), 0755) + + done := func() {} + + for i := 0; i < 3; i++ { + if (existing|allocate)&(1< MaxFallbackPostChallengeCount { + return MaxFallbackPostChallengeCount + } + return challengeCount +} + +func (sb *SectorBuilder) Stop() { + close(sb.stopping) +} + +func (sb *SectorBuilder) SectorSize() abi.SectorSize { + return sb.ssize +} + +func (sb *SectorBuilder) SealProofType() abi.RegisteredProof { + return sb.sealProofType +} + +func (sb *SectorBuilder) PoStProofType() abi.RegisteredProof { + return sb.postProofType +} diff --git a/ffiwrapper/sealer_cgo.go b/ffiwrapper/sealer_cgo.go new file mode 100644 index 000000000..cc871c835 --- /dev/null +++ b/ffiwrapper/sealer_cgo.go @@ -0,0 +1,400 @@ +//+build cgo + +package ffiwrapper + +import ( + "context" + "io" + "math/bits" + "os" + + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + ffi "github.com/filecoin-project/filecoin-ffi" + "github.com/filecoin-project/specs-actors/actors/abi" + "github.com/filecoin-project/specs-storage/storage" + + "github.com/filecoin-project/lotus/storage/sectorstorage/stores" + "github.com/filecoin-project/lotus/storage/sectorstorage/zerocomm" +) + +var _ Basic = &SectorBuilder{} + +func New(sectors SectorProvider, cfg *Config) (*SectorBuilder, error) { + sectorSize, err := sizeFromConfig(*cfg) + if err != nil { + return nil, err + } + + sb := &SectorBuilder{ + sealProofType: cfg.SealProofType, + postProofType: cfg.PoStProofType, + ssize: sectorSize, + + sectors: sectors, + + stopping: make(chan struct{}), + } + + return sb, nil +} + +func (sb *SectorBuilder) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorInfo []abi.SectorInfo, faults []abi.SectorNumber) (ffi.SortedPrivateSectorInfo, error) { + fmap := map[abi.SectorNumber]struct{}{} + for _, fault := range faults { + fmap[fault] = struct{}{} + } + + var out []ffi.PrivateSectorInfo + for _, s := range sectorInfo { + if _, faulty := fmap[s.SectorNumber]; faulty { + continue + } + + paths, done, err := sb.sectors.AcquireSector(ctx, abi.SectorID{Miner: mid, Number: s.SectorNumber}, stores.FTCache|stores.FTSealed, 0, false) + if err != nil { + return ffi.SortedPrivateSectorInfo{}, xerrors.Errorf("acquire sector paths: %w", err) + } + done() // TODO: This is a tiny bit suboptimal + + postProofType, err := s.RegisteredProof.RegisteredPoStProof() + if err != nil { + return ffi.SortedPrivateSectorInfo{}, xerrors.Errorf("acquiring registered PoSt proof from sector info %+v: %w", s, err) + } + + out = append(out, ffi.PrivateSectorInfo{ + CacheDirPath: paths.Cache, + PoStProofType: postProofType, + SealedSectorPath: paths.Sealed, + SectorInfo: s, + }) + } + + return ffi.NewSortedPrivateSectorInfo(out...), nil +} + +func (sb *SectorBuilder) NewSector(ctx context.Context, sector abi.SectorID) error { + // TODO: Allocate the sector here instead of in addpiece + + return nil +} + +func (sb *SectorBuilder) AddPiece(ctx context.Context, sector abi.SectorID, existingPieceSizes []abi.UnpaddedPieceSize, pieceSize abi.UnpaddedPieceSize, file storage.Data) (abi.PieceInfo, error) { + f, werr, err := toReadableFile(file, int64(pieceSize)) + if err != nil { + return abi.PieceInfo{}, err + } + + var done func() + var stagedFile *os.File + + defer func() { + if done != nil { + done() + } + + if stagedFile != nil { + if err := stagedFile.Close(); err != nil { + log.Errorf("closing staged file: %+v", err) + } + } + }() + + var stagedPath stores.SectorPaths + if len(existingPieceSizes) == 0 { + stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, 0, stores.FTUnsealed, true) + if err != nil { + return abi.PieceInfo{}, xerrors.Errorf("acquire unsealed sector: %w", err) + } + + stagedFile, err = os.Create(stagedPath.Unsealed) + if err != nil { + return abi.PieceInfo{}, xerrors.Errorf("opening sector file: %w", err) + } + } else { + stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, 0, true) + if err != nil { + return abi.PieceInfo{}, xerrors.Errorf("acquire unsealed sector: %w", err) + } + + stagedFile, err = os.OpenFile(stagedPath.Unsealed, os.O_RDWR, 0644) + if err != nil { + return abi.PieceInfo{}, xerrors.Errorf("opening sector file: %w", err) + } + + if _, err := stagedFile.Seek(0, io.SeekEnd); err != nil { + return abi.PieceInfo{}, xerrors.Errorf("seek end: %w", err) + } + } + + _, _, pieceCID, err := ffi.WriteWithAlignment(sb.sealProofType, f, pieceSize, stagedFile, existingPieceSizes) + if err != nil { + return abi.PieceInfo{}, err + } + + if err := f.Close(); err != nil { + return abi.PieceInfo{}, err + } + + return abi.PieceInfo{ + Size: pieceSize.Padded(), + PieceCID: pieceCID, + }, werr() +} + +func (sb *SectorBuilder) ReadPieceFromSealedSector(ctx context.Context, sector abi.SectorID, offset UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealedCID cid.Cid) (io.ReadCloser, error) { + path, doneUnsealed, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTUnsealed, false) + if err != nil { + return nil, xerrors.Errorf("acquire unsealed sector path: %w", err) + } + defer doneUnsealed() + f, err := os.OpenFile(path.Unsealed, os.O_RDONLY, 0644) + if err == nil { + if _, err := f.Seek(int64(offset), io.SeekStart); err != nil { + return nil, xerrors.Errorf("seek: %w", err) + } + + lr := io.LimitReader(f, int64(size)) + + return &struct { + io.Reader + io.Closer + }{ + Reader: lr, + Closer: f, + }, nil + } + if !os.IsNotExist(err) { + return nil, err + } + + sealed, doneSealed, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed|stores.FTCache, 0, false) + if err != nil { + return nil, xerrors.Errorf("acquire sealed/cache sector path: %w", err) + } + defer doneSealed() + + // TODO: GC for those + // (Probably configurable count of sectors to be kept unsealed, and just + // remove last used one (or use whatever other cache policy makes sense)) + err = ffi.Unseal( + sb.sealProofType, + sealed.Cache, + sealed.Sealed, + path.Unsealed, + sector.Number, + sector.Miner, + ticket, + unsealedCID, + ) + if err != nil { + return nil, xerrors.Errorf("unseal failed: %w", err) + } + + f, err = os.OpenFile(string(path.Unsealed), os.O_RDONLY, 0644) + if err != nil { + return nil, err + } + + if _, err := f.Seek(int64(offset), io.SeekStart); err != nil { + return nil, xerrors.Errorf("seek: %w", err) + } + + lr := io.LimitReader(f, int64(size)) + + return &struct { + io.Reader + io.Closer + }{ + Reader: lr, + Closer: f, + }, nil +} + +func (sb *SectorBuilder) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) { + paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTSealed|stores.FTCache, true) + if err != nil { + return nil, xerrors.Errorf("acquiring sector paths: %w", err) + } + defer done() + + e, err := os.OpenFile(paths.Sealed, os.O_RDWR|os.O_CREATE, 0644) + if err != nil { + return nil, xerrors.Errorf("ensuring sealed file exists: %w", err) + } + if err := e.Close(); err != nil { + return nil, err + } + + if err := os.Mkdir(paths.Cache, 0755); err != nil { + if os.IsExist(err) { + log.Warnf("existing cache in %s; removing", paths.Cache) + + if err := os.RemoveAll(paths.Cache); err != nil { + return nil, xerrors.Errorf("remove existing sector cache from %s (sector %d): %w", paths.Cache, sector, err) + } + + if err := os.Mkdir(paths.Cache, 0755); err != nil { + return nil, xerrors.Errorf("mkdir cache path after cleanup: %w", err) + } + } else { + return nil, err + } + } + + var sum abi.UnpaddedPieceSize + for _, piece := range pieces { + sum += piece.Size.Unpadded() + } + ussize := abi.PaddedPieceSize(sb.ssize).Unpadded() + if sum != ussize { + return nil, xerrors.Errorf("aggregated piece sizes don't match sector size: %d != %d (%d)", sum, ussize, int64(ussize-sum)) + } + + // TODO: context cancellation respect + p1o, err := ffi.SealPreCommitPhase1( + sb.sealProofType, + paths.Cache, + paths.Unsealed, + paths.Sealed, + sector.Number, + sector.Miner, + ticket, + pieces, + ) + if err != nil { + return nil, xerrors.Errorf("presealing sector %d (%s): %w", sector.Number, paths.Unsealed, err) + } + return p1o, nil +} + +func (sb *SectorBuilder) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.PreCommit1Out) (storage.SectorCids, error) { + paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTSealed|stores.FTCache, 0, true) + if err != nil { + return storage.SectorCids{}, xerrors.Errorf("acquiring sector paths: %w", err) + } + defer done() + + sealedCID, unsealedCID, err := ffi.SealPreCommitPhase2(phase1Out, paths.Cache, paths.Sealed) + if err != nil { + return storage.SectorCids{}, xerrors.Errorf("presealing sector %d (%s): %w", sector.Number, paths.Unsealed, err) + } + + return storage.SectorCids{ + Unsealed: unsealedCID, + Sealed: sealedCID, + }, nil +} + +func (sb *SectorBuilder) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) { + paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTSealed|stores.FTCache, 0, true) + if err != nil { + return nil, xerrors.Errorf("acquire sector paths: %w", err) + } + defer done() + output, err := ffi.SealCommitPhase1( + sb.sealProofType, + cids.Sealed, + cids.Unsealed, + paths.Cache, + paths.Sealed, + sector.Number, + sector.Miner, + ticket, + seed, + pieces, + ) + if err != nil { + log.Warn("StandaloneSealCommit error: ", err) + log.Warnf("num:%d tkt:%v seed:%v, pi:%v sealedCID:%v, unsealedCID:%v", sector.Number, ticket, seed, pieces, cids.Sealed, cids.Unsealed) + + return nil, xerrors.Errorf("StandaloneSealCommit: %w", err) + } + return output, nil +} + +func (sb *SectorBuilder) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.Commit1Out) (storage.Proof, error) { + return ffi.SealCommitPhase2(phase1Out, sector.Number, sector.Miner) +} + +func (sb *SectorBuilder) GenerateFallbackPoSt(ctx context.Context, miner abi.ActorID, sectorInfo []abi.SectorInfo, challengeSeed abi.PoStRandomness, faults []abi.SectorNumber) (storage.FallbackPostOut, error) { + privsectors, err := sb.pubSectorToPriv(ctx, miner, sectorInfo, faults) + if err != nil { + return storage.FallbackPostOut{}, err + } + + challengeCount := fallbackPostChallengeCount(uint64(len(sectorInfo)), uint64(len(faults))) + challengeSeed[31] = 0 + + candidates, err := ffi.GenerateCandidates(miner, challengeSeed, challengeCount, privsectors) + if err != nil { + return storage.FallbackPostOut{}, err + } + + winners := make([]abi.PoStCandidate, len(candidates)) + for idx := range winners { + winners[idx] = candidates[idx].Candidate + } + + proof, err := ffi.GeneratePoSt(miner, privsectors, challengeSeed, winners) + return storage.FallbackPostOut{ + PoStInputs: ffiToStorageCandidates(candidates), + Proof: proof, + }, err +} + +func (sb *SectorBuilder) FinalizeSector(ctx context.Context, sector abi.SectorID) error { + paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTCache, 0, false) + if err != nil { + return xerrors.Errorf("acquiring sector cache path: %w", err) + } + defer done() + + return ffi.ClearCache(paths.Cache) +} + +func GeneratePieceCIDFromFile(proofType abi.RegisteredProof, piece io.Reader, pieceSize abi.UnpaddedPieceSize) (cid.Cid, error) { + f, werr, err := toReadableFile(piece, int64(pieceSize)) + if err != nil { + return cid.Undef, err + } + + pieceCID, err := ffi.GeneratePieceCIDFromFile(proofType, f, pieceSize) + if err != nil { + return cid.Undef, err + } + + return pieceCID, werr() +} + +func GenerateUnsealedCID(proofType abi.RegisteredProof, pieces []abi.PieceInfo) (cid.Cid, error) { + var sum abi.PaddedPieceSize + for _, p := range pieces { + sum += p.Size + } + + ssize, err := SectorSizeForRegisteredProof(proofType) + if err != nil { + return cid.Undef, err + } + + { + // pad remaining space with 0 CommPs + toFill := uint64(abi.PaddedPieceSize(ssize) - sum) + n := bits.OnesCount64(toFill) + for i := 0; i < n; i++ { + next := bits.TrailingZeros64(toFill) + psize := uint64(1) << uint(next) + toFill ^= psize + + unpadded := abi.PaddedPieceSize(psize).Unpadded() + pieces = append(pieces, abi.PieceInfo{ + Size: unpadded.Padded(), + PieceCID: zerocomm.ZeroPieceCommitment(unpadded), + }) + } + } + + return ffi.GenerateUnsealedCID(proofType, pieces) +} diff --git a/ffiwrapper/sealer_test.go b/ffiwrapper/sealer_test.go new file mode 100644 index 000000000..78d4a4898 --- /dev/null +++ b/ffiwrapper/sealer_test.go @@ -0,0 +1,356 @@ +package ffiwrapper + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "math/rand" + "os" + "runtime" + "sync" + "testing" + "time" + + logging "github.com/ipfs/go-log" + "golang.org/x/xerrors" + + paramfetch "github.com/filecoin-project/go-paramfetch" + "github.com/filecoin-project/specs-actors/actors/abi" + "github.com/filecoin-project/specs-storage/storage" + + "github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper/basicfs" +) + +func init() { + logging.SetLogLevel("*", "INFO") //nolint: errcheck +} + +var sectorSize = abi.SectorSize(2048) +var sealProofType = abi.RegisteredProof_StackedDRG2KiBSeal +var postProofType = abi.RegisteredProof_StackedDRG2KiBPoSt + +type seal struct { + id abi.SectorID + cids storage.SectorCids + pi abi.PieceInfo + ticket abi.SealRandomness +} + +func (s *seal) precommit(t *testing.T, sb *SectorBuilder, id abi.SectorID, done func()) { + defer done() + dlen := abi.PaddedPieceSize(sectorSize).Unpadded() + + var err error + r := io.LimitReader(rand.New(rand.NewSource(42+int64(id.Number))), int64(dlen)) + s.pi, err = sb.AddPiece(context.TODO(), id, []abi.UnpaddedPieceSize{}, dlen, r) + if err != nil { + t.Fatalf("%+v", err) + } + + s.ticket = abi.SealRandomness{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2} + + p1, err := sb.SealPreCommit1(context.TODO(), id, s.ticket, []abi.PieceInfo{s.pi}) + if err != nil { + t.Fatalf("%+v", err) + } + cids, err := sb.SealPreCommit2(context.TODO(), id, p1) + if err != nil { + t.Fatalf("%+v", err) + } + s.cids = cids +} + +func (s *seal) commit(t *testing.T, sb *SectorBuilder, done func()) { + defer done() + seed := abi.InteractiveSealRandomness{0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9, 8, 7, 6, 45, 3, 2, 1, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9} + + pc1, err := sb.SealCommit1(context.TODO(), s.id, s.ticket, seed, []abi.PieceInfo{s.pi}, s.cids) + if err != nil { + t.Fatalf("%+v", err) + } + proof, err := sb.SealCommit2(context.TODO(), s.id, pc1) + if err != nil { + t.Fatalf("%+v", err) + } + + ok, err := ProofVerifier.VerifySeal(abi.SealVerifyInfo{ + SectorID: s.id, + OnChain: abi.OnChainSealVerifyInfo{ + SealedCID: s.cids.Sealed, + RegisteredProof: sealProofType, + Proof: proof, + SectorNumber: s.id.Number, + }, + Randomness: s.ticket, + InteractiveRandomness: seed, + UnsealedCID: s.cids.Unsealed, + }) + if err != nil { + t.Fatalf("%+v", err) + } + + if !ok { + t.Fatal("proof failed to validate") + } +} + +func post(t *testing.T, sb *SectorBuilder, seals ...seal) time.Time { + randomness := abi.PoStRandomness{0, 9, 2, 7, 6, 5, 4, 3, 2, 1, 0, 9, 8, 7, 6, 45, 3, 2, 1, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9, 7} + + sis := make([]abi.SectorInfo, len(seals)) + for i, s := range seals { + sis[i] = abi.SectorInfo{ + RegisteredProof: sealProofType, + SectorNumber: s.id.Number, + SealedCID: s.cids.Sealed, + } + } + + candidates, err := sb.GenerateEPostCandidates(context.TODO(), seals[0].id.Miner, sis, randomness, []abi.SectorNumber{}) + if err != nil { + t.Fatalf("%+v", err) + } + + genCandidates := time.Now() + + if len(candidates) != 1 { + t.Fatal("expected 1 candidate") + } + + candidatesPrime := make([]abi.PoStCandidate, len(candidates)) + for idx := range candidatesPrime { + candidatesPrime[idx] = candidates[idx].Candidate + } + + proofs, err := sb.ComputeElectionPoSt(context.TODO(), seals[0].id.Miner, sis, randomness, candidatesPrime) + if err != nil { + t.Fatalf("%+v", err) + } + + ePoStChallengeCount := ElectionPostChallengeCount(uint64(len(sis)), 0) + + ok, err := ProofVerifier.VerifyElectionPost(context.TODO(), abi.PoStVerifyInfo{ + Randomness: randomness, + Candidates: candidatesPrime, + Proofs: proofs, + EligibleSectors: sis, + Prover: seals[0].id.Miner, + ChallengeCount: ePoStChallengeCount, + }) + if err != nil { + t.Fatalf("%+v", err) + } + if !ok { + t.Fatal("bad post") + } + + return genCandidates +} + +func getGrothParamFileAndVerifyingKeys(s abi.SectorSize) { + dat, err := ioutil.ReadFile("./parameters.json") + if err != nil { + panic(xerrors.Errorf("failed to read contents of ./parameters.json: %w", err)) + } + + err = paramfetch.GetParams(dat, uint64(s)) + if err != nil { + panic(xerrors.Errorf("failed to acquire Groth parameters for 2KiB sectors: %w", err)) + } +} + +// TestDownloadParams exists only so that developers and CI can pre-download +// Groth parameters and verifying keys before running the tests which rely on +// those parameters and keys. To do this, run the following command: +// +// go test -run=^TestDownloadParams +// +func TestDownloadParams(t *testing.T) { + getGrothParamFileAndVerifyingKeys(sectorSize) +} + +func TestSealAndVerify(t *testing.T) { + if runtime.NumCPU() < 10 && os.Getenv("CI") == "" { // don't bother on slow hardware + t.Skip("this is slow") + } + _ = os.Setenv("RUST_LOG", "info") + + getGrothParamFileAndVerifyingKeys(sectorSize) + + cdir, err := ioutil.TempDir("", "sbtest-c-") + if err != nil { + t.Fatal(err) + } + miner := abi.ActorID(123) + + cfg := &Config{ + SealProofType: sealProofType, + PoStProofType: postProofType, + } + + sp := &basicfs.Provider{ + Root: cdir, + } + sb, err := New(sp, cfg) + if err != nil { + t.Fatalf("%+v", err) + } + cleanup := func() { + if t.Failed() { + fmt.Printf("not removing %s\n", cdir) + return + } + if err := os.RemoveAll(cdir); err != nil { + t.Error(err) + } + } + defer cleanup() + + si := abi.SectorID{Miner: miner, Number: 1} + + s := seal{id: si} + + start := time.Now() + + s.precommit(t, sb, si, func() {}) + + precommit := time.Now() + + s.commit(t, sb, func() {}) + + commit := time.Now() + + genCandidiates := post(t, sb, s) + + epost := time.Now() + + post(t, sb, s) + + if err := sb.FinalizeSector(context.TODO(), si); err != nil { + t.Fatalf("%+v", err) + } + + fmt.Printf("PreCommit: %s\n", precommit.Sub(start).String()) + fmt.Printf("Commit: %s\n", commit.Sub(precommit).String()) + fmt.Printf("GenCandidates: %s\n", genCandidiates.Sub(commit).String()) + fmt.Printf("EPoSt: %s\n", epost.Sub(genCandidiates).String()) +} + +func TestSealPoStNoCommit(t *testing.T) { + if runtime.NumCPU() < 10 && os.Getenv("CI") == "" { // don't bother on slow hardware + t.Skip("this is slow") + } + _ = os.Setenv("RUST_LOG", "info") + + getGrothParamFileAndVerifyingKeys(sectorSize) + + dir, err := ioutil.TempDir("", "sbtest") + if err != nil { + t.Fatal(err) + } + + miner := abi.ActorID(123) + + cfg := &Config{ + SealProofType: sealProofType, + PoStProofType: postProofType, + } + sp := &basicfs.Provider{ + Root: dir, + } + sb, err := New(sp, cfg) + if err != nil { + t.Fatalf("%+v", err) + } + + cleanup := func() { + if t.Failed() { + fmt.Printf("not removing %s\n", dir) + return + } + if err := os.RemoveAll(dir); err != nil { + t.Error(err) + } + } + defer cleanup() + + si := abi.SectorID{Miner: miner, Number: 1} + + s := seal{id: si} + + start := time.Now() + + s.precommit(t, sb, si, func() {}) + + precommit := time.Now() + + if err := sb.FinalizeSector(context.TODO(), si); err != nil { + t.Fatal(err) + } + + genCandidiates := post(t, sb, s) + + epost := time.Now() + + fmt.Printf("PreCommit: %s\n", precommit.Sub(start).String()) + fmt.Printf("GenCandidates: %s\n", genCandidiates.Sub(precommit).String()) + fmt.Printf("EPoSt: %s\n", epost.Sub(genCandidiates).String()) +} + +func TestSealAndVerify2(t *testing.T) { + if runtime.NumCPU() < 10 && os.Getenv("CI") == "" { // don't bother on slow hardware + t.Skip("this is slow") + } + _ = os.Setenv("RUST_LOG", "trace") + + getGrothParamFileAndVerifyingKeys(sectorSize) + + dir, err := ioutil.TempDir("", "sbtest") + if err != nil { + t.Fatal(err) + } + + miner := abi.ActorID(123) + + cfg := &Config{ + SealProofType: sealProofType, + PoStProofType: postProofType, + } + sp := &basicfs.Provider{ + Root: dir, + } + sb, err := New(sp, cfg) + if err != nil { + t.Fatalf("%+v", err) + } + + cleanup := func() { + if err := os.RemoveAll(dir); err != nil { + t.Error(err) + } + } + + defer cleanup() + + var wg sync.WaitGroup + + si1 := abi.SectorID{Miner: miner, Number: 1} + si2 := abi.SectorID{Miner: miner, Number: 2} + + s1 := seal{id: si1} + s2 := seal{id: si2} + + wg.Add(2) + go s1.precommit(t, sb, si1, wg.Done) //nolint: staticcheck + time.Sleep(100 * time.Millisecond) + go s2.precommit(t, sb, si2, wg.Done) //nolint: staticcheck + wg.Wait() + + wg.Add(2) + go s1.commit(t, sb, wg.Done) //nolint: staticcheck + go s2.commit(t, sb, wg.Done) //nolint: staticcheck + wg.Wait() + + post(t, sb, s1, s2) +} diff --git a/ffiwrapper/types.go b/ffiwrapper/types.go new file mode 100644 index 000000000..ea113fbbb --- /dev/null +++ b/ffiwrapper/types.go @@ -0,0 +1,49 @@ +package ffiwrapper + +import ( + "context" + "errors" + "github.com/ipfs/go-cid" + "io" + + "github.com/filecoin-project/specs-actors/actors/abi" + "github.com/filecoin-project/specs-storage/storage" + + "github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper/basicfs" + "github.com/filecoin-project/lotus/storage/sectorstorage/stores" +) + +type UnpaddedByteIndex uint64 + +type Validator interface { + CanCommit(sector stores.SectorPaths) (bool, error) + CanProve(sector stores.SectorPaths) (bool, error) +} + +type Sealer interface { + storage.Sealer + storage.Storage +} + +type Basic interface { + storage.Prover + Sealer + + ReadPieceFromSealedSector(context.Context, abi.SectorID, UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (io.ReadCloser, error) +} + +type Verifier interface { + VerifySeal(abi.SealVerifyInfo) (bool, error) + VerifyElectionPost(ctx context.Context, info abi.PoStVerifyInfo) (bool, error) + VerifyFallbackPost(ctx context.Context, info abi.PoStVerifyInfo) (bool, error) +} + +var ErrSectorNotFound = errors.New("sector not found") + +type SectorProvider interface { + // * returns ErrSectorNotFound if a requested existing sector doesn't exist + // * returns an error when allocate is set, and existing isn't, and the sector exists + AcquireSector(ctx context.Context, id abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, sealing bool) (stores.SectorPaths, func(), error) +} + +var _ SectorProvider = &basicfs.Provider{} diff --git a/ffiwrapper/varifier_cgo.go b/ffiwrapper/varifier_cgo.go new file mode 100644 index 000000000..6c01470ce --- /dev/null +++ b/ffiwrapper/varifier_cgo.go @@ -0,0 +1,80 @@ +//+build cgo + +package ffiwrapper + +import ( + "context" + + "go.opencensus.io/trace" + + ffi "github.com/filecoin-project/filecoin-ffi" + "github.com/filecoin-project/specs-actors/actors/abi" + "github.com/filecoin-project/specs-storage/storage" +) + +func (sb *SectorBuilder) ComputeElectionPoSt(ctx context.Context, miner abi.ActorID, sectorInfo []abi.SectorInfo, challengeSeed abi.PoStRandomness, winners []abi.PoStCandidate) ([]abi.PoStProof, error) { + challengeSeed[31] = 0 + + privsects, err := sb.pubSectorToPriv(ctx, miner, sectorInfo, nil) // TODO: faults + if err != nil { + return nil, err + } + + return ffi.GeneratePoSt(miner, privsects, challengeSeed, winners) +} + +func (sb *SectorBuilder) GenerateEPostCandidates(ctx context.Context, miner abi.ActorID, sectorInfo []abi.SectorInfo, challengeSeed abi.PoStRandomness, faults []abi.SectorNumber) ([]storage.PoStCandidateWithTicket, error) { + privsectors, err := sb.pubSectorToPriv(ctx, miner, sectorInfo, faults) + if err != nil { + return nil, err + } + + challengeSeed[31] = 0 + + challengeCount := ElectionPostChallengeCount(uint64(len(sectorInfo)), uint64(len(faults))) + pc, err := ffi.GenerateCandidates(miner, challengeSeed, challengeCount, privsectors) + if err != nil { + return nil, err + } + + return ffiToStorageCandidates(pc), nil +} + +func ffiToStorageCandidates(pc []ffi.PoStCandidateWithTicket) []storage.PoStCandidateWithTicket { + out := make([]storage.PoStCandidateWithTicket, len(pc)) + for i := range out { + out[i] = storage.PoStCandidateWithTicket{ + Candidate: pc[i].Candidate, + Ticket: pc[i].Ticket, + } + } + + return out +} + +var _ Verifier = ProofVerifier + +type proofVerifier struct{} + +var ProofVerifier = proofVerifier{} + +func (proofVerifier) VerifySeal(info abi.SealVerifyInfo) (bool, error) { + return ffi.VerifySeal(info) +} + +func (proofVerifier) VerifyElectionPost(ctx context.Context, info abi.PoStVerifyInfo) (bool, error) { + return verifyPost(ctx, info) +} + +func (proofVerifier) VerifyFallbackPost(ctx context.Context, info abi.PoStVerifyInfo) (bool, error) { + return verifyPost(ctx, info) +} + +func verifyPost(ctx context.Context, info abi.PoStVerifyInfo) (bool, error) { + _, span := trace.StartSpan(ctx, "VerifyPoSt") + defer span.End() + + info.Randomness[31] = 0 + + return ffi.VerifyPoSt(info) +} diff --git a/manager.go b/manager.go index 6f4bd58e2..43438385f 100644 --- a/manager.go +++ b/manager.go @@ -4,6 +4,7 @@ import ( "container/list" "context" "errors" + "github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper" "io" "net/http" "sync" @@ -13,7 +14,6 @@ import ( "github.com/mitchellh/go-homedir" "golang.org/x/xerrors" - "github.com/filecoin-project/go-sectorbuilder" "github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-storage/storage" @@ -30,7 +30,7 @@ var ErrNoWorkers = errors.New("no suitable workers found") type URLs []string type Worker interface { - sectorbuilder.Sealer + ffiwrapper.Sealer TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) @@ -45,16 +45,16 @@ type Worker interface { type SectorManager interface { SectorSize() abi.SectorSize - ReadPieceFromSealedSector(context.Context, abi.SectorID, sectorbuilder.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (io.ReadCloser, error) + ReadPieceFromSealedSector(context.Context, abi.SectorID, ffiwrapper.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (io.ReadCloser, error) - sectorbuilder.Sealer + ffiwrapper.Sealer storage.Prover } type WorkerID uint64 type Manager struct { - scfg *sectorbuilder.Config + scfg *ffiwrapper.Config ls stores.LocalStorage storage *stores.Remote @@ -76,13 +76,13 @@ type Manager struct { schedQueue *list.List // List[*workerRequest] } -func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg *sectorbuilder.Config, sc config.Storage, urls URLs, ca api.Common) (*Manager, error) { +func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg *ffiwrapper.Config, sc config.Storage, urls URLs, ca api.Common) (*Manager, error) { lstor, err := stores.NewLocal(ctx, ls, si, urls) if err != nil { return nil, err } - prover, err := sectorbuilder.New(&readonlyProvider{stor: lstor}, cfg) + prover, err := ffiwrapper.New(&readonlyProvider{stor: lstor}, cfg) if err != nil { return nil, xerrors.Errorf("creating prover instance: %w", err) } @@ -180,7 +180,7 @@ func (m *Manager) SectorSize() abi.SectorSize { return sz } -func (m *Manager) ReadPieceFromSealedSector(context.Context, abi.SectorID, sectorbuilder.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (io.ReadCloser, error) { +func (m *Manager) ReadPieceFromSealedSector(context.Context, abi.SectorID, ffiwrapper.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (io.ReadCloser, error) { panic("implement me") } @@ -273,9 +273,9 @@ func (m *Manager) AddPiece(ctx context.Context, sector abi.SectorID, existingPie var best []stores.StorageInfo var err error if len(existingPieces) == 0 { // new - best, err = m.index.StorageBestAlloc(ctx, sectorbuilder.FTUnsealed, true) + best, err = m.index.StorageBestAlloc(ctx, stores.FTUnsealed, true) } else { // append to existing - best, err = m.index.StorageFindSector(ctx, sector, sectorbuilder.FTUnsealed, false) + best, err = m.index.StorageFindSector(ctx, sector, stores.FTUnsealed, false) } if err != nil { return abi.PieceInfo{}, xerrors.Errorf("finding sector path: %w", err) @@ -302,7 +302,7 @@ func (m *Manager) AddPiece(ctx context.Context, sector abi.SectorID, existingPie func (m *Manager) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) { // TODO: also consider where the unsealed data sits - best, err := m.index.StorageBestAlloc(ctx, sectorbuilder.FTCache|sectorbuilder.FTSealed, true) + best, err := m.index.StorageBestAlloc(ctx, stores.FTCache|stores.FTSealed, true) if err != nil { return nil, xerrors.Errorf("finding path for sector sealing: %w", err) } @@ -326,7 +326,7 @@ func (m *Manager) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke func (m *Manager) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.PreCommit1Out) (cids storage.SectorCids, err error) { // TODO: allow workers to fetch the sectors - best, err := m.index.StorageFindSector(ctx, sector, sectorbuilder.FTCache|sectorbuilder.FTSealed, true) + best, err := m.index.StorageFindSector(ctx, sector, stores.FTCache|stores.FTSealed, true) if err != nil { return storage.SectorCids{}, xerrors.Errorf("finding path for sector sealing: %w", err) } @@ -348,7 +348,7 @@ func (m *Manager) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase } func (m *Manager) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (output storage.Commit1Out, err error) { - best, err := m.index.StorageFindSector(ctx, sector, sectorbuilder.FTCache|sectorbuilder.FTSealed, true) + best, err := m.index.StorageFindSector(ctx, sector, stores.FTCache|stores.FTSealed, true) if err != nil { return nil, xerrors.Errorf("finding path for sector sealing: %w", err) } @@ -400,7 +400,7 @@ func (m *Manager) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Ou } func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID) error { - best, err := m.index.StorageFindSector(ctx, sector, sectorbuilder.FTCache|sectorbuilder.FTSealed|sectorbuilder.FTUnsealed, true) + best, err := m.index.StorageFindSector(ctx, sector, stores.FTCache|stores.FTSealed|stores.FTUnsealed, true) if err != nil { return xerrors.Errorf("finding sealed sector: %w", err) } diff --git a/mock/mock.go b/mock/mock.go index ca50ed189..e86d2b3e7 100644 --- a/mock/mock.go +++ b/mock/mock.go @@ -5,6 +5,7 @@ import ( "context" "fmt" "github.com/filecoin-project/lotus/storage/sectorstorage" + "github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper" "io" "io/ioutil" "math/big" @@ -12,7 +13,6 @@ import ( "sync" commcid "github.com/filecoin-project/go-fil-commcid" - "github.com/filecoin-project/go-sectorbuilder" "github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-storage/storage" "github.com/ipfs/go-cid" @@ -93,7 +93,7 @@ func (sb *SectorMgr) AddPiece(ctx context.Context, sectorId abi.SectorID, existi ss.lk.Lock() defer ss.lk.Unlock() - c, err := sectorbuilder.GeneratePieceCIDFromFile(sb.proofType, r, size) + c, err := ffiwrapper.GeneratePieceCIDFromFile(sb.proofType, r, size) if err != nil { return abi.PieceInfo{}, xerrors.Errorf("failed to generate piece cid: %w", err) } @@ -273,7 +273,7 @@ func (sb *SectorMgr) GenerateEPostCandidates(ctx context.Context, mid abi.ActorI panic("todo") } - n := sectorbuilder.ElectionPostChallengeCount(uint64(len(sectorInfo)), uint64(len(faults))) + n := ffiwrapper.ElectionPostChallengeCount(uint64(len(sectorInfo)), uint64(len(faults))) if n > uint64(len(sectorInfo)) { n = uint64(len(sectorInfo)) } @@ -298,7 +298,7 @@ func (sb *SectorMgr) GenerateEPostCandidates(ctx context.Context, mid abi.ActorI return out, nil } -func (sb *SectorMgr) ReadPieceFromSealedSector(ctx context.Context, sectorID abi.SectorID, offset sectorbuilder.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, commD cid.Cid) (io.ReadCloser, error) { +func (sb *SectorMgr) ReadPieceFromSealedSector(ctx context.Context, sectorID abi.SectorID, offset ffiwrapper.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, commD cid.Cid) (io.ReadCloser, error) { if len(sb.sectors[sectorID].pieces) > 1 { panic("implme") } @@ -355,10 +355,10 @@ func (m mockVerif) VerifySeal(svi abi.SealVerifyInfo) (bool, error) { } func (m mockVerif) GenerateDataCommitment(pt abi.RegisteredProof, pieces []abi.PieceInfo) (cid.Cid, error) { - return sectorbuilder.GenerateUnsealedCID(pt, pieces) + return ffiwrapper.GenerateUnsealedCID(pt, pieces) } var MockVerifier = mockVerif{} -var _ sectorbuilder.Verifier = MockVerifier +var _ ffiwrapper.Verifier = MockVerifier var _ sectorstorage.SectorManager = &SectorMgr{} diff --git a/mock/preseal.go b/mock/preseal.go index 6bac0aaea..20a4377cd 100644 --- a/mock/preseal.go +++ b/mock/preseal.go @@ -3,7 +3,7 @@ package mock import ( "github.com/filecoin-project/go-address" commcid "github.com/filecoin-project/go-fil-commcid" - "github.com/filecoin-project/go-sectorbuilder" + "github.com/filecoin-project/lotus/storage/sectorstorage/zerocomm" "github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-actors/actors/abi/big" "github.com/filecoin-project/specs-actors/actors/builtin/market" @@ -39,7 +39,7 @@ func PreSeal(ssize abi.SectorSize, maddr address.Address, sectors int) (*genesis preseal := &genesis.PreSeal{} preseal.ProofType = st - preseal.CommD = sectorbuilder.ZeroPieceCommitment(abi.PaddedPieceSize(ssize).Unpadded()) + preseal.CommD = zerocomm.ZeroPieceCommitment(abi.PaddedPieceSize(ssize).Unpadded()) d, _ := commcid.CIDToPieceCommitmentV1(preseal.CommD) r := commDR(d) preseal.CommR = commcid.ReplicaCommitmentV1ToCID(r[:]) diff --git a/resources.go b/resources.go index ebb32f165..4aafb5962 100644 --- a/resources.go +++ b/resources.go @@ -1,21 +1,21 @@ package sectorstorage import ( - "github.com/filecoin-project/go-sectorbuilder" "github.com/filecoin-project/lotus/storage/sectorstorage/sealtasks" + "github.com/filecoin-project/lotus/storage/sectorstorage/stores" "github.com/filecoin-project/specs-actors/actors/abi" ) -var FSOverheadSeal = map[sectorbuilder.SectorFileType]int{ // 10x overheads - sectorbuilder.FTUnsealed: 10, - sectorbuilder.FTSealed: 10, - sectorbuilder.FTCache: 70, // TODO: confirm for 32G +var FSOverheadSeal = map[stores.SectorFileType]int{ // 10x overheads + stores.FTUnsealed: 10, + stores.FTSealed: 10, + stores.FTCache: 70, // TODO: confirm for 32G } -var FsOverheadFinalized = map[sectorbuilder.SectorFileType]int{ - sectorbuilder.FTUnsealed: 10, - sectorbuilder.FTSealed: 10, - sectorbuilder.FTCache: 2, +var FsOverheadFinalized = map[stores.SectorFileType]int{ + stores.FTUnsealed: 10, + stores.FTSealed: 10, + stores.FTCache: 2, } type Resources struct { diff --git a/roprov.go b/roprov.go index 8355500b5..dfab863ff 100644 --- a/roprov.go +++ b/roprov.go @@ -3,7 +3,6 @@ package sectorstorage import ( "context" - "github.com/filecoin-project/go-sectorbuilder" "github.com/filecoin-project/lotus/storage/sectorstorage/stores" "github.com/filecoin-project/specs-actors/actors/abi" @@ -14,9 +13,9 @@ type readonlyProvider struct { stor *stores.Local } -func (l *readonlyProvider) AcquireSector(ctx context.Context, id abi.SectorID, existing sectorbuilder.SectorFileType, allocate sectorbuilder.SectorFileType, sealing bool) (sectorbuilder.SectorPaths, func(), error) { +func (l *readonlyProvider) AcquireSector(ctx context.Context, id abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, sealing bool) (stores.SectorPaths, func(), error) { if allocate != stores.FTNone { - return sectorbuilder.SectorPaths{}, nil, xerrors.New("read-only storage") + return stores.SectorPaths{}, nil, xerrors.New("read-only storage") } p, _, done, err := l.stor.AcquireSector(ctx, id, existing, allocate, sealing) diff --git a/sectorutil/utils.go b/sectorutil/utils.go deleted file mode 100644 index ede59410b..000000000 --- a/sectorutil/utils.go +++ /dev/null @@ -1,56 +0,0 @@ -package sectorutil - -import ( - "fmt" - "github.com/filecoin-project/go-sectorbuilder" - - "golang.org/x/xerrors" - - "github.com/filecoin-project/specs-actors/actors/abi" -) - -func ParseSectorID(baseName string) (abi.SectorID, error) { - var n abi.SectorNumber - var mid abi.ActorID - read, err := fmt.Sscanf(baseName, "s-t0%d-%d", &mid, &n) - if err != nil { - return abi.SectorID{}, xerrors.Errorf("sscanf sector name ('%s'): %w", baseName, err) - } - - if read != 2 { - return abi.SectorID{}, xerrors.Errorf("parseSectorID expected to scan 2 values, got %d", read) - } - - return abi.SectorID{ - Miner: mid, - Number: n, - }, nil -} - -func SectorName(sid abi.SectorID) string { - return fmt.Sprintf("s-t0%d-%d", sid.Miner, sid.Number) -} - -func PathByType(sps sectorbuilder.SectorPaths, fileType sectorbuilder.SectorFileType) string { - switch fileType { - case sectorbuilder.FTUnsealed: - return sps.Unsealed - case sectorbuilder.FTSealed: - return sps.Sealed - case sectorbuilder.FTCache: - return sps.Cache - } - - panic("requested unknown path type") -} - -func SetPathByType(sps *sectorbuilder.SectorPaths, fileType sectorbuilder.SectorFileType, p string) { - switch fileType { - case sectorbuilder.FTUnsealed: - sps.Unsealed = p - case sectorbuilder.FTSealed: - sps.Sealed = p - case sectorbuilder.FTCache: - sps.Cache = p - } -} diff --git a/stores/filetype.go b/stores/filetype.go index e85db1e53..ddd8cf45e 100644 --- a/stores/filetype.go +++ b/stores/filetype.go @@ -1,8 +1,87 @@ package stores -import "github.com/filecoin-project/go-sectorbuilder" +import ( + "fmt" + "golang.org/x/xerrors" + + "github.com/filecoin-project/specs-actors/actors/abi" +) const ( - // TODO: move the other types here after we drop go-sectorbuilder - FTNone sectorbuilder.SectorFileType = 0 + FTUnsealed SectorFileType = 1 << iota + FTSealed + FTCache ) + +const ( + FTNone SectorFileType = 0 +) + +type SectorFileType int + +func (t SectorFileType) String() string { + switch t { + case FTUnsealed: + return "unsealed" + case FTSealed: + return "sealed" + case FTCache: + return "cache" + default: + return fmt.Sprintf("", t) + } +} + +type SectorPaths struct { + Id abi.SectorID + + Unsealed string + Sealed string + Cache string +} + +func ParseSectorID(baseName string) (abi.SectorID, error) { + var n abi.SectorNumber + var mid abi.ActorID + read, err := fmt.Sscanf(baseName, "s-t0%d-%d", &mid, &n) + if err != nil { + return abi.SectorID{}, xerrors.Errorf("sscanf sector name ('%s'): %w", baseName, err) + } + + if read != 2 { + return abi.SectorID{}, xerrors.Errorf("parseSectorID expected to scan 2 values, got %d", read) + } + + return abi.SectorID{ + Miner: mid, + Number: n, + }, nil +} + +func SectorName(sid abi.SectorID) string { + return fmt.Sprintf("s-t0%d-%d", sid.Miner, sid.Number) +} + +func PathByType(sps SectorPaths, fileType SectorFileType) string { + switch fileType { + case FTUnsealed: + return sps.Unsealed + case FTSealed: + return sps.Sealed + case FTCache: + return sps.Cache + } + + panic("requested unknown path type") +} + +func SetPathByType(sps *SectorPaths, fileType SectorFileType, p string) { + switch fileType { + case FTUnsealed: + sps.Unsealed = p + case FTSealed: + sps.Sealed = p + case FTCache: + sps.Cache = p + } +} diff --git a/stores/http_handler.go b/stores/http_handler.go index bbc9b2b04..21903494b 100644 --- a/stores/http_handler.go +++ b/stores/http_handler.go @@ -10,9 +10,7 @@ import ( logging "github.com/ipfs/go-log/v2" "golang.org/x/xerrors" - "github.com/filecoin-project/go-sectorbuilder" "github.com/filecoin-project/lotus/lib/tarutil" - "github.com/filecoin-project/lotus/storage/sectorstorage/sectorutil" ) var log = logging.Logger("stores") @@ -57,7 +55,7 @@ func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Requ log.Infof("SERVE GET %s", r.URL) vars := mux.Vars(r) - id, err := sectorutil.ParseSectorID(vars["id"]) + id, err := ParseSectorID(vars["id"]) if err != nil { log.Error("%+v", err) w.WriteHeader(500) @@ -78,7 +76,7 @@ func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Requ } defer done() - path := sectorutil.PathByType(paths, ft) + path := PathByType(paths, ft) if path == "" { log.Error("acquired path was empty") w.WriteHeader(500) @@ -117,7 +115,7 @@ func (handler *FetchHandler) remoteDeleteSector(w http.ResponseWriter, r *http.R log.Infof("SERVE DELETE %s", r.URL) vars := mux.Vars(r) - id, err := sectorutil.ParseSectorID(vars["id"]) + id, err := ParseSectorID(vars["id"]) if err != nil { log.Error("%+v", err) w.WriteHeader(500) @@ -138,14 +136,14 @@ func (handler *FetchHandler) remoteDeleteSector(w http.ResponseWriter, r *http.R } } -func ftFromString(t string) (sectorbuilder.SectorFileType, error) { +func ftFromString(t string) (SectorFileType, error) { switch t { - case sectorbuilder.FTUnsealed.String(): - return sectorbuilder.FTUnsealed, nil - case sectorbuilder.FTSealed.String(): - return sectorbuilder.FTSealed, nil - case sectorbuilder.FTCache.String(): - return sectorbuilder.FTCache, nil + case FTUnsealed.String(): + return FTUnsealed, nil + case FTSealed.String(): + return FTSealed, nil + case FTCache.String(): + return FTCache, nil default: return 0, xerrors.Errorf("unknown sector file type: '%s'", t) } diff --git a/stores/index.go b/stores/index.go index ccad8ba7f..5e2fb81fb 100644 --- a/stores/index.go +++ b/stores/index.go @@ -9,11 +9,8 @@ import ( "golang.org/x/xerrors" - "github.com/filecoin-project/go-sectorbuilder" "github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-actors/actors/abi/big" - - "github.com/filecoin-project/lotus/storage/sectorstorage/sectorutil" ) // ID identifies sector storage by UUID. One sector storage should map to one @@ -34,16 +31,16 @@ type SectorIndex interface { // part of storage-miner api StorageInfo(context.Context, ID) (StorageInfo, error) // TODO: StorageUpdateStats(FsStat) - StorageDeclareSector(ctx context.Context, storageId ID, s abi.SectorID, ft sectorbuilder.SectorFileType) error - StorageDropSector(ctx context.Context, storageId ID, s abi.SectorID, ft sectorbuilder.SectorFileType) error - StorageFindSector(ctx context.Context, sector abi.SectorID, ft sectorbuilder.SectorFileType, allowFetch bool) ([]StorageInfo, error) + StorageDeclareSector(ctx context.Context, storageId ID, s abi.SectorID, ft SectorFileType) error + StorageDropSector(ctx context.Context, storageId ID, s abi.SectorID, ft SectorFileType) error + StorageFindSector(ctx context.Context, sector abi.SectorID, ft SectorFileType, allowFetch bool) ([]StorageInfo, error) - StorageBestAlloc(ctx context.Context, allocate sectorbuilder.SectorFileType, sealing bool) ([]StorageInfo, error) + StorageBestAlloc(ctx context.Context, allocate SectorFileType, sealing bool) ([]StorageInfo, error) } type Decl struct { abi.SectorID - sectorbuilder.SectorFileType + SectorFileType } type storageEntry struct { @@ -66,10 +63,10 @@ func NewIndex() *Index { } func (i *Index) StorageList(ctx context.Context) (map[ID][]Decl, error) { - byID := map[ID]map[abi.SectorID]sectorbuilder.SectorFileType{} + byID := map[ID]map[abi.SectorID]SectorFileType{} for id := range i.stores { - byID[id] = map[abi.SectorID]sectorbuilder.SectorFileType{} + byID[id] = map[abi.SectorID]SectorFileType{} } for decl, ids := range i.sectors { for _, id := range ids { @@ -124,7 +121,7 @@ func (i *Index) StorageAttach(ctx context.Context, si StorageInfo, st FsStat) er return nil } -func (i *Index) StorageDeclareSector(ctx context.Context, storageId ID, s abi.SectorID, ft sectorbuilder.SectorFileType) error { +func (i *Index) StorageDeclareSector(ctx context.Context, storageId ID, s abi.SectorID, ft SectorFileType) error { i.lk.Lock() defer i.lk.Unlock() @@ -148,7 +145,7 @@ func (i *Index) StorageDeclareSector(ctx context.Context, storageId ID, s abi.Se return nil } -func (i *Index) StorageDropSector(ctx context.Context, storageId ID, s abi.SectorID, ft sectorbuilder.SectorFileType) error { +func (i *Index) StorageDropSector(ctx context.Context, storageId ID, s abi.SectorID, ft SectorFileType) error { i.lk.Lock() defer i.lk.Unlock() @@ -182,7 +179,7 @@ func (i *Index) StorageDropSector(ctx context.Context, storageId ID, s abi.Secto return nil } -func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft sectorbuilder.SectorFileType, allowFetch bool) ([]StorageInfo, error) { +func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft SectorFileType, allowFetch bool) ([]StorageInfo, error) { i.lk.RLock() defer i.lk.RUnlock() @@ -214,7 +211,7 @@ func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft sector return nil, xerrors.Errorf("failed to parse url: %w", err) } - rl.Path = gopath.Join(rl.Path, ft.String(), sectorutil.SectorName(s)) + rl.Path = gopath.Join(rl.Path, ft.String(), SectorName(s)) urls[k] = rl.String() } @@ -240,7 +237,7 @@ func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft sector return nil, xerrors.Errorf("failed to parse url: %w", err) } - rl.Path = gopath.Join(rl.Path, ft.String(), sectorutil.SectorName(s)) + rl.Path = gopath.Join(rl.Path, ft.String(), SectorName(s)) urls[k] = rl.String() } @@ -269,7 +266,7 @@ func (i *Index) StorageInfo(ctx context.Context, id ID) (StorageInfo, error) { return *si.info, nil } -func (i *Index) StorageBestAlloc(ctx context.Context, allocate sectorbuilder.SectorFileType, sealing bool) ([]StorageInfo, error) { +func (i *Index) StorageBestAlloc(ctx context.Context, allocate SectorFileType, sealing bool) ([]StorageInfo, error) { i.lk.RLock() defer i.lk.RUnlock() @@ -309,7 +306,7 @@ func (i *Index) StorageBestAlloc(ctx context.Context, allocate sectorbuilder.Sec return out, nil } -func (i *Index) FindSector(id abi.SectorID, typ sectorbuilder.SectorFileType) ([]ID, error) { +func (i *Index) FindSector(id abi.SectorID, typ SectorFileType) ([]ID, error) { i.lk.RLock() defer i.lk.RUnlock() diff --git a/stores/interface.go b/stores/interface.go index 45e371fb7..556cd4dbf 100644 --- a/stores/interface.go +++ b/stores/interface.go @@ -6,16 +6,15 @@ import ( "golang.org/x/xerrors" - "github.com/filecoin-project/go-sectorbuilder" "github.com/filecoin-project/specs-actors/actors/abi" ) type Store interface { - AcquireSector(ctx context.Context, s abi.SectorID, existing sectorbuilder.SectorFileType, allocate sectorbuilder.SectorFileType, sealing bool) (paths sectorbuilder.SectorPaths, stores sectorbuilder.SectorPaths, done func(), err error) - Remove(ctx context.Context, s abi.SectorID, types sectorbuilder.SectorFileType) error + AcquireSector(ctx context.Context, s abi.SectorID, existing SectorFileType, allocate SectorFileType, sealing bool) (paths SectorPaths, stores SectorPaths, done func(), err error) + Remove(ctx context.Context, s abi.SectorID, types SectorFileType) error // move sectors into storage - MoveStorage(ctx context.Context, s abi.SectorID, types sectorbuilder.SectorFileType) error + MoveStorage(ctx context.Context, s abi.SectorID, types SectorFileType) error FsStat(ctx context.Context, id ID) (FsStat, error) } diff --git a/stores/local.go b/stores/local.go index bc2e56a69..4ed0f5b3e 100644 --- a/stores/local.go +++ b/stores/local.go @@ -12,9 +12,7 @@ import ( "github.com/filecoin-project/specs-actors/actors/abi" "golang.org/x/xerrors" - "github.com/filecoin-project/go-sectorbuilder" "github.com/filecoin-project/lotus/node/config" - "github.com/filecoin-project/lotus/storage/sectorstorage/sectorutil" ) type StoragePath struct { @@ -43,7 +41,7 @@ type LocalStorage interface { const MetaFile = "sectorstore.json" -var pathTypes = []sectorbuilder.SectorFileType{sectorbuilder.FTUnsealed, sectorbuilder.FTSealed, sectorbuilder.FTCache} +var pathTypes = []SectorFileType{FTUnsealed, FTSealed, FTCache} type Local struct { localStorage LocalStorage @@ -120,7 +118,7 @@ func (st *Local) OpenPath(ctx context.Context, p string) error { } for _, ent := range ents { - sid, err := sectorutil.ParseSectorID(ent.Name()) + sid, err := ParseSectorID(ent.Name()) if err != nil { return xerrors.Errorf("parse sector id %s: %w", ent.Name(), err) } @@ -152,15 +150,15 @@ func (st *Local) open(ctx context.Context) error { return nil } -func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, existing sectorbuilder.SectorFileType, allocate sectorbuilder.SectorFileType, sealing bool) (sectorbuilder.SectorPaths, sectorbuilder.SectorPaths, func(), error) { +func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, existing SectorFileType, allocate SectorFileType, sealing bool) (SectorPaths, SectorPaths, func(), error) { if existing|allocate != existing^allocate { - return sectorbuilder.SectorPaths{}, sectorbuilder.SectorPaths{}, nil, xerrors.New("can't both find and allocate a sector") + return SectorPaths{}, SectorPaths{}, nil, xerrors.New("can't both find and allocate a sector") } st.localLk.RLock() - var out sectorbuilder.SectorPaths - var storageIDs sectorbuilder.SectorPaths + var out SectorPaths + var storageIDs SectorPaths for _, fileType := range pathTypes { if fileType&existing == 0 { @@ -183,9 +181,9 @@ func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, existing s continue } - spath := filepath.Join(p.local, fileType.String(), sectorutil.SectorName(sid)) - sectorutil.SetPathByType(&out, fileType, spath) - sectorutil.SetPathByType(&storageIDs, fileType, string(info.ID)) + spath := filepath.Join(p.local, fileType.String(), SectorName(sid)) + SetPathByType(&out, fileType, spath) + SetPathByType(&storageIDs, fileType, string(info.ID)) existing ^= fileType break @@ -200,7 +198,7 @@ func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, existing s sis, err := st.index.StorageBestAlloc(ctx, fileType, sealing) if err != nil { st.localLk.RUnlock() - return sectorbuilder.SectorPaths{}, sectorbuilder.SectorPaths{}, nil, xerrors.Errorf("finding best storage for allocating : %w", err) + return SectorPaths{}, SectorPaths{}, nil, xerrors.Errorf("finding best storage for allocating : %w", err) } var best string @@ -226,17 +224,17 @@ func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, existing s // TODO: Check free space - best = filepath.Join(p.local, fileType.String(), sectorutil.SectorName(sid)) + best = filepath.Join(p.local, fileType.String(), SectorName(sid)) bestID = si.ID } if best == "" { st.localLk.RUnlock() - return sectorbuilder.SectorPaths{}, sectorbuilder.SectorPaths{}, nil, xerrors.Errorf("couldn't find a suitable path for a sector") + return SectorPaths{}, SectorPaths{}, nil, xerrors.Errorf("couldn't find a suitable path for a sector") } - sectorutil.SetPathByType(&out, fileType, best) - sectorutil.SetPathByType(&storageIDs, fileType, string(bestID)) + SetPathByType(&out, fileType, best) + SetPathByType(&storageIDs, fileType, string(bestID)) allocate ^= fileType } @@ -270,7 +268,7 @@ func (st *Local) Local(ctx context.Context) ([]StoragePath, error) { return out, nil } -func (st *Local) Remove(ctx context.Context, sid abi.SectorID, typ sectorbuilder.SectorFileType) error { +func (st *Local) Remove(ctx context.Context, sid abi.SectorID, typ SectorFileType) error { if bits.OnesCount(uint(typ)) != 1 { return xerrors.New("delete expects one file type") } @@ -298,7 +296,7 @@ func (st *Local) Remove(ctx context.Context, sid abi.SectorID, typ sectorbuilder return xerrors.Errorf("dropping sector from index: %w", err) } - spath := filepath.Join(p.local, typ.String(), sectorutil.SectorName(sid)) + spath := filepath.Join(p.local, typ.String(), SectorName(sid)) log.Infof("remove %s", spath) if err := os.RemoveAll(spath); err != nil { @@ -309,7 +307,7 @@ func (st *Local) Remove(ctx context.Context, sid abi.SectorID, typ sectorbuilder return nil } -func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, types sectorbuilder.SectorFileType) error { +func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, types SectorFileType) error { dest, destIds, sdone, err := st.AcquireSector(ctx, s, FTNone, types, false) if err != nil { return xerrors.Errorf("acquire dest storage: %w", err) @@ -327,12 +325,12 @@ func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, types sectorbu continue } - sst, err := st.index.StorageInfo(ctx, ID(sectorutil.PathByType(srcIds, fileType))) + sst, err := st.index.StorageInfo(ctx, ID(PathByType(srcIds, fileType))) if err != nil { return xerrors.Errorf("failed to get source storage info: %w", err) } - dst, err := st.index.StorageInfo(ctx, ID(sectorutil.PathByType(destIds, fileType))) + dst, err := st.index.StorageInfo(ctx, ID(PathByType(destIds, fileType))) if err != nil { return xerrors.Errorf("failed to get source storage info: %w", err) } @@ -349,17 +347,17 @@ func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, types sectorbu log.Debugf("moving %v(%d) to storage: %s(se:%t; st:%t) -> %s(se:%t; st:%t)", s, fileType, sst.ID, sst.CanSeal, sst.CanStore, dst.ID, dst.CanSeal, dst.CanStore) - if err := st.index.StorageDropSector(ctx, ID(sectorutil.PathByType(srcIds, fileType)), s, fileType); err != nil { + if err := st.index.StorageDropSector(ctx, ID(PathByType(srcIds, fileType)), s, fileType); err != nil { return xerrors.Errorf("dropping source sector from index: %w", err) } - if err := move(sectorutil.PathByType(src, fileType), sectorutil.PathByType(dest, fileType)); err != nil { + if err := move(PathByType(src, fileType), PathByType(dest, fileType)); err != nil { // TODO: attempt some recovery (check if src is still there, re-declare) return xerrors.Errorf("moving sector %v(%d): %w", s, fileType, err) } - if err := st.index.StorageDeclareSector(ctx, ID(sectorutil.PathByType(destIds, fileType)), s, fileType); err != nil { - return xerrors.Errorf("declare sector %d(t:%d) -> %s: %w", s, fileType, ID(sectorutil.PathByType(destIds, fileType)), err) + if err := st.index.StorageDeclareSector(ctx, ID(PathByType(destIds, fileType)), s, fileType); err != nil { + return xerrors.Errorf("declare sector %d(t:%d) -> %s: %w", s, fileType, ID(PathByType(destIds, fileType)), err) } } diff --git a/stores/remote.go b/stores/remote.go index e44b8cfec..e919baa1d 100644 --- a/stores/remote.go +++ b/stores/remote.go @@ -17,11 +17,9 @@ import ( files "github.com/ipfs/go-ipfs-files" "golang.org/x/xerrors" - "github.com/filecoin-project/go-sectorbuilder" "github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/lotus/lib/tarutil" - "github.com/filecoin-project/lotus/storage/sectorstorage/sectorutil" ) type Remote struct { @@ -42,9 +40,9 @@ func NewRemote(local *Local, index SectorIndex, auth http.Header) *Remote { } } -func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, existing sectorbuilder.SectorFileType, allocate sectorbuilder.SectorFileType, sealing bool) (sectorbuilder.SectorPaths, sectorbuilder.SectorPaths, func(), error) { +func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, existing SectorFileType, allocate SectorFileType, sealing bool) (SectorPaths, SectorPaths, func(), error) { if existing|allocate != existing^allocate { - return sectorbuilder.SectorPaths{}, sectorbuilder.SectorPaths{}, nil, xerrors.New("can't both find and allocate a sector") + return SectorPaths{}, SectorPaths{}, nil, xerrors.New("can't both find and allocate a sector") } r.fetchLk.Lock() @@ -52,7 +50,7 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, existing sec paths, stores, done, err := r.local.AcquireSector(ctx, s, existing, allocate, sealing) if err != nil { - return sectorbuilder.SectorPaths{}, sectorbuilder.SectorPaths{}, nil, xerrors.Errorf("local acquire error: %w", err) + return SectorPaths{}, SectorPaths{}, nil, xerrors.Errorf("local acquire error: %w", err) } for _, fileType := range pathTypes { @@ -60,19 +58,19 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, existing sec continue } - if sectorutil.PathByType(paths, fileType) != "" { + if PathByType(paths, fileType) != "" { continue } ap, storageID, url, rdone, err := r.acquireFromRemote(ctx, s, fileType, sealing) if err != nil { done() - return sectorbuilder.SectorPaths{}, sectorbuilder.SectorPaths{}, nil, err + return SectorPaths{}, SectorPaths{}, nil, err } done = mergeDone(done, rdone) - sectorutil.SetPathByType(&paths, fileType, ap) - sectorutil.SetPathByType(&stores, fileType, string(storageID)) + SetPathByType(&paths, fileType, ap) + SetPathByType(&stores, fileType, string(storageID)) if err := r.index.StorageDeclareSector(ctx, storageID, s, fileType); err != nil { log.Warnf("declaring sector %v in %s failed: %+v", s, storageID, err) @@ -88,7 +86,7 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, existing sec return paths, stores, done, nil } -func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, fileType sectorbuilder.SectorFileType, sealing bool) (string, ID, string, func(), error) { +func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, fileType SectorFileType, sealing bool) (string, ID, string, func(), error) { si, err := r.index.StorageFindSector(ctx, s, fileType, false) if err != nil { return "", "", "", nil, err @@ -102,8 +100,8 @@ func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, fileType if err != nil { return "", "", "", nil, xerrors.Errorf("allocate local sector for fetching: %w", err) } - dest := sectorutil.PathByType(apaths, fileType) - storageID := sectorutil.PathByType(ids, fileType) + dest := PathByType(apaths, fileType) + storageID := PathByType(ids, fileType) var merr error for _, info := range si { @@ -176,7 +174,7 @@ func (r *Remote) fetch(ctx context.Context, url, outname string) error { } } -func (r *Remote) MoveStorage(ctx context.Context, s abi.SectorID, types sectorbuilder.SectorFileType) error { +func (r *Remote) MoveStorage(ctx context.Context, s abi.SectorID, types SectorFileType) error { // Make sure we have the data local _, _, ddone, err := r.AcquireSector(ctx, s, types, FTNone, false) if err != nil { @@ -187,7 +185,7 @@ func (r *Remote) MoveStorage(ctx context.Context, s abi.SectorID, types sectorbu return r.local.MoveStorage(ctx, s, types) } -func (r *Remote) Remove(ctx context.Context, sid abi.SectorID, typ sectorbuilder.SectorFileType) error { +func (r *Remote) Remove(ctx context.Context, sid abi.SectorID, typ SectorFileType) error { if bits.OnesCount(uint(typ)) != 1 { return xerrors.New("delete expects one file type") } diff --git a/worker_local.go b/worker_local.go index de3f19c89..3547a8a03 100644 --- a/worker_local.go +++ b/worker_local.go @@ -2,6 +2,7 @@ package sectorstorage import ( "context" + "github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper" "io" "os" @@ -9,17 +10,15 @@ import ( "golang.org/x/xerrors" ffi "github.com/filecoin-project/filecoin-ffi" - "github.com/filecoin-project/go-sectorbuilder" "github.com/filecoin-project/specs-actors/actors/abi" storage2 "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/storage/sectorstorage/sealtasks" - "github.com/filecoin-project/lotus/storage/sectorstorage/sectorutil" "github.com/filecoin-project/lotus/storage/sectorstorage/stores" ) -var pathTypes = []sectorbuilder.SectorFileType{sectorbuilder.FTUnsealed, sectorbuilder.FTSealed, sectorbuilder.FTCache} +var pathTypes = []stores.SectorFileType{stores.FTUnsealed, stores.FTSealed, stores.FTCache} type WorkerConfig struct { SealProof abi.RegisteredProof @@ -27,7 +26,7 @@ type WorkerConfig struct { } type LocalWorker struct { - scfg *sectorbuilder.Config + scfg *ffiwrapper.Config storage stores.Store localStore *stores.Local sindex stores.SectorIndex @@ -47,7 +46,7 @@ func NewLocalWorker(wcfg WorkerConfig, store stores.Store, local *stores.Local, } return &LocalWorker{ - scfg: §orbuilder.Config{ + scfg: &ffiwrapper.Config{ SealProofType: wcfg.SealProof, PoStProofType: ppt, }, @@ -63,10 +62,10 @@ type localWorkerPathProvider struct { w *LocalWorker } -func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector abi.SectorID, existing sectorbuilder.SectorFileType, allocate sectorbuilder.SectorFileType, sealing bool) (sectorbuilder.SectorPaths, func(), error) { +func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, sealing bool) (stores.SectorPaths, func(), error) { paths, storageIDs, done, err := l.w.storage.AcquireSector(ctx, sector, existing, allocate, sealing) if err != nil { - return sectorbuilder.SectorPaths{}, nil, err + return stores.SectorPaths{}, nil, err } log.Debugf("acquired sector %d (e:%d; a:%d): %v", sector, existing, allocate, paths) @@ -79,7 +78,7 @@ func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector abi. continue } - sid := sectorutil.PathByType(storageIDs, fileType) + sid := stores.PathByType(storageIDs, fileType) if err := l.w.sindex.StorageDeclareSector(ctx, stores.ID(sid), sector, fileType); err != nil { log.Errorf("declare sector error: %+v", err) @@ -88,8 +87,8 @@ func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector abi. }, nil } -func (l *LocalWorker) sb() (sectorbuilder.Basic, error) { - return sectorbuilder.New(&localWorkerPathProvider{w: l}, l.scfg) +func (l *LocalWorker) sb() (ffiwrapper.Basic, error) { + return ffiwrapper.New(&localWorkerPathProvider{w: l}, l.scfg) } func (l *LocalWorker) NewSector(ctx context.Context, sector abi.SectorID) error { @@ -156,11 +155,11 @@ func (l *LocalWorker) FinalizeSector(ctx context.Context, sector abi.SectorID) e return xerrors.Errorf("finalizing sector: %w", err) } - if err := l.storage.Remove(ctx, sector, sectorbuilder.FTUnsealed); err != nil { + if err := l.storage.Remove(ctx, sector, stores.FTUnsealed); err != nil { return xerrors.Errorf("removing unsealed data: %w", err) } - if err := l.storage.MoveStorage(ctx, sector, sectorbuilder.FTSealed|sectorbuilder.FTCache); err != nil { + if err := l.storage.MoveStorage(ctx, sector, stores.FTSealed|stores.FTCache); err != nil { return xerrors.Errorf("moving sealed data to storage: %w", err) } diff --git a/zerocomm/zerocomm.go b/zerocomm/zerocomm.go new file mode 100644 index 000000000..7d6308549 --- /dev/null +++ b/zerocomm/zerocomm.go @@ -0,0 +1,55 @@ +package zerocomm + +import ( + "math/bits" + + commcid "github.com/filecoin-project/go-fil-commcid" + "github.com/filecoin-project/specs-actors/actors/abi" + "github.com/ipfs/go-cid" +) + +const Levels = 37 +const Skip = 2 // can't generate for 32, 64b + +var PieceComms = [Levels - Skip][32]byte{ + {0x37, 0x31, 0xbb, 0x99, 0xac, 0x68, 0x9f, 0x66, 0xee, 0xf5, 0x97, 0x3e, 0x4a, 0x94, 0xda, 0x18, 0x8f, 0x4d, 0xdc, 0xae, 0x58, 0x7, 0x24, 0xfc, 0x6f, 0x3f, 0xd6, 0xd, 0xfd, 0x48, 0x83, 0x33}, + {0x64, 0x2a, 0x60, 0x7e, 0xf8, 0x86, 0xb0, 0x4, 0xbf, 0x2c, 0x19, 0x78, 0x46, 0x3a, 0xe1, 0xd4, 0x69, 0x3a, 0xc0, 0xf4, 0x10, 0xeb, 0x2d, 0x1b, 0x7a, 0x47, 0xfe, 0x20, 0x5e, 0x5e, 0x75, 0xf}, + {0x57, 0xa2, 0x38, 0x1a, 0x28, 0x65, 0x2b, 0xf4, 0x7f, 0x6b, 0xef, 0x7a, 0xca, 0x67, 0x9b, 0xe4, 0xae, 0xde, 0x58, 0x71, 0xab, 0x5c, 0xf3, 0xeb, 0x2c, 0x8, 0x11, 0x44, 0x88, 0xcb, 0x85, 0x26}, + {0x1f, 0x7a, 0xc9, 0x59, 0x55, 0x10, 0xe0, 0x9e, 0xa4, 0x1c, 0x46, 0xb, 0x17, 0x64, 0x30, 0xbb, 0x32, 0x2c, 0xd6, 0xfb, 0x41, 0x2e, 0xc5, 0x7c, 0xb1, 0x7d, 0x98, 0x9a, 0x43, 0x10, 0x37, 0x2f}, + {0xfc, 0x7e, 0x92, 0x82, 0x96, 0xe5, 0x16, 0xfa, 0xad, 0xe9, 0x86, 0xb2, 0x8f, 0x92, 0xd4, 0x4a, 0x4f, 0x24, 0xb9, 0x35, 0x48, 0x52, 0x23, 0x37, 0x6a, 0x79, 0x90, 0x27, 0xbc, 0x18, 0xf8, 0x33}, + {0x8, 0xc4, 0x7b, 0x38, 0xee, 0x13, 0xbc, 0x43, 0xf4, 0x1b, 0x91, 0x5c, 0xe, 0xed, 0x99, 0x11, 0xa2, 0x60, 0x86, 0xb3, 0xed, 0x62, 0x40, 0x1b, 0xf9, 0xd5, 0x8b, 0x8d, 0x19, 0xdf, 0xf6, 0x24}, + {0xb2, 0xe4, 0x7b, 0xfb, 0x11, 0xfa, 0xcd, 0x94, 0x1f, 0x62, 0xaf, 0x5c, 0x75, 0xf, 0x3e, 0xa5, 0xcc, 0x4d, 0xf5, 0x17, 0xd5, 0xc4, 0xf1, 0x6d, 0xb2, 0xb4, 0xd7, 0x7b, 0xae, 0xc1, 0xa3, 0x2f}, + {0xf9, 0x22, 0x61, 0x60, 0xc8, 0xf9, 0x27, 0xbf, 0xdc, 0xc4, 0x18, 0xcd, 0xf2, 0x3, 0x49, 0x31, 0x46, 0x0, 0x8e, 0xae, 0xfb, 0x7d, 0x2, 0x19, 0x4d, 0x5e, 0x54, 0x81, 0x89, 0x0, 0x51, 0x8}, + {0x2c, 0x1a, 0x96, 0x4b, 0xb9, 0xb, 0x59, 0xeb, 0xfe, 0xf, 0x6d, 0xa2, 0x9a, 0xd6, 0x5a, 0xe3, 0xe4, 0x17, 0x72, 0x4a, 0x8f, 0x7c, 0x11, 0x74, 0x5a, 0x40, 0xca, 0xc1, 0xe5, 0xe7, 0x40, 0x11}, + {0xfe, 0xe3, 0x78, 0xce, 0xf1, 0x64, 0x4, 0xb1, 0x99, 0xed, 0xe0, 0xb1, 0x3e, 0x11, 0xb6, 0x24, 0xff, 0x9d, 0x78, 0x4f, 0xbb, 0xed, 0x87, 0x8d, 0x83, 0x29, 0x7e, 0x79, 0x5e, 0x2, 0x4f, 0x2}, + {0x8e, 0x9e, 0x24, 0x3, 0xfa, 0x88, 0x4c, 0xf6, 0x23, 0x7f, 0x60, 0xdf, 0x25, 0xf8, 0x3e, 0xe4, 0xd, 0xca, 0x9e, 0xd8, 0x79, 0xeb, 0x6f, 0x63, 0x52, 0xd1, 0x50, 0x84, 0xf5, 0xad, 0xd, 0x3f}, + {0x75, 0x2d, 0x96, 0x93, 0xfa, 0x16, 0x75, 0x24, 0x39, 0x54, 0x76, 0xe3, 0x17, 0xa9, 0x85, 0x80, 0xf0, 0x9, 0x47, 0xaf, 0xb7, 0xa3, 0x5, 0x40, 0xd6, 0x25, 0xa9, 0x29, 0x1c, 0xc1, 0x2a, 0x7}, + {0x70, 0x22, 0xf6, 0xf, 0x7e, 0xf6, 0xad, 0xfa, 0x17, 0x11, 0x7a, 0x52, 0x61, 0x9e, 0x30, 0xce, 0xa8, 0x2c, 0x68, 0x7, 0x5a, 0xdf, 0x1c, 0x66, 0x77, 0x86, 0xec, 0x50, 0x6e, 0xef, 0x2d, 0x19}, + {0xd9, 0x98, 0x87, 0xb9, 0x73, 0x57, 0x3a, 0x96, 0xe1, 0x13, 0x93, 0x64, 0x52, 0x36, 0xc1, 0x7b, 0x1f, 0x4c, 0x70, 0x34, 0xd7, 0x23, 0xc7, 0xa9, 0x9f, 0x70, 0x9b, 0xb4, 0xda, 0x61, 0x16, 0x2b}, + {0xd0, 0xb5, 0x30, 0xdb, 0xb0, 0xb4, 0xf2, 0x5c, 0x5d, 0x2f, 0x2a, 0x28, 0xdf, 0xee, 0x80, 0x8b, 0x53, 0x41, 0x2a, 0x2, 0x93, 0x1f, 0x18, 0xc4, 0x99, 0xf5, 0xa2, 0x54, 0x8, 0x6b, 0x13, 0x26}, + {0x84, 0xc0, 0x42, 0x1b, 0xa0, 0x68, 0x5a, 0x1, 0xbf, 0x79, 0x5a, 0x23, 0x44, 0x6, 0x4f, 0xe4, 0x24, 0xbd, 0x52, 0xa9, 0xd2, 0x43, 0x77, 0xb3, 0x94, 0xff, 0x4c, 0x4b, 0x45, 0x68, 0xe8, 0x11}, + {0x65, 0xf2, 0x9e, 0x5d, 0x98, 0xd2, 0x46, 0xc3, 0x8b, 0x38, 0x8c, 0xfc, 0x6, 0xdb, 0x1f, 0x6b, 0x2, 0x13, 0x3, 0xc5, 0xa2, 0x89, 0x0, 0xb, 0xdc, 0xe8, 0x32, 0xa9, 0xc3, 0xec, 0x42, 0x1c}, + {0xa2, 0x24, 0x75, 0x8, 0x28, 0x58, 0x50, 0x96, 0x5b, 0x7e, 0x33, 0x4b, 0x31, 0x27, 0xb0, 0xc0, 0x42, 0xb1, 0xd0, 0x46, 0xdc, 0x54, 0x40, 0x21, 0x37, 0x62, 0x7c, 0xd8, 0x79, 0x9c, 0xe1, 0x3a}, + {0xda, 0xfd, 0xab, 0x6d, 0xa9, 0x36, 0x44, 0x53, 0xc2, 0x6d, 0x33, 0x72, 0x6b, 0x9f, 0xef, 0xe3, 0x43, 0xbe, 0x8f, 0x81, 0x64, 0x9e, 0xc0, 0x9, 0xaa, 0xd3, 0xfa, 0xff, 0x50, 0x61, 0x75, 0x8}, + {0xd9, 0x41, 0xd5, 0xe0, 0xd6, 0x31, 0x4a, 0x99, 0x5c, 0x33, 0xff, 0xbd, 0x4f, 0xbe, 0x69, 0x11, 0x8d, 0x73, 0xd4, 0xe5, 0xfd, 0x2c, 0xd3, 0x1f, 0xf, 0x7c, 0x86, 0xeb, 0xdd, 0x14, 0xe7, 0x6}, + {0x51, 0x4c, 0x43, 0x5c, 0x3d, 0x4, 0xd3, 0x49, 0xa5, 0x36, 0x5f, 0xbd, 0x59, 0xff, 0xc7, 0x13, 0x62, 0x91, 0x11, 0x78, 0x59, 0x91, 0xc1, 0xa3, 0xc5, 0x3a, 0xf2, 0x20, 0x79, 0x74, 0x1a, 0x2f}, + {0xad, 0x6, 0x85, 0x39, 0x69, 0xd3, 0x7d, 0x34, 0xff, 0x8, 0xe0, 0x9f, 0x56, 0x93, 0xa, 0x4a, 0xd1, 0x9a, 0x89, 0xde, 0xf6, 0xc, 0xbf, 0xee, 0x7e, 0x1d, 0x33, 0x81, 0xc1, 0xe7, 0x1c, 0x37}, + {0x39, 0x56, 0xe, 0x7b, 0x13, 0xa9, 0x3b, 0x7, 0xa2, 0x43, 0xfd, 0x27, 0x20, 0xff, 0xa7, 0xcb, 0x3e, 0x1d, 0x2e, 0x50, 0x5a, 0xb3, 0x62, 0x9e, 0x79, 0xf4, 0x63, 0x13, 0x51, 0x2c, 0xda, 0x6}, + {0xcc, 0xc3, 0xc0, 0x12, 0xf5, 0xb0, 0x5e, 0x81, 0x1a, 0x2b, 0xbf, 0xdd, 0xf, 0x68, 0x33, 0xb8, 0x42, 0x75, 0xb4, 0x7b, 0xf2, 0x29, 0xc0, 0x5, 0x2a, 0x82, 0x48, 0x4f, 0x3c, 0x1a, 0x5b, 0x3d}, + {0x7d, 0xf2, 0x9b, 0x69, 0x77, 0x31, 0x99, 0xe8, 0xf2, 0xb4, 0xb, 0x77, 0x91, 0x9d, 0x4, 0x85, 0x9, 0xee, 0xd7, 0x68, 0xe2, 0xc7, 0x29, 0x7b, 0x1f, 0x14, 0x37, 0x3, 0x4f, 0xc3, 0xc6, 0x2c}, + {0x66, 0xce, 0x5, 0xa3, 0x66, 0x75, 0x52, 0xcf, 0x45, 0xc0, 0x2b, 0xcc, 0x4e, 0x83, 0x92, 0x91, 0x9b, 0xde, 0xac, 0x35, 0xde, 0x2f, 0xf5, 0x62, 0x71, 0x84, 0x8e, 0x9f, 0x7b, 0x67, 0x51, 0x7}, + {0xd8, 0x61, 0x2, 0x18, 0x42, 0x5a, 0xb5, 0xe9, 0x5b, 0x1c, 0xa6, 0x23, 0x9d, 0x29, 0xa2, 0xe4, 0x20, 0xd7, 0x6, 0xa9, 0x6f, 0x37, 0x3e, 0x2f, 0x9c, 0x9a, 0x91, 0xd7, 0x59, 0xd1, 0x9b, 0x1}, + {0x6d, 0x36, 0x4b, 0x1e, 0xf8, 0x46, 0x44, 0x1a, 0x5a, 0x4a, 0x68, 0x86, 0x23, 0x14, 0xac, 0xc0, 0xa4, 0x6f, 0x1, 0x67, 0x17, 0xe5, 0x34, 0x43, 0xe8, 0x39, 0xee, 0xdf, 0x83, 0xc2, 0x85, 0x3c}, + {0x7, 0x7e, 0x5f, 0xde, 0x35, 0xc5, 0xa, 0x93, 0x3, 0xa5, 0x50, 0x9, 0xe3, 0x49, 0x8a, 0x4e, 0xbe, 0xdf, 0xf3, 0x9c, 0x42, 0xb7, 0x10, 0xb7, 0x30, 0xd8, 0xec, 0x7a, 0xc7, 0xaf, 0xa6, 0x3e}, + {0xe6, 0x40, 0x5, 0xa6, 0xbf, 0xe3, 0x77, 0x79, 0x53, 0xb8, 0xad, 0x6e, 0xf9, 0x3f, 0xf, 0xca, 0x10, 0x49, 0xb2, 0x4, 0x16, 0x54, 0xf2, 0xa4, 0x11, 0xf7, 0x70, 0x27, 0x99, 0xce, 0xce, 0x2}, + {0x25, 0x9d, 0x3d, 0x6b, 0x1f, 0x4d, 0x87, 0x6d, 0x11, 0x85, 0xe1, 0x12, 0x3a, 0xf6, 0xf5, 0x50, 0x1a, 0xf0, 0xf6, 0x7c, 0xf1, 0x5b, 0x52, 0x16, 0x25, 0x5b, 0x7b, 0x17, 0x8d, 0x12, 0x5, 0x1d}, + {0x3f, 0x9a, 0x4d, 0x41, 0x1d, 0xa4, 0xef, 0x1b, 0x36, 0xf3, 0x5f, 0xf0, 0xa1, 0x95, 0xae, 0x39, 0x2a, 0xb2, 0x3f, 0xee, 0x79, 0x67, 0xb7, 0xc4, 0x1b, 0x3, 0xd1, 0x61, 0x3f, 0xc2, 0x92, 0x39}, + {0xfe, 0x4e, 0xf3, 0x28, 0xc6, 0x1a, 0xa3, 0x9c, 0xfd, 0xb2, 0x48, 0x4e, 0xaa, 0x32, 0xa1, 0x51, 0xb1, 0xfe, 0x3d, 0xfd, 0x1f, 0x96, 0xdd, 0x8c, 0x97, 0x11, 0xfd, 0x86, 0xd6, 0xc5, 0x81, 0x13}, + {0xf5, 0x5d, 0x68, 0x90, 0xe, 0x2d, 0x83, 0x81, 0xec, 0xcb, 0x81, 0x64, 0xcb, 0x99, 0x76, 0xf2, 0x4b, 0x2d, 0xe0, 0xdd, 0x61, 0xa3, 0x1b, 0x97, 0xce, 0x6e, 0xb2, 0x38, 0x50, 0xd5, 0xe8, 0x19}, + {0xaa, 0xaa, 0x8c, 0x4c, 0xb4, 0xa, 0xac, 0xee, 0x1e, 0x2, 0xdc, 0x65, 0x42, 0x4b, 0x2a, 0x6c, 0x8e, 0x99, 0xf8, 0x3, 0xb7, 0x2f, 0x79, 0x29, 0xc4, 0x10, 0x1d, 0x7f, 0xae, 0x6b, 0xff, 0x32}, +} + +func ZeroPieceCommitment(sz abi.UnpaddedPieceSize) cid.Cid { + level := bits.TrailingZeros64(uint64(sz.Padded())) - Skip - 5 // 2^5 = 32 + return commcid.PieceCommitmentV1ToCID(PieceComms[level][:]) +} diff --git a/zerocomm/zerocomm_test.go b/zerocomm/zerocomm_test.go new file mode 100644 index 000000000..ef3f11d88 --- /dev/null +++ b/zerocomm/zerocomm_test.go @@ -0,0 +1,115 @@ +package zerocomm_test + +import ( + "bytes" + "fmt" + "github.com/filecoin-project/lotus/storage/sectorstorage/zerocomm" + "io" + "testing" + + commcid "github.com/filecoin-project/go-fil-commcid" + abi "github.com/filecoin-project/specs-actors/actors/abi" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper" +) + +func TestComms(t *testing.T) { + t.Skip("don't have enough ram") // no, but seriously, currently this needs like 3tb of /tmp + + var expPieceComms [zerocomm.Levels - zerocomm.Skip]cid.Cid + + { + l2, err := ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredProof_StackedDRG2KiBPoSt, bytes.NewReader(make([]byte, 127)), 127) + if err != nil { + t.Fatal(err) + } + expPieceComms[0] = l2 + } + + for i := 1; i < zerocomm.Levels-2; i++ { + var err error + sz := abi.UnpaddedPieceSize(127 << uint(i)) + fmt.Println(i, sz) + r := io.LimitReader(&NullReader{}, int64(sz)) + + expPieceComms[i], err = ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredProof_StackedDRG2KiBPoSt, r, sz) + if err != nil { + t.Fatal(err) + } + } + + for i, comm := range expPieceComms { + c, err := commcid.CIDToPieceCommitmentV1(comm) + if err != nil { + t.Fatal(err) + } + if string(c) != string(zerocomm.PieceComms[i][:]) { + t.Errorf("zero commitment %d didn't match", i) + } + } + + for _, comm := range expPieceComms { // Could do codegen, but this is good enough + fmt.Printf("%#v,\n", comm) + } +} + +func TestCommsSmall(t *testing.T) { + var expPieceComms [8]cid.Cid + lvls := len(expPieceComms) + zerocomm.Skip + + { + l2, err := ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredProof_StackedDRG2KiBPoSt, bytes.NewReader(make([]byte, 127)), 127) + if err != nil { + t.Fatal(err) + } + expPieceComms[0] = l2 + } + + for i := 1; i < lvls-2; i++ { + var err error + sz := abi.UnpaddedPieceSize(127 << uint(i)) + fmt.Println(i, sz) + r := io.LimitReader(&NullReader{}, int64(sz)) + + expPieceComms[i], err = ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredProof_StackedDRG2KiBPoSt, r, sz) + if err != nil { + t.Fatal(err) + } + } + + for i, comm := range expPieceComms { + c, err := commcid.CIDToPieceCommitmentV1(comm) + if err != nil { + t.Fatal(err) + } + if string(c) != string(zerocomm.PieceComms[i][:]) { + t.Errorf("zero commitment %d didn't match", i) + } + } + + for _, comm := range expPieceComms { // Could do codegen, but this is good enough + fmt.Printf("%#v,\n", comm) + } +} + +func TestForSise(t *testing.T) { + exp, err := ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredProof_StackedDRG2KiBPoSt, bytes.NewReader(make([]byte, 1016)), 1016) + if err != nil { + return + } + + actual := zerocomm.ZeroPieceCommitment(1016) + if !exp.Equals(actual) { + t.Errorf("zero commitment didn't match") + } +} + +type NullReader struct{} + +func (NullReader) Read(out []byte) (int, error) { + for i := range out { + out[i] = 0 + } + return len(out), nil +} From 617e3135c1421c1c95ec72a24d3a5ed14bdbe812 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 26 Mar 2020 20:34:38 +0100 Subject: [PATCH 018/199] Cleanup after dropping sectorbuilder --- ffiwrapper/sealer.go | 10 +-- ffiwrapper/sealer_cgo.go | 82 +++------------------ ffiwrapper/sealer_test.go | 6 +- ffiwrapper/types.go | 9 +-- ffiwrapper/varifier_cgo.go | 80 --------------------- ffiwrapper/verifier_cgo.go | 142 +++++++++++++++++++++++++++++++++++++ manager.go | 6 +- mock/mock.go | 8 +-- worker_local.go | 4 +- zerocomm/zerocomm_test.go | 2 +- 10 files changed, 176 insertions(+), 173 deletions(-) delete mode 100644 ffiwrapper/varifier_cgo.go create mode 100644 ffiwrapper/verifier_cgo.go diff --git a/ffiwrapper/sealer.go b/ffiwrapper/sealer.go index a1d27837a..12a8a3df6 100644 --- a/ffiwrapper/sealer.go +++ b/ffiwrapper/sealer.go @@ -7,7 +7,7 @@ import ( var log = logging.Logger("ffiwrapper") -type SectorBuilder struct { +type Sealer struct { sealProofType abi.RegisteredProof postProofType abi.RegisteredProof ssize abi.SectorSize // a function of sealProofType and postProofType @@ -24,18 +24,18 @@ func fallbackPostChallengeCount(sectors uint64, faults uint64) uint64 { return challengeCount } -func (sb *SectorBuilder) Stop() { +func (sb *Sealer) Stop() { close(sb.stopping) } -func (sb *SectorBuilder) SectorSize() abi.SectorSize { +func (sb *Sealer) SectorSize() abi.SectorSize { return sb.ssize } -func (sb *SectorBuilder) SealProofType() abi.RegisteredProof { +func (sb *Sealer) SealProofType() abi.RegisteredProof { return sb.sealProofType } -func (sb *SectorBuilder) PoStProofType() abi.RegisteredProof { +func (sb *Sealer) PoStProofType() abi.RegisteredProof { return sb.postProofType } diff --git a/ffiwrapper/sealer_cgo.go b/ffiwrapper/sealer_cgo.go index cc871c835..793f07c1b 100644 --- a/ffiwrapper/sealer_cgo.go +++ b/ffiwrapper/sealer_cgo.go @@ -19,15 +19,15 @@ import ( "github.com/filecoin-project/lotus/storage/sectorstorage/zerocomm" ) -var _ Basic = &SectorBuilder{} +var _ Storage = &Sealer{} -func New(sectors SectorProvider, cfg *Config) (*SectorBuilder, error) { +func New(sectors SectorProvider, cfg *Config) (*Sealer, error) { sectorSize, err := sizeFromConfig(*cfg) if err != nil { return nil, err } - sb := &SectorBuilder{ + sb := &Sealer{ sealProofType: cfg.SealProofType, postProofType: cfg.PoStProofType, ssize: sectorSize, @@ -40,47 +40,13 @@ func New(sectors SectorProvider, cfg *Config) (*SectorBuilder, error) { return sb, nil } -func (sb *SectorBuilder) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorInfo []abi.SectorInfo, faults []abi.SectorNumber) (ffi.SortedPrivateSectorInfo, error) { - fmap := map[abi.SectorNumber]struct{}{} - for _, fault := range faults { - fmap[fault] = struct{}{} - } - - var out []ffi.PrivateSectorInfo - for _, s := range sectorInfo { - if _, faulty := fmap[s.SectorNumber]; faulty { - continue - } - - paths, done, err := sb.sectors.AcquireSector(ctx, abi.SectorID{Miner: mid, Number: s.SectorNumber}, stores.FTCache|stores.FTSealed, 0, false) - if err != nil { - return ffi.SortedPrivateSectorInfo{}, xerrors.Errorf("acquire sector paths: %w", err) - } - done() // TODO: This is a tiny bit suboptimal - - postProofType, err := s.RegisteredProof.RegisteredPoStProof() - if err != nil { - return ffi.SortedPrivateSectorInfo{}, xerrors.Errorf("acquiring registered PoSt proof from sector info %+v: %w", s, err) - } - - out = append(out, ffi.PrivateSectorInfo{ - CacheDirPath: paths.Cache, - PoStProofType: postProofType, - SealedSectorPath: paths.Sealed, - SectorInfo: s, - }) - } - - return ffi.NewSortedPrivateSectorInfo(out...), nil -} - -func (sb *SectorBuilder) NewSector(ctx context.Context, sector abi.SectorID) error { +func (sb *Sealer) NewSector(ctx context.Context, sector abi.SectorID) error { // TODO: Allocate the sector here instead of in addpiece return nil } -func (sb *SectorBuilder) AddPiece(ctx context.Context, sector abi.SectorID, existingPieceSizes []abi.UnpaddedPieceSize, pieceSize abi.UnpaddedPieceSize, file storage.Data) (abi.PieceInfo, error) { +func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPieceSizes []abi.UnpaddedPieceSize, pieceSize abi.UnpaddedPieceSize, file storage.Data) (abi.PieceInfo, error) { f, werr, err := toReadableFile(file, int64(pieceSize)) if err != nil { return abi.PieceInfo{}, err @@ -143,7 +109,7 @@ func (sb *SectorBuilder) AddPiece(ctx context.Context, sector abi.SectorID, exis }, werr() } -func (sb *SectorBuilder) ReadPieceFromSealedSector(ctx context.Context, sector abi.SectorID, offset UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealedCID cid.Cid) (io.ReadCloser, error) { +func (sb *Sealer) ReadPieceFromSealedSector(ctx context.Context, sector abi.SectorID, offset UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealedCID cid.Cid) (io.ReadCloser, error) { path, doneUnsealed, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTUnsealed, false) if err != nil { return nil, xerrors.Errorf("acquire unsealed sector path: %w", err) @@ -212,7 +178,7 @@ func (sb *SectorBuilder) ReadPieceFromSealedSector(ctx context.Context, sector a }, nil } -func (sb *SectorBuilder) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) { +func (sb *Sealer) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) { paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTSealed|stores.FTCache, true) if err != nil { return nil, xerrors.Errorf("acquiring sector paths: %w", err) @@ -269,7 +235,7 @@ func (sb *SectorBuilder) SealPreCommit1(ctx context.Context, sector abi.SectorID return p1o, nil } -func (sb *SectorBuilder) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.PreCommit1Out) (storage.SectorCids, error) { +func (sb *Sealer) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.PreCommit1Out) (storage.SectorCids, error) { paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTSealed|stores.FTCache, 0, true) if err != nil { return storage.SectorCids{}, xerrors.Errorf("acquiring sector paths: %w", err) @@ -287,7 +253,7 @@ func (sb *SectorBuilder) SealPreCommit2(ctx context.Context, sector abi.SectorID }, nil } -func (sb *SectorBuilder) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) { +func (sb *Sealer) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) { paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTSealed|stores.FTCache, 0, true) if err != nil { return nil, xerrors.Errorf("acquire sector paths: %w", err) @@ -314,37 +280,11 @@ func (sb *SectorBuilder) SealCommit1(ctx context.Context, sector abi.SectorID, t return output, nil } -func (sb *SectorBuilder) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.Commit1Out) (storage.Proof, error) { +func (sb *Sealer) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.Commit1Out) (storage.Proof, error) { return ffi.SealCommitPhase2(phase1Out, sector.Number, sector.Miner) } -func (sb *SectorBuilder) GenerateFallbackPoSt(ctx context.Context, miner abi.ActorID, sectorInfo []abi.SectorInfo, challengeSeed abi.PoStRandomness, faults []abi.SectorNumber) (storage.FallbackPostOut, error) { - privsectors, err := sb.pubSectorToPriv(ctx, miner, sectorInfo, faults) - if err != nil { - return storage.FallbackPostOut{}, err - } - - challengeCount := fallbackPostChallengeCount(uint64(len(sectorInfo)), uint64(len(faults))) - challengeSeed[31] = 0 - - candidates, err := ffi.GenerateCandidates(miner, challengeSeed, challengeCount, privsectors) - if err != nil { - return storage.FallbackPostOut{}, err - } - - winners := make([]abi.PoStCandidate, len(candidates)) - for idx := range winners { - winners[idx] = candidates[idx].Candidate - } - - proof, err := ffi.GeneratePoSt(miner, privsectors, challengeSeed, winners) - return storage.FallbackPostOut{ - PoStInputs: ffiToStorageCandidates(candidates), - Proof: proof, - }, err -} - -func (sb *SectorBuilder) FinalizeSector(ctx context.Context, sector abi.SectorID) error { +func (sb *Sealer) FinalizeSector(ctx context.Context, sector abi.SectorID) error { paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTCache, 0, false) if err != nil { return xerrors.Errorf("acquiring sector cache path: %w", err) diff --git a/ffiwrapper/sealer_test.go b/ffiwrapper/sealer_test.go index 78d4a4898..c65d8064d 100644 --- a/ffiwrapper/sealer_test.go +++ b/ffiwrapper/sealer_test.go @@ -37,7 +37,7 @@ type seal struct { ticket abi.SealRandomness } -func (s *seal) precommit(t *testing.T, sb *SectorBuilder, id abi.SectorID, done func()) { +func (s *seal) precommit(t *testing.T, sb *Sealer, id abi.SectorID, done func()) { defer done() dlen := abi.PaddedPieceSize(sectorSize).Unpadded() @@ -61,7 +61,7 @@ func (s *seal) precommit(t *testing.T, sb *SectorBuilder, id abi.SectorID, done s.cids = cids } -func (s *seal) commit(t *testing.T, sb *SectorBuilder, done func()) { +func (s *seal) commit(t *testing.T, sb *Sealer, done func()) { defer done() seed := abi.InteractiveSealRandomness{0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9, 8, 7, 6, 45, 3, 2, 1, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9} @@ -95,7 +95,7 @@ func (s *seal) commit(t *testing.T, sb *SectorBuilder, done func()) { } } -func post(t *testing.T, sb *SectorBuilder, seals ...seal) time.Time { +func post(t *testing.T, sb *Sealer, seals ...seal) time.Time { randomness := abi.PoStRandomness{0, 9, 2, 7, 6, 5, 4, 3, 2, 1, 0, 9, 8, 7, 6, 45, 3, 2, 1, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9, 7} sis := make([]abi.SectorInfo, len(seals)) diff --git a/ffiwrapper/types.go b/ffiwrapper/types.go index ea113fbbb..02479f705 100644 --- a/ffiwrapper/types.go +++ b/ffiwrapper/types.go @@ -3,9 +3,10 @@ package ffiwrapper import ( "context" "errors" - "github.com/ipfs/go-cid" "io" + "github.com/ipfs/go-cid" + "github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-storage/storage" @@ -20,14 +21,14 @@ type Validator interface { CanProve(sector stores.SectorPaths) (bool, error) } -type Sealer interface { +type StorageSealer interface { storage.Sealer storage.Storage } -type Basic interface { +type Storage interface { storage.Prover - Sealer + StorageSealer ReadPieceFromSealedSector(context.Context, abi.SectorID, UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (io.ReadCloser, error) } diff --git a/ffiwrapper/varifier_cgo.go b/ffiwrapper/varifier_cgo.go deleted file mode 100644 index 6c01470ce..000000000 --- a/ffiwrapper/varifier_cgo.go +++ /dev/null @@ -1,80 +0,0 @@ -//+build cgo - -package ffiwrapper - -import ( - "context" - - "go.opencensus.io/trace" - - ffi "github.com/filecoin-project/filecoin-ffi" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-storage/storage" -) - -func (sb *SectorBuilder) ComputeElectionPoSt(ctx context.Context, miner abi.ActorID, sectorInfo []abi.SectorInfo, challengeSeed abi.PoStRandomness, winners []abi.PoStCandidate) ([]abi.PoStProof, error) { - challengeSeed[31] = 0 - - privsects, err := sb.pubSectorToPriv(ctx, miner, sectorInfo, nil) // TODO: faults - if err != nil { - return nil, err - } - - return ffi.GeneratePoSt(miner, privsects, challengeSeed, winners) -} - -func (sb *SectorBuilder) GenerateEPostCandidates(ctx context.Context, miner abi.ActorID, sectorInfo []abi.SectorInfo, challengeSeed abi.PoStRandomness, faults []abi.SectorNumber) ([]storage.PoStCandidateWithTicket, error) { - privsectors, err := sb.pubSectorToPriv(ctx, miner, sectorInfo, faults) - if err != nil { - return nil, err - } - - challengeSeed[31] = 0 - - challengeCount := ElectionPostChallengeCount(uint64(len(sectorInfo)), uint64(len(faults))) - pc, err := ffi.GenerateCandidates(miner, challengeSeed, challengeCount, privsectors) - if err != nil { - return nil, err - } - - return ffiToStorageCandidates(pc), nil -} - -func ffiToStorageCandidates(pc []ffi.PoStCandidateWithTicket) []storage.PoStCandidateWithTicket { - out := make([]storage.PoStCandidateWithTicket, len(pc)) - for i := range out { - out[i] = storage.PoStCandidateWithTicket{ - Candidate: pc[i].Candidate, - Ticket: pc[i].Ticket, - } - } - - return out -} - -var _ Verifier = ProofVerifier - -type proofVerifier struct{} - -var ProofVerifier = proofVerifier{} - -func (proofVerifier) VerifySeal(info abi.SealVerifyInfo) (bool, error) { - return ffi.VerifySeal(info) -} - -func (proofVerifier) VerifyElectionPost(ctx context.Context, info abi.PoStVerifyInfo) (bool, error) { - return verifyPost(ctx, info) -} - -func (proofVerifier) VerifyFallbackPost(ctx context.Context, info abi.PoStVerifyInfo) (bool, error) { - return verifyPost(ctx, info) -} - -func verifyPost(ctx context.Context, info abi.PoStVerifyInfo) (bool, error) { - _, span := trace.StartSpan(ctx, "VerifyPoSt") - defer span.End() - - info.Randomness[31] = 0 - - return ffi.VerifyPoSt(info) -} diff --git a/ffiwrapper/verifier_cgo.go b/ffiwrapper/verifier_cgo.go new file mode 100644 index 000000000..402e85fab --- /dev/null +++ b/ffiwrapper/verifier_cgo.go @@ -0,0 +1,142 @@ +//+build cgo + +package ffiwrapper + +import ( + "context" + "golang.org/x/xerrors" + + "go.opencensus.io/trace" + + ffi "github.com/filecoin-project/filecoin-ffi" + "github.com/filecoin-project/lotus/storage/sectorstorage/stores" + "github.com/filecoin-project/specs-actors/actors/abi" + "github.com/filecoin-project/specs-storage/storage" +) + +func (sb *Sealer) ComputeElectionPoSt(ctx context.Context, miner abi.ActorID, sectorInfo []abi.SectorInfo, challengeSeed abi.PoStRandomness, winners []abi.PoStCandidate) ([]abi.PoStProof, error) { + challengeSeed[31] = 0 + + privsects, err := sb.pubSectorToPriv(ctx, miner, sectorInfo, nil) // TODO: faults + if err != nil { + return nil, err + } + + return ffi.GeneratePoSt(miner, privsects, challengeSeed, winners) +} + +func (sb *Sealer) GenerateFallbackPoSt(ctx context.Context, miner abi.ActorID, sectorInfo []abi.SectorInfo, challengeSeed abi.PoStRandomness, faults []abi.SectorNumber) (storage.FallbackPostOut, error) { + privsectors, err := sb.pubSectorToPriv(ctx, miner, sectorInfo, faults) + if err != nil { + return storage.FallbackPostOut{}, err + } + + challengeCount := fallbackPostChallengeCount(uint64(len(sectorInfo)), uint64(len(faults))) + challengeSeed[31] = 0 + + candidates, err := ffi.GenerateCandidates(miner, challengeSeed, challengeCount, privsectors) + if err != nil { + return storage.FallbackPostOut{}, err + } + + winners := make([]abi.PoStCandidate, len(candidates)) + for idx := range winners { + winners[idx] = candidates[idx].Candidate + } + + proof, err := ffi.GeneratePoSt(miner, privsectors, challengeSeed, winners) + return storage.FallbackPostOut{ + PoStInputs: ffiToStorageCandidates(candidates), + Proof: proof, + }, err +} + +func (sb *Sealer) GenerateEPostCandidates(ctx context.Context, miner abi.ActorID, sectorInfo []abi.SectorInfo, challengeSeed abi.PoStRandomness, faults []abi.SectorNumber) ([]storage.PoStCandidateWithTicket, error) { + privsectors, err := sb.pubSectorToPriv(ctx, miner, sectorInfo, faults) + if err != nil { + return nil, err + } + + challengeSeed[31] = 0 + + challengeCount := ElectionPostChallengeCount(uint64(len(sectorInfo)), uint64(len(faults))) + pc, err := ffi.GenerateCandidates(miner, challengeSeed, challengeCount, privsectors) + if err != nil { + return nil, err + } + + return ffiToStorageCandidates(pc), nil +} + +func ffiToStorageCandidates(pc []ffi.PoStCandidateWithTicket) []storage.PoStCandidateWithTicket { + out := make([]storage.PoStCandidateWithTicket, len(pc)) + for i := range out { + out[i] = storage.PoStCandidateWithTicket{ + Candidate: pc[i].Candidate, + Ticket: pc[i].Ticket, + } + } + + return out +} + +func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorInfo []abi.SectorInfo, faults []abi.SectorNumber) (ffi.SortedPrivateSectorInfo, error) { + fmap := map[abi.SectorNumber]struct{}{} + for _, fault := range faults { + fmap[fault] = struct{}{} + } + + var out []ffi.PrivateSectorInfo + for _, s := range sectorInfo { + if _, faulty := fmap[s.SectorNumber]; faulty { + continue + } + + paths, done, err := sb.sectors.AcquireSector(ctx, abi.SectorID{Miner: mid, Number: s.SectorNumber}, stores.FTCache|stores.FTSealed, 0, false) + if err != nil { + return ffi.SortedPrivateSectorInfo{}, xerrors.Errorf("acquire sector paths: %w", err) + } + done() // TODO: This is a tiny bit suboptimal + + postProofType, err := s.RegisteredProof.RegisteredPoStProof() + if err != nil { + return ffi.SortedPrivateSectorInfo{}, xerrors.Errorf("acquiring registered PoSt proof from sector info %+v: %w", s, err) + } + + out = append(out, ffi.PrivateSectorInfo{ + CacheDirPath: paths.Cache, + PoStProofType: postProofType, + SealedSectorPath: paths.Sealed, + SectorInfo: s, + }) + } + + return ffi.NewSortedPrivateSectorInfo(out...), nil +} + +var _ Verifier = ProofVerifier + +type proofVerifier struct{} + +var ProofVerifier = proofVerifier{} + +func (proofVerifier) VerifySeal(info abi.SealVerifyInfo) (bool, error) { + return ffi.VerifySeal(info) +} + +func (proofVerifier) VerifyElectionPost(ctx context.Context, info abi.PoStVerifyInfo) (bool, error) { + return verifyPost(ctx, info) +} + +func (proofVerifier) VerifyFallbackPost(ctx context.Context, info abi.PoStVerifyInfo) (bool, error) { + return verifyPost(ctx, info) +} + +func verifyPost(ctx context.Context, info abi.PoStVerifyInfo) (bool, error) { + _, span := trace.StartSpan(ctx, "VerifyPoSt") + defer span.End() + + info.Randomness[31] = 0 + + return ffi.VerifyPoSt(info) +} diff --git a/manager.go b/manager.go index 43438385f..97d645484 100644 --- a/manager.go +++ b/manager.go @@ -4,7 +4,6 @@ import ( "container/list" "context" "errors" - "github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper" "io" "net/http" "sync" @@ -19,6 +18,7 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper" "github.com/filecoin-project/lotus/storage/sectorstorage/sealtasks" "github.com/filecoin-project/lotus/storage/sectorstorage/stores" ) @@ -30,7 +30,7 @@ var ErrNoWorkers = errors.New("no suitable workers found") type URLs []string type Worker interface { - ffiwrapper.Sealer + ffiwrapper.StorageSealer TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) @@ -47,7 +47,7 @@ type SectorManager interface { ReadPieceFromSealedSector(context.Context, abi.SectorID, ffiwrapper.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (io.ReadCloser, error) - ffiwrapper.Sealer + ffiwrapper.StorageSealer storage.Prover } diff --git a/mock/mock.go b/mock/mock.go index e86d2b3e7..0591958c1 100644 --- a/mock/mock.go +++ b/mock/mock.go @@ -4,8 +4,6 @@ import ( "bytes" "context" "fmt" - "github.com/filecoin-project/lotus/storage/sectorstorage" - "github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper" "io" "io/ioutil" "math/big" @@ -20,6 +18,8 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/storage/sectorstorage" + "github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper" ) var log = logging.Logger("sbmock") @@ -124,7 +124,7 @@ func (sb *SectorMgr) SealPreCommit1(ctx context.Context, sid abi.SectorID, ticke ss, ok := sb.sectors[sid] sb.lk.Unlock() if !ok { - return nil, xerrors.Errorf("no sector with id %d in sectorbuilder", sid) + return nil, xerrors.Errorf("no sector with id %d in storage", sid) } ss.lk.Lock() @@ -237,7 +237,7 @@ func (sb *SectorMgr) FailSector(sid abi.SectorID) error { defer sb.lk.Unlock() ss, ok := sb.sectors[sid] if !ok { - return fmt.Errorf("no such sector in sectorbuilder") + return fmt.Errorf("no such sector in storage") } ss.failed = true diff --git a/worker_local.go b/worker_local.go index 3547a8a03..24d1e14df 100644 --- a/worker_local.go +++ b/worker_local.go @@ -2,7 +2,6 @@ package sectorstorage import ( "context" - "github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper" "io" "os" @@ -14,6 +13,7 @@ import ( storage2 "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper" "github.com/filecoin-project/lotus/storage/sectorstorage/sealtasks" "github.com/filecoin-project/lotus/storage/sectorstorage/stores" ) @@ -87,7 +87,7 @@ func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector abi. }, nil } -func (l *LocalWorker) sb() (ffiwrapper.Basic, error) { +func (l *LocalWorker) sb() (ffiwrapper.Storage, error) { return ffiwrapper.New(&localWorkerPathProvider{w: l}, l.scfg) } diff --git a/zerocomm/zerocomm_test.go b/zerocomm/zerocomm_test.go index ef3f11d88..865492991 100644 --- a/zerocomm/zerocomm_test.go +++ b/zerocomm/zerocomm_test.go @@ -3,7 +3,6 @@ package zerocomm_test import ( "bytes" "fmt" - "github.com/filecoin-project/lotus/storage/sectorstorage/zerocomm" "io" "testing" @@ -12,6 +11,7 @@ import ( "github.com/ipfs/go-cid" "github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper" + "github.com/filecoin-project/lotus/storage/sectorstorage/zerocomm" ) func TestComms(t *testing.T) { From 353ecbc62d4ee378ac76f331f89bda058654b204 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 27 Mar 2020 18:21:32 +0100 Subject: [PATCH 019/199] sectorstorage: Address review --- ffiwrapper/basicfs/fs.go | 34 ++++++++++++++++++++-------------- ffiwrapper/config.go | 26 ++------------------------ ffiwrapper/sealer_test.go | 8 +++----- stores/filetype.go | 4 ++++ stores/index.go | 6 +++--- stores/local.go | 10 +++++----- stores/remote.go | 2 +- 7 files changed, 38 insertions(+), 52 deletions(-) diff --git a/ffiwrapper/basicfs/fs.go b/ffiwrapper/basicfs/fs.go index cd85a1a0b..ce0f73b49 100644 --- a/ffiwrapper/basicfs/fs.go +++ b/ffiwrapper/basicfs/fs.go @@ -2,7 +2,6 @@ package basicfs import ( "context" - "fmt" "os" "path/filepath" "sync" @@ -25,14 +24,24 @@ type Provider struct { } func (b *Provider) AcquireSector(ctx context.Context, id abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, sealing bool) (stores.SectorPaths, func(), error) { - os.Mkdir(filepath.Join(b.Root, stores.FTUnsealed.String()), 0755) - os.Mkdir(filepath.Join(b.Root, stores.FTSealed.String()), 0755) - os.Mkdir(filepath.Join(b.Root, stores.FTCache.String()), 0755) + if err := os.Mkdir(filepath.Join(b.Root, stores.FTUnsealed.String()), 0755); err != nil && !os.IsExist(err) { + return stores.SectorPaths{}, nil, err + } + if err := os.Mkdir(filepath.Join(b.Root, stores.FTSealed.String()), 0755); err != nil && !os.IsExist(err) { + return stores.SectorPaths{}, nil, err + } + if err := os.Mkdir(filepath.Join(b.Root, stores.FTCache.String()), 0755); err != nil && !os.IsExist(err) { + return stores.SectorPaths{}, nil, err + } done := func() {} - for i := 0; i < 3; i++ { - if (existing|allocate)&(1< Date: Fri, 27 Mar 2020 21:08:06 +0100 Subject: [PATCH 020/199] sectorstorage: Untangle from lotus deps --- ffiwrapper/config.go | 15 +++++ ffiwrapper/verifier_cgo.go | 3 +- worker_local.go => localworker.go | 11 ++-- manager.go | 39 +++++++++---- mock/mock.go | 3 +- mock/preseal.go | 63 --------------------- mock/util.go | 16 +----- resources.go | 3 +- roprov.go | 5 +- sched.go | 8 +-- stats.go | 15 +++-- stores/http_handler.go | 2 +- stores/local.go | 16 ++++-- stores/remote.go | 2 +- tarutil/systar.go | 92 +++++++++++++++++++++++++++++++ worker_remote.go | 51 ----------------- 16 files changed, 179 insertions(+), 165 deletions(-) rename worker_local.go => localworker.go (94%) delete mode 100644 mock/preseal.go create mode 100644 tarutil/systar.go delete mode 100644 worker_remote.go diff --git a/ffiwrapper/config.go b/ffiwrapper/config.go index a2d79c410..9b1fc5f9a 100644 --- a/ffiwrapper/config.go +++ b/ffiwrapper/config.go @@ -55,3 +55,18 @@ func SectorSizeForRegisteredProof(p abi.RegisteredProof) (abi.SectorSize, error) return 0, fmt.Errorf("unsupported registered proof %d", p) } } + +func ProofTypeFromSectorSize(ssize abi.SectorSize) (abi.RegisteredProof, abi.RegisteredProof, error) { + switch ssize { + case 2 << 10: + return abi.RegisteredProof_StackedDRG2KiBPoSt, abi.RegisteredProof_StackedDRG2KiBSeal, nil + case 8 << 20: + return abi.RegisteredProof_StackedDRG8MiBPoSt, abi.RegisteredProof_StackedDRG8MiBSeal, nil + case 512 << 20: + return abi.RegisteredProof_StackedDRG512MiBPoSt, abi.RegisteredProof_StackedDRG512MiBSeal, nil + case 32 << 30: + return abi.RegisteredProof_StackedDRG32GiBPoSt, abi.RegisteredProof_StackedDRG32GiBSeal, nil + default: + return 0, 0, xerrors.Errorf("unsupported sector size for miner: %v", ssize) + } +} diff --git a/ffiwrapper/verifier_cgo.go b/ffiwrapper/verifier_cgo.go index 402e85fab..2de6137da 100644 --- a/ffiwrapper/verifier_cgo.go +++ b/ffiwrapper/verifier_cgo.go @@ -9,9 +9,10 @@ import ( "go.opencensus.io/trace" ffi "github.com/filecoin-project/filecoin-ffi" - "github.com/filecoin-project/lotus/storage/sectorstorage/stores" "github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-storage/storage" + + "github.com/filecoin-project/lotus/storage/sectorstorage/stores" ) func (sb *Sealer) ComputeElectionPoSt(ctx context.Context, miner abi.ActorID, sectorInfo []abi.SectorInfo, challengeSeed abi.PoStRandomness, winners []abi.PoStCandidate) ([]abi.PoStProof, error) { diff --git a/worker_local.go b/localworker.go similarity index 94% rename from worker_local.go rename to localworker.go index 24d1e14df..e71a619f3 100644 --- a/worker_local.go +++ b/localworker.go @@ -12,7 +12,6 @@ import ( "github.com/filecoin-project/specs-actors/actors/abi" storage2 "github.com/filecoin-project/specs-storage/storage" - "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper" "github.com/filecoin-project/lotus/storage/sectorstorage/sealtasks" "github.com/filecoin-project/lotus/storage/sectorstorage/stores" @@ -174,7 +173,7 @@ func (l *LocalWorker) Paths(ctx context.Context) ([]stores.StoragePath, error) { return l.localStore.Local(ctx) } -func (l *LocalWorker) Info(context.Context) (api.WorkerInfo, error) { +func (l *LocalWorker) Info(context.Context) (WorkerInfo, error) { hostname, err := os.Hostname() // TODO: allow overriding from config if err != nil { panic(err) @@ -187,17 +186,17 @@ func (l *LocalWorker) Info(context.Context) (api.WorkerInfo, error) { h, err := sysinfo.Host() if err != nil { - return api.WorkerInfo{}, xerrors.Errorf("getting host info: %w", err) + return WorkerInfo{}, xerrors.Errorf("getting host info: %w", err) } mem, err := h.Memory() if err != nil { - return api.WorkerInfo{}, xerrors.Errorf("getting memory info: %w", err) + return WorkerInfo{}, xerrors.Errorf("getting memory info: %w", err) } - return api.WorkerInfo{ + return WorkerInfo{ Hostname: hostname, - Resources: api.WorkerResources{ + Resources: WorkerResources{ MemPhysical: mem.Total, MemSwap: mem.VirtualTotal, MemReserved: mem.VirtualUsed + mem.Total - mem.Available, // TODO: sub this process diff --git a/manager.go b/manager.go index 97d645484..6f9d5877e 100644 --- a/manager.go +++ b/manager.go @@ -16,8 +16,6 @@ import ( "github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-storage/storage" - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper" "github.com/filecoin-project/lotus/storage/sectorstorage/sealtasks" "github.com/filecoin-project/lotus/storage/sectorstorage/stores" @@ -37,11 +35,26 @@ type Worker interface { // Returns paths accessible to the worker Paths(context.Context) ([]stores.StoragePath, error) - Info(context.Context) (api.WorkerInfo, error) + Info(context.Context) (WorkerInfo, error) Close() error } +type WorkerInfo struct { + Hostname string + + Resources WorkerResources +} + +type WorkerResources struct { + MemPhysical uint64 + MemSwap uint64 + + MemReserved uint64 // Used by system / other processes + + GPUs []string +} + type SectorManager interface { SectorSize() abi.SectorSize @@ -76,7 +89,16 @@ type Manager struct { schedQueue *list.List // List[*workerRequest] } -func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg *ffiwrapper.Config, sc config.Storage, urls URLs, ca api.Common) (*Manager, error) { +type SealerConfig struct { + // Local worker config + AllowPreCommit1 bool + AllowPreCommit2 bool + AllowCommit bool +} + +type StorageAuth http.Header + +func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg *ffiwrapper.Config, sc SealerConfig, urls URLs, sa StorageAuth) (*Manager, error) { lstor, err := stores.NewLocal(ctx, ls, si, urls) if err != nil { return nil, err @@ -87,10 +109,7 @@ func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg return nil, xerrors.Errorf("creating prover instance: %w", err) } - token, err := ca.AuthNew(ctx, []api.Permission{"admin"}) - headers := http.Header{} - headers.Add("Authorization", "Bearer "+string(token)) - stor := stores.NewRemote(lstor, si, headers) + stor := stores.NewRemote(lstor, si, http.Header(sa)) m := &Manager{ scfg: cfg, @@ -150,8 +169,8 @@ func (m *Manager) AddLocalStorage(ctx context.Context, path string) error { return xerrors.Errorf("opening local path: %w", err) } - if err := m.ls.SetStorage(func(sc *config.StorageConfig) { - sc.StoragePaths = append(sc.StoragePaths, config.LocalPath{Path: path}) + if err := m.ls.SetStorage(func(sc *stores.StorageConfig) { + sc.StoragePaths = append(sc.StoragePaths, stores.LocalPath{Path: path}) }); err != nil { return xerrors.Errorf("get storage config: %w", err) } diff --git a/mock/mock.go b/mock/mock.go index 0591958c1..dc4ca54ef 100644 --- a/mock/mock.go +++ b/mock/mock.go @@ -17,7 +17,6 @@ import ( logging "github.com/ipfs/go-log" "golang.org/x/xerrors" - "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/storage/sectorstorage" "github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper" ) @@ -37,7 +36,7 @@ type SectorMgr struct { type mockVerif struct{} func NewMockSectorMgr(threads int, ssize abi.SectorSize) *SectorMgr { - rt, _, err := api.ProofTypeFromSectorSize(ssize) + rt, _, err := ffiwrapper.ProofTypeFromSectorSize(ssize) if err != nil { panic(err) } diff --git a/mock/preseal.go b/mock/preseal.go deleted file mode 100644 index 20a4377cd..000000000 --- a/mock/preseal.go +++ /dev/null @@ -1,63 +0,0 @@ -package mock - -import ( - "github.com/filecoin-project/go-address" - commcid "github.com/filecoin-project/go-fil-commcid" - "github.com/filecoin-project/lotus/storage/sectorstorage/zerocomm" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin/market" - "github.com/filecoin-project/specs-actors/actors/crypto" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/wallet" - "github.com/filecoin-project/lotus/genesis" -) - -func PreSeal(ssize abi.SectorSize, maddr address.Address, sectors int) (*genesis.Miner, *types.KeyInfo, error) { - k, err := wallet.GenerateKey(crypto.SigTypeBLS) - if err != nil { - return nil, nil, err - } - - genm := &genesis.Miner{ - Owner: k.Address, - Worker: k.Address, - MarketBalance: big.NewInt(0), - PowerBalance: big.NewInt(0), - SectorSize: ssize, - Sectors: make([]*genesis.PreSeal, sectors), - } - - _, st, err := api.ProofTypeFromSectorSize(ssize) - if err != nil { - return nil, nil, err - } - - for i := range genm.Sectors { - preseal := &genesis.PreSeal{} - - preseal.ProofType = st - preseal.CommD = zerocomm.ZeroPieceCommitment(abi.PaddedPieceSize(ssize).Unpadded()) - d, _ := commcid.CIDToPieceCommitmentV1(preseal.CommD) - r := commDR(d) - preseal.CommR = commcid.ReplicaCommitmentV1ToCID(r[:]) - preseal.SectorID = abi.SectorNumber(i + 1) - preseal.Deal = market.DealProposal{ - PieceCID: preseal.CommD, - PieceSize: abi.PaddedPieceSize(ssize), - Client: maddr, - Provider: maddr, - StartEpoch: 1, - EndEpoch: 10000, - StoragePricePerEpoch: big.Zero(), - ProviderCollateral: big.Zero(), - ClientCollateral: big.Zero(), - } - - genm.Sectors[i] = preseal - } - - return genm, &k.KeyInfo, nil -} diff --git a/mock/util.go b/mock/util.go index e37cf3552..2d2ebbfe2 100644 --- a/mock/util.go +++ b/mock/util.go @@ -1,20 +1,6 @@ package mock -import ( - "crypto/rand" - "io" - "io/ioutil" -) - -func randB(n uint64) []byte { - b, err := ioutil.ReadAll(io.LimitReader(rand.Reader, int64(n))) - if err != nil { - panic(err) - } - return b -} - -func commDR(in []byte) (out [32]byte) { +func CommDR(in []byte) (out [32]byte) { for i, b := range in { out[i] = ^b } diff --git a/resources.go b/resources.go index 4aafb5962..ab2e5170d 100644 --- a/resources.go +++ b/resources.go @@ -1,9 +1,10 @@ package sectorstorage import ( + "github.com/filecoin-project/specs-actors/actors/abi" + "github.com/filecoin-project/lotus/storage/sectorstorage/sealtasks" "github.com/filecoin-project/lotus/storage/sectorstorage/stores" - "github.com/filecoin-project/specs-actors/actors/abi" ) var FSOverheadSeal = map[stores.SectorFileType]int{ // 10x overheads diff --git a/roprov.go b/roprov.go index dfab863ff..694bcd2b2 100644 --- a/roprov.go +++ b/roprov.go @@ -3,10 +3,11 @@ package sectorstorage import ( "context" - "github.com/filecoin-project/lotus/storage/sectorstorage/stores" + "golang.org/x/xerrors" "github.com/filecoin-project/specs-actors/actors/abi" - "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/storage/sectorstorage/stores" ) type readonlyProvider struct { diff --git a/sched.go b/sched.go index d8e3d35a0..79f9c8971 100644 --- a/sched.go +++ b/sched.go @@ -1,11 +1,11 @@ package sectorstorage import ( - "github.com/filecoin-project/lotus/storage/sectorstorage/sealtasks" - "github.com/filecoin-project/specs-actors/actors/abi" "golang.org/x/xerrors" - "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/specs-actors/actors/abi" + + "github.com/filecoin-project/lotus/storage/sectorstorage/sealtasks" ) const mib = 1 << 20 @@ -39,7 +39,7 @@ func (r *workerRequest) respond(resp workerResponse) { type workerHandle struct { w Worker - info api.WorkerInfo + info WorkerInfo memUsedMin uint64 memUsedMax uint64 diff --git a/stats.go b/stats.go index 2cae1decb..70a5f341a 100644 --- a/stats.go +++ b/stats.go @@ -1,15 +1,22 @@ package sectorstorage -import "github.com/filecoin-project/lotus/api" +type WorkerStats struct { + Info WorkerInfo -func (m *Manager) WorkerStats() map[uint64]api.WorkerStats { + MemUsedMin uint64 + MemUsedMax uint64 + GpuUsed bool + CpuUse int +} + +func (m *Manager) WorkerStats() map[uint64]WorkerStats { m.workersLk.Lock() defer m.workersLk.Unlock() - out := map[uint64]api.WorkerStats{} + out := map[uint64]WorkerStats{} for id, handle := range m.workers { - out[uint64(id)] = api.WorkerStats{ + out[uint64(id)] = WorkerStats{ Info: handle.info, MemUsedMin: handle.memUsedMin, MemUsedMax: handle.memUsedMax, diff --git a/stores/http_handler.go b/stores/http_handler.go index 21903494b..14fbe04c8 100644 --- a/stores/http_handler.go +++ b/stores/http_handler.go @@ -10,7 +10,7 @@ import ( logging "github.com/ipfs/go-log/v2" "golang.org/x/xerrors" - "github.com/filecoin-project/lotus/lib/tarutil" + "github.com/filecoin-project/lotus/storage/sectorstorage/tarutil" ) var log = logging.Logger("stores") diff --git a/stores/local.go b/stores/local.go index a971b61b3..281475b1c 100644 --- a/stores/local.go +++ b/stores/local.go @@ -9,10 +9,9 @@ import ( "path/filepath" "sync" - "github.com/filecoin-project/specs-actors/actors/abi" "golang.org/x/xerrors" - "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/specs-actors/actors/abi" ) type StoragePath struct { @@ -34,9 +33,18 @@ type LocalStorageMeta struct { CanStore bool } +// .lotusstorage/storage.json +type StorageConfig struct { + StoragePaths []LocalPath +} + +type LocalPath struct { + Path string +} + type LocalStorage interface { - GetStorage() (config.StorageConfig, error) - SetStorage(func(*config.StorageConfig)) error + GetStorage() (StorageConfig, error) + SetStorage(func(*StorageConfig)) error } const MetaFile = "sectorstore.json" diff --git a/stores/remote.go b/stores/remote.go index 14550174f..349b73722 100644 --- a/stores/remote.go +++ b/stores/remote.go @@ -19,7 +19,7 @@ import ( "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/lotus/lib/tarutil" + "github.com/filecoin-project/lotus/storage/sectorstorage/tarutil" ) type Remote struct { diff --git a/tarutil/systar.go b/tarutil/systar.go new file mode 100644 index 000000000..a94354731 --- /dev/null +++ b/tarutil/systar.go @@ -0,0 +1,92 @@ +package tarutil + +import ( + "archive/tar" + "golang.org/x/xerrors" + "io" + "io/ioutil" + "os" + "path/filepath" + + logging "github.com/ipfs/go-log/v2" +) + +var log = logging.Logger("tarutil") + +func ExtractTar(body io.Reader, dir string) error { + if err := os.MkdirAll(dir, 0755); err != nil { + return xerrors.Errorf("mkdir: %w", err) + } + + tr := tar.NewReader(body) + for { + header, err := tr.Next() + switch err { + default: + return err + case io.EOF: + return nil + + case nil: + } + + f, err := os.Create(filepath.Join(dir, header.Name)) + if err != nil { + return xerrors.Errorf("creating file %s: %w", filepath.Join(dir, header.Name), err) + } + + if _, err := io.Copy(f, tr); err != nil { + return err + } + + if err := f.Close(); err != nil { + return err + } + } +} + +func TarDirectory(dir string) (io.ReadCloser, error) { + r, w := io.Pipe() + + go func() { + _ = w.CloseWithError(writeTarDirectory(dir, w)) + }() + + return r, nil +} + +func writeTarDirectory(dir string, w io.Writer) error { + tw := tar.NewWriter(w) + + files, err := ioutil.ReadDir(dir) + if err != nil { + return err + } + + for _, file := range files { + h, err := tar.FileInfoHeader(file, "") + if err != nil { + return xerrors.Errorf("getting header for file %s: %w", file.Name(), err) + } + + if err := tw.WriteHeader(h); err != nil { + return xerrors.Errorf("wiritng header for file %s: %w", file.Name(), err) + } + + f, err := os.OpenFile(filepath.Join(dir, file.Name()), os.O_RDONLY, 644) + if err != nil { + return xerrors.Errorf("opening %s for reading: %w", file.Name(), err) + } + + if _, err := io.Copy(tw, f); err != nil { + return xerrors.Errorf("copy data for file %s: %w", file.Name(), err) + } + + if err := f.Close(); err != nil { + return err + } + + } + + return nil +} diff --git a/worker_remote.go b/worker_remote.go deleted file mode 100644 index ffd96f188..000000000 --- a/worker_remote.go +++ /dev/null @@ -1,51 +0,0 @@ -package sectorstorage - -import ( - "context" - "net/http" - - "github.com/filecoin-project/specs-actors/actors/abi" - storage2 "github.com/filecoin-project/specs-storage/storage" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/client" - "github.com/filecoin-project/lotus/lib/jsonrpc" -) - -type remote struct { - api.WorkerApi - closer jsonrpc.ClientCloser -} - -func (r *remote) NewSector(ctx context.Context, sector abi.SectorID) error { - return xerrors.New("unsupported") -} - -func (r *remote) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage2.Data) (abi.PieceInfo, error) { - return abi.PieceInfo{}, xerrors.New("unsupported") -} - -func ConnectRemote(ctx context.Context, fa api.Common, url string) (*remote, error) { - token, err := fa.AuthNew(ctx, []api.Permission{"admin"}) - if err != nil { - return nil, xerrors.Errorf("creating auth token for remote connection: %w", err) - } - - headers := http.Header{} - headers.Add("Authorization", "Bearer "+string(token)) - - wapi, closer, err := client.NewWorkerRPC(url, headers) - if err != nil { - return nil, xerrors.Errorf("creating jsonrpc client: %w", err) - } - - return &remote{wapi, closer}, nil -} - -func (r *remote) Close() error { - r.closer() - return nil -} - -var _ Worker = &remote{} From bbbe16531b7fdc8d9f380508e3c588ed79a6f6f2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Sat, 28 Mar 2020 00:02:52 +0100 Subject: [PATCH 021/199] Repo setup --- LICENSE-APACHE | 5 +++++ LICENSE-MIT | 19 +++++++++++++++++++ 2 files changed, 24 insertions(+) create mode 100644 LICENSE-APACHE create mode 100644 LICENSE-MIT diff --git a/LICENSE-APACHE b/LICENSE-APACHE new file mode 100644 index 000000000..14478a3b6 --- /dev/null +++ b/LICENSE-APACHE @@ -0,0 +1,5 @@ +Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. diff --git a/LICENSE-MIT b/LICENSE-MIT new file mode 100644 index 000000000..72dc60d84 --- /dev/null +++ b/LICENSE-MIT @@ -0,0 +1,19 @@ +The MIT License (MIT) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. From afebab334eb261b022fba2a533999444e7274b60 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Sat, 28 Mar 2020 00:21:36 +0100 Subject: [PATCH 022/199] Update imports; Add build scripts --- .gitmodules | 4 + extern/filecoin-ffi | 1 + ffiwrapper/basicfs/fs.go | 2 +- ffiwrapper/sealer_cgo.go | 4 +- ffiwrapper/sealer_test.go | 2 +- ffiwrapper/types.go | 4 +- ffiwrapper/verifier_cgo.go | 2 +- go.mod | 22 ++++ go.sum | 247 +++++++++++++++++++++++++++++++++++++ localworker.go | 6 +- manager.go | 6 +- mock/mock.go | 4 +- resources.go | 4 +- roprov.go | 2 +- sched.go | 2 +- stores/http_handler.go | 2 +- stores/remote.go | 2 +- zerocomm/zerocomm_test.go | 4 +- 18 files changed, 297 insertions(+), 23 deletions(-) create mode 100644 .gitmodules create mode 160000 extern/filecoin-ffi create mode 100644 go.mod create mode 100644 go.sum diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 000000000..a655f05b9 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,4 @@ +[submodule "extern/filecoin-ffi"] + path = extern/filecoin-ffi + url = https://github.com/filecoin-project/filecoin-ffi.git + branch = master diff --git a/extern/filecoin-ffi b/extern/filecoin-ffi new file mode 160000 index 000000000..e899cc1dd --- /dev/null +++ b/extern/filecoin-ffi @@ -0,0 +1 @@ +Subproject commit e899cc1dd0720e0a4d25b0e751b84e3733cbedc5 diff --git a/ffiwrapper/basicfs/fs.go b/ffiwrapper/basicfs/fs.go index ce0f73b49..e7e755a77 100644 --- a/ffiwrapper/basicfs/fs.go +++ b/ffiwrapper/basicfs/fs.go @@ -8,7 +8,7 @@ import ( "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/lotus/storage/sectorstorage/stores" + "github.com/filecoin-project/sector-storage/stores" ) type sectorFile struct { diff --git a/ffiwrapper/sealer_cgo.go b/ffiwrapper/sealer_cgo.go index 793f07c1b..c001b5654 100644 --- a/ffiwrapper/sealer_cgo.go +++ b/ffiwrapper/sealer_cgo.go @@ -15,8 +15,8 @@ import ( "github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-storage/storage" - "github.com/filecoin-project/lotus/storage/sectorstorage/stores" - "github.com/filecoin-project/lotus/storage/sectorstorage/zerocomm" + "github.com/filecoin-project/sector-storage/stores" + "github.com/filecoin-project/sector-storage/zerocomm" ) var _ Storage = &Sealer{} diff --git a/ffiwrapper/sealer_test.go b/ffiwrapper/sealer_test.go index da2d360a8..ef458601c 100644 --- a/ffiwrapper/sealer_test.go +++ b/ffiwrapper/sealer_test.go @@ -20,7 +20,7 @@ import ( "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper/basicfs" + "github.com/filecoin-project/sector-storage/ffiwrapper/basicfs" ) func init() { diff --git a/ffiwrapper/types.go b/ffiwrapper/types.go index 02479f705..f89c63fb4 100644 --- a/ffiwrapper/types.go +++ b/ffiwrapper/types.go @@ -10,8 +10,8 @@ import ( "github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-storage/storage" - "github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper/basicfs" - "github.com/filecoin-project/lotus/storage/sectorstorage/stores" + "github.com/filecoin-project/sector-storage/ffiwrapper/basicfs" + "github.com/filecoin-project/sector-storage/stores" ) type UnpaddedByteIndex uint64 diff --git a/ffiwrapper/verifier_cgo.go b/ffiwrapper/verifier_cgo.go index 2de6137da..475dde617 100644 --- a/ffiwrapper/verifier_cgo.go +++ b/ffiwrapper/verifier_cgo.go @@ -12,7 +12,7 @@ import ( "github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-storage/storage" - "github.com/filecoin-project/lotus/storage/sectorstorage/stores" + "github.com/filecoin-project/sector-storage/stores" ) func (sb *Sealer) ComputeElectionPoSt(ctx context.Context, miner abi.ActorID, sectorInfo []abi.SectorInfo, challengeSeed abi.PoStRandomness, winners []abi.PoStCandidate) ([]abi.PoStProof, error) { diff --git a/go.mod b/go.mod new file mode 100644 index 000000000..badde3173 --- /dev/null +++ b/go.mod @@ -0,0 +1,22 @@ +module github.com/filecoin-project/sector-storage + +go 1.13 + +require ( + github.com/elastic/go-sysinfo v1.3.0 + github.com/filecoin-project/filecoin-ffi v0.0.0-20200326153646-e899cc1dd072 + github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5 + github.com/filecoin-project/specs-actors v0.0.0-20200325195038-f27421b402c5 + github.com/filecoin-project/specs-storage v0.0.0-20200317225704-7420bc655c38 + github.com/gorilla/mux v1.7.4 + github.com/hashicorp/go-multierror v1.0.0 + github.com/ipfs/go-cid v0.0.5 + github.com/ipfs/go-ipfs-files v0.0.7 + github.com/ipfs/go-log v1.0.3 + github.com/ipfs/go-log/v2 v2.0.3 + github.com/mitchellh/go-homedir v1.1.0 + go.opencensus.io v0.22.3 + golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 +) + +replace github.com/filecoin-project/filecoin-ffi => ./extern/filecoin-ffi diff --git a/go.sum b/go.sum new file mode 100644 index 000000000..127be6522 --- /dev/null +++ b/go.sum @@ -0,0 +1,247 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= +github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= +github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 h1:HVTnpeuvF6Owjd5mniCL8DEXo7uYXdQEmOP4FJbV5tg= +github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= +github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/elastic/go-sysinfo v1.3.0 h1:eb2XFGTMlSwG/yyU9Y8jVAYLIzU2sFzWXwo2gmetyrE= +github.com/elastic/go-sysinfo v1.3.0/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= +github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= +github.com/filecoin-project/filecoin-ffi v0.0.0-20200326153646-e899cc1dd072 h1:7csaynzG5QwAEWR7nFoVeGLDyE2Oqd5qNmd+KoA0Hk4= +github.com/filecoin-project/filecoin-ffi v0.0.0-20200326153646-e899cc1dd072/go.mod h1:PtH9YP0rURHUKHrKeEBeWg/BqIBMQOz8wtlXlVGREBE= +github.com/filecoin-project/go-address v0.0.2-0.20200218010043-eb9bb40ed5be h1:TooKBwR/g8jG0hZ3lqe9S5sy2vTUcLOZLlz3M5wGn2E= +github.com/filecoin-project/go-address v0.0.2-0.20200218010043-eb9bb40ed5be/go.mod h1:SAOwJoakQ8EPjwNIsiakIQKsoKdkcbx8U3IapgCg9R0= +github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200131012142-05d80eeccc5e/go.mod h1:boRtQhzmxNocrMxOXo1NYn4oUc1NGvR8tEa79wApNXg= +github.com/filecoin-project/go-bitfield v0.0.0-20200309034705-8c7ac40bd550 h1:aockulLU8Qjkdj4FQz53WQpNosAIYk8DxRediRLkE5c= +github.com/filecoin-project/go-bitfield v0.0.0-20200309034705-8c7ac40bd550/go.mod h1:iodsLxOFZnqKtjj2zkgqzoGNrv6vUqj69AT/J8DKXEw= +github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= +github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5 h1:yvQJCW9mmi9zy+51xA01Ea2X7/dL7r8eKDPuGUjRmbo= +github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5/go.mod h1:JbkIgFF/Z9BDlvrJO1FuKkaWsH673/UdFaiVS6uIHlA= +github.com/filecoin-project/lotus v0.2.10 h1:ijrj/nYdKu5GiMo9r1+Zcp2A4jKHSOMZ2WNy2K/mtOE= +github.com/filecoin-project/specs-actors v0.0.0-20200226200336-94c9b92b2775/go.mod h1:0HAWYrvajFHDgRaKbF0rl+IybVLZL5z4gQ8koCMPhoU= +github.com/filecoin-project/specs-actors v0.0.0-20200302223606-0eaf97b10aaf/go.mod h1:0HAWYrvajFHDgRaKbF0rl+IybVLZL5z4gQ8koCMPhoU= +github.com/filecoin-project/specs-actors v0.0.0-20200325195038-f27421b402c5 h1:GRvglZbsmJLeDD1JN9WGQ4DjHKjbrmrKVRhlRhyfcio= +github.com/filecoin-project/specs-actors v0.0.0-20200325195038-f27421b402c5/go.mod h1:5WngRgTN5Eo4+0SjCBqLzEr2l6Mj45DrP2606gBhqI0= +github.com/filecoin-project/specs-storage v0.0.0-20200317225704-7420bc655c38 h1:ky+rfX3bG1TjOBLn14V674q+iwZpalyKzZxGRNzA11I= +github.com/filecoin-project/specs-storage v0.0.0-20200317225704-7420bc655c38/go.mod h1:dUmzHS7izOD6HW3/JpzFrjxnptxbsHXBlO8puK2UzBk= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc= +github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= +github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ipfs/go-block-format v0.0.2 h1:qPDvcP19izTjU8rgo6p7gTXZlkMkF5bz5G3fqIsSCPE= +github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= +github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M= +github.com/ipfs/go-cid v0.0.5 h1:o0Ix8e/ql7Zb5UVUJEUfjsWCIY8t48++9lR8qi6oiJU= +github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= +github.com/ipfs/go-hamt-ipld v0.0.15-0.20200131012125-dd88a59d3f2e/go.mod h1:9aQJu/i/TaRDW6jqB5U217dLIDopn50wxLdHXM2CTfE= +github.com/ipfs/go-ipfs-files v0.0.7 h1:s5BRD12ndahqYifeH1S8Z73zqZhR+3IdKYAG9PiETs0= +github.com/ipfs/go-ipfs-files v0.0.7/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs= +github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50= +github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= +github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= +github.com/ipfs/go-ipld-cbor v0.0.4 h1:Aw3KPOKXjvrm6VjwJvFf1F1ekR/BH3jdof3Bk7OTiSA= +github.com/ipfs/go-ipld-cbor v0.0.4/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= +github.com/ipfs/go-ipld-format v0.0.1 h1:HCu4eB/Gh+KD/Q0M8u888RFkorTWNIL3da4oc5dwc80= +github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= +github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= +github.com/ipfs/go-log v1.0.3 h1:Gg7SUYSZ7BrqaKMwM+hRgcAkKv4QLfzP4XPQt5Sx/OI= +github.com/ipfs/go-log v1.0.3/go.mod h1:OsLySYkwIbiSUR/yBTdv1qPtcE4FW3WPWk/ewz9Ru+A= +github.com/ipfs/go-log/v2 v2.0.3 h1:Q2gXcBoCALyLN/pUQlz1qgu0x3uFV6FzP9oXhpfyJpc= +github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= +github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52/go.mod h1:fdg+/X9Gg4AsAIzWpEHwnqd+QY3b7lajxyjE1m4hkq4= +github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= +github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4= +github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= +github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= +github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw= +github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= +github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc= +github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= +github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA= +github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= +github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.13 h1:06x+mk/zj1FoMsgNejLpy6QTvJqlSt/BhLEy87zidlc= +github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.5 h1:XVZwSo04Cs3j/jS0uAEPpT3JY6DzMcVLLoWOSnCxOjg= +github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1 h1:CskT+S6Ay54OwxBGB0R3Rsx4Muto6UnEYTyKJbyRIAI= +github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0 h1:c8R11WC8m7KNMkTv/0+Be8vvwo4I3/Ut9AC2FW8fX3U= +github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU= +github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= +github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= +github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= +github.com/whyrusleeping/cbor-gen v0.0.0-20200206220010-03c9665e2a66 h1:LolR9FiEfQNn5U031bAhn/46po2JgWHKadYbcWFIJ+0= +github.com/whyrusleeping/cbor-gen v0.0.0-20200206220010-03c9665e2a66/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= +github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= +github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190302025703-b6889370fb10/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae h1:QoJmnb9uyPCrH8GIg9uRLn4Ta45yhcQtpymCd0AavO8= +golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/src-d/go-cli.v0 v0.0.0-20181105080154-d492247bbc0d/go.mod h1:z+K8VcOYVYcSwSjGebuDL6176A1XskgbtNl64NSg+n8= +gopkg.in/src-d/go-log.v1 v1.0.1/go.mod h1:GN34hKP0g305ysm2/hctJ0Y8nWP3zxXXJ8GFabTyABE= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M= +howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= diff --git a/localworker.go b/localworker.go index e71a619f3..6c2ca6c09 100644 --- a/localworker.go +++ b/localworker.go @@ -12,9 +12,9 @@ import ( "github.com/filecoin-project/specs-actors/actors/abi" storage2 "github.com/filecoin-project/specs-storage/storage" - "github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper" - "github.com/filecoin-project/lotus/storage/sectorstorage/sealtasks" - "github.com/filecoin-project/lotus/storage/sectorstorage/stores" + "github.com/filecoin-project/sector-storage/ffiwrapper" + "github.com/filecoin-project/sector-storage/sealtasks" + "github.com/filecoin-project/sector-storage/stores" ) var pathTypes = []stores.SectorFileType{stores.FTUnsealed, stores.FTSealed, stores.FTCache} diff --git a/manager.go b/manager.go index 6f9d5877e..45fcfb38c 100644 --- a/manager.go +++ b/manager.go @@ -16,9 +16,9 @@ import ( "github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-storage/storage" - "github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper" - "github.com/filecoin-project/lotus/storage/sectorstorage/sealtasks" - "github.com/filecoin-project/lotus/storage/sectorstorage/stores" + "github.com/filecoin-project/sector-storage/ffiwrapper" + "github.com/filecoin-project/sector-storage/sealtasks" + "github.com/filecoin-project/sector-storage/stores" ) var log = logging.Logger("advmgr") diff --git a/mock/mock.go b/mock/mock.go index dc4ca54ef..1ada002d5 100644 --- a/mock/mock.go +++ b/mock/mock.go @@ -17,8 +17,8 @@ import ( logging "github.com/ipfs/go-log" "golang.org/x/xerrors" - "github.com/filecoin-project/lotus/storage/sectorstorage" - "github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper" + "github.com/filecoin-project/sector-storage" + "github.com/filecoin-project/sector-storage/ffiwrapper" ) var log = logging.Logger("sbmock") diff --git a/resources.go b/resources.go index ab2e5170d..ec4e4faba 100644 --- a/resources.go +++ b/resources.go @@ -3,8 +3,8 @@ package sectorstorage import ( "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/lotus/storage/sectorstorage/sealtasks" - "github.com/filecoin-project/lotus/storage/sectorstorage/stores" + "github.com/filecoin-project/sector-storage/sealtasks" + "github.com/filecoin-project/sector-storage/stores" ) var FSOverheadSeal = map[stores.SectorFileType]int{ // 10x overheads diff --git a/roprov.go b/roprov.go index 694bcd2b2..e6ec1e8f2 100644 --- a/roprov.go +++ b/roprov.go @@ -7,7 +7,7 @@ import ( "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/lotus/storage/sectorstorage/stores" + "github.com/filecoin-project/sector-storage/stores" ) type readonlyProvider struct { diff --git a/sched.go b/sched.go index 79f9c8971..9a60a415f 100644 --- a/sched.go +++ b/sched.go @@ -5,7 +5,7 @@ import ( "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/lotus/storage/sectorstorage/sealtasks" + "github.com/filecoin-project/sector-storage/sealtasks" ) const mib = 1 << 20 diff --git a/stores/http_handler.go b/stores/http_handler.go index 14fbe04c8..b14dac54f 100644 --- a/stores/http_handler.go +++ b/stores/http_handler.go @@ -10,7 +10,7 @@ import ( logging "github.com/ipfs/go-log/v2" "golang.org/x/xerrors" - "github.com/filecoin-project/lotus/storage/sectorstorage/tarutil" + "github.com/filecoin-project/sector-storage/tarutil" ) var log = logging.Logger("stores") diff --git a/stores/remote.go b/stores/remote.go index 349b73722..90d62d91d 100644 --- a/stores/remote.go +++ b/stores/remote.go @@ -19,7 +19,7 @@ import ( "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/lotus/storage/sectorstorage/tarutil" + "github.com/filecoin-project/sector-storage/tarutil" ) type Remote struct { diff --git a/zerocomm/zerocomm_test.go b/zerocomm/zerocomm_test.go index 865492991..763ff675a 100644 --- a/zerocomm/zerocomm_test.go +++ b/zerocomm/zerocomm_test.go @@ -10,8 +10,8 @@ import ( abi "github.com/filecoin-project/specs-actors/actors/abi" "github.com/ipfs/go-cid" - "github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper" - "github.com/filecoin-project/lotus/storage/sectorstorage/zerocomm" + "github.com/filecoin-project/sector-storage/ffiwrapper" + "github.com/filecoin-project/sector-storage/zerocomm" ) func TestComms(t *testing.T) { From c4b928d4416358ac49aa7cafb9ed91e9bef27977 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Sat, 28 Mar 2020 00:33:38 +0100 Subject: [PATCH 023/199] Revert specs-actors version --- go.mod | 4 +- go.sum | 569 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 568 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index badde3173..53aab09d2 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,9 @@ require ( github.com/elastic/go-sysinfo v1.3.0 github.com/filecoin-project/filecoin-ffi v0.0.0-20200326153646-e899cc1dd072 github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5 - github.com/filecoin-project/specs-actors v0.0.0-20200325195038-f27421b402c5 + github.com/filecoin-project/go-paramfetch v0.0.1 + github.com/filecoin-project/lotus v0.2.10 + github.com/filecoin-project/specs-actors v0.0.0-20200324235424-aef9b20a9fb1 github.com/filecoin-project/specs-storage v0.0.0-20200317225704-7420bc655c38 github.com/gorilla/mux v1.7.4 github.com/hashicorp/go-multierror v1.0.0 diff --git a/go.sum b/go.sum index 127be6522..0dd2acc0a 100644 --- a/go.sum +++ b/go.sum @@ -1,247 +1,808 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +contrib.go.opencensus.io/exporter/jaeger v0.1.0/go.mod h1:VYianECmuFPwU37O699Vc1GOcy+y8kOsfaxHRImmjbA= +contrib.go.opencensus.io/exporter/prometheus v0.1.0/go.mod h1:cGFniUXGZlKRjzOyuZJ6mgB+PgBcCIa79kEKR8YCW+A= +github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0= +github.com/GeertJohan/go.rice v1.0.0 h1:KkI6O9uMaQU3VEKaj01ulavtF7o1fWT7+pk/4voiMLQ= +github.com/GeertJohan/go.rice v1.0.0/go.mod h1:eH6gbSOAUv07dQuZVnBmoDP8mgsM1rtixis4Tib9if0= +github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee/go.mod h1:W0GbEAA4uFNYOGG2cJpmFJ04E6SD1NLELPYZB57/7AY= +github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk= +github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/Stebalien/go-bitfield v0.0.1/go.mod h1:GNjFpasyUVkHMsfEOk8EFLJ9syQ6SI+XWrX9Wf2XH0s= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= +github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= +github.com/btcsuite/btcd v0.0.0-20190605094302-a0d1e3e36d50/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= +github.com/btcsuite/btcd v0.0.0-20190629003639-c26ffa870fd8/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= +github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= +github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.2.1-0.20180108230905-e214231b295a/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 h1:HVTnpeuvF6Owjd5mniCL8DEXo7uYXdQEmOP4FJbV5tg= github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= +github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= +github.com/daaku/go.zipexe v1.0.0 h1:VSOgZtH418pH9L16hC/JrgSNJbbAL26pj7lmD1+CGdY= +github.com/daaku/go.zipexe v1.0.0/go.mod h1:z8IiR6TsVLEYKwXAoE/I+8ys/sDkgTzSL0CLnGVd57E= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= +github.com/dgraph-io/badger v1.6.0-rc1/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= +github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= +github.com/dgraph-io/badger/v2 v2.0.1-rc1.0.20200120142413-c3333a5a830e/go.mod h1:3KY8+bsP8wI0OEnQJAKpd4wIJW/Mm32yw2j/9FUVnIM= +github.com/dgraph-io/ristretto v0.0.2-0.20200115201040-8f368f2f2ab3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/elastic/go-sysinfo v1.3.0 h1:eb2XFGTMlSwG/yyU9Y8jVAYLIzU2sFzWXwo2gmetyrE= github.com/elastic/go-sysinfo v1.3.0/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= +github.com/elastic/go-windows v1.0.0 h1:qLURgZFkkrYyTTkvYpsZIgf83AUsdIHfvlJaqaZ7aSY= github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= -github.com/filecoin-project/filecoin-ffi v0.0.0-20200326153646-e899cc1dd072 h1:7csaynzG5QwAEWR7nFoVeGLDyE2Oqd5qNmd+KoA0Hk4= -github.com/filecoin-project/filecoin-ffi v0.0.0-20200326153646-e899cc1dd072/go.mod h1:PtH9YP0rURHUKHrKeEBeWg/BqIBMQOz8wtlXlVGREBE= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.8.0 h1:5bzFgL+oy7JITMTxUPJ00n7VxmYd/PdMp5mHFX40/RY= +github.com/fatih/color v1.8.0/go.mod h1:3l45GVGkyrnYNl9HoIjnp2NnNWvh6hLAqD8yTfGjnw8= +github.com/fd/go-nat v1.0.0/go.mod h1:BTBu/CKvMmOMUPkKVef1pngt2WFH/lg7E6yQnulfp6E= +github.com/filecoin-project/chain-validation v0.0.3/go.mod h1:NCEGFjcWRjb8akWFSOXvU6n2efkWIqAeOKU6o5WBGQw= +github.com/filecoin-project/go-address v0.0.0-20191219011437-af739c490b4f/go.mod h1:rCbpXPva2NKF9/J4X6sr7hbKBgQCxyFtRj7KOZqoIms= +github.com/filecoin-project/go-address v0.0.0-20200107215422-da8eea2842b5/go.mod h1:SAOwJoakQ8EPjwNIsiakIQKsoKdkcbx8U3IapgCg9R0= github.com/filecoin-project/go-address v0.0.2-0.20200218010043-eb9bb40ed5be h1:TooKBwR/g8jG0hZ3lqe9S5sy2vTUcLOZLlz3M5wGn2E= github.com/filecoin-project/go-address v0.0.2-0.20200218010043-eb9bb40ed5be/go.mod h1:SAOwJoakQ8EPjwNIsiakIQKsoKdkcbx8U3IapgCg9R0= +github.com/filecoin-project/go-amt-ipld v0.0.0-20191205011053-79efc22d6cdc/go.mod h1:KsFPWjF+UUYl6n9A+qbg4bjFgAOneicFZtDH/LQEX2U= +github.com/filecoin-project/go-amt-ipld/v2 v2.0.0/go.mod h1:PAZ5tvSfMfWE327osqFXKm7cBpCpBk2Nh0qKsJUmjjk= github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200131012142-05d80eeccc5e/go.mod h1:boRtQhzmxNocrMxOXo1NYn4oUc1NGvR8tEa79wApNXg= github.com/filecoin-project/go-bitfield v0.0.0-20200309034705-8c7ac40bd550 h1:aockulLU8Qjkdj4FQz53WQpNosAIYk8DxRediRLkE5c= github.com/filecoin-project/go-bitfield v0.0.0-20200309034705-8c7ac40bd550/go.mod h1:iodsLxOFZnqKtjj2zkgqzoGNrv6vUqj69AT/J8DKXEw= +github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= +github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= +github.com/filecoin-project/go-data-transfer v0.0.0-20191219005021-4accf56bd2ce/go.mod h1:b14UWxhxVCAjrQUYvVGrQRRsjAh79wXYejw9RbUcAww= github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5 h1:yvQJCW9mmi9zy+51xA01Ea2X7/dL7r8eKDPuGUjRmbo= github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5/go.mod h1:JbkIgFF/Z9BDlvrJO1FuKkaWsH673/UdFaiVS6uIHlA= +github.com/filecoin-project/go-fil-markets v0.0.0-20200114015428-74d100f305f8/go.mod h1:c8NTjvFVy1Ud02mmGDjOiMeawY2t6ALfrrdvAB01FQc= +github.com/filecoin-project/go-paramfetch v0.0.0-20200102181131-b20d579f2878/go.mod h1:40kI2Gv16mwcRsHptI3OAV4nlOEU7wVDc4RgMylNFjU= +github.com/filecoin-project/go-paramfetch v0.0.1 h1:gV7bs5YaqlgpGFMiLxInGK2L1FyCXUE0rimz4L7ghoE= +github.com/filecoin-project/go-paramfetch v0.0.1/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= +github.com/filecoin-project/go-sectorbuilder v0.0.1/go.mod h1:3OZ4E3B2OuwhJjtxR4r7hPU9bCfB+A+hm4alLEsaeDc= +github.com/filecoin-project/go-sectorbuilder v0.0.2-0.20200203173614-42d67726bb62/go.mod h1:jNGVCDihkMFnraYVLH1xl4ceZQVxx/u4dOORrTKeRi0= +github.com/filecoin-project/go-statestore v0.1.0/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI= github.com/filecoin-project/lotus v0.2.10 h1:ijrj/nYdKu5GiMo9r1+Zcp2A4jKHSOMZ2WNy2K/mtOE= +github.com/filecoin-project/lotus v0.2.10/go.mod h1:om5PQA9ZT0lf16qI7Fz/ZGLn4LDCMqPC8ntZA9uncRE= github.com/filecoin-project/specs-actors v0.0.0-20200226200336-94c9b92b2775/go.mod h1:0HAWYrvajFHDgRaKbF0rl+IybVLZL5z4gQ8koCMPhoU= github.com/filecoin-project/specs-actors v0.0.0-20200302223606-0eaf97b10aaf/go.mod h1:0HAWYrvajFHDgRaKbF0rl+IybVLZL5z4gQ8koCMPhoU= -github.com/filecoin-project/specs-actors v0.0.0-20200325195038-f27421b402c5 h1:GRvglZbsmJLeDD1JN9WGQ4DjHKjbrmrKVRhlRhyfcio= -github.com/filecoin-project/specs-actors v0.0.0-20200325195038-f27421b402c5/go.mod h1:5WngRgTN5Eo4+0SjCBqLzEr2l6Mj45DrP2606gBhqI0= +github.com/filecoin-project/specs-actors v0.0.0-20200324235424-aef9b20a9fb1 h1:IL6A1yAamz0HtLQEdZS57hnRZHPL11VIrQxMZ1Nn5hI= +github.com/filecoin-project/specs-actors v0.0.0-20200324235424-aef9b20a9fb1/go.mod h1:5WngRgTN5Eo4+0SjCBqLzEr2l6Mj45DrP2606gBhqI0= github.com/filecoin-project/specs-storage v0.0.0-20200317225704-7420bc655c38 h1:ky+rfX3bG1TjOBLn14V674q+iwZpalyKzZxGRNzA11I= github.com/filecoin-project/specs-storage v0.0.0-20200317225704-7420bc655c38/go.mod h1:dUmzHS7izOD6HW3/JpzFrjxnptxbsHXBlO8puK2UzBk= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1/go.mod h1:0eHX/BVySxPc6SE2mZRoppGq7qcEagxdmQnA3dzork8= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI= +github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f h1:KMlcu9X58lhTA/KrfX8Bi1LQSO4pzoVjTiL3h4Jk+Zk= +github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc= github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= +github.com/gxed/pubsub v0.0.0-20180201040156-26ebdf44f824/go.mod h1:OiEWyHgK+CWrmOlVquHaIK1vhpUJydC9m0Je6mhaiNE= +github.com/hannahhoward/cbor-gen-for v0.0.0-20191216214420-3e450425c40c/go.mod h1:WVPCl0HO/0RAL5+vBH2GMxBomlxBF70MAS78+Lu1//k= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huin/goupnp v0.0.0-20180415215157-1395d1447324/go.mod h1:MZ2ZmwcBpvOoJ22IJsc7va19ZwoheaBk43rKg12SKag= +github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= +github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/influxdata/influxdb1-client v0.0.0-20190809212627-fc22c7df067e/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/ipfs/bbloom v0.0.1/go.mod h1:oqo8CVWsJFMOZqTglBG4wydCE4IQA/G2/SEofB0rjUI= +github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= +github.com/ipfs/go-bitswap v0.0.9/go.mod h1:kAPf5qgn2W2DrgAcscZ3HrM9qh4pH+X8Fkk3UPrwvis= +github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0= +github.com/ipfs/go-bitswap v0.1.2/go.mod h1:qxSWS4NXGs7jQ6zQvoPY3+NmOfHHG47mhkiLzBpJQIs= +github.com/ipfs/go-bitswap v0.1.3/go.mod h1:YEQlFy0kkxops5Vy+OxWdRSEZIoS7I7KDIwoa5Chkps= +github.com/ipfs/go-bitswap v0.1.8/go.mod h1:TOWoxllhccevbWFUR2N7B1MTSVVge1s6XSMiCSA4MzM= +github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc= github.com/ipfs/go-block-format v0.0.2 h1:qPDvcP19izTjU8rgo6p7gTXZlkMkF5bz5G3fqIsSCPE= github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= +github.com/ipfs/go-blockservice v0.0.7/go.mod h1:EOfb9k/Y878ZTRY/CH0x5+ATtaipfbRhbvNSdgc/7So= +github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M= +github.com/ipfs/go-blockservice v0.1.3-0.20190908200855-f22eea50656c/go.mod h1:t+411r7psEUhLueM8C7aPA7cxCclv4O3VsUVxt9kz2I= +github.com/ipfs/go-car v0.0.3-0.20191203022317-23b0a85fd1b1/go.mod h1:rmd887mJxQRDfndfDEY3Liyx8gQVyfFFRSHdsnDSAlk= +github.com/ipfs/go-car v0.0.3-0.20200121013634-f188c0e24291/go.mod h1:AG6sBpd2PWMccpAG7XLFBBQ/4rfBEtzUNeO2GSMesYk= github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.4-0.20191112011718-79e75dffeb10/go.mod h1:/BYOuUoxkE+0f6tGzlzMvycuN+5l35VOR4Bpg2sCmds= github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M= github.com/ipfs/go-cid v0.0.5 h1:o0Ix8e/ql7Zb5UVUJEUfjsWCIY8t48++9lR8qi6oiJU= github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= +github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-datastore v0.0.5/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= +github.com/ipfs/go-datastore v0.3.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= +github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= +github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8= +github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaHzfGTzuE3s= +github.com/ipfs/go-ds-badger v0.0.7/go.mod h1:qt0/fWzZDoPW6jpQeqUjR5kBfhDNB65jd9YlmAvpQBk= +github.com/ipfs/go-ds-badger2 v0.0.0-20200123200730-d75eb2678a5d/go.mod h1:sTQFaWUoW0OvhXzfHnQ9j39L6fdlqDkptDYcpC1XrYE= +github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= +github.com/ipfs/go-ds-leveldb v0.1.0/go.mod h1:hqAW8y4bwX5LWcCtku2rFNX3vjDZCy5LZCg+cSZvYb8= +github.com/ipfs/go-filestore v0.0.2/go.mod h1:KnZ41qJsCt2OX2mxZS0xsK3Psr0/oB93HMMssLujjVc= +github.com/ipfs/go-fs-lock v0.0.1/go.mod h1:DNBekbboPKcxs1aukPSaOtFA3QfSdi5C855v0i9XJ8Y= +github.com/ipfs/go-graphsync v0.0.4/go.mod h1:6UACBjfOXEa8rQL3Q/JpZpWS0nZDCLx134WUkjrmFpQ= +github.com/ipfs/go-hamt-ipld v0.0.14-0.20191218031521-b2c774a54db1/go.mod h1:8yRx0xLUps1Xq8ZDnIwIVdQRp7JjA55gGvCiRHT91Vk= github.com/ipfs/go-hamt-ipld v0.0.15-0.20200131012125-dd88a59d3f2e/go.mod h1:9aQJu/i/TaRDW6jqB5U217dLIDopn50wxLdHXM2CTfE= +github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= +github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= +github.com/ipfs/go-ipfs-blockstore v0.1.1/go.mod h1:8gZOgIN5e+Xdg2YSGdwTTRbguSVjYyosIDRQCY8E9QM= +github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= +github.com/ipfs/go-ipfs-chunker v0.0.1/go.mod h1:tWewYK0we3+rMbOh7pPFGDyypCtvGcBFymgY4rSDLAw= +github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-ipfs-ds-help v0.0.1/go.mod h1:gtP9xRaZXqIQRh1HRpp595KbBEdgqWFxefeVKOV8sxo= +github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod h1:c8MwfHjtQjPoDyiy9cFquVtVHkO9b9Ob3FG91qJnWCM= +github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0= +github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= +github.com/ipfs/go-ipfs-files v0.0.4/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= github.com/ipfs/go-ipfs-files v0.0.7 h1:s5BRD12ndahqYifeH1S8Z73zqZhR+3IdKYAG9PiETs0= github.com/ipfs/go-ipfs-files v0.0.7/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs= +github.com/ipfs/go-ipfs-flags v0.0.1/go.mod h1:RnXBb9WV53GSfTrSDVK61NLTFKvWc60n+K9EgCDh+rA= +github.com/ipfs/go-ipfs-posinfo v0.0.1/go.mod h1:SwyeVP+jCwiDu0C313l/8jg6ZxM0qqtlt2a0vILTc1A= +github.com/ipfs/go-ipfs-pq v0.0.1/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= +github.com/ipfs/go-ipfs-routing v0.0.1/go.mod h1:k76lf20iKFxQTjcJokbPM9iBXVXVZhcOwc360N4nuKs= +github.com/ipfs/go-ipfs-routing v0.1.0/go.mod h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY= github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50= github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= +github.com/ipfs/go-ipld-cbor v0.0.2/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= github.com/ipfs/go-ipld-cbor v0.0.4 h1:Aw3KPOKXjvrm6VjwJvFf1F1ekR/BH3jdof3Bk7OTiSA= github.com/ipfs/go-ipld-cbor v0.0.4/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= github.com/ipfs/go-ipld-format v0.0.1 h1:HCu4eB/Gh+KD/Q0M8u888RFkorTWNIL3da4oc5dwc80= github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= +github.com/ipfs/go-ipld-format v0.0.2 h1:OVAGlyYT6JPZ0pEfGntFPS40lfrDmaDbQwNHEY2G9Zs= +github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= +github.com/ipfs/go-log v1.0.0/go.mod h1:JO7RzlMK6rA+CIxFMLOuB6Wf5b81GDiKElL7UPSIKjA= +github.com/ipfs/go-log v1.0.1/go.mod h1:HuWlQttfN6FWNHRhlY5yMk/lW7evQC0HHGOxEwMRR8I= github.com/ipfs/go-log v1.0.3 h1:Gg7SUYSZ7BrqaKMwM+hRgcAkKv4QLfzP4XPQt5Sx/OI= github.com/ipfs/go-log v1.0.3/go.mod h1:OsLySYkwIbiSUR/yBTdv1qPtcE4FW3WPWk/ewz9Ru+A= +github.com/ipfs/go-log/v2 v2.0.1/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= +github.com/ipfs/go-log/v2 v2.0.2/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= github.com/ipfs/go-log/v2 v2.0.3 h1:Q2gXcBoCALyLN/pUQlz1qgu0x3uFV6FzP9oXhpfyJpc= github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= +github.com/ipfs/go-merkledag v0.0.6/go.mod h1:QYPdnlvkOg7GnQRofu9XZimC5ZW5Wi3bKys/4GQQfto= +github.com/ipfs/go-merkledag v0.1.0/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= +github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= +github.com/ipfs/go-merkledag v0.2.4/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= +github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= +github.com/ipfs/go-path v0.0.7/go.mod h1:6KTKmeRnBXgqrTvzFrPV3CamxcgvXX/4z79tfAd2Sno= +github.com/ipfs/go-peertaskqueue v0.0.4/go.mod h1:03H8fhyeMfKNFWqzYEVyMbcPUeYrqP1MX6Kd+aN+rMQ= +github.com/ipfs/go-peertaskqueue v0.1.0/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= +github.com/ipfs/go-peertaskqueue v0.1.1/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= +github.com/ipfs/go-todocounter v0.0.1/go.mod h1:l5aErvQc8qKE2r7NDMjmq5UNAvuZy0rC8BHOplkWvZ4= +github.com/ipfs/go-unixfs v0.2.2-0.20190827150610-868af2e9e5cb/go.mod h1:IwAAgul1UQIcNZzKPYZWOCijryFBeCV79cNubPzol+k= +github.com/ipfs/go-verifcid v0.0.1/go.mod h1:5Hrva5KBeIog4A+UpqlaIU+DEstipcJYQQZc0g37pY0= +github.com/ipld/go-ipld-prime v0.0.1/go.mod h1:bDDSvVz7vaK12FNvMeRYnpRFkSUPNQOiCYQezMD/P3w= +github.com/ipld/go-ipld-prime v0.0.2-0.20191108012745-28a82f04c785/go.mod h1:bDDSvVz7vaK12FNvMeRYnpRFkSUPNQOiCYQezMD/P3w= +github.com/ipld/go-ipld-prime-proto v0.0.0-20191113031812-e32bd156a1e5/go.mod h1:gcvzoEDBjwycpXt3LBE061wT9f46szXGHAmj9uoP6fU= +github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 h1:QG4CGBqCeuBo6aZlGAamSkxWdgWfZGeE49eUOWJPA4c= github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52/go.mod h1:fdg+/X9Gg4AsAIzWpEHwnqd+QY3b7lajxyjE1m4hkq4= +github.com/jackpal/gateway v1.0.4/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= +github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= +github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jbenet/go-cienv v0.0.0-20150120210510-1bb1476777ec/go.mod h1:rGaEvXB4uRSZMmzKNLoXvTu1sfx+1kv/DojUlPrSZGs= github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= +github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c/go.mod h1:sdx1xVM9UuLw1tXnhJWN3piypTUO3vCIHYmG15KE/dU= +github.com/jbenet/go-temp-err-catcher v0.0.0-20150120210811-aac704a3f4f2/go.mod h1:8GXXJV31xl8whumTzdZsTt3RnUIiPqzkyf7mxToRCMs= +github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY= github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4= github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE= github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= +github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ= +github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= +github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= +github.com/libp2p/go-conn-security v0.0.1/go.mod h1:bGmu51N0KU9IEjX7kl2PQjgZa40JQWnayTvNMgD/vyk= +github.com/libp2p/go-conn-security-multistream v0.0.2/go.mod h1:nc9vud7inQ+d6SO0I/6dSWrdMnHnzZNHeyUQqrAJulE= +github.com/libp2p/go-conn-security-multistream v0.1.0/go.mod h1:aw6eD7LOsHEX7+2hJkDxw1MteijaVcI+/eP2/x3J1xc= +github.com/libp2p/go-eventbus v0.0.2/go.mod h1:Hr/yGlwxA/stuLnpMiu82lpNKpvRy3EaJxPu40XYOwk= +github.com/libp2p/go-eventbus v0.0.3/go.mod h1:Hr/yGlwxA/stuLnpMiu82lpNKpvRy3EaJxPu40XYOwk= +github.com/libp2p/go-eventbus v0.1.0/go.mod h1:vROgu5cs5T7cv7POWlWxBaVLxfSegC5UGQf8A2eEmx4= +github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZxBdp967ls1g+k8= github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= +github.com/libp2p/go-libp2p v0.0.30/go.mod h1:XWT8FGHlhptAv1+3V/+J5mEpzyui/5bvFsNuWYs611A= +github.com/libp2p/go-libp2p v0.1.0/go.mod h1:6D/2OBauqLUoqcADOJpn9WbKqvaM07tDw68qHM0BxUM= +github.com/libp2p/go-libp2p v0.1.1/go.mod h1:I00BRo1UuUSdpuc8Q2mN7yDF/oTUTRAX6JWpTiK9Rp8= +github.com/libp2p/go-libp2p v0.2.1/go.mod h1:HZbtEOrgZN4F1fGZVvkV+930Wx3DkqlpBlO8dIoZWds= +github.com/libp2p/go-libp2p v0.3.0/go.mod h1:J7DPB1+zB5VLc8v/kKSD8+u2cbyIGI0Dh/Pf3Wprt+0= +github.com/libp2p/go-libp2p v0.4.2/go.mod h1:MNmgUxUw5pMsdOzMlT0EE7oKjRasl+WyVwM0IBlpKgQ= +github.com/libp2p/go-libp2p-autonat v0.0.6/go.mod h1:uZneLdOkZHro35xIhpbtTzLlgYturpu4J5+0cZK3MqE= +github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= +github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/djNA3fdpCWloIudE= +github.com/libp2p/go-libp2p-blankhost v0.0.1/go.mod h1:Ibpbw/7cPPYwFb7PACIWdvxxv0t0XCCI10t7czjAjTc= +github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro= +github.com/libp2p/go-libp2p-blankhost v0.1.3/go.mod h1:KML1//wiKR8vuuJO0y3LUd1uLv+tlkGTAr3jC0S5cLg= +github.com/libp2p/go-libp2p-blankhost v0.1.4/go.mod h1:oJF0saYsAXQCSfDq254GMNmLNz6ZTHTOvtF4ZydUvwU= +github.com/libp2p/go-libp2p-circuit v0.0.9/go.mod h1:uU+IBvEQzCu953/ps7bYzC/D/R0Ho2A9LfKVVCatlqU= +github.com/libp2p/go-libp2p-circuit v0.1.0/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= +github.com/libp2p/go-libp2p-circuit v0.1.1/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= +github.com/libp2p/go-libp2p-circuit v0.1.4/go.mod h1:CY67BrEjKNDhdTk8UgBX1Y/H5c3xkAcs3gnksxY7osU= +github.com/libp2p/go-libp2p-connmgr v0.1.0/go.mod h1:wZxh8veAmU5qdrfJ0ZBLcU8oJe9L82ciVP/fl1VHjXk= +github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco= +github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco= +github.com/libp2p/go-libp2p-core v0.0.3/go.mod h1:j+YQMNz9WNSkNezXOsahp9kwZBKBvxLpKD316QWSJXE= +github.com/libp2p/go-libp2p-core v0.0.4/go.mod h1:jyuCQP356gzfCFtRKyvAbNkyeuxb7OlyhWZ3nls5d2I= +github.com/libp2p/go-libp2p-core v0.0.6/go.mod h1:0d9xmaYAVY5qmbp/fcgxHT3ZJsLjYeYPMJAUKpaCHrE= +github.com/libp2p/go-libp2p-core v0.0.9/go.mod h1:0d9xmaYAVY5qmbp/fcgxHT3ZJsLjYeYPMJAUKpaCHrE= +github.com/libp2p/go-libp2p-core v0.2.0/go.mod h1:X0eyB0Gy93v0DZtSYbEM7RnMChm9Uv3j7yRXjO77xSI= +github.com/libp2p/go-libp2p-core v0.2.2/go.mod h1:8fcwTbsG2B+lTgRJ1ICZtiM5GWCWZVoVrLaDRvIRng0= +github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g= +github.com/libp2p/go-libp2p-core v0.3.0 h1:F7PqduvrztDtFsAa/bcheQ3azmNo+Nq7m8hQY5GiUW8= github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw= +github.com/libp2p/go-libp2p-crypto v0.0.1/go.mod h1:yJkNyDmO341d5wwXxDUGO0LykUVT72ImHNUqh5D/dBE= +github.com/libp2p/go-libp2p-crypto v0.0.2/go.mod h1:eETI5OUfBnvARGOHrJz2eWNyTUxEGZnBxMcbUjfIj4I= +github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI= +github.com/libp2p/go-libp2p-discovery v0.0.5/go.mod h1:YtF20GUxjgoKZ4zmXj8j3Nb2TUSBHFlOCetzYdbZL5I= +github.com/libp2p/go-libp2p-discovery v0.1.0/go.mod h1:4F/x+aldVHjHDHuX85x1zWoFTGElt8HnoDzwkFZm29g= +github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfxg97AEdo4GYBt6BadWg= +github.com/libp2p/go-libp2p-host v0.0.1/go.mod h1:qWd+H1yuU0m5CwzAkvbSjqKairayEHdR5MMl7Cwa7Go= +github.com/libp2p/go-libp2p-host v0.0.3/go.mod h1:Y/qPyA6C8j2coYyos1dfRm0I8+nvd4TGrDGt4tA7JR8= +github.com/libp2p/go-libp2p-interface-connmgr v0.0.1/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= +github.com/libp2p/go-libp2p-interface-connmgr v0.0.4/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= +github.com/libp2p/go-libp2p-interface-connmgr v0.0.5/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= +github.com/libp2p/go-libp2p-interface-pnet v0.0.1/go.mod h1:el9jHpQAXK5dnTpKA4yfCNBZXvrzdOU75zz+C6ryp3k= +github.com/libp2p/go-libp2p-kad-dht v0.1.1/go.mod h1:1kj2Rk5pX3/0RwqMm9AMNCT7DzcMHYhgDN5VTi+cY0M= +github.com/libp2p/go-libp2p-kbucket v0.2.0/go.mod h1:JNymBToym3QXKBMKGy3m29+xprg0EVr/GJFHxFEdgh8= +github.com/libp2p/go-libp2p-loggables v0.0.1/go.mod h1:lDipDlBNYbpyqyPX/KcoO+eq0sJYEVR2JgOexcivchg= +github.com/libp2p/go-libp2p-loggables v0.1.0/go.mod h1:EyumB2Y6PrYjr55Q3/tiJ/o3xoDasoRYM7nOzEpoa90= +github.com/libp2p/go-libp2p-metrics v0.0.1/go.mod h1:jQJ95SXXA/K1VZi13h52WZMa9ja78zjyy5rspMsC/08= +github.com/libp2p/go-libp2p-mplex v0.1.1/go.mod h1:KUQWpGkCzfV7UIpi8SKsAVxyBgz1c9R5EvxgnwLsb/I= +github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3g+OtR+EMMODbKo= +github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE= +github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY= +github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE= +github.com/libp2p/go-libp2p-net v0.0.1/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8EgNU9DrCcR8c= +github.com/libp2p/go-libp2p-net v0.0.2/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8EgNU9DrCcR8c= +github.com/libp2p/go-libp2p-netutil v0.0.1/go.mod h1:GdusFvujWZI9Vt0X5BKqwWWmZFxecf9Gt03cKxm2f/Q= +github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= +github.com/libp2p/go-libp2p-peer v0.0.1/go.mod h1:nXQvOBbwVqoP+T5Y5nCjeH4sP9IX/J0AMzcDUVruVoo= +github.com/libp2p/go-libp2p-peer v0.1.1/go.mod h1:jkF12jGB4Gk/IOo+yomm+7oLWxF278F7UnrYUQ1Q8es= +github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY= +github.com/libp2p/go-libp2p-peerstore v0.0.1/go.mod h1:RabLyPVJLuNQ+GFyoEkfi8H4Ti6k/HtZJ7YKgtSq+20= +github.com/libp2p/go-libp2p-peerstore v0.0.6/go.mod h1:RabLyPVJLuNQ+GFyoEkfi8H4Ti6k/HtZJ7YKgtSq+20= +github.com/libp2p/go-libp2p-peerstore v0.1.0/go.mod h1:2CeHkQsr8svp4fZ+Oi9ykN1HBb6u0MOvdJ7YIsmcwtY= +github.com/libp2p/go-libp2p-peerstore v0.1.2/go.mod h1:BJ9sHlm59/80oSkpWgr1MyY1ciXAXV397W6h1GH/uKI= +github.com/libp2p/go-libp2p-peerstore v0.1.3/go.mod h1:BJ9sHlm59/80oSkpWgr1MyY1ciXAXV397W6h1GH/uKI= +github.com/libp2p/go-libp2p-peerstore v0.1.4/go.mod h1:+4BDbDiiKf4PzpANZDAT+knVdLxvqh7hXOujessqdzs= +github.com/libp2p/go-libp2p-protocol v0.0.1/go.mod h1:Af9n4PiruirSDjHycM1QuiMi/1VZNHYcK8cLgFJLZ4s= +github.com/libp2p/go-libp2p-protocol v0.1.0/go.mod h1:KQPHpAabB57XQxGrXCNvbL6UEXfQqUgC/1adR2Xtflk= +github.com/libp2p/go-libp2p-pubsub v0.2.6/go.mod h1:5jEp7R3ItQ0pgcEMrPZYE9DQTg/H3CTc7Mu1j2G4Y5o= +github.com/libp2p/go-libp2p-quic-transport v0.1.1/go.mod h1:wqG/jzhF3Pu2NrhJEvE+IE0NTHNXslOPn9JQzyCAxzU= +github.com/libp2p/go-libp2p-record v0.0.1/go.mod h1:grzqg263Rug/sRex85QrDOLntdFAymLDLm7lxMgU79Q= +github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= +github.com/libp2p/go-libp2p-record v0.1.1/go.mod h1:VRgKajOyMVgP/F0L5g3kH7SVskp17vFi2xheb5uMJtg= +github.com/libp2p/go-libp2p-routing v0.0.1/go.mod h1:N51q3yTr4Zdr7V8Jt2JIktVU+3xBBylx1MZeVA6t1Ys= +github.com/libp2p/go-libp2p-routing v0.1.0/go.mod h1:zfLhI1RI8RLEzmEaaPwzonRvXeeSHddONWkcTcB54nE= +github.com/libp2p/go-libp2p-routing-helpers v0.1.0/go.mod h1:oUs0h39vNwYtYXnQWOTU5BaafbedSyWCCal3gqHuoOQ= +github.com/libp2p/go-libp2p-secio v0.0.3/go.mod h1:hS7HQ00MgLhRO/Wyu1bTX6ctJKhVpm+j2/S2A5UqYb0= +github.com/libp2p/go-libp2p-secio v0.1.0/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8= +github.com/libp2p/go-libp2p-secio v0.1.1/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8= +github.com/libp2p/go-libp2p-secio v0.2.0/go.mod h1:2JdZepB8J5V9mBp79BmwsaPQhRPNN2NrnB2lKQcdy6g= +github.com/libp2p/go-libp2p-secio v0.2.1/go.mod h1:cWtZpILJqkqrSkiYcDBh5lA3wbT2Q+hz3rJQq3iftD8= +github.com/libp2p/go-libp2p-swarm v0.0.6/go.mod h1:s5GZvzg9xXe8sbeESuFpjt8CJPTCa8mhEusweJqyFy8= +github.com/libp2p/go-libp2p-swarm v0.1.0/go.mod h1:wQVsCdjsuZoc730CgOvh5ox6K8evllckjebkdiY5ta4= +github.com/libp2p/go-libp2p-swarm v0.1.1/go.mod h1:4NVJaLwq/dr5kEq79Jo6pMin7ZFwLx73ln1FTefR91Q= +github.com/libp2p/go-libp2p-swarm v0.2.0/go.mod h1:x07b4zkMFo2EvgPV2bMTlNmdQc8i+74Jjio7xGvsTgU= +github.com/libp2p/go-libp2p-swarm v0.2.2/go.mod h1:fvmtQ0T1nErXym1/aa1uJEyN7JzaTNyBcHImCxRpPKU= +github.com/libp2p/go-libp2p-testing v0.0.1/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-testing v0.0.4/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-testing v0.1.0/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= +github.com/libp2p/go-libp2p-tls v0.1.0/go.mod h1:VZdoSWQDeNpIIAFJFv+6uqTqpnIIDHcqZQSTC/A1TT0= +github.com/libp2p/go-libp2p-transport v0.0.1/go.mod h1:UzbUs9X+PHOSw7S3ZmeOxfnwaQY5vGDzZmKPod3N3tk= +github.com/libp2p/go-libp2p-transport v0.0.4/go.mod h1:StoY3sx6IqsP6XKoabsPnHCwqKXWUMWU7Rfcsubee/A= +github.com/libp2p/go-libp2p-transport v0.0.5/go.mod h1:StoY3sx6IqsP6XKoabsPnHCwqKXWUMWU7Rfcsubee/A= +github.com/libp2p/go-libp2p-transport-upgrader v0.0.4/go.mod h1:RGq+tupk+oj7PzL2kn/m1w6YXxcIAYJYeI90h6BGgUc= +github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA= +github.com/libp2p/go-libp2p-yamux v0.1.2/go.mod h1:xUoV/RmYkg6BW/qGxA9XJyg+HzXFYkeXbnhjmnYzKp8= +github.com/libp2p/go-libp2p-yamux v0.1.3/go.mod h1:VGSQVrqkh6y4nm0189qqxMtvyBft44MOYYPpYKXiVt4= +github.com/libp2p/go-libp2p-yamux v0.2.0/go.mod h1:Db2gU+XfLpm6E4rG5uGCFX6uXA8MEXOxFcRoXUODaK8= +github.com/libp2p/go-libp2p-yamux v0.2.1/go.mod h1:1FBXiHDk1VyRM1C0aez2bCfHQ4vMZKkAQzZbkSQt5fI= +github.com/libp2p/go-maddr-filter v0.0.1/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= +github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= +github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M= +github.com/libp2p/go-mplex v0.0.3/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0= +github.com/libp2p/go-mplex v0.0.4/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0= +github.com/libp2p/go-mplex v0.1.0/go.mod h1:SXgmdki2kwCUlCCbfGLEgHjC4pFqhTp0ZoV6aiKgxDU= +github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= +github.com/libp2p/go-msgio v0.0.3/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= +github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= +github.com/libp2p/go-nat v0.0.3/go.mod h1:88nUEt0k0JD45Bk93NIwDqjlhiOwOoV36GchpcVc1yI= +github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/+KSDo= +github.com/libp2p/go-openssl v0.0.2/go.mod h1:v8Zw2ijCSWBQi8Pq5GAixw6DbFfa9u6VIYDXnvOXkc0= +github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= +github.com/libp2p/go-openssl v0.0.4 h1:d27YZvLoTyMhIN4njrkr8zMDOM4lfpHIp6A+TK9fovg= github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= +github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= +github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= +github.com/libp2p/go-stream-muxer v0.0.1/go.mod h1:bAo8x7YkSpadMTbtTaxGVHWUQsR/l5MEaHbKaliuT14= +github.com/libp2p/go-stream-muxer v0.1.0/go.mod h1:8JAVsjeRBCWwPoZeH0W1imLOcriqXJyFvB0mR4A04sQ= +github.com/libp2p/go-stream-muxer-multistream v0.1.1/go.mod h1:zmGdfkQ1AzOECIAcccoL8L//laqawOsO03zX8Sa+eGw= +github.com/libp2p/go-stream-muxer-multistream v0.2.0/go.mod h1:j9eyPol/LLRqT+GPLSxvimPhNph4sfYfMoDPd7HkzIc= +github.com/libp2p/go-tcp-transport v0.0.4/go.mod h1:+E8HvC8ezEVOxIo3V5vCK9l1y/19K427vCzQ+xHKH/o= +github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc= +github.com/libp2p/go-tcp-transport v0.1.1/go.mod h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY= +github.com/libp2p/go-testutil v0.0.1/go.mod h1:iAcJc/DKJQanJ5ws2V+u5ywdL2n12X1WbbEG+Jjy69I= +github.com/libp2p/go-testutil v0.1.0/go.mod h1:81b2n5HypcVyrCg/MJx4Wgfp/VHojytjVe/gLzZ2Ehc= +github.com/libp2p/go-ws-transport v0.0.5/go.mod h1:Qbl4BxPfXXhhd/o0wcrgoaItHqA9tnZjoFZnxykuaXU= +github.com/libp2p/go-ws-transport v0.1.0/go.mod h1:rjw1MG1LU9YDC6gzmwObkPd/Sqwhw7yT74kj3raBFuo= +github.com/libp2p/go-ws-transport v0.1.2/go.mod h1:dsh2Ld8F+XNmzpkaAijmg5Is+e9l6/1tK/6VFOdN69Y= +github.com/libp2p/go-yamux v1.2.1/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.2.2/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/lucas-clemente/quic-go v0.11.2/go.mod h1:PpMmPfPKO9nKJ/psF49ESTAGQSdfXxlg1otPbEB2nOw= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9 h1:d5US/mDsogSGW37IV293h//ZFaeajb69h+EHFsv2xGg= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54= +github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc= github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-multiaddr v0.0.1/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.0.2/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.0.4/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.1.0/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= +github.com/multiformats/go-multiaddr v0.2.0 h1:lR52sFwcTCuQb6bTfnXF6zA2XfyYvyd+5a9qECv/J90= github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= +github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= +github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= +github.com/multiformats/go-multiaddr-dns v0.0.3/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= +github.com/multiformats/go-multiaddr-dns v0.2.0 h1:YWJoIDwLePniH7OU5hBnDZV6SWuvJqJ0YtN6pLeH9zA= +github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0= +github.com/multiformats/go-multiaddr-fmt v0.0.1/go.mod h1:aBYjqL4T/7j4Qx+R73XSv/8JsgnRFlf0w2KGLCmXl3Q= +github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= +github.com/multiformats/go-multiaddr-net v0.0.1/go.mod h1:nw6HSxNmCIQH27XPGBuX+d1tnvM7ihcFwHMSstNAVUU= +github.com/multiformats/go-multiaddr-net v0.1.0/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= +github.com/multiformats/go-multiaddr-net v0.1.1/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA= github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= +github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po= +github.com/multiformats/go-multihash v0.0.6/go.mod h1:XuKXPp8VHcTygube3OWZC+aZrA+H1IhmjoCDtJc7PXM= github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.9/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.13 h1:06x+mk/zj1FoMsgNejLpy6QTvJqlSt/BhLEy87zidlc= github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-multistream v0.0.1/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= +github.com/multiformats/go-multistream v0.0.4/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= +github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= +github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.5 h1:XVZwSo04Cs3j/jS0uAEPpT3JY6DzMcVLLoWOSnCxOjg= github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229/go.mod h1:0aYXnNPJ8l7uZxf45rWW1a/uME32OF0rhiYGNQ2oF2E= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.9.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.6.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/otiai10/copy v1.0.2/go.mod h1:c7RpqBkwMom4bYTSkLSym4VSJz/XtncWRAj/J4PEIMY= +github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= +github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20190408063855-01bf1e26dd14/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1 h1:CskT+S6Ay54OwxBGB0R3Rsx4Muto6UnEYTyKJbyRIAI= github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a h1:hjZfReYVLbqFkAtr2us7vdy04YWz3LVAirzP7reh8+M= +github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0 h1:c8R11WC8m7KNMkTv/0+Be8vvwo4I3/Ut9AC2FW8fX3U= github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/shirou/gopsutil v2.18.12+incompatible h1:1eaJvGomDnH74/5cF4CTmTbLHAriGFsTZppLXDX93OM= +github.com/shirou/gopsutil v2.18.12+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= +github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w= +github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= +github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa h1:E+gaaifzi2xF65PbDmuKI3PhLWY6G5opMLniFq8vmXA= github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v0.0.0-20190710185942-9d28bd7c0945/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 h1:WN9BUFbdyOsSH/XohnWpXOlq9NBD5sGAB2FciQMUEe8= +github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= +github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0= +github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU= github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli/v2 v2.0.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436 h1:qOpVTI+BrstcjTZLm2Yz/3sOnqkzj3FQoh0g+E5s3Gc= github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830 h1:8kxMKmKzXXL4Ru1nyhvdms/JjWt+3YLpvRb/bAjO/y0= +github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc/go.mod h1:r45hJU7yEoA81k6MWNhpMj/kms0n14dkzkxYHoB96UM= +github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba h1:X4n8JG2e2biEZZXdBKt9HX7DN3bYGFUqljqqy0DqgnY= +github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba/go.mod h1:CHQnYnQUEPydYCwuy8lmTHfGmdw9TKrhWV0xLx8l0oM= +github.com/whyrusleeping/cbor-gen v0.0.0-20190910031516-c1cbffdb01bb/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= +github.com/whyrusleeping/cbor-gen v0.0.0-20190917003517-d78d67427694/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= +github.com/whyrusleeping/cbor-gen v0.0.0-20191116002219-891f55cd449d/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= +github.com/whyrusleeping/cbor-gen v0.0.0-20191212224538-d370462a7e8a/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= +github.com/whyrusleeping/cbor-gen v0.0.0-20200121162646-b63bacf5eaf8/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= github.com/whyrusleeping/cbor-gen v0.0.0-20200206220010-03c9665e2a66 h1:LolR9FiEfQNn5U031bAhn/46po2JgWHKadYbcWFIJ+0= github.com/whyrusleeping/cbor-gen v0.0.0-20200206220010-03c9665e2a66/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= +github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= +github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= +github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f/go.mod h1:cZNvX9cFybI01GriPRMXDtczuvUhgbcYr9iCGaNlRv8= +github.com/whyrusleeping/go-smux-multiplex v3.0.16+incompatible/go.mod h1:34LEDbeKFZInPUrAG+bjuJmUXONGdEFW7XL0SpTY1y4= +github.com/whyrusleeping/go-smux-multistream v2.0.2+incompatible/go.mod h1:dRWHHvc4HDQSHh9gbKEBbUZ+f2Q8iZTPG3UOGYODxSQ= +github.com/whyrusleeping/go-smux-yamux v2.0.8+incompatible/go.mod h1:6qHUzBXUbB9MXmw3AUdB52L8sEb/hScCqOdW2kj/wuI= +github.com/whyrusleeping/mafmt v1.2.8/go.mod h1:faQJFPbLSxzD9xpA02ttW/tS9vZykNvXwGvqIpk20FA= +github.com/whyrusleeping/mdns v0.0.0-20180901202407-ef14215e6b30/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= +github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= +github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI= +github.com/whyrusleeping/pubsub v0.0.0-20131020042734-02de8aa2db3d/go.mod h1:g7ckxrjiFh8mi1AY7ox23PZD0g6QU/TxW3U3unX7I3A= +github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1:m2aV4LZI4Aez7dP5PMyVKEHhUyEJ/RjmPEDOpDvudHg= +github.com/whyrusleeping/yamux v1.1.5/go.mod h1:E8LnQQ8HKx5KD29HZFUwM1PxCOdPRzGwur1mcYhXcD8= github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.5.1 h1:rsqfU5vBkVknbhUGbAUwQKR2H4ItV8tjJ+6kJX4cxHM= +go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/dig v1.7.0/go.mod h1:z+dSd2TP9Usi48jL8M3v63iSBVkiwtVyMKxMZYYauPg= +go.uber.org/fx v1.9.0/go.mod h1:mFdUyAUuJ3w4jAckiKSKbldsxy1ojpAMJ+dVZg5Y0Aw= +go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.4.0 h1:f3WCSC2KzAcBXGATIxAB1E2XuCpNU255wNKZ505qi3E= +go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0 h1:nR6NoDBgAf67s68NhaXbsojM+2gxp3S1hWkHDl27pVU= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go4.org v0.0.0-20190218023631-ce4c26f7be8e/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +go4.org v0.0.0-20190313082347-94abd6928b1d/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190225124518-7f87c0fbb88b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/net v0.0.0-20180524181706-dfa909b99c79/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190611141213-3f473d35a33a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180202135801-37707fdb30a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190302025703-b6889370fb10/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190524122548-abf6ff778158/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190524152521-dbbf3f1254d4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190526052359-791d8a0f4d09/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae h1:QoJmnb9uyPCrH8GIg9uRLn4Ta45yhcQtpymCd0AavO8= golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200107162124-548cf772de50 h1:YvQ10rzcqWXLlJZ3XCUoO25savxmscf4+SC+ZqiCHhA= +golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200108195415-316d2f248479 h1:csuS+MHeEA2eWhyjQCMaPMq4z1+/PohkBSjJZHSIbOE= +golang.org/x/tools v0.0.0-20200108195415-316d2f248479/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.28 h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk= +gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/src-d/go-cli.v0 v0.0.0-20181105080154-d492247bbc0d/go.mod h1:z+K8VcOYVYcSwSjGebuDL6176A1XskgbtNl64NSg+n8= gopkg.in/src-d/go-log.v1 v1.0.1/go.mod h1:GN34hKP0g305ysm2/hctJ0Y8nWP3zxXXJ8GFabTyABE= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/urfave/cli.v2 v2.0.0-20180128182452-d3ae77c26ac8/go.mod h1:cKXr3E0k4aosgycml1b5z33BVV6hai1Kh7uDgFOkbcs= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M= howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= +launchpad.net/gocheck v0.0.0-20140225173054-000000000087/go.mod h1:hj7XX3B/0A+80Vse0e+BUHsHMTEhd0O4cpUHr/e/BUM= From 0730ece757f05d602347d782fd027ccae9f86dc4 Mon Sep 17 00:00:00 2001 From: laser Date: Wed, 1 Apr 2020 11:56:32 -0700 Subject: [PATCH 024/199] loosen constraints on 8MiB and 2KiB sectors (for testing) --- resources.go | 91 ++++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 82 insertions(+), 9 deletions(-) diff --git a/resources.go b/resources.go index ec4e4faba..310373bfe 100644 --- a/resources.go +++ b/resources.go @@ -49,6 +49,22 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 1 << 30, }, + abi.RegisteredProof_StackedDRG2KiBSeal: Resources{ + MaxMemory: 1 << 11, + MinMemory: 1 << 11, + + MultiThread: false, + + BaseMinMemory: 1 << 11, + }, + abi.RegisteredProof_StackedDRG8MiBSeal: Resources{ + MaxMemory: 1 << 23, + MinMemory: 1 << 23, + + MultiThread: false, + + BaseMinMemory: 1 << 23, + }, }, sealtasks.TTPreCommit1: { abi.RegisteredProof_StackedDRG32GiBSeal: Resources{ @@ -67,6 +83,22 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 1 << 30, }, + abi.RegisteredProof_StackedDRG2KiBSeal: Resources{ + MaxMemory: 1 << 11, + MinMemory: 1 << 11, + + MultiThread: false, + + BaseMinMemory: 1 << 11, + }, + abi.RegisteredProof_StackedDRG8MiBSeal: Resources{ + MaxMemory: 1 << 23, + MinMemory: 1 << 23, + + MultiThread: false, + + BaseMinMemory: 1 << 23, + }, }, sealtasks.TTPreCommit2: { abi.RegisteredProof_StackedDRG32GiBSeal: Resources{ @@ -85,6 +117,22 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 1 << 30, }, + abi.RegisteredProof_StackedDRG2KiBSeal: Resources{ + MaxMemory: 1 << 11, + MinMemory: 1 << 11, + + MultiThread: true, + + BaseMinMemory: 1 << 11, + }, + abi.RegisteredProof_StackedDRG8MiBSeal: Resources{ + MaxMemory: 1 << 23, + MinMemory: 1 << 23, + + MultiThread: true, + + BaseMinMemory: 1 << 23, + }, }, sealtasks.TTCommit1: { // Very short (~100ms), so params are very light abi.RegisteredProof_StackedDRG32GiBSeal: Resources{ @@ -103,6 +151,22 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 1 << 30, }, + abi.RegisteredProof_StackedDRG2KiBSeal: Resources{ + MaxMemory: 1 << 11, + MinMemory: 1 << 11, + + MultiThread: false, + + BaseMinMemory: 1 << 11, + }, + abi.RegisteredProof_StackedDRG8MiBSeal: Resources{ + MaxMemory: 1 << 23, + MinMemory: 1 << 23, + + MultiThread: false, + + BaseMinMemory: 1 << 23, + }, }, sealtasks.TTCommit2: { // TODO: Measure more accurately abi.RegisteredProof_StackedDRG32GiBSeal: Resources{ @@ -123,14 +187,23 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 10 << 30, }, + abi.RegisteredProof_StackedDRG2KiBSeal: Resources{ + MaxMemory: 1 << 11, + MinMemory: 1 << 11, + + MultiThread: false, + CanGPU: true, + + BaseMinMemory: 1 << 11, + }, + abi.RegisteredProof_StackedDRG8MiBSeal: Resources{ + MaxMemory: 1 << 23, + MinMemory: 1 << 23, + + MultiThread: false, + CanGPU: true, + + BaseMinMemory: 1 << 23, + }, }, } - -func init() { - // for now we just reuse params for 2kib and 8mib from 512mib - - for taskType := range ResourceTable { - ResourceTable[taskType][abi.RegisteredProof_StackedDRG8MiBSeal] = ResourceTable[taskType][abi.RegisteredProof_StackedDRG512MiBSeal] - ResourceTable[taskType][abi.RegisteredProof_StackedDRG2KiBSeal] = ResourceTable[taskType][abi.RegisteredProof_StackedDRG512MiBSeal] - } -} From c99952d8cd5a2f66e0d0559a58fb99a2d494241f Mon Sep 17 00:00:00 2001 From: laser Date: Wed, 1 Apr 2020 14:24:09 -0700 Subject: [PATCH 025/199] shift 2 --- resources.go | 60 ++++++++++++++++++++++++++-------------------------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/resources.go b/resources.go index 310373bfe..af31b5233 100644 --- a/resources.go +++ b/resources.go @@ -50,20 +50,20 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 1 << 30, }, abi.RegisteredProof_StackedDRG2KiBSeal: Resources{ - MaxMemory: 1 << 11, - MinMemory: 1 << 11, + MaxMemory: 2 << 10, + MinMemory: 2 << 10, MultiThread: false, - BaseMinMemory: 1 << 11, + BaseMinMemory: 2 << 10, }, abi.RegisteredProof_StackedDRG8MiBSeal: Resources{ - MaxMemory: 1 << 23, - MinMemory: 1 << 23, + MaxMemory: 2 << 22, + MinMemory: 2 << 22, MultiThread: false, - BaseMinMemory: 1 << 23, + BaseMinMemory: 2 << 22, }, }, sealtasks.TTPreCommit1: { @@ -84,20 +84,20 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 1 << 30, }, abi.RegisteredProof_StackedDRG2KiBSeal: Resources{ - MaxMemory: 1 << 11, - MinMemory: 1 << 11, + MaxMemory: 2 << 10, + MinMemory: 2 << 10, MultiThread: false, - BaseMinMemory: 1 << 11, + BaseMinMemory: 2 << 10, }, abi.RegisteredProof_StackedDRG8MiBSeal: Resources{ - MaxMemory: 1 << 23, - MinMemory: 1 << 23, + MaxMemory: 2 << 22, + MinMemory: 2 << 22, MultiThread: false, - BaseMinMemory: 1 << 23, + BaseMinMemory: 2 << 22, }, }, sealtasks.TTPreCommit2: { @@ -118,20 +118,20 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 1 << 30, }, abi.RegisteredProof_StackedDRG2KiBSeal: Resources{ - MaxMemory: 1 << 11, - MinMemory: 1 << 11, + MaxMemory: 2 << 10, + MinMemory: 2 << 10, MultiThread: true, - BaseMinMemory: 1 << 11, + BaseMinMemory: 2 << 10, }, abi.RegisteredProof_StackedDRG8MiBSeal: Resources{ - MaxMemory: 1 << 23, - MinMemory: 1 << 23, + MaxMemory: 2 << 22, + MinMemory: 2 << 22, MultiThread: true, - BaseMinMemory: 1 << 23, + BaseMinMemory: 2 << 22, }, }, sealtasks.TTCommit1: { // Very short (~100ms), so params are very light @@ -152,20 +152,20 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 1 << 30, }, abi.RegisteredProof_StackedDRG2KiBSeal: Resources{ - MaxMemory: 1 << 11, - MinMemory: 1 << 11, + MaxMemory: 2 << 10, + MinMemory: 2 << 10, MultiThread: false, - BaseMinMemory: 1 << 11, + BaseMinMemory: 2 << 10, }, abi.RegisteredProof_StackedDRG8MiBSeal: Resources{ - MaxMemory: 1 << 23, - MinMemory: 1 << 23, + MaxMemory: 2 << 22, + MinMemory: 2 << 22, MultiThread: false, - BaseMinMemory: 1 << 23, + BaseMinMemory: 2 << 22, }, }, sealtasks.TTCommit2: { // TODO: Measure more accurately @@ -188,22 +188,22 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 10 << 30, }, abi.RegisteredProof_StackedDRG2KiBSeal: Resources{ - MaxMemory: 1 << 11, - MinMemory: 1 << 11, + MaxMemory: 2 << 10, + MinMemory: 2 << 10, MultiThread: false, CanGPU: true, - BaseMinMemory: 1 << 11, + BaseMinMemory: 2 << 10, }, abi.RegisteredProof_StackedDRG8MiBSeal: Resources{ - MaxMemory: 1 << 23, - MinMemory: 1 << 23, + MaxMemory: 2 << 22, + MinMemory: 2 << 22, MultiThread: false, CanGPU: true, - BaseMinMemory: 1 << 23, + BaseMinMemory: 2 << 22, }, }, } From feb97789c5a2e7a7b0e88fd0dba1c28713b8b5d4 Mon Sep 17 00:00:00 2001 From: laser Date: Wed, 1 Apr 2020 14:58:23 -0700 Subject: [PATCH 026/199] add README.md --- README.md | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 README.md diff --git a/README.md b/README.md new file mode 100644 index 000000000..23304ec2c --- /dev/null +++ b/README.md @@ -0,0 +1,15 @@ +# sector-storage + +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) +[![standard-readme compliant](https://img.shields.io/badge/standard--readme-OK-green.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) + +> a concrete implementation of the [specs-storage](https://github.com/filecoin-project/specs-storage) interface + +The sector-storage project provides a implementation-nonspecific reference implementation of the [specs-storage](https://github.com/filecoin-project/specs-storage) interface. + +## License + +The Filecoin Project is dual-licensed under Apache 2.0 and MIT terms: + +- Apache License, Version 2.0, ([LICENSE-APACHE](https://github.com/filecoin-project/sector-storage/blob/master/LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) +- MIT license ([LICENSE-MIT](https://github.com/filecoin-project/sector-storage/blob/master/LICENSE-MIT) or http://opensource.org/licenses/MIT) From ba291a92542289050602b61c8c9749393d6e8819 Mon Sep 17 00:00:00 2001 From: laser Date: Wed, 1 Apr 2020 15:20:17 -0700 Subject: [PATCH 027/199] add CircleCI badge --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 23304ec2c..28eaffe05 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,7 @@ # sector-storage [![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) +[![CircleCI](https://circleci.com/gh/filecoin-project/sector-storage.svg?style=svg)](https://circleci.com/gh/filecoin-project/sector-storage) [![standard-readme compliant](https://img.shields.io/badge/standard--readme-OK-green.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) > a concrete implementation of the [specs-storage](https://github.com/filecoin-project/specs-storage) interface From fcaaae705d232c5f6462dd0282f689d409aec41c Mon Sep 17 00:00:00 2001 From: laser Date: Wed, 1 Apr 2020 15:38:29 -0700 Subject: [PATCH 028/199] circleci: add shell config - download groth parameters and keys - perform go mod tidy check --- .circleci/config.yml | 49 ++++++++++++++++++++++++++ parameters.json | 82 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 131 insertions(+) create mode 100644 .circleci/config.yml create mode 100644 parameters.json diff --git a/.circleci/config.yml b/.circleci/config.yml new file mode 100644 index 000000000..1b0696247 --- /dev/null +++ b/.circleci/config.yml @@ -0,0 +1,49 @@ +version: 2.1 +orbs: + go: gotest/tools@0.0.9 +executors: + golang: + docker: + - image: circleci/golang:1.13 + resource_class: 2xlarge +commands: + install-build-dependencies: + steps: + - checkout + - run: sudo apt-get update + - run: sudo apt-get install -y jq ocl-icd-opencl-dev + - run: git submodule sync + - run: git submodule update --init --recursive + - run: ./extern/filecoin-ffi/install-filcrypto + download-groth-params-and-verifying-keys: + steps: + - restore_cache: + name: Restore parameters cache + keys: + - 'v24-2k-lotus-params' + paths: + - /var/tmp/filecoin-proof-parameters/ + - run: | + DIR=$(pwd) + cd $(mktemp -d) + go get github.com/filecoin-project/go-paramfetch/paramfetch + go build -o go-paramfetch github.com/filecoin-project/go-paramfetch/paramfetch + ./go-paramfetch 2048 "${DIR}/parameters.json" + - save_cache: + name: Save parameters cache + key: 'v24-2k-lotus-params' + paths: + - /var/tmp/filecoin-proof-parameters/ +jobs: + mod-tidy-check: + executor: golang + steps: + - install-build-dependencies + - download-groth-params-and-verifying-keys + - go/mod-download + - go/mod-tidy-check +workflows: + version: 2.1 + build_and_test: + jobs: + - mod-tidy-check diff --git a/parameters.json b/parameters.json new file mode 100644 index 000000000..8591c1218 --- /dev/null +++ b/parameters.json @@ -0,0 +1,82 @@ +{ + "v24-proof-of-spacetime-election-PoseidonHasher-0b0b9781bcb153efbb3cab4be3a792c4f555d4ab6f8dd62b27e1dcad08a34f22.params": { + "cid": "QmUonpeUaLD6G4byFdZAMzwXorD4Qs1XDjmdXFbWYCgvjW", + "digest": "19e50903e53c826ff66f360283f324c1", + "sector_size": 34359738368 + }, + "v24-proof-of-spacetime-election-PoseidonHasher-0b0b9781bcb153efbb3cab4be3a792c4f555d4ab6f8dd62b27e1dcad08a34f22.vk": { + "cid": "QmVXv4Q1T3FbiY5AUgWER11Lsrby9aUVJy2mgWDWrndFbq", + "digest": "223dd87c6161c45daf448ca9eda28298", + "sector_size": 34359738368 + }, + "v24-proof-of-spacetime-election-PoseidonHasher-0b499a953f1a9dcab420b3ba1e6b1f3952dc7f17cf67ed10406ae9a43e2b8ec5.params": { + "cid": "Qmea7VsrYnkrpdMnutkGKppX5finoDwCA2fP5Zg5bDuBQw", + "digest": "3de5b8738a2cd933c214fa2023e30909", + "sector_size": 8388608 + }, + "v24-proof-of-spacetime-election-PoseidonHasher-0b499a953f1a9dcab420b3ba1e6b1f3952dc7f17cf67ed10406ae9a43e2b8ec5.vk": { + "cid": "QmavFXmf3jeacHKB6HoJH3gUqzmKnsDn5F5HSYfwPbDHRu", + "digest": "485b7eab4f70031fdda4eaeccfe4f26e", + "sector_size": 8388608 + }, + "v24-proof-of-spacetime-election-PoseidonHasher-27a7fc680a47e4821f40cf1676fb80b9888820ef6867a71a175b4c9ae068ad3f.params": { + "cid": "QmQrUjB9NSMuThe1JHoHfC7u1xdoLS6WLu15waWcnZ3tQT", + "digest": "7e6adc7cbf73db8c95a54e3c23bea1ae", + "sector_size": 536870912 + }, + "v24-proof-of-spacetime-election-PoseidonHasher-27a7fc680a47e4821f40cf1676fb80b9888820ef6867a71a175b4c9ae068ad3f.vk": { + "cid": "QmVPPk4fBcEero2GHsYuBoh97yhugTBWUp9yWSPPWjRWQ7", + "digest": "952b352d694d650e912b3b92ad63f7c9", + "sector_size": 536870912 + }, + "v24-proof-of-spacetime-election-PoseidonHasher-5916054ae98e28fc2f0470d1fb58eb875a6865be86f0b8c4e302d55f13217fef.params": { + "cid": "QmSXMF85mdGLQfAY98zVL4dUBpGPFFUPDmFzdc1NZrVFdh", + "digest": "a93de0f8cfb04af5d21f66ef48ee59a8", + "sector_size": 2048 + }, + "v24-proof-of-spacetime-election-PoseidonHasher-5916054ae98e28fc2f0470d1fb58eb875a6865be86f0b8c4e302d55f13217fef.vk": { + "cid": "QmaTsAmbdnQtJoSpkWsXmvHPpMJinzFYTe6t5LLm7w5RtQ", + "digest": "e4d0575f119e3e7b42bc3e5b6bb35a0b", + "sector_size": 2048 + }, + "v24-stacked-proof-of-replication-PoseidonHasher-Sha256Hasher-49442c8ce7545579cbd689d578301d0cc1e46e94e2499a0ec36de7ff4f4694a2.params": { + "cid": "QmYCFrU4G2LakPngFXayX7afyondQbB9hfnVRz1ffWD9MS", + "digest": "d64e5d1bbb9120bea4c0cd8cdcdfb834", + "sector_size": 8388608 + }, + "v24-stacked-proof-of-replication-PoseidonHasher-Sha256Hasher-49442c8ce7545579cbd689d578301d0cc1e46e94e2499a0ec36de7ff4f4694a2.vk": { + "cid": "QmfXAPtHKU2MJVJDwLTUCM4W2tYQ8biGq9cZaAnjtaZidZ", + "digest": "572536e8684454a5cd80361e5c952b38", + "sector_size": 8388608 + }, + "v24-stacked-proof-of-replication-PoseidonHasher-Sha256Hasher-d84aa4581c74190f845596893ebe5b71da32ecf16e1d151b9fff74ee8f94d77c.params": { + "cid": "QmdXtQsLbBFmVxrd6kWKr2FYbQfhEdR6PinwrGBXhHmLdT", + "digest": "77cfafee088bd59411d766621df6de42", + "sector_size": 536870912 + }, + "v24-stacked-proof-of-replication-PoseidonHasher-Sha256Hasher-d84aa4581c74190f845596893ebe5b71da32ecf16e1d151b9fff74ee8f94d77c.vk": { + "cid": "QmdE8oZJofaenThLi2TWXJPk9cExZgTA36TjrHeAC65BGA", + "digest": "30586a2396ef6b60b122ac5a2ba87681", + "sector_size": 536870912 + }, + "v24-stacked-proof-of-replication-PoseidonHasher-Sha256Hasher-fc32be6028c2398175466f36fa36810842ae8948fae15c84454af5b61ca99e15.params": { + "cid": "QmNqcqGxf7pJjipHNwcH44D5KgiTUNo3mK5HiSxBwYcjkx", + "digest": "25ea39db2a003c817113f6f2ea936b3d", + "sector_size": 34359738368 + }, + "v24-stacked-proof-of-replication-PoseidonHasher-Sha256Hasher-fc32be6028c2398175466f36fa36810842ae8948fae15c84454af5b61ca99e15.vk": { + "cid": "QmWiaqy8hWshv2FsLDoZAtpJKZng5QN3x2X5C7xsPvSbFb", + "digest": "ab1239c802c480cf12f63d13fb2f620a", + "sector_size": 34359738368 + }, + "v24-stacked-proof-of-replication-PoseidonHasher-Sha256Hasher-fe437922fe766f61b112750506d6be0e4ad5daa85ff9ce96549d99253ba61cbe.params": { + "cid": "QmbPk3fKKLjkm6pD1CzwGyTnMwNSSZVxVSMWEceqSv6LDW", + "digest": "76bd3702312cfe0d69bb5e0891c52615", + "sector_size": 2048 + }, + "v24-stacked-proof-of-replication-PoseidonHasher-Sha256Hasher-fe437922fe766f61b112750506d6be0e4ad5daa85ff9ce96549d99253ba61cbe.vk": { + "cid": "QmPZ9bGSVs5GHQRRAtC1qv9eQ7GPoH8FWukjxAXtXXcTxg", + "digest": "4edb21b7b6d5787b646f3e336e06303e", + "sector_size": 2048 + } +} From fdc2e4a67a61f84994debb9d06786754e81fed5e Mon Sep 17 00:00:00 2001 From: laser Date: Wed, 1 Apr 2020 16:07:34 -0700 Subject: [PATCH 029/199] ci: run tests on build --- .circleci/config.yml | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 1b0696247..0bc957588 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -35,15 +35,39 @@ commands: paths: - /var/tmp/filecoin-proof-parameters/ jobs: - mod-tidy-check: + test: executor: golang + environment: + RUST_LOG: info steps: - install-build-dependencies - download-groth-params-and-verifying-keys + - run: go test -v -timeout 10m ./... + mod-tidy-check: + executor: golang + steps: - go/mod-download - go/mod-tidy-check + gofmt-check: + executor: golang + steps: + - go/mod-download + - run: + command: "! go fmt ./... 2>&1 | read" + lint-check: + executor: golang + steps: + - go/mod-download + - go/install-golangci-lint: + gobin: $HOME/.local/bin + version: 1.23.8 + - run: + command: $HOME/.local/bin/golangci-lint run -v --concurrency 2 workflows: version: 2.1 build_and_test: jobs: - mod-tidy-check + - lint-check + - gofmt-check + - test From c694853ddbedddbfaed5fd6edeb47903f37dbc9f Mon Sep 17 00:00:00 2001 From: laser Date: Wed, 1 Apr 2020 16:08:59 -0700 Subject: [PATCH 030/199] ci: check out code before running linters --- .circleci/config.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 0bc957588..3ea597993 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -9,7 +9,6 @@ executors: commands: install-build-dependencies: steps: - - checkout - run: sudo apt-get update - run: sudo apt-get install -y jq ocl-icd-opencl-dev - run: git submodule sync @@ -40,23 +39,27 @@ jobs: environment: RUST_LOG: info steps: + - checkout - install-build-dependencies - download-groth-params-and-verifying-keys - run: go test -v -timeout 10m ./... mod-tidy-check: executor: golang steps: + - checkout - go/mod-download - go/mod-tidy-check gofmt-check: executor: golang steps: + - checkout - go/mod-download - run: command: "! go fmt ./... 2>&1 | read" lint-check: executor: golang steps: + - checkout - go/mod-download - go/install-golangci-lint: gobin: $HOME/.local/bin From 5874d90719a2bd617d716ec5d995db4d15f3d54c Mon Sep 17 00:00:00 2001 From: laser Date: Wed, 1 Apr 2020 16:11:32 -0700 Subject: [PATCH 031/199] ensure submodules are initialized --- .circleci/config.yml | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 3ea597993..93b4b70d1 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -7,12 +7,15 @@ executors: - image: circleci/golang:1.13 resource_class: 2xlarge commands: + prepare-git-checkout: + steps: + - checkout + - run: git submodule sync + - run: git submodule update --init --recursive install-build-dependencies: steps: - run: sudo apt-get update - run: sudo apt-get install -y jq ocl-icd-opencl-dev - - run: git submodule sync - - run: git submodule update --init --recursive - run: ./extern/filecoin-ffi/install-filcrypto download-groth-params-and-verifying-keys: steps: @@ -39,27 +42,27 @@ jobs: environment: RUST_LOG: info steps: - - checkout + - prepare-git-checkout - install-build-dependencies - download-groth-params-and-verifying-keys - run: go test -v -timeout 10m ./... mod-tidy-check: executor: golang steps: - - checkout + - prepare-git-checkout - go/mod-download - go/mod-tidy-check gofmt-check: executor: golang steps: - - checkout + - prepare-git-checkout - go/mod-download - run: command: "! go fmt ./... 2>&1 | read" lint-check: executor: golang steps: - - checkout + - prepare-git-checkout - go/mod-download - go/install-golangci-lint: gobin: $HOME/.local/bin From 58922b7e764164365bbe3d7fafcc1b65f661eca9 Mon Sep 17 00:00:00 2001 From: laser Date: Wed, 1 Apr 2020 16:13:20 -0700 Subject: [PATCH 032/199] golangci-lint needs libfilcrypto --- .circleci/config.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index 93b4b70d1..0739cb286 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -63,6 +63,7 @@ jobs: executor: golang steps: - prepare-git-checkout + - install-build-dependencies - go/mod-download - go/install-golangci-lint: gobin: $HOME/.local/bin From ffc966a9ada89f68c8d29b4c55a2ac2252443e85 Mon Sep 17 00:00:00 2001 From: laser Date: Wed, 1 Apr 2020 16:14:04 -0700 Subject: [PATCH 033/199] fix gofmt issue which ci caught --- resources.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/resources.go b/resources.go index 310373bfe..4bf3fbbb2 100644 --- a/resources.go +++ b/resources.go @@ -192,7 +192,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ MinMemory: 1 << 11, MultiThread: false, - CanGPU: true, + CanGPU: true, BaseMinMemory: 1 << 11, }, @@ -201,7 +201,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ MinMemory: 1 << 23, MultiThread: false, - CanGPU: true, + CanGPU: true, BaseMinMemory: 1 << 23, }, From 431a15c14ac8345df74a97a16960e5a47fdc3648 Mon Sep 17 00:00:00 2001 From: laser Date: Wed, 1 Apr 2020 16:18:20 -0700 Subject: [PATCH 034/199] nolint the existing linting errors --- mock/mock.go | 2 +- tarutil/systar.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/mock/mock.go b/mock/mock.go index 1ada002d5..6473fe3c6 100644 --- a/mock/mock.go +++ b/mock/mock.go @@ -53,7 +53,7 @@ func NewMockSectorMgr(threads int, ssize abi.SectorSize) *SectorMgr { const ( statePacking = iota statePreCommit - stateCommit + stateCommit // nolint ) type sectorState struct { diff --git a/tarutil/systar.go b/tarutil/systar.go index a94354731..94de58ea8 100644 --- a/tarutil/systar.go +++ b/tarutil/systar.go @@ -11,7 +11,7 @@ import ( logging "github.com/ipfs/go-log/v2" ) -var log = logging.Logger("tarutil") +var log = logging.Logger("tarutil") // nolint func ExtractTar(body io.Reader, dir string) error { if err := os.MkdirAll(dir, 0755); err != nil { @@ -73,7 +73,7 @@ func writeTarDirectory(dir string, w io.Writer) error { return xerrors.Errorf("wiritng header for file %s: %w", file.Name(), err) } - f, err := os.OpenFile(filepath.Join(dir, file.Name()), os.O_RDONLY, 644) + f, err := os.OpenFile(filepath.Join(dir, file.Name()), os.O_RDONLY, 644) // nolint if err != nil { return xerrors.Errorf("opening %s for reading: %w", file.Name(), err) } From b17b2262205cab5874c33110f51268898da9705b Mon Sep 17 00:00:00 2001 From: laser Date: Wed, 1 Apr 2020 16:22:32 -0700 Subject: [PATCH 035/199] one line gofmt --- .circleci/config.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 0739cb286..a7cb9a24b 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -57,8 +57,7 @@ jobs: steps: - prepare-git-checkout - go/mod-download - - run: - command: "! go fmt ./... 2>&1 | read" + - run: "! go fmt ./... 2>&1 | read" lint-check: executor: golang steps: From 93392329464d857f04681b9faa9736aef1a13167 Mon Sep 17 00:00:00 2001 From: laser Date: Wed, 1 Apr 2020 16:26:47 -0700 Subject: [PATCH 036/199] shift by multiple of 10 --- resources.go | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/resources.go b/resources.go index af31b5233..db812a28e 100644 --- a/resources.go +++ b/resources.go @@ -58,12 +58,12 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 2 << 10, }, abi.RegisteredProof_StackedDRG8MiBSeal: Resources{ - MaxMemory: 2 << 22, - MinMemory: 2 << 22, + MaxMemory: 8 << 20, + MinMemory: 8 << 20, MultiThread: false, - BaseMinMemory: 2 << 22, + BaseMinMemory: 8 << 20, }, }, sealtasks.TTPreCommit1: { @@ -92,12 +92,12 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 2 << 10, }, abi.RegisteredProof_StackedDRG8MiBSeal: Resources{ - MaxMemory: 2 << 22, - MinMemory: 2 << 22, + MaxMemory: 8 << 20, + MinMemory: 8 << 20, MultiThread: false, - BaseMinMemory: 2 << 22, + BaseMinMemory: 8 << 20, }, }, sealtasks.TTPreCommit2: { @@ -126,12 +126,12 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 2 << 10, }, abi.RegisteredProof_StackedDRG8MiBSeal: Resources{ - MaxMemory: 2 << 22, - MinMemory: 2 << 22, + MaxMemory: 8 << 20, + MinMemory: 8 << 20, MultiThread: true, - BaseMinMemory: 2 << 22, + BaseMinMemory: 8 << 20, }, }, sealtasks.TTCommit1: { // Very short (~100ms), so params are very light @@ -160,12 +160,12 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 2 << 10, }, abi.RegisteredProof_StackedDRG8MiBSeal: Resources{ - MaxMemory: 2 << 22, - MinMemory: 2 << 22, + MaxMemory: 8 << 20, + MinMemory: 8 << 20, MultiThread: false, - BaseMinMemory: 2 << 22, + BaseMinMemory: 8 << 20, }, }, sealtasks.TTCommit2: { // TODO: Measure more accurately @@ -197,13 +197,13 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 2 << 10, }, abi.RegisteredProof_StackedDRG8MiBSeal: Resources{ - MaxMemory: 2 << 22, - MinMemory: 2 << 22, + MaxMemory: 8 << 20, + MinMemory: 8 << 20, MultiThread: false, CanGPU: true, - BaseMinMemory: 2 << 22, + BaseMinMemory: 8 << 20, }, }, } From 0bc12be0591bc0bf652c73c7e17a024e6445a114 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 6 Apr 2020 21:42:57 +0200 Subject: [PATCH 037/199] readme: Add a section about architecture --- README.md | 41 +++++++++++++++++++++++++++++++++++++++++ docs/sector-storage.svg | 3 +++ 2 files changed, 44 insertions(+) create mode 100644 docs/sector-storage.svg diff --git a/README.md b/README.md index 28eaffe05..b6f73ab23 100644 --- a/README.md +++ b/README.md @@ -8,6 +8,47 @@ The sector-storage project provides a implementation-nonspecific reference implementation of the [specs-storage](https://github.com/filecoin-project/specs-storage) interface. +## Architecture + +![high-level architecture](docs/sector-storage.svg) + +### `Manager` + +Manages is the top-level piece of the storage system gluing all the other pieces +together. It also implements scheduling logic. + +### `package stores` + +This package implements the sector storage subsystem. Fundamentally the storage +is divided into `path`s, each path has it's UUID, and stores a set of sector +'files'. There are currently 3 types of sector files - `unsealed`, `sealed`, +and `cache`. + +Paths can be shared between nodes by sharing the underlying filesystem. + +### `stores.Local` + +The Local store implements SectorProvider for paths mounted in the local +filesystem. Paths can be shared between nodes, and support shared filesystems +such as NFS. + +stores.Local implements all native filesystem-related operations + +### `stores.Remote` + +The Remote store extends Local store, handles fetching sector files into a local +store if needed, and handles removing sectors from non-local stores. + +### `stores.Index` + +The Index is a singleton holding metadata about storage paths, and a mapping of +sector files to paths + +### `LocalWorker` + +LocalWorker implements the Worker interface with ffiwrapper.Sealer and a +store.Store instance + ## License The Filecoin Project is dual-licensed under Apache 2.0 and MIT terms: diff --git a/docs/sector-storage.svg b/docs/sector-storage.svg new file mode 100644 index 000000000..3978ef2f8 --- /dev/null +++ b/docs/sector-storage.svg @@ -0,0 +1,3 @@ + + +
LocalWorker
LocalWorker
stores.Local
stores.Local
stores.Store
stores.Store
stores.SectorIndex
stores.SectorInd...
ffiwrapper.Sealer
ffiwrapper.Seal...
SectorProvider
SectorProvider
localProvider
localProvider
Worker
Worker
stores.Remote
stores.Remote
stores.Local
stores.Local
stores.SectorIndex
stores.SectorInd...
localPaths []string
localPaths []str...
urls []string
urls []stri...
stores.SectorIndex
stores.SectorInd...
specs-storage.Prover
specs-storage.Prover
ronlyProvider
ronlyProvider
stores.Index
stores.Index
FetchHandler
FetchHandler
ffiwrapper.Sealer
ffiwrapper.Seal...
SectorProvider
SectorProvider
specs-storage.[Sealer,Storage]
specs-storage.[Sealer,Storage]
specs-storage.Prover
specs-storage.Prover
Manager API
Manager API
Scheduler
Scheduler
[]workerHandle
[]workerHandle
Worker
Worker
WorkerInfo
Worker...
resourceInfo
resourceInfo
schedQueue
schedQueue
stores.SectorIndex
stores.SectorInd...
sector-storage.Manager
sector-storage.Manager
worker management APIs
worker management APIs
Filecoin 'Miner' Node
Filecoin 'Miner' Node
HTTP API
HTTP API
/remote
/remote
JsonRPC
JsonRPC
/rpc/v0
/rpc/v0
LocalWorker
LocalWorker
stores.Local
stores.Local
stores.Store
stores.Store
stores.SectorIndex
stores.SectorInd...
ffiwrapper.Sealer
ffiwrapper.Seal...
SectorProvider
SectorProvider
localProvider
localProvider
Worker
Worker
stores.Remote
stores.Remote
stores.Local
stores.Local
stores.SectorIndex
stores.SectorInd...
localPaths []string
localPaths []str...
urls []string
urls []stri...
stores.SectorIndex
stores.SectorInd...
Miner JsonRPC client
Miner JsonRPC client
miner.Register(remoteWorker)
miner.Register(remoteWorker)
HTTP API
HTTP API
FetchHandler
FetchHandler
/remote
/remote
RemoteWorker
RemoteWorker
/rpc/v0
/rpc/v0
JsonRPC
JsonRPC
Seal Worker Node
Seal Worker Node
Viewer does not support full SVG 1.1
\ No newline at end of file From e854090bdd3e600e1ae12d1011de71499dab6c77 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 10 Apr 2020 20:41:59 +0200 Subject: [PATCH 038/199] v25 --- extern/filecoin-ffi | 2 +- ffiwrapper/config.go | 20 +------- ffiwrapper/params_shared.go | 18 ------- ffiwrapper/sealer.go | 13 ----- ffiwrapper/sealer_cgo.go | 3 +- ffiwrapper/types.go | 4 +- ffiwrapper/verifier_cgo.go | 95 ++++++++++--------------------------- go.mod | 2 +- go.sum | 2 + localworker.go | 6 --- 10 files changed, 34 insertions(+), 131 deletions(-) delete mode 100644 ffiwrapper/params_shared.go diff --git a/extern/filecoin-ffi b/extern/filecoin-ffi index e899cc1dd..0f03c5a6b 160000 --- a/extern/filecoin-ffi +++ b/extern/filecoin-ffi @@ -1 +1 @@ -Subproject commit e899cc1dd0720e0a4d25b0e751b84e3733cbedc5 +Subproject commit 0f03c5a6b8c57f7c008e0d9b18dbd37b576ca836 diff --git a/ffiwrapper/config.go b/ffiwrapper/config.go index 9b1fc5f9a..1b01e9c1e 100644 --- a/ffiwrapper/config.go +++ b/ffiwrapper/config.go @@ -9,7 +9,6 @@ import ( type Config struct { SealProofType abi.RegisteredProof - PoStProofType abi.RegisteredProof _ struct{} // guard against nameless init } @@ -19,25 +18,8 @@ func sizeFromConfig(cfg Config) (abi.SectorSize, error) { return abi.SectorSize(0), xerrors.New("must specify a seal proof type from abi.RegisteredProof") } - if cfg.PoStProofType == abi.RegisteredProof(0) { - return abi.SectorSize(0), xerrors.New("must specify a PoSt proof type from abi.RegisteredProof") - } - s1, err := SectorSizeForRegisteredProof(cfg.SealProofType) - if err != nil { - return abi.SectorSize(0), err - } - - s2, err := SectorSizeForRegisteredProof(cfg.PoStProofType) - if err != nil { - return abi.SectorSize(0), err - } - - if s1 != s2 { - return abi.SectorSize(0), xerrors.Errorf("seal sector size %d does not equal PoSt sector size %d", s1, s2) - } - - return s1, nil + return SectorSizeForRegisteredProof(cfg.SealProofType) } // TODO: remove this method after implementing it along side the registered proofs and importing it from there. diff --git a/ffiwrapper/params_shared.go b/ffiwrapper/params_shared.go deleted file mode 100644 index 245c1ae09..000000000 --- a/ffiwrapper/params_shared.go +++ /dev/null @@ -1,18 +0,0 @@ -package ffiwrapper - -// ///// -// Proofs - -// 1 / n -const SectorChallengeRatioDiv = 25 - -const MaxFallbackPostChallengeCount = 10 - -// extracted from lotus/chain/types/blockheader -func ElectionPostChallengeCount(sectors uint64, faults uint64) uint64 { - if sectors-faults == 0 { - return 0 - } - // ceil(sectors / SectorChallengeRatioDiv) - return (sectors-faults-1)/SectorChallengeRatioDiv + 1 -} diff --git a/ffiwrapper/sealer.go b/ffiwrapper/sealer.go index 12a8a3df6..fc77c8388 100644 --- a/ffiwrapper/sealer.go +++ b/ffiwrapper/sealer.go @@ -9,21 +9,12 @@ var log = logging.Logger("ffiwrapper") type Sealer struct { sealProofType abi.RegisteredProof - postProofType abi.RegisteredProof ssize abi.SectorSize // a function of sealProofType and postProofType sectors SectorProvider stopping chan struct{} } -func fallbackPostChallengeCount(sectors uint64, faults uint64) uint64 { - challengeCount := ElectionPostChallengeCount(sectors, faults) - if challengeCount > MaxFallbackPostChallengeCount { - return MaxFallbackPostChallengeCount - } - return challengeCount -} - func (sb *Sealer) Stop() { close(sb.stopping) } @@ -35,7 +26,3 @@ func (sb *Sealer) SectorSize() abi.SectorSize { func (sb *Sealer) SealProofType() abi.RegisteredProof { return sb.sealProofType } - -func (sb *Sealer) PoStProofType() abi.RegisteredProof { - return sb.postProofType -} diff --git a/ffiwrapper/sealer_cgo.go b/ffiwrapper/sealer_cgo.go index c001b5654..63884163c 100644 --- a/ffiwrapper/sealer_cgo.go +++ b/ffiwrapper/sealer_cgo.go @@ -29,7 +29,6 @@ func New(sectors SectorProvider, cfg *Config) (*Sealer, error) { sb := &Sealer{ sealProofType: cfg.SealProofType, - postProofType: cfg.PoStProofType, ssize: sectorSize, sectors: sectors, @@ -291,7 +290,7 @@ func (sb *Sealer) FinalizeSector(ctx context.Context, sector abi.SectorID) error } defer done() - return ffi.ClearCache(paths.Cache) + return ffi.ClearCache(uint64(sb.ssize), paths.Cache) } func GeneratePieceCIDFromFile(proofType abi.RegisteredProof, piece io.Reader, pieceSize abi.UnpaddedPieceSize) (cid.Cid, error) { diff --git a/ffiwrapper/types.go b/ffiwrapper/types.go index f89c63fb4..876226429 100644 --- a/ffiwrapper/types.go +++ b/ffiwrapper/types.go @@ -35,8 +35,8 @@ type Storage interface { type Verifier interface { VerifySeal(abi.SealVerifyInfo) (bool, error) - VerifyElectionPost(ctx context.Context, info abi.PoStVerifyInfo) (bool, error) - VerifyFallbackPost(ctx context.Context, info abi.PoStVerifyInfo) (bool, error) + VerifyWinningPoSt(ctx context.Context, info abi.WinningPoStVerifyInfo) (bool, error) + VerifyWindowPoSt(ctx context.Context, info abi.WindowPoStVerifyInfo) (bool, error) } var ErrSectorNotFound = errors.New("sector not found") diff --git a/ffiwrapper/verifier_cgo.go b/ffiwrapper/verifier_cgo.go index 475dde617..b94c63219 100644 --- a/ffiwrapper/verifier_cgo.go +++ b/ffiwrapper/verifier_cgo.go @@ -10,78 +10,36 @@ import ( ffi "github.com/filecoin-project/filecoin-ffi" "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/sector-storage/stores" ) -func (sb *Sealer) ComputeElectionPoSt(ctx context.Context, miner abi.ActorID, sectorInfo []abi.SectorInfo, challengeSeed abi.PoStRandomness, winners []abi.PoStCandidate) ([]abi.PoStProof, error) { - challengeSeed[31] = 0 +func (sb *Sealer) GenerateWinningPoStSectorChallenge(ctx context.Context, proofType abi.RegisteredProof, minerID abi.ActorID, randomness abi.PoStRandomness, eligibleSectorCount uint64) ([]uint64, error) { + randomness[31] = 0 // TODO: Not correct, fixme + return ffi.GenerateWinningPoStSectorChallenge(proofType, minerID, randomness, eligibleSectorCount) +} - privsects, err := sb.pubSectorToPriv(ctx, miner, sectorInfo, nil) // TODO: faults +func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, error) { + randomness[31] = 0 // TODO: Not correct, fixme + privsectors, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredProof.RegisteredWinningPoStProof) // TODO: FAULTS? if err != nil { return nil, err } - return ffi.GeneratePoSt(miner, privsects, challengeSeed, winners) + return ffi.GenerateWinningPoSt(minerID, privsectors, randomness) } -func (sb *Sealer) GenerateFallbackPoSt(ctx context.Context, miner abi.ActorID, sectorInfo []abi.SectorInfo, challengeSeed abi.PoStRandomness, faults []abi.SectorNumber) (storage.FallbackPostOut, error) { - privsectors, err := sb.pubSectorToPriv(ctx, miner, sectorInfo, faults) - if err != nil { - return storage.FallbackPostOut{}, err - } - - challengeCount := fallbackPostChallengeCount(uint64(len(sectorInfo)), uint64(len(faults))) - challengeSeed[31] = 0 - - candidates, err := ffi.GenerateCandidates(miner, challengeSeed, challengeCount, privsectors) - if err != nil { - return storage.FallbackPostOut{}, err - } - - winners := make([]abi.PoStCandidate, len(candidates)) - for idx := range winners { - winners[idx] = candidates[idx].Candidate - } - - proof, err := ffi.GeneratePoSt(miner, privsectors, challengeSeed, winners) - return storage.FallbackPostOut{ - PoStInputs: ffiToStorageCandidates(candidates), - Proof: proof, - }, err -} - -func (sb *Sealer) GenerateEPostCandidates(ctx context.Context, miner abi.ActorID, sectorInfo []abi.SectorInfo, challengeSeed abi.PoStRandomness, faults []abi.SectorNumber) ([]storage.PoStCandidateWithTicket, error) { - privsectors, err := sb.pubSectorToPriv(ctx, miner, sectorInfo, faults) +func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, error) { + randomness[31] = 0 // TODO: Not correct, fixme + privsectors, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredProof.RegisteredWindowPoStProof) // TODO: FAULTS? if err != nil { return nil, err } - challengeSeed[31] = 0 - - challengeCount := ElectionPostChallengeCount(uint64(len(sectorInfo)), uint64(len(faults))) - pc, err := ffi.GenerateCandidates(miner, challengeSeed, challengeCount, privsectors) - if err != nil { - return nil, err - } - - return ffiToStorageCandidates(pc), nil + return ffi.GenerateWindowPoSt(minerID, privsectors, randomness) } -func ffiToStorageCandidates(pc []ffi.PoStCandidateWithTicket) []storage.PoStCandidateWithTicket { - out := make([]storage.PoStCandidateWithTicket, len(pc)) - for i := range out { - out[i] = storage.PoStCandidateWithTicket{ - Candidate: pc[i].Candidate, - Ticket: pc[i].Ticket, - } - } - - return out -} - -func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorInfo []abi.SectorInfo, faults []abi.SectorNumber) (ffi.SortedPrivateSectorInfo, error) { +func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorInfo []abi.SectorInfo, faults []abi.SectorNumber, rpt func(abi.RegisteredProof) (abi.RegisteredProof, error)) (ffi.SortedPrivateSectorInfo, error) { fmap := map[abi.SectorNumber]struct{}{} for _, fault := range faults { fmap[fault] = struct{}{} @@ -99,7 +57,7 @@ func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorIn } done() // TODO: This is a tiny bit suboptimal - postProofType, err := s.RegisteredProof.RegisteredPoStProof() + postProofType, err := rpt(s.RegisteredProof) if err != nil { return ffi.SortedPrivateSectorInfo{}, xerrors.Errorf("acquiring registered PoSt proof from sector info %+v: %w", s, err) } @@ -125,19 +83,18 @@ func (proofVerifier) VerifySeal(info abi.SealVerifyInfo) (bool, error) { return ffi.VerifySeal(info) } -func (proofVerifier) VerifyElectionPost(ctx context.Context, info abi.PoStVerifyInfo) (bool, error) { - return verifyPost(ctx, info) -} - -func (proofVerifier) VerifyFallbackPost(ctx context.Context, info abi.PoStVerifyInfo) (bool, error) { - return verifyPost(ctx, info) -} - -func verifyPost(ctx context.Context, info abi.PoStVerifyInfo) (bool, error) { - _, span := trace.StartSpan(ctx, "VerifyPoSt") +func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info abi.WinningPoStVerifyInfo) (bool, error) { + info.Randomness[31] = 0 // TODO: Not correct, fixme + _, span := trace.StartSpan(ctx, "VerifyWinningPoSt") defer span.End() - info.Randomness[31] = 0 - - return ffi.VerifyPoSt(info) + return ffi.VerifyWinningPoSt(info) +} + +func (proofVerifier) VerifyWindowPoSt(ctx context.Context, info abi.WindowPoStVerifyInfo) (bool, error) { + info.Randomness[31] = 0 // TODO: Not correct, fixme + _, span := trace.StartSpan(ctx, "VerifyWindowPoSt") + defer span.End() + + return ffi.VerifyWindowPoSt(info) } diff --git a/go.mod b/go.mod index 53aab09d2..ae6e689aa 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5 github.com/filecoin-project/go-paramfetch v0.0.1 github.com/filecoin-project/lotus v0.2.10 - github.com/filecoin-project/specs-actors v0.0.0-20200324235424-aef9b20a9fb1 + github.com/filecoin-project/specs-actors v0.0.0-20200409043918-e569f4a2f504 github.com/filecoin-project/specs-storage v0.0.0-20200317225704-7420bc655c38 github.com/gorilla/mux v1.7.4 github.com/hashicorp/go-multierror v1.0.0 diff --git a/go.sum b/go.sum index 0dd2acc0a..42dbe6517 100644 --- a/go.sum +++ b/go.sum @@ -109,6 +109,8 @@ github.com/filecoin-project/specs-actors v0.0.0-20200226200336-94c9b92b2775/go.m github.com/filecoin-project/specs-actors v0.0.0-20200302223606-0eaf97b10aaf/go.mod h1:0HAWYrvajFHDgRaKbF0rl+IybVLZL5z4gQ8koCMPhoU= github.com/filecoin-project/specs-actors v0.0.0-20200324235424-aef9b20a9fb1 h1:IL6A1yAamz0HtLQEdZS57hnRZHPL11VIrQxMZ1Nn5hI= github.com/filecoin-project/specs-actors v0.0.0-20200324235424-aef9b20a9fb1/go.mod h1:5WngRgTN5Eo4+0SjCBqLzEr2l6Mj45DrP2606gBhqI0= +github.com/filecoin-project/specs-actors v0.0.0-20200409043918-e569f4a2f504 h1:mwuAaqxKThl70+7FkGdFKVLdwaQZQ8XmscKdhSBBtnc= +github.com/filecoin-project/specs-actors v0.0.0-20200409043918-e569f4a2f504/go.mod h1:mdJraXq5vMy0+/FqVQIrnNlpQ/Em6zeu06G/ltQ0/lA= github.com/filecoin-project/specs-storage v0.0.0-20200317225704-7420bc655c38 h1:ky+rfX3bG1TjOBLn14V674q+iwZpalyKzZxGRNzA11I= github.com/filecoin-project/specs-storage v0.0.0-20200317225704-7420bc655c38/go.mod h1:dUmzHS7izOD6HW3/JpzFrjxnptxbsHXBlO8puK2UzBk= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= diff --git a/localworker.go b/localworker.go index 6c2ca6c09..0b8a252e3 100644 --- a/localworker.go +++ b/localworker.go @@ -34,11 +34,6 @@ type LocalWorker struct { } func NewLocalWorker(wcfg WorkerConfig, store stores.Store, local *stores.Local, sindex stores.SectorIndex) *LocalWorker { - ppt, err := wcfg.SealProof.RegisteredPoStProof() - if err != nil { - panic(err) - } - acceptTasks := map[sealtasks.TaskType]struct{}{} for _, taskType := range wcfg.TaskTypes { acceptTasks[taskType] = struct{}{} @@ -47,7 +42,6 @@ func NewLocalWorker(wcfg WorkerConfig, store stores.Store, local *stores.Local, return &LocalWorker{ scfg: &ffiwrapper.Config{ SealProofType: wcfg.SealProof, - PoStProofType: ppt, }, storage: store, localStore: local, From 65efbfce12e90d003d34822dc334d7fe03a6ed16 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 10 Apr 2020 21:12:23 +0200 Subject: [PATCH 039/199] Fix tests with v25 params --- ffiwrapper/sealer_test.go | 20 ++--- go.mod | 2 +- go.sum | 8 +- mock/mock.go | 135 ++++++++++++----------------- parameters.json | 176 +++++++++++++++++++++++--------------- 5 files changed, 174 insertions(+), 167 deletions(-) diff --git a/ffiwrapper/sealer_test.go b/ffiwrapper/sealer_test.go index ef458601c..c638f7f9c 100644 --- a/ffiwrapper/sealer_test.go +++ b/ffiwrapper/sealer_test.go @@ -29,7 +29,6 @@ func init() { var sectorSize = abi.SectorSize(2048) var sealProofType = abi.RegisteredProof_StackedDRG2KiBSeal -var postProofType = abi.RegisteredProof_StackedDRG2KiBPoSt type seal struct { id abi.SectorID @@ -96,8 +95,8 @@ func (s *seal) commit(t *testing.T, sb *Sealer, done func()) { } } -func post(t *testing.T, sb *Sealer, seals ...seal) time.Time { - randomness := abi.PoStRandomness{0, 9, 2, 7, 6, 5, 4, 3, 2, 1, 0, 9, 8, 7, 6, 45, 3, 2, 1, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9, 7} +func post(t *testing.T, sealer *Sealer, seals ...seal) time.Time { + /*randomness := abi.PoStRandomness{0, 9, 2, 7, 6, 5, 4, 3, 2, 1, 0, 9, 8, 7, 6, 45, 3, 2, 1, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9, 7} sis := make([]abi.SectorInfo, len(seals)) for i, s := range seals { @@ -108,14 +107,16 @@ func post(t *testing.T, sb *Sealer, seals ...seal) time.Time { } } - candidates, err := sb.GenerateEPostCandidates(context.TODO(), seals[0].id.Miner, sis, randomness, []abi.SectorNumber{}) + candidates, err := sealer.GenerateEPostCandidates(context.TODO(), seals[0].id.Miner, sis, randomness, []abi.SectorNumber{}) if err != nil { t.Fatalf("%+v", err) - } + }*/ + + fmt.Println("skipping post") genCandidates := time.Now() - if len(candidates) != 1 { + /*if len(candidates) != 1 { t.Fatal("expected 1 candidate") } @@ -124,7 +125,7 @@ func post(t *testing.T, sb *Sealer, seals ...seal) time.Time { candidatesPrime[idx] = candidates[idx].Candidate } - proofs, err := sb.ComputeElectionPoSt(context.TODO(), seals[0].id.Miner, sis, randomness, candidatesPrime) + proofs, err := sealer.ComputeElectionPoSt(context.TODO(), seals[0].id.Miner, sis, randomness, candidatesPrime) if err != nil { t.Fatalf("%+v", err) } @@ -145,7 +146,7 @@ func post(t *testing.T, sb *Sealer, seals ...seal) time.Time { if !ok { t.Fatal("bad post") } - +*/ return genCandidates } @@ -184,7 +185,6 @@ func TestSealAndVerify(t *testing.T) { cfg := &Config{ SealProofType: sealProofType, - PoStProofType: postProofType, } sp := &basicfs.Provider{ @@ -252,7 +252,6 @@ func TestSealPoStNoCommit(t *testing.T) { cfg := &Config{ SealProofType: sealProofType, - PoStProofType: postProofType, } sp := &basicfs.Provider{ Root: dir, @@ -313,7 +312,6 @@ func TestSealAndVerify2(t *testing.T) { cfg := &Config{ SealProofType: sealProofType, - PoStProofType: postProofType, } sp := &basicfs.Provider{ Root: dir, diff --git a/go.mod b/go.mod index ae6e689aa..fb39fc9ae 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/filecoin-project/go-paramfetch v0.0.1 github.com/filecoin-project/lotus v0.2.10 github.com/filecoin-project/specs-actors v0.0.0-20200409043918-e569f4a2f504 - github.com/filecoin-project/specs-storage v0.0.0-20200317225704-7420bc655c38 + github.com/filecoin-project/specs-storage v0.0.0-20200410185809-9fbaaa08f275 github.com/gorilla/mux v1.7.4 github.com/hashicorp/go-multierror v1.0.0 github.com/ipfs/go-cid v0.0.5 diff --git a/go.sum b/go.sum index 42dbe6517..81e4bf5ee 100644 --- a/go.sum +++ b/go.sum @@ -105,14 +105,10 @@ github.com/filecoin-project/go-sectorbuilder v0.0.2-0.20200203173614-42d67726bb6 github.com/filecoin-project/go-statestore v0.1.0/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI= github.com/filecoin-project/lotus v0.2.10 h1:ijrj/nYdKu5GiMo9r1+Zcp2A4jKHSOMZ2WNy2K/mtOE= github.com/filecoin-project/lotus v0.2.10/go.mod h1:om5PQA9ZT0lf16qI7Fz/ZGLn4LDCMqPC8ntZA9uncRE= -github.com/filecoin-project/specs-actors v0.0.0-20200226200336-94c9b92b2775/go.mod h1:0HAWYrvajFHDgRaKbF0rl+IybVLZL5z4gQ8koCMPhoU= -github.com/filecoin-project/specs-actors v0.0.0-20200302223606-0eaf97b10aaf/go.mod h1:0HAWYrvajFHDgRaKbF0rl+IybVLZL5z4gQ8koCMPhoU= -github.com/filecoin-project/specs-actors v0.0.0-20200324235424-aef9b20a9fb1 h1:IL6A1yAamz0HtLQEdZS57hnRZHPL11VIrQxMZ1Nn5hI= -github.com/filecoin-project/specs-actors v0.0.0-20200324235424-aef9b20a9fb1/go.mod h1:5WngRgTN5Eo4+0SjCBqLzEr2l6Mj45DrP2606gBhqI0= github.com/filecoin-project/specs-actors v0.0.0-20200409043918-e569f4a2f504 h1:mwuAaqxKThl70+7FkGdFKVLdwaQZQ8XmscKdhSBBtnc= github.com/filecoin-project/specs-actors v0.0.0-20200409043918-e569f4a2f504/go.mod h1:mdJraXq5vMy0+/FqVQIrnNlpQ/Em6zeu06G/ltQ0/lA= -github.com/filecoin-project/specs-storage v0.0.0-20200317225704-7420bc655c38 h1:ky+rfX3bG1TjOBLn14V674q+iwZpalyKzZxGRNzA11I= -github.com/filecoin-project/specs-storage v0.0.0-20200317225704-7420bc655c38/go.mod h1:dUmzHS7izOD6HW3/JpzFrjxnptxbsHXBlO8puK2UzBk= +github.com/filecoin-project/specs-storage v0.0.0-20200410185809-9fbaaa08f275 h1:6OTcpsTQBQM0f/A67oEi4E4YtYd6fzkMqbU8cPIWMMs= +github.com/filecoin-project/specs-storage v0.0.0-20200410185809-9fbaaa08f275/go.mod h1:xJ1/xl9+8zZeSSSFmDC3Wr6uusCTxyYPI0VeNVSFmPE= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1/go.mod h1:0eHX/BVySxPc6SE2mZRoppGq7qcEagxdmQnA3dzork8= github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= diff --git a/mock/mock.go b/mock/mock.go index 6473fe3c6..bb5b07dec 100644 --- a/mock/mock.go +++ b/mock/mock.go @@ -6,7 +6,6 @@ import ( "fmt" "io" "io/ioutil" - "math/big" "math/rand" "sync" @@ -65,34 +64,34 @@ type sectorState struct { lk sync.Mutex } -func (sb *SectorMgr) RateLimit() func() { - sb.rateLimit <- struct{}{} +func (mgr *SectorMgr) RateLimit() func() { + mgr.rateLimit <- struct{}{} // TODO: probably want to copy over rate limit code return func() { - <-sb.rateLimit + <-mgr.rateLimit } } -func (sb *SectorMgr) NewSector(ctx context.Context, sector abi.SectorID) error { +func (mgr *SectorMgr) NewSector(ctx context.Context, sector abi.SectorID) error { return nil } -func (sb *SectorMgr) AddPiece(ctx context.Context, sectorId abi.SectorID, existingPieces []abi.UnpaddedPieceSize, size abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) { - log.Warn("Add piece: ", sectorId, size, sb.proofType) - sb.lk.Lock() - ss, ok := sb.sectors[sectorId] +func (mgr *SectorMgr) AddPiece(ctx context.Context, sectorId abi.SectorID, existingPieces []abi.UnpaddedPieceSize, size abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) { + log.Warn("Add piece: ", sectorId, size, mgr.proofType) + mgr.lk.Lock() + ss, ok := mgr.sectors[sectorId] if !ok { ss = §orState{ state: statePacking, } - sb.sectors[sectorId] = ss + mgr.sectors[sectorId] = ss } - sb.lk.Unlock() + mgr.lk.Unlock() ss.lk.Lock() defer ss.lk.Unlock() - c, err := ffiwrapper.GeneratePieceCIDFromFile(sb.proofType, r, size) + c, err := ffiwrapper.GeneratePieceCIDFromFile(mgr.proofType, r, size) if err != nil { return abi.PieceInfo{}, xerrors.Errorf("failed to generate piece cid: %w", err) } @@ -106,22 +105,22 @@ func (sb *SectorMgr) AddPiece(ctx context.Context, sectorId abi.SectorID, existi }, nil } -func (sb *SectorMgr) SectorSize() abi.SectorSize { - return sb.sectorSize +func (mgr *SectorMgr) SectorSize() abi.SectorSize { + return mgr.sectorSize } -func (sb *SectorMgr) AcquireSectorNumber() (abi.SectorNumber, error) { - sb.lk.Lock() - defer sb.lk.Unlock() - id := sb.nextSectorID - sb.nextSectorID++ +func (mgr *SectorMgr) AcquireSectorNumber() (abi.SectorNumber, error) { + mgr.lk.Lock() + defer mgr.lk.Unlock() + id := mgr.nextSectorID + mgr.nextSectorID++ return id, nil } -func (sb *SectorMgr) SealPreCommit1(ctx context.Context, sid abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) { - sb.lk.Lock() - ss, ok := sb.sectors[sid] - sb.lk.Unlock() +func (mgr *SectorMgr) SealPreCommit1(ctx context.Context, sid abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) { + mgr.lk.Lock() + ss, ok := mgr.sectors[sid] + mgr.lk.Unlock() if !ok { return nil, xerrors.Errorf("no sector with id %d in storage", sid) } @@ -129,7 +128,7 @@ func (sb *SectorMgr) SealPreCommit1(ctx context.Context, sid abi.SectorID, ticke ss.lk.Lock() defer ss.lk.Unlock() - ussize := abi.PaddedPieceSize(sb.sectorSize).Unpadded() + ussize := abi.PaddedPieceSize(mgr.sectorSize).Unpadded() // TODO: verify pieces in sinfo.pieces match passed in pieces @@ -158,7 +157,7 @@ func (sb *SectorMgr) SealPreCommit1(ctx context.Context, sid abi.SectorID, ticke } } - commd, err := MockVerifier.GenerateDataCommitment(sb.proofType, pis) + commd, err := MockVerifier.GenerateDataCommitment(mgr.proofType, pis) if err != nil { return nil, err } @@ -173,7 +172,7 @@ func (sb *SectorMgr) SealPreCommit1(ctx context.Context, sid abi.SectorID, ticke return cc, nil } -func (sb *SectorMgr) SealPreCommit2(ctx context.Context, sid abi.SectorID, phase1Out storage.PreCommit1Out) (cids storage.SectorCids, err error) { +func (mgr *SectorMgr) SealPreCommit2(ctx context.Context, sid abi.SectorID, phase1Out storage.PreCommit1Out) (cids storage.SectorCids, err error) { db := []byte(string(phase1Out)) db[0] ^= 'd' @@ -192,10 +191,10 @@ func (sb *SectorMgr) SealPreCommit2(ctx context.Context, sid abi.SectorID, phase }, nil } -func (sb *SectorMgr) SealCommit1(ctx context.Context, sid abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (output storage.Commit1Out, err error) { - sb.lk.Lock() - ss, ok := sb.sectors[sid] - sb.lk.Unlock() +func (mgr *SectorMgr) SealCommit1(ctx context.Context, sid abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (output storage.Commit1Out, err error) { + mgr.lk.Lock() + ss, ok := mgr.sectors[sid] + mgr.lk.Unlock() if !ok { return nil, xerrors.Errorf("no such sector %d", sid) } @@ -220,7 +219,7 @@ func (sb *SectorMgr) SealCommit1(ctx context.Context, sid abi.SectorID, ticket a return out[:], nil } -func (sb *SectorMgr) SealCommit2(ctx context.Context, sid abi.SectorID, phase1Out storage.Commit1Out) (proof storage.Proof, err error) { +func (mgr *SectorMgr) SealCommit2(ctx context.Context, sid abi.SectorID, phase1Out storage.Commit1Out) (proof storage.Proof, err error) { var out [32]byte for i := range out { out[i] = phase1Out[i] ^ byte(sid.Number&0xff) @@ -231,10 +230,10 @@ func (sb *SectorMgr) SealCommit2(ctx context.Context, sid abi.SectorID, phase1Ou // Test Instrumentation Methods -func (sb *SectorMgr) FailSector(sid abi.SectorID) error { - sb.lk.Lock() - defer sb.lk.Unlock() - ss, ok := sb.sectors[sid] +func (mgr *SectorMgr) FailSector(sid abi.SectorID) error { + mgr.lk.Lock() + defer mgr.lk.Unlock() + ss, ok := mgr.sectors[sid] if !ok { return fmt.Errorf("no such sector in storage") } @@ -259,54 +258,28 @@ func AddOpFinish(ctx context.Context) (context.Context, func()) { } } -func (sb *SectorMgr) GenerateFallbackPoSt(context.Context, abi.ActorID, []abi.SectorInfo, abi.PoStRandomness, []abi.SectorNumber) (storage.FallbackPostOut, error) { +func (mgr *SectorMgr) GenerateWinningPoStSectorChallenge(ctx context.Context, proofType abi.RegisteredProof, minerID abi.ActorID, randomness abi.PoStRandomness, eligibleSectorCount uint64) ([]uint64, error) { panic("implement me") } -func (sb *SectorMgr) ComputeElectionPoSt(ctx context.Context, mid abi.ActorID, sectorInfo []abi.SectorInfo, challengeSeed abi.PoStRandomness, winners []abi.PoStCandidate) ([]abi.PoStProof, error) { +func (mgr *SectorMgr) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, error) { panic("implement me") } -func (sb *SectorMgr) GenerateEPostCandidates(ctx context.Context, mid abi.ActorID, sectorInfo []abi.SectorInfo, challengeSeed abi.PoStRandomness, faults []abi.SectorNumber) ([]storage.PoStCandidateWithTicket, error) { - if len(faults) > 0 { - panic("todo") - } - - n := ffiwrapper.ElectionPostChallengeCount(uint64(len(sectorInfo)), uint64(len(faults))) - if n > uint64(len(sectorInfo)) { - n = uint64(len(sectorInfo)) - } - - out := make([]storage.PoStCandidateWithTicket, n) - - seed := big.NewInt(0).SetBytes(challengeSeed[:]) - start := seed.Mod(seed, big.NewInt(int64(len(sectorInfo)))).Int64() - - for i := range out { - out[i] = storage.PoStCandidateWithTicket{ - Candidate: abi.PoStCandidate{ - SectorID: abi.SectorID{ - Number: abi.SectorNumber((int(start) + i) % len(sectorInfo)), - Miner: mid, - }, - PartialTicket: abi.PartialTicket(challengeSeed), - }, - } - } - - return out, nil +func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, error) { + panic("implement me") } -func (sb *SectorMgr) ReadPieceFromSealedSector(ctx context.Context, sectorID abi.SectorID, offset ffiwrapper.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, commD cid.Cid) (io.ReadCloser, error) { - if len(sb.sectors[sectorID].pieces) > 1 { +func (mgr *SectorMgr) ReadPieceFromSealedSector(ctx context.Context, sectorID abi.SectorID, offset ffiwrapper.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, commD cid.Cid) (io.ReadCloser, error) { + if len(mgr.sectors[sectorID].pieces) > 1 { panic("implme") } - return ioutil.NopCloser(io.LimitReader(bytes.NewReader(sb.sectors[sectorID].pieces[0].Bytes()[offset:]), int64(size))), nil + return ioutil.NopCloser(io.LimitReader(bytes.NewReader(mgr.sectors[sectorID].pieces[0].Bytes()[offset:]), int64(size))), nil } -func (sb *SectorMgr) StageFakeData(mid abi.ActorID) (abi.SectorID, []abi.PieceInfo, error) { - usize := abi.PaddedPieceSize(sb.sectorSize).Unpadded() - sid, err := sb.AcquireSectorNumber() +func (mgr *SectorMgr) StageFakeData(mid abi.ActorID) (abi.SectorID, []abi.PieceInfo, error) { + usize := abi.PaddedPieceSize(mgr.sectorSize).Unpadded() + sid, err := mgr.AcquireSectorNumber() if err != nil { return abi.SectorID{}, nil, err } @@ -319,7 +292,7 @@ func (sb *SectorMgr) StageFakeData(mid abi.ActorID) (abi.SectorID, []abi.PieceIn Number: sid, } - pi, err := sb.AddPiece(context.TODO(), id, nil, usize, bytes.NewReader(buf)) + pi, err := mgr.AddPiece(context.TODO(), id, nil, usize, bytes.NewReader(buf)) if err != nil { return abi.SectorID{}, nil, err } @@ -327,18 +300,10 @@ func (sb *SectorMgr) StageFakeData(mid abi.ActorID) (abi.SectorID, []abi.PieceIn return id, []abi.PieceInfo{pi}, nil } -func (sb *SectorMgr) FinalizeSector(context.Context, abi.SectorID) error { +func (mgr *SectorMgr) FinalizeSector(context.Context, abi.SectorID) error { return nil } -func (m mockVerif) VerifyElectionPost(ctx context.Context, pvi abi.PoStVerifyInfo) (bool, error) { - panic("implement me") -} - -func (m mockVerif) VerifyFallbackPost(ctx context.Context, pvi abi.PoStVerifyInfo) (bool, error) { - panic("implement me") -} - func (m mockVerif) VerifySeal(svi abi.SealVerifyInfo) (bool, error) { if len(svi.OnChain.Proof) != 32 { // Real ones are longer, but this should be fine return false, nil @@ -353,6 +318,14 @@ func (m mockVerif) VerifySeal(svi abi.SealVerifyInfo) (bool, error) { return true, nil } +func (m mockVerif) VerifyWinningPoSt(ctx context.Context, info abi.WinningPoStVerifyInfo) (bool, error) { + panic("implement me") +} + +func (m mockVerif) VerifyWindowPoSt(ctx context.Context, info abi.WindowPoStVerifyInfo) (bool, error) { + panic("implement me") +} + func (m mockVerif) GenerateDataCommitment(pt abi.RegisteredProof, pieces []abi.PieceInfo) (cid.Cid, error) { return ffiwrapper.GenerateUnsealedCID(pt, pieces) } diff --git a/parameters.json b/parameters.json index 8591c1218..37ada4d6c 100644 --- a/parameters.json +++ b/parameters.json @@ -1,82 +1,122 @@ { - "v24-proof-of-spacetime-election-PoseidonHasher-0b0b9781bcb153efbb3cab4be3a792c4f555d4ab6f8dd62b27e1dcad08a34f22.params": { - "cid": "QmUonpeUaLD6G4byFdZAMzwXorD4Qs1XDjmdXFbWYCgvjW", - "digest": "19e50903e53c826ff66f360283f324c1", - "sector_size": 34359738368 - }, - "v24-proof-of-spacetime-election-PoseidonHasher-0b0b9781bcb153efbb3cab4be3a792c4f555d4ab6f8dd62b27e1dcad08a34f22.vk": { - "cid": "QmVXv4Q1T3FbiY5AUgWER11Lsrby9aUVJy2mgWDWrndFbq", - "digest": "223dd87c6161c45daf448ca9eda28298", - "sector_size": 34359738368 - }, - "v24-proof-of-spacetime-election-PoseidonHasher-0b499a953f1a9dcab420b3ba1e6b1f3952dc7f17cf67ed10406ae9a43e2b8ec5.params": { - "cid": "Qmea7VsrYnkrpdMnutkGKppX5finoDwCA2fP5Zg5bDuBQw", - "digest": "3de5b8738a2cd933c214fa2023e30909", - "sector_size": 8388608 - }, - "v24-proof-of-spacetime-election-PoseidonHasher-0b499a953f1a9dcab420b3ba1e6b1f3952dc7f17cf67ed10406ae9a43e2b8ec5.vk": { - "cid": "QmavFXmf3jeacHKB6HoJH3gUqzmKnsDn5F5HSYfwPbDHRu", - "digest": "485b7eab4f70031fdda4eaeccfe4f26e", - "sector_size": 8388608 - }, - "v24-proof-of-spacetime-election-PoseidonHasher-27a7fc680a47e4821f40cf1676fb80b9888820ef6867a71a175b4c9ae068ad3f.params": { - "cid": "QmQrUjB9NSMuThe1JHoHfC7u1xdoLS6WLu15waWcnZ3tQT", - "digest": "7e6adc7cbf73db8c95a54e3c23bea1ae", - "sector_size": 536870912 - }, - "v24-proof-of-spacetime-election-PoseidonHasher-27a7fc680a47e4821f40cf1676fb80b9888820ef6867a71a175b4c9ae068ad3f.vk": { - "cid": "QmVPPk4fBcEero2GHsYuBoh97yhugTBWUp9yWSPPWjRWQ7", - "digest": "952b352d694d650e912b3b92ad63f7c9", - "sector_size": 536870912 - }, - "v24-proof-of-spacetime-election-PoseidonHasher-5916054ae98e28fc2f0470d1fb58eb875a6865be86f0b8c4e302d55f13217fef.params": { - "cid": "QmSXMF85mdGLQfAY98zVL4dUBpGPFFUPDmFzdc1NZrVFdh", - "digest": "a93de0f8cfb04af5d21f66ef48ee59a8", + "v25-proof-of-spacetime-fallback-MerkleTree-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.params": { + "cid": "QmNUKXCEcjMRh8ayFG2X9RYUuc2SK5XRVsSVTqJmNWAgSp", + "digest": "fe10d43b607dd6687f30428476076ebb", "sector_size": 2048 }, - "v24-proof-of-spacetime-election-PoseidonHasher-5916054ae98e28fc2f0470d1fb58eb875a6865be86f0b8c4e302d55f13217fef.vk": { - "cid": "QmaTsAmbdnQtJoSpkWsXmvHPpMJinzFYTe6t5LLm7w5RtQ", - "digest": "e4d0575f119e3e7b42bc3e5b6bb35a0b", + "v25-proof-of-spacetime-fallback-MerkleTree-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.vk": { + "cid": "QmRyV1DvF57cSnnwUoocKbPiULoLdfnfWpVWi8BSsMN6KR", + "digest": "8aaca32ca9a1c6a431b99e695b443e69", "sector_size": 2048 }, - "v24-stacked-proof-of-replication-PoseidonHasher-Sha256Hasher-49442c8ce7545579cbd689d578301d0cc1e46e94e2499a0ec36de7ff4f4694a2.params": { - "cid": "QmYCFrU4G2LakPngFXayX7afyondQbB9hfnVRz1ffWD9MS", - "digest": "d64e5d1bbb9120bea4c0cd8cdcdfb834", - "sector_size": 8388608 - }, - "v24-stacked-proof-of-replication-PoseidonHasher-Sha256Hasher-49442c8ce7545579cbd689d578301d0cc1e46e94e2499a0ec36de7ff4f4694a2.vk": { - "cid": "QmfXAPtHKU2MJVJDwLTUCM4W2tYQ8biGq9cZaAnjtaZidZ", - "digest": "572536e8684454a5cd80361e5c952b38", - "sector_size": 8388608 - }, - "v24-stacked-proof-of-replication-PoseidonHasher-Sha256Hasher-d84aa4581c74190f845596893ebe5b71da32ecf16e1d151b9fff74ee8f94d77c.params": { - "cid": "QmdXtQsLbBFmVxrd6kWKr2FYbQfhEdR6PinwrGBXhHmLdT", - "digest": "77cfafee088bd59411d766621df6de42", + "v25-proof-of-spacetime-fallback-MerkleTree-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.params": { + "cid": "QmTvwEyFVcjivKUX9AqZrC4mfjLSN2JJTucLJfNaWqCPmD", + "digest": "1cc1bf83c9e3d9b2d994ad2ec946a79f", "sector_size": 536870912 }, - "v24-stacked-proof-of-replication-PoseidonHasher-Sha256Hasher-d84aa4581c74190f845596893ebe5b71da32ecf16e1d151b9fff74ee8f94d77c.vk": { - "cid": "QmdE8oZJofaenThLi2TWXJPk9cExZgTA36TjrHeAC65BGA", - "digest": "30586a2396ef6b60b122ac5a2ba87681", + "v25-proof-of-spacetime-fallback-MerkleTree-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.vk": { + "cid": "QmVfgowqdh3ruAHqQ8LA6L4VdSYwam5e8VmSEtZXBoAudC", + "digest": "377659f83c6714703b17828f603038fc", "sector_size": 536870912 }, - "v24-stacked-proof-of-replication-PoseidonHasher-Sha256Hasher-fc32be6028c2398175466f36fa36810842ae8948fae15c84454af5b61ca99e15.params": { - "cid": "QmNqcqGxf7pJjipHNwcH44D5KgiTUNo3mK5HiSxBwYcjkx", - "digest": "25ea39db2a003c817113f6f2ea936b3d", - "sector_size": 34359738368 - }, - "v24-stacked-proof-of-replication-PoseidonHasher-Sha256Hasher-fc32be6028c2398175466f36fa36810842ae8948fae15c84454af5b61ca99e15.vk": { - "cid": "QmWiaqy8hWshv2FsLDoZAtpJKZng5QN3x2X5C7xsPvSbFb", - "digest": "ab1239c802c480cf12f63d13fb2f620a", - "sector_size": 34359738368 - }, - "v24-stacked-proof-of-replication-PoseidonHasher-Sha256Hasher-fe437922fe766f61b112750506d6be0e4ad5daa85ff9ce96549d99253ba61cbe.params": { - "cid": "QmbPk3fKKLjkm6pD1CzwGyTnMwNSSZVxVSMWEceqSv6LDW", - "digest": "76bd3702312cfe0d69bb5e0891c52615", + "v25-proof-of-spacetime-fallback-MerkleTree-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.params": { + "cid": "QmQ2HrKCWbtWQNNQiBj3BFE8QrqMyed8P5Vw5vyyzuSMsF", + "digest": "2e15ec3fbff51abf66d241252fb8babd", "sector_size": 2048 }, - "v24-stacked-proof-of-replication-PoseidonHasher-Sha256Hasher-fe437922fe766f61b112750506d6be0e4ad5daa85ff9ce96549d99253ba61cbe.vk": { - "cid": "QmPZ9bGSVs5GHQRRAtC1qv9eQ7GPoH8FWukjxAXtXXcTxg", - "digest": "4edb21b7b6d5787b646f3e336e06303e", + "v25-proof-of-spacetime-fallback-MerkleTree-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.vk": { + "cid": "QmVZRduda8L1AYsT3u3uk2kqiMnwm5Sx9D8pZbTVHAZG5i", + "digest": "11c74ae0068ca7e4a5fd8cb1eaf5b511", "sector_size": 2048 + }, + "v25-proof-of-spacetime-fallback-MerkleTree-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.params": { + "cid": "QmPQkry7TXuE8nxHFAySp3X8qRXMYj2ArffoFxF2C1hYwf", + "digest": "526edf009176616771af4ba915eb5073", + "sector_size": 8388608 + }, + "v25-proof-of-spacetime-fallback-MerkleTree-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.vk": { + "cid": "QmT5bjrKBUpWEfaveWoPCu96EuHN2HuzbRzS9tSxttPCzw", + "digest": "c29e6b2927b8a28593f7c0c035b32cf5", + "sector_size": 8388608 + }, + "v25-proof-of-spacetime-fallback-MerkleTree-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.params": { + "cid": "QmXn1v64YTKLAH6yemhotr2dp1ZtjfspT328itKrMfnBW6", + "digest": "66459a78bd5e0225a19f140068620b7f", + "sector_size": 8388608 + }, + "v25-proof-of-spacetime-fallback-MerkleTree-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.vk": { + "cid": "QmTax8iBqjyP3EMUSnkSoxpjxh7dWrpE5RbfN2FA4oUgc4", + "digest": "e482988346217c846cecd80dfffef35f", + "sector_size": 8388608 + }, + "v25-proof-of-spacetime-fallback-MerkleTree-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.params": { + "cid": "QmdVN2xTAJtKLrUdXfP7JjGpMGnZRmbDT8FHdkzxruRoLQ", + "digest": "4b27a62d2179523a2176ec7a1f2837be", + "sector_size": 536870912 + }, + "v25-proof-of-spacetime-fallback-MerkleTree-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.vk": { + "cid": "QmakhHMzRBB85LLniDeRif71prLckqj7RHCc3NSgZsevQF", + "digest": "21271b25537a42e79247bd403e3ba37e", + "sector_size": 536870912 + }, + "v25-proof-of-spacetime-fallback-MerkleTree-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.params": { + "cid": "QmZwPa4C5iUKPwGL7pkzZVNpn1Z9QkELneLAX4JFdRc7m5", + "digest": "263b3ee83cfff7c287900346742e363a", + "sector_size": 34359738368 + }, + "v25-proof-of-spacetime-fallback-MerkleTree-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.vk": { + "cid": "QmUVAe53gJ4eC7wmDG2K5WWEtTvfQJaAPBstEtfznJrPhR", + "digest": "e6bc2cb5808b6a5cde7b51bfe0543313", + "sector_size": 34359738368 + }, + "v25-proof-of-spacetime-fallback-MerkleTree-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.params": { + "cid": "QmXiiXheXvZV8rVkdDCFPdUYJVCNa67THGa7VgQRkqNojy", + "digest": "f031cdaf063c00baa637eae5e4b338c8", + "sector_size": 34359738368 + }, + "v25-proof-of-spacetime-fallback-MerkleTree-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.vk": { + "cid": "QmXSzhELrQMBhJgYqpT8qTL9Piwti3eziCYt49EJ77368r", + "digest": "3f7f6e287a32083f131d4948e04e6e5b", + "sector_size": 34359738368 + }, + "v25-stacked-proof-of-replication-MerkleTree-Sha256Hasher-840969a6a9533823ecdc37310ef8c99d35991a2145300e10be0b883f1226a0f6.params": { + "cid": "QmbaFhfNtz6TuQdiC5oyL5rWSyUNQzcD68A6PT9mCTbvd7", + "digest": "c0cbe5bd951eb944557784a5a423fd18", + "sector_size": 2048 + }, + "v25-stacked-proof-of-replication-MerkleTree-Sha256Hasher-840969a6a9533823ecdc37310ef8c99d35991a2145300e10be0b883f1226a0f6.vk": { + "cid": "QmYfeAWeg7mKQJvoUCVatqa36WFbWYH2B9JMrJTorhJdUu", + "digest": "3ed77a85380eeacfea658fc4b1ad8b95", + "sector_size": 2048 + }, + "v25-stacked-proof-of-replication-MerkleTree-Sha256Hasher-e3c3fd959a83bf60522a401dc3bf0e2d48f0e2172bcdf4c0cb3c39fa4deacd87.params": { + "cid": "QmYuGgnRHx9x4DAVtkGYGir8SDvRE17pUMH17riEpWguuN", + "digest": "b59249298e9d1bb9d25891b828e03c94", + "sector_size": 536870912 + }, + "v25-stacked-proof-of-replication-MerkleTree-Sha256Hasher-e3c3fd959a83bf60522a401dc3bf0e2d48f0e2172bcdf4c0cb3c39fa4deacd87.vk": { + "cid": "QmUE4Qhd3vUPMQwh1TPJkVxZVisxoLKj93ZDU3zfW7koc4", + "digest": "b4e3e2ea3eba88d2eba3d59472ef4094", + "sector_size": 536870912 + }, + "v25-stacked-proof-of-replication-MerkleTree-Sha256Hasher-e4a49558d04647264048879511e843136e4488499e23bc442a341083a19ee79c.params": { + "cid": "QmePVNPMxzDuPF3mQaZ9Ld1hTGhResvGZgZ61NXy5cDQPK", + "digest": "0deb36662833379267609fc4e5f4176b", + "sector_size": 8388608 + }, + "v25-stacked-proof-of-replication-MerkleTree-Sha256Hasher-e4a49558d04647264048879511e843136e4488499e23bc442a341083a19ee79c.vk": { + "cid": "QmWLpw8pLwuCGiUQGQiwuXTjKcvPwsaS573gQ6YPc67jVm", + "digest": "1618f598e3a5c26acee17540aa5cd536", + "sector_size": 8388608 + }, + "v25-stacked-proof-of-replication-MerkleTree-Sha256Hasher-8a0719d8b9de3605f89b084c73210dfe2a557407c6343f8d32640094f2c9d074.params": { + "cid": "QmdtfjaJpqE8pRt1cmceh8c2Qj8GNwrzmmSmckZr6VDAWR", + "digest": "18796da53b41f23e341d19ce7954f647", + "sector_size": 34359738368 + }, + "v25-stacked-proof-of-replication-MerkleTree-Sha256Hasher-8a0719d8b9de3605f89b084c73210dfe2a557407c6343f8d32640094f2c9d074.vk": { + "cid": "QmYF8Y17nHYAvbRA7NCQMs31VsBiMcAbwrViZwyT4Gvb8C", + "digest": "39d80879d4d7353e2ed5771670d97dfc", + "sector_size": 34359738368 } } From cd464ef91690a16feeba25f7e3734b5564964ef6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 10 Apr 2020 23:01:35 +0200 Subject: [PATCH 040/199] Drop SectorSizeForRegisteredProof --- ffiwrapper/config.go | 31 +++++++------------------------ ffiwrapper/sealer_cgo.go | 2 +- mock/mock.go | 2 +- 3 files changed, 9 insertions(+), 26 deletions(-) diff --git a/ffiwrapper/config.go b/ffiwrapper/config.go index 1b01e9c1e..b0c3b02e8 100644 --- a/ffiwrapper/config.go +++ b/ffiwrapper/config.go @@ -1,7 +1,6 @@ package ffiwrapper import ( - "fmt" "golang.org/x/xerrors" "github.com/filecoin-project/specs-actors/actors/abi" @@ -19,36 +18,20 @@ func sizeFromConfig(cfg Config) (abi.SectorSize, error) { } - return SectorSizeForRegisteredProof(cfg.SealProofType) + return cfg.SealProofType.SectorSize() } -// TODO: remove this method after implementing it along side the registered proofs and importing it from there. -func SectorSizeForRegisteredProof(p abi.RegisteredProof) (abi.SectorSize, error) { - switch p { - case abi.RegisteredProof_StackedDRG32GiBSeal, abi.RegisteredProof_StackedDRG32GiBPoSt: - return 32 << 30, nil - case abi.RegisteredProof_StackedDRG2KiBSeal, abi.RegisteredProof_StackedDRG2KiBPoSt: - return 2 << 10, nil - case abi.RegisteredProof_StackedDRG8MiBSeal, abi.RegisteredProof_StackedDRG8MiBPoSt: - return 8 << 20, nil - case abi.RegisteredProof_StackedDRG512MiBSeal, abi.RegisteredProof_StackedDRG512MiBPoSt: - return 512 << 20, nil - default: - return 0, fmt.Errorf("unsupported registered proof %d", p) - } -} - -func ProofTypeFromSectorSize(ssize abi.SectorSize) (abi.RegisteredProof, abi.RegisteredProof, error) { +func SealProofTypeFromSectorSize(ssize abi.SectorSize) (abi.RegisteredProof, error) { switch ssize { case 2 << 10: - return abi.RegisteredProof_StackedDRG2KiBPoSt, abi.RegisteredProof_StackedDRG2KiBSeal, nil + return abi.RegisteredProof_StackedDRG2KiBSeal, nil case 8 << 20: - return abi.RegisteredProof_StackedDRG8MiBPoSt, abi.RegisteredProof_StackedDRG8MiBSeal, nil + return abi.RegisteredProof_StackedDRG8MiBSeal, nil case 512 << 20: - return abi.RegisteredProof_StackedDRG512MiBPoSt, abi.RegisteredProof_StackedDRG512MiBSeal, nil + return abi.RegisteredProof_StackedDRG512MiBSeal, nil case 32 << 30: - return abi.RegisteredProof_StackedDRG32GiBPoSt, abi.RegisteredProof_StackedDRG32GiBSeal, nil + return abi.RegisteredProof_StackedDRG32GiBSeal, nil default: - return 0, 0, xerrors.Errorf("unsupported sector size for miner: %v", ssize) + return 0, xerrors.Errorf("unsupported sector size for miner: %v", ssize) } } diff --git a/ffiwrapper/sealer_cgo.go b/ffiwrapper/sealer_cgo.go index 63884163c..6764c3d96 100644 --- a/ffiwrapper/sealer_cgo.go +++ b/ffiwrapper/sealer_cgo.go @@ -313,7 +313,7 @@ func GenerateUnsealedCID(proofType abi.RegisteredProof, pieces []abi.PieceInfo) sum += p.Size } - ssize, err := SectorSizeForRegisteredProof(proofType) + ssize, err := proofType.SectorSize() if err != nil { return cid.Undef, err } diff --git a/mock/mock.go b/mock/mock.go index bb5b07dec..d55b40f5a 100644 --- a/mock/mock.go +++ b/mock/mock.go @@ -35,7 +35,7 @@ type SectorMgr struct { type mockVerif struct{} func NewMockSectorMgr(threads int, ssize abi.SectorSize) *SectorMgr { - rt, _, err := ffiwrapper.ProofTypeFromSectorSize(ssize) + rt, err := ffiwrapper.SealProofTypeFromSectorSize(ssize) if err != nil { panic(err) } From c0d619cd86169a0caff142d14986fc237264ddf6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 10 Apr 2020 23:01:42 +0200 Subject: [PATCH 041/199] gofmt --- ffiwrapper/config.go | 1 - ffiwrapper/sealer_test.go | 2 +- ffiwrapper/verifier_cgo.go | 4 ++-- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/ffiwrapper/config.go b/ffiwrapper/config.go index b0c3b02e8..be8e2833b 100644 --- a/ffiwrapper/config.go +++ b/ffiwrapper/config.go @@ -17,7 +17,6 @@ func sizeFromConfig(cfg Config) (abi.SectorSize, error) { return abi.SectorSize(0), xerrors.New("must specify a seal proof type from abi.RegisteredProof") } - return cfg.SealProofType.SectorSize() } diff --git a/ffiwrapper/sealer_test.go b/ffiwrapper/sealer_test.go index c638f7f9c..f16e5e2d9 100644 --- a/ffiwrapper/sealer_test.go +++ b/ffiwrapper/sealer_test.go @@ -146,7 +146,7 @@ func post(t *testing.T, sealer *Sealer, seals ...seal) time.Time { if !ok { t.Fatal("bad post") } -*/ + */ return genCandidates } diff --git a/ffiwrapper/verifier_cgo.go b/ffiwrapper/verifier_cgo.go index b94c63219..662ff8d67 100644 --- a/ffiwrapper/verifier_cgo.go +++ b/ffiwrapper/verifier_cgo.go @@ -20,7 +20,7 @@ func (sb *Sealer) GenerateWinningPoStSectorChallenge(ctx context.Context, proofT } func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, error) { - randomness[31] = 0 // TODO: Not correct, fixme + randomness[31] = 0 // TODO: Not correct, fixme privsectors, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredProof.RegisteredWinningPoStProof) // TODO: FAULTS? if err != nil { return nil, err @@ -30,7 +30,7 @@ func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, } func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, error) { - randomness[31] = 0 // TODO: Not correct, fixme + randomness[31] = 0 // TODO: Not correct, fixme privsectors, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredProof.RegisteredWindowPoStProof) // TODO: FAULTS? if err != nil { return nil, err From 9d548f743298f9329aeedaae94cc3201e43297a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Sat, 11 Apr 2020 01:28:57 +0200 Subject: [PATCH 042/199] Use FFI master --- extern/filecoin-ffi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/extern/filecoin-ffi b/extern/filecoin-ffi index 0f03c5a6b..870251cd0 160000 --- a/extern/filecoin-ffi +++ b/extern/filecoin-ffi @@ -1 +1 @@ -Subproject commit 0f03c5a6b8c57f7c008e0d9b18dbd37b576ca836 +Subproject commit 870251cd04c54e7a3a08b714f3e71a9edec28445 From 6850784b1efe56e23bdd746368fce92609b734de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 17 Apr 2020 16:00:21 +0200 Subject: [PATCH 043/199] Move GenerateWinningPoStSectorChallenge to Verifier --- ffiwrapper/types.go | 2 ++ ffiwrapper/verifier_cgo.go | 10 +++++----- go.mod | 2 +- go.sum | 4 ++-- mock/mock.go | 8 ++++---- 5 files changed, 14 insertions(+), 12 deletions(-) diff --git a/ffiwrapper/types.go b/ffiwrapper/types.go index 876226429..98612175d 100644 --- a/ffiwrapper/types.go +++ b/ffiwrapper/types.go @@ -37,6 +37,8 @@ type Verifier interface { VerifySeal(abi.SealVerifyInfo) (bool, error) VerifyWinningPoSt(ctx context.Context, info abi.WinningPoStVerifyInfo) (bool, error) VerifyWindowPoSt(ctx context.Context, info abi.WindowPoStVerifyInfo) (bool, error) + + GenerateWinningPoStSectorChallenge(context.Context, abi.RegisteredProof, abi.ActorID, abi.PoStRandomness, uint64) ([]uint64, error) } var ErrSectorNotFound = errors.New("sector not found") diff --git a/ffiwrapper/verifier_cgo.go b/ffiwrapper/verifier_cgo.go index 662ff8d67..bf9a60c50 100644 --- a/ffiwrapper/verifier_cgo.go +++ b/ffiwrapper/verifier_cgo.go @@ -14,11 +14,6 @@ import ( "github.com/filecoin-project/sector-storage/stores" ) -func (sb *Sealer) GenerateWinningPoStSectorChallenge(ctx context.Context, proofType abi.RegisteredProof, minerID abi.ActorID, randomness abi.PoStRandomness, eligibleSectorCount uint64) ([]uint64, error) { - randomness[31] = 0 // TODO: Not correct, fixme - return ffi.GenerateWinningPoStSectorChallenge(proofType, minerID, randomness, eligibleSectorCount) -} - func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, error) { randomness[31] = 0 // TODO: Not correct, fixme privsectors, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredProof.RegisteredWinningPoStProof) // TODO: FAULTS? @@ -98,3 +93,8 @@ func (proofVerifier) VerifyWindowPoSt(ctx context.Context, info abi.WindowPoStVe return ffi.VerifyWindowPoSt(info) } + +func (proofVerifier) GenerateWinningPoStSectorChallenge(ctx context.Context, proofType abi.RegisteredProof, minerID abi.ActorID, randomness abi.PoStRandomness, eligibleSectorCount uint64) ([]uint64, error) { + randomness[31] = 0 // TODO: Not correct, fixme + return ffi.GenerateWinningPoStSectorChallenge(proofType, minerID, randomness, eligibleSectorCount) +} diff --git a/go.mod b/go.mod index fb39fc9ae..c18f6874f 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/filecoin-project/go-paramfetch v0.0.1 github.com/filecoin-project/lotus v0.2.10 github.com/filecoin-project/specs-actors v0.0.0-20200409043918-e569f4a2f504 - github.com/filecoin-project/specs-storage v0.0.0-20200410185809-9fbaaa08f275 + github.com/filecoin-project/specs-storage v0.0.0-20200417134612-61b2d91a6102 github.com/gorilla/mux v1.7.4 github.com/hashicorp/go-multierror v1.0.0 github.com/ipfs/go-cid v0.0.5 diff --git a/go.sum b/go.sum index 81e4bf5ee..41c719485 100644 --- a/go.sum +++ b/go.sum @@ -107,8 +107,8 @@ github.com/filecoin-project/lotus v0.2.10 h1:ijrj/nYdKu5GiMo9r1+Zcp2A4jKHSOMZ2WN github.com/filecoin-project/lotus v0.2.10/go.mod h1:om5PQA9ZT0lf16qI7Fz/ZGLn4LDCMqPC8ntZA9uncRE= github.com/filecoin-project/specs-actors v0.0.0-20200409043918-e569f4a2f504 h1:mwuAaqxKThl70+7FkGdFKVLdwaQZQ8XmscKdhSBBtnc= github.com/filecoin-project/specs-actors v0.0.0-20200409043918-e569f4a2f504/go.mod h1:mdJraXq5vMy0+/FqVQIrnNlpQ/Em6zeu06G/ltQ0/lA= -github.com/filecoin-project/specs-storage v0.0.0-20200410185809-9fbaaa08f275 h1:6OTcpsTQBQM0f/A67oEi4E4YtYd6fzkMqbU8cPIWMMs= -github.com/filecoin-project/specs-storage v0.0.0-20200410185809-9fbaaa08f275/go.mod h1:xJ1/xl9+8zZeSSSFmDC3Wr6uusCTxyYPI0VeNVSFmPE= +github.com/filecoin-project/specs-storage v0.0.0-20200417134612-61b2d91a6102 h1:T3f/zkuvgtgqcXrb0NO3BicuveGOxxUAMPa/Yif2kuE= +github.com/filecoin-project/specs-storage v0.0.0-20200417134612-61b2d91a6102/go.mod h1:xJ1/xl9+8zZeSSSFmDC3Wr6uusCTxyYPI0VeNVSFmPE= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1/go.mod h1:0eHX/BVySxPc6SE2mZRoppGq7qcEagxdmQnA3dzork8= github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= diff --git a/mock/mock.go b/mock/mock.go index d55b40f5a..5ed879ce0 100644 --- a/mock/mock.go +++ b/mock/mock.go @@ -258,10 +258,6 @@ func AddOpFinish(ctx context.Context) (context.Context, func()) { } } -func (mgr *SectorMgr) GenerateWinningPoStSectorChallenge(ctx context.Context, proofType abi.RegisteredProof, minerID abi.ActorID, randomness abi.PoStRandomness, eligibleSectorCount uint64) ([]uint64, error) { - panic("implement me") -} - func (mgr *SectorMgr) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, error) { panic("implement me") } @@ -330,6 +326,10 @@ func (m mockVerif) GenerateDataCommitment(pt abi.RegisteredProof, pieces []abi.P return ffiwrapper.GenerateUnsealedCID(pt, pieces) } +func (m mockVerif) GenerateWinningPoStSectorChallenge(ctx context.Context, proofType abi.RegisteredProof, minerID abi.ActorID, randomness abi.PoStRandomness, eligibleSectorCount uint64) ([]uint64, error) { + panic("implement me") +} + var MockVerifier = mockVerif{} var _ ffiwrapper.Verifier = MockVerifier From 16d40dfd15a57875b3e98f747d0c6f47e405e44d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Sat, 18 Apr 2020 00:52:42 +0200 Subject: [PATCH 044/199] mock: fix winning post --- mock/mock.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mock/mock.go b/mock/mock.go index 5ed879ce0..854d89870 100644 --- a/mock/mock.go +++ b/mock/mock.go @@ -183,7 +183,7 @@ func (mgr *SectorMgr) SealPreCommit2(ctx context.Context, sid abi.SectorID, phas commr[32-(i+1)] = db[i] } - commR := commcid.DataCommitmentV1ToCID(commr) + commR := commcid.ReplicaCommitmentV1ToCID(commr) return storage.SectorCids{ Unsealed: d, @@ -327,7 +327,7 @@ func (m mockVerif) GenerateDataCommitment(pt abi.RegisteredProof, pieces []abi.P } func (m mockVerif) GenerateWinningPoStSectorChallenge(ctx context.Context, proofType abi.RegisteredProof, minerID abi.ActorID, randomness abi.PoStRandomness, eligibleSectorCount uint64) ([]uint64, error) { - panic("implement me") + return []uint64{0}, nil } var MockVerifier = mockVerif{} From a87547367090996cb87f4b9dc8d7af7bf9867ee6 Mon Sep 17 00:00:00 2001 From: anorth <445306+anorth@users.noreply.github.com> Date: Wed, 22 Apr 2020 15:49:28 +1000 Subject: [PATCH 045/199] Update to specs-actors v1.0.0 --- go.mod | 2 +- go.sum | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index c18f6874f..c903856f1 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5 github.com/filecoin-project/go-paramfetch v0.0.1 github.com/filecoin-project/lotus v0.2.10 - github.com/filecoin-project/specs-actors v0.0.0-20200409043918-e569f4a2f504 + github.com/filecoin-project/specs-actors v1.0.0 github.com/filecoin-project/specs-storage v0.0.0-20200417134612-61b2d91a6102 github.com/gorilla/mux v1.7.4 github.com/hashicorp/go-multierror v1.0.0 diff --git a/go.sum b/go.sum index 41c719485..89f9c1344 100644 --- a/go.sum +++ b/go.sum @@ -90,6 +90,8 @@ github.com/filecoin-project/go-amt-ipld/v2 v2.0.0/go.mod h1:PAZ5tvSfMfWE327osqFX github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200131012142-05d80eeccc5e/go.mod h1:boRtQhzmxNocrMxOXo1NYn4oUc1NGvR8tEa79wApNXg= github.com/filecoin-project/go-bitfield v0.0.0-20200309034705-8c7ac40bd550 h1:aockulLU8Qjkdj4FQz53WQpNosAIYk8DxRediRLkE5c= github.com/filecoin-project/go-bitfield v0.0.0-20200309034705-8c7ac40bd550/go.mod h1:iodsLxOFZnqKtjj2zkgqzoGNrv6vUqj69AT/J8DKXEw= +github.com/filecoin-project/go-bitfield v0.0.0-20200416002808-b3ee67ec9060 h1:/3qjGMn6ukXgZJHsIbuwGL7ipla8DOV3uHZDBJkBYfU= +github.com/filecoin-project/go-bitfield v0.0.0-20200416002808-b3ee67ec9060/go.mod h1:iodsLxOFZnqKtjj2zkgqzoGNrv6vUqj69AT/J8DKXEw= github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= @@ -107,6 +109,8 @@ github.com/filecoin-project/lotus v0.2.10 h1:ijrj/nYdKu5GiMo9r1+Zcp2A4jKHSOMZ2WN github.com/filecoin-project/lotus v0.2.10/go.mod h1:om5PQA9ZT0lf16qI7Fz/ZGLn4LDCMqPC8ntZA9uncRE= github.com/filecoin-project/specs-actors v0.0.0-20200409043918-e569f4a2f504 h1:mwuAaqxKThl70+7FkGdFKVLdwaQZQ8XmscKdhSBBtnc= github.com/filecoin-project/specs-actors v0.0.0-20200409043918-e569f4a2f504/go.mod h1:mdJraXq5vMy0+/FqVQIrnNlpQ/Em6zeu06G/ltQ0/lA= +github.com/filecoin-project/specs-actors v1.0.0 h1:H0G6n2R8MnfvYuI0irmY7Bj5FI/JHUxnIldg/YX472I= +github.com/filecoin-project/specs-actors v1.0.0/go.mod h1:nQYnFbQ7Y0bHZyq6HDEuVlCPR+U3z5Q3wMOQ+2aiV+Y= github.com/filecoin-project/specs-storage v0.0.0-20200417134612-61b2d91a6102 h1:T3f/zkuvgtgqcXrb0NO3BicuveGOxxUAMPa/Yif2kuE= github.com/filecoin-project/specs-storage v0.0.0-20200417134612-61b2d91a6102/go.mod h1:xJ1/xl9+8zZeSSSFmDC3Wr6uusCTxyYPI0VeNVSFmPE= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -618,6 +622,8 @@ github.com/whyrusleeping/cbor-gen v0.0.0-20200121162646-b63bacf5eaf8/go.mod h1:x github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= github.com/whyrusleeping/cbor-gen v0.0.0-20200206220010-03c9665e2a66 h1:LolR9FiEfQNn5U031bAhn/46po2JgWHKadYbcWFIJ+0= github.com/whyrusleeping/cbor-gen v0.0.0-20200206220010-03c9665e2a66/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= +github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e h1:JY8o/ebUUrCYetWmjRCNghxC59cOEaili83rxPRQCLw= +github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= From c1888035acbc2aacca379d2e58034b981fa59521 Mon Sep 17 00:00:00 2001 From: damonlau27 Date: Wed, 22 Apr 2020 16:18:02 +0800 Subject: [PATCH 046/199] prefer remote worker --- sched.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sched.go b/sched.go index 9a60a415f..fb689663a 100644 --- a/sched.go +++ b/sched.go @@ -117,7 +117,8 @@ func (m *Manager) maybeSchedRequest(req *workerRequest) (*workerResponse, error) tried := 0 - for _, id := range req.accept { + for i := len(req.accept) - 1; i >= 0; i-- { + id := req.accept[i] w, ok := m.workers[id] if !ok { log.Warnf("requested worker %d is not in scheduler", id) From 9eb049a833b99e20a610f14d290abed9546432e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 24 Apr 2020 00:16:21 +0200 Subject: [PATCH 047/199] move WorkerInfo to not require importing ffi --- localworker.go | 11 ++++++----- manager.go | 18 ++---------------- sched.go | 3 ++- stats.go | 15 ++++----------- storiface/worker.go | 25 +++++++++++++++++++++++++ 5 files changed, 39 insertions(+), 33 deletions(-) create mode 100644 storiface/worker.go diff --git a/localworker.go b/localworker.go index 0b8a252e3..7de302428 100644 --- a/localworker.go +++ b/localworker.go @@ -15,6 +15,7 @@ import ( "github.com/filecoin-project/sector-storage/ffiwrapper" "github.com/filecoin-project/sector-storage/sealtasks" "github.com/filecoin-project/sector-storage/stores" + "github.com/filecoin-project/sector-storage/storiface" ) var pathTypes = []stores.SectorFileType{stores.FTUnsealed, stores.FTSealed, stores.FTCache} @@ -167,7 +168,7 @@ func (l *LocalWorker) Paths(ctx context.Context) ([]stores.StoragePath, error) { return l.localStore.Local(ctx) } -func (l *LocalWorker) Info(context.Context) (WorkerInfo, error) { +func (l *LocalWorker) Info(context.Context) (storiface.WorkerInfo, error) { hostname, err := os.Hostname() // TODO: allow overriding from config if err != nil { panic(err) @@ -180,17 +181,17 @@ func (l *LocalWorker) Info(context.Context) (WorkerInfo, error) { h, err := sysinfo.Host() if err != nil { - return WorkerInfo{}, xerrors.Errorf("getting host info: %w", err) + return storiface.WorkerInfo{}, xerrors.Errorf("getting host info: %w", err) } mem, err := h.Memory() if err != nil { - return WorkerInfo{}, xerrors.Errorf("getting memory info: %w", err) + return storiface.WorkerInfo{}, xerrors.Errorf("getting memory info: %w", err) } - return WorkerInfo{ + return storiface.WorkerInfo{ Hostname: hostname, - Resources: WorkerResources{ + Resources: storiface.WorkerResources{ MemPhysical: mem.Total, MemSwap: mem.VirtualTotal, MemReserved: mem.VirtualUsed + mem.Total - mem.Available, // TODO: sub this process diff --git a/manager.go b/manager.go index 45fcfb38c..f0b6308c9 100644 --- a/manager.go +++ b/manager.go @@ -19,6 +19,7 @@ import ( "github.com/filecoin-project/sector-storage/ffiwrapper" "github.com/filecoin-project/sector-storage/sealtasks" "github.com/filecoin-project/sector-storage/stores" + "github.com/filecoin-project/sector-storage/storiface" ) var log = logging.Logger("advmgr") @@ -35,26 +36,11 @@ type Worker interface { // Returns paths accessible to the worker Paths(context.Context) ([]stores.StoragePath, error) - Info(context.Context) (WorkerInfo, error) + Info(context.Context) (storiface.WorkerInfo, error) Close() error } -type WorkerInfo struct { - Hostname string - - Resources WorkerResources -} - -type WorkerResources struct { - MemPhysical uint64 - MemSwap uint64 - - MemReserved uint64 // Used by system / other processes - - GPUs []string -} - type SectorManager interface { SectorSize() abi.SectorSize diff --git a/sched.go b/sched.go index fb689663a..233f9faa2 100644 --- a/sched.go +++ b/sched.go @@ -6,6 +6,7 @@ import ( "github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/sector-storage/sealtasks" + "github.com/filecoin-project/sector-storage/storiface" ) const mib = 1 << 20 @@ -39,7 +40,7 @@ func (r *workerRequest) respond(resp workerResponse) { type workerHandle struct { w Worker - info WorkerInfo + info storiface.WorkerInfo memUsedMin uint64 memUsedMax uint64 diff --git a/stats.go b/stats.go index 70a5f341a..492919460 100644 --- a/stats.go +++ b/stats.go @@ -1,22 +1,15 @@ package sectorstorage -type WorkerStats struct { - Info WorkerInfo +import "github.com/filecoin-project/sector-storage/storiface" - MemUsedMin uint64 - MemUsedMax uint64 - GpuUsed bool - CpuUse int -} - -func (m *Manager) WorkerStats() map[uint64]WorkerStats { +func (m *Manager) WorkerStats() map[uint64]storiface.WorkerStats { m.workersLk.Lock() defer m.workersLk.Unlock() - out := map[uint64]WorkerStats{} + out := map[uint64]storiface.WorkerStats{} for id, handle := range m.workers { - out[uint64(id)] = WorkerStats{ + out[uint64(id)] = storiface.WorkerStats{ Info: handle.info, MemUsedMin: handle.memUsedMin, MemUsedMax: handle.memUsedMax, diff --git a/storiface/worker.go b/storiface/worker.go new file mode 100644 index 000000000..e739dbc44 --- /dev/null +++ b/storiface/worker.go @@ -0,0 +1,25 @@ +package storiface + +type WorkerInfo struct { + Hostname string + + Resources WorkerResources +} + +type WorkerResources struct { + MemPhysical uint64 + MemSwap uint64 + + MemReserved uint64 // Used by system / other processes + + GPUs []string +} + +type WorkerStats struct { + Info WorkerInfo + + MemUsedMin uint64 + MemUsedMax uint64 + GpuUsed bool + CpuUse int +} From 14256e22d36d174d922a30646ac97899ac97b39d Mon Sep 17 00:00:00 2001 From: anorth <445306+anorth@users.noreply.github.com> Date: Sat, 25 Apr 2020 12:35:47 +1000 Subject: [PATCH 048/199] Upgrade to specs-actors v0.2.0 --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index c903856f1..6d23d9388 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5 github.com/filecoin-project/go-paramfetch v0.0.1 github.com/filecoin-project/lotus v0.2.10 - github.com/filecoin-project/specs-actors v1.0.0 + github.com/filecoin-project/specs-actors v0.2.0 github.com/filecoin-project/specs-storage v0.0.0-20200417134612-61b2d91a6102 github.com/gorilla/mux v1.7.4 github.com/hashicorp/go-multierror v1.0.0 diff --git a/go.sum b/go.sum index 89f9c1344..f69462732 100644 --- a/go.sum +++ b/go.sum @@ -109,8 +109,8 @@ github.com/filecoin-project/lotus v0.2.10 h1:ijrj/nYdKu5GiMo9r1+Zcp2A4jKHSOMZ2WN github.com/filecoin-project/lotus v0.2.10/go.mod h1:om5PQA9ZT0lf16qI7Fz/ZGLn4LDCMqPC8ntZA9uncRE= github.com/filecoin-project/specs-actors v0.0.0-20200409043918-e569f4a2f504 h1:mwuAaqxKThl70+7FkGdFKVLdwaQZQ8XmscKdhSBBtnc= github.com/filecoin-project/specs-actors v0.0.0-20200409043918-e569f4a2f504/go.mod h1:mdJraXq5vMy0+/FqVQIrnNlpQ/Em6zeu06G/ltQ0/lA= -github.com/filecoin-project/specs-actors v1.0.0 h1:H0G6n2R8MnfvYuI0irmY7Bj5FI/JHUxnIldg/YX472I= -github.com/filecoin-project/specs-actors v1.0.0/go.mod h1:nQYnFbQ7Y0bHZyq6HDEuVlCPR+U3z5Q3wMOQ+2aiV+Y= +github.com/filecoin-project/specs-actors v0.2.0 h1:bKxloHLegeYJttIJbQjl4/tdsKOUtYtpiZsEfB4eOnI= +github.com/filecoin-project/specs-actors v0.2.0/go.mod h1:nQYnFbQ7Y0bHZyq6HDEuVlCPR+U3z5Q3wMOQ+2aiV+Y= github.com/filecoin-project/specs-storage v0.0.0-20200417134612-61b2d91a6102 h1:T3f/zkuvgtgqcXrb0NO3BicuveGOxxUAMPa/Yif2kuE= github.com/filecoin-project/specs-storage v0.0.0-20200417134612-61b2d91a6102/go.mod h1:xJ1/xl9+8zZeSSSFmDC3Wr6uusCTxyYPI0VeNVSFmPE= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= From 2ef195583f5815ce75a721bd6879b0fcab69a518 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 27 Apr 2020 13:43:59 +0200 Subject: [PATCH 049/199] Add Makefile --- Makefile | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 Makefile diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..7b8d49683 --- /dev/null +++ b/Makefile @@ -0,0 +1,29 @@ +all: build +.PHONY: all + +SUBMODULES= + +FFI_PATH:=./extern/filecoin-ffi/ +FFI_DEPS:=.install-filcrypto +FFI_DEPS:=$(addprefix $(FFI_PATH),$(FFI_DEPS)) + +$(FFI_DEPS): .filecoin-build ; + +.filecoin-build: $(FFI_PATH) + $(MAKE) -C $(FFI_PATH) $(FFI_DEPS:$(FFI_PATH)%=%) + @touch $@ + +.update-modules: + git submodule update --init --recursive + @touch $@ + +test: .update-modules .filecoin-build + go test -v ./... +.PHONY: test +SUBMODULES+=test + +build: $(SUBMODULES) + +clean: + rm -f .filecoin-build + rm -f .update-modules From f59d6b971bb916e413d39803075d8f705f45d5b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 27 Apr 2020 14:38:24 +0200 Subject: [PATCH 050/199] sched: Nicer handling of multicore tasks --- localworker.go | 2 ++ sched.go | 23 +++++++---------------- storiface/worker.go | 1 + 3 files changed, 10 insertions(+), 16 deletions(-) diff --git a/localworker.go b/localworker.go index 7de302428..0952b5882 100644 --- a/localworker.go +++ b/localworker.go @@ -4,6 +4,7 @@ import ( "context" "io" "os" + "runtime" "github.com/elastic/go-sysinfo" "golang.org/x/xerrors" @@ -195,6 +196,7 @@ func (l *LocalWorker) Info(context.Context) (storiface.WorkerInfo, error) { MemPhysical: mem.Total, MemSwap: mem.VirtualTotal, MemReserved: mem.VirtualUsed + mem.Total - mem.Available, // TODO: sub this process + CPUs: uint64(runtime.NumCPU()), GPUs: gpus, }, }, nil diff --git a/sched.go b/sched.go index 233f9faa2..0d07bbb96 100644 --- a/sched.go +++ b/sched.go @@ -45,7 +45,7 @@ type workerHandle struct { memUsedMin uint64 memUsedMax uint64 gpuUsed bool - cpuUse int // -1 - multicore thing; 0 - free; 1+ - singlecore things + cpuUse uint64 // 0 - free; 1+ - singlecore things } func (m *Manager) runSched() { @@ -150,13 +150,9 @@ func (m *Manager) makeResponse(wid WorkerID, w *workerHandle, req *workerRequest w.gpuUsed = needRes.CanGPU if needRes.MultiThread { - w.cpuUse = -1 + w.cpuUse += w.info.Resources.CPUs } else { - if w.cpuUse != -1 { - w.cpuUse++ - } else { - log.Warnf("sched: makeResponse for worker %d: worker cpu is in multicore use, but a single core task was scheduled", wid) - } + w.cpuUse++ } w.memUsedMin += needRes.MinMemory @@ -173,8 +169,8 @@ func (m *Manager) makeResponse(wid WorkerID, w *workerHandle, req *workerRequest } if needRes.MultiThread { - w.cpuUse = 0 - } else if w.cpuUse != -1 { + w.cpuUse -= w.info.Resources.CPUs + } else { w.cpuUse-- } @@ -216,13 +212,8 @@ func (m *Manager) canHandleRequest(wid WorkerID, w *workerHandle, req *workerReq } if needRes.MultiThread { - if w.cpuUse != 0 { - log.Debugf("sched: not scheduling on worker %d; multicore process needs free CPU", wid) - return false, nil - } - } else { - if w.cpuUse == -1 { - log.Debugf("sched: not scheduling on worker %d; CPU in use by a multicore process", wid) + if w.cpuUse > 0 { + log.Debugf("sched: not scheduling on worker %d; multicore process needs %d threads, %d in use, target %d", wid, res.CPUs, w.cpuUse, res.CPUs) return false, nil } } diff --git a/storiface/worker.go b/storiface/worker.go index e739dbc44..27442206d 100644 --- a/storiface/worker.go +++ b/storiface/worker.go @@ -12,6 +12,7 @@ type WorkerResources struct { MemReserved uint64 // Used by system / other processes + CPUs uint64 // Logical cores GPUs []string } From 5bea676ce320a2382243abde0e067b89102b5ff2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 27 Apr 2020 14:55:37 +0200 Subject: [PATCH 051/199] worker: Fetch task type --- localworker.go | 9 +++++++++ manager.go | 3 ++- sealtasks/task.go | 2 ++ storiface/worker.go | 2 +- 4 files changed, 14 insertions(+), 2 deletions(-) diff --git a/localworker.go b/localworker.go index 0952b5882..f3f12e8c1 100644 --- a/localworker.go +++ b/localworker.go @@ -104,6 +104,15 @@ func (l *LocalWorker) AddPiece(ctx context.Context, sector abi.SectorID, epcs [] return sb.AddPiece(ctx, sector, epcs, sz, r) } +func (l *LocalWorker) Fetch(ctx context.Context, sector abi.SectorID, fileType stores.SectorFileType, sealing bool) error { + _, done, err := (&localWorkerPathProvider{w: l}).AcquireSector(ctx, sector, fileType, stores.FTNone, sealing) + if err != nil { + return err + } + done() + return nil +} + func (l *LocalWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage2.PreCommit1Out, err error) { sb, err := l.sb() if err != nil { diff --git a/manager.go b/manager.go index f0b6308c9..e5150b3de 100644 --- a/manager.go +++ b/manager.go @@ -30,6 +30,7 @@ type URLs []string type Worker interface { ffiwrapper.StorageSealer + Fetch(context.Context, abi.SectorID, stores.SectorFileType, bool) error TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) @@ -122,7 +123,7 @@ func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg go m.runSched() localTasks := []sealtasks.TaskType{ - sealtasks.TTAddPiece, sealtasks.TTCommit1, sealtasks.TTFinalize, + sealtasks.TTAddPiece, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFetch, } if sc.AllowPreCommit1 { localTasks = append(localTasks, sealtasks.TTPreCommit1) diff --git a/sealtasks/task.go b/sealtasks/task.go index 8eefa14fa..8fbe7a7b4 100644 --- a/sealtasks/task.go +++ b/sealtasks/task.go @@ -10,4 +10,6 @@ const ( TTCommit2 TaskType = "seal/v0/commit/2" TTFinalize TaskType = "seal/v0/finalize" + + TTFetch TaskType = "seal/v0/fetch" ) diff --git a/storiface/worker.go b/storiface/worker.go index 27442206d..0f49e8971 100644 --- a/storiface/worker.go +++ b/storiface/worker.go @@ -22,5 +22,5 @@ type WorkerStats struct { MemUsedMin uint64 MemUsedMax uint64 GpuUsed bool - CpuUse int + CpuUse uint64 } From 8c4dc60e75fca85210de42c95523656915453ccf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 27 Apr 2020 20:37:31 +0200 Subject: [PATCH 052/199] move scheduling logic to a separate struct --- manager.go | 221 ++++++++++++++++------------------------ resources.go | 94 ++++++++++++----- sched.go | 280 +++++++++++++++++++++++++++++++++++---------------- stats.go | 6 +- 4 files changed, 354 insertions(+), 247 deletions(-) diff --git a/manager.go b/manager.go index e5150b3de..f125cdafd 100644 --- a/manager.go +++ b/manager.go @@ -1,12 +1,10 @@ package sectorstorage import ( - "container/list" "context" "errors" "io" "net/http" - "sync" "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log/v2" @@ -62,18 +60,9 @@ type Manager struct { remoteHnd *stores.FetchHandler index stores.SectorIndex + sched *scheduler + storage.Prover - - workersLk sync.Mutex - nextWorker WorkerID - workers map[WorkerID]*workerHandle - - newWorkers chan *workerHandle - schedule chan *workerRequest - workerFree chan WorkerID - closing chan struct{} - - schedQueue *list.List // List[*workerRequest] } type SealerConfig struct { @@ -107,20 +96,12 @@ func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg remoteHnd: &stores.FetchHandler{Local: lstor}, index: si, - nextWorker: 0, - workers: map[WorkerID]*workerHandle{}, - - newWorkers: make(chan *workerHandle), - schedule: make(chan *workerRequest), - workerFree: make(chan WorkerID), - closing: make(chan struct{}), - - schedQueue: list.New(), + sched: newScheduler(cfg.SealProofType), Prover: prover, } - go m.runSched() + go m.sched.runSched() localTasks := []sealtasks.TaskType{ sealtasks.TTAddPiece, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFetch, @@ -170,7 +151,7 @@ func (m *Manager) AddWorker(ctx context.Context, w Worker) error { return xerrors.Errorf("getting worker info: %w", err) } - m.newWorkers <- &workerHandle{ + m.sched.newWorkers <- &workerHandle{ w: w, info: info, } @@ -190,7 +171,7 @@ func (m *Manager) ReadPieceFromSealedSector(context.Context, abi.SectorID, ffiwr panic("implement me") } -func (m *Manager) getWorkersByPaths(task sealtasks.TaskType, inPaths []stores.StorageInfo) ([]WorkerID, map[WorkerID]stores.StorageInfo) { +/*func (m *Manager) getWorkersByPaths(task sealtasks.TaskType, inPaths []stores.StorageInfo) ([]WorkerID, map[WorkerID]stores.StorageInfo) { m.workersLk.Lock() defer m.workersLk.Unlock() @@ -249,8 +230,8 @@ func (m *Manager) getWorker(ctx context.Context, taskType sealtasks.TaskType, ac taskType: taskType, accept: accept, - cancel: ctx.Done(), - ret: ret, + ctx: ctx.Done(), + ret: ret, }: case <-m.closing: return nil, nil, xerrors.New("closing") @@ -266,6 +247,16 @@ func (m *Manager) getWorker(ctx context.Context, taskType sealtasks.TaskType, ac case <-ctx.Done(): return nil, nil, ctx.Err() } +}*/ + +func schedNop(context.Context, Worker) error { + return nil +} + +func schedFetch(sector abi.SectorID, ft stores.SectorFileType, sealing bool) func(context.Context, Worker) error { + return func(ctx context.Context, worker Worker) error { + return worker.Fetch(ctx, sector, ft, sealing) + } } func (m *Manager) NewSector(ctx context.Context, sector abi.SectorID) error { @@ -274,151 +265,114 @@ func (m *Manager) NewSector(ctx context.Context, sector abi.SectorID) error { } func (m *Manager) AddPiece(ctx context.Context, sector abi.SectorID, existingPieces []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) { - // TODO: consider multiple paths vs workers when initially allocating - - var best []stores.StorageInfo + var selector WorkerSelector var err error if len(existingPieces) == 0 { // new - best, err = m.index.StorageBestAlloc(ctx, stores.FTUnsealed, true) + selector, err = newAllocSelector(ctx, m.index, stores.FTUnsealed) } else { // append to existing - best, err = m.index.StorageFindSector(ctx, sector, stores.FTUnsealed, false) + selector, err = newExistingSelector(ctx, m.index, sector, stores.FTUnsealed, false) } if err != nil { - return abi.PieceInfo{}, xerrors.Errorf("finding sector path: %w", err) + return abi.PieceInfo{}, xerrors.Errorf("creating path selector: %w", err) } - log.Debugf("find workers for %v", best) - candidateWorkers, _ := m.getWorkersByPaths(sealtasks.TTAddPiece, best) + var out abi.PieceInfo + err = m.sched.Schedule(ctx, sealtasks.TTAddPiece, selector, schedNop, func(ctx context.Context, w Worker) error { + p, err := w.AddPiece(ctx, sector, existingPieces, sz, r) + if err != nil { + return err + } + out = p + return nil + }) - if len(candidateWorkers) == 0 { - return abi.PieceInfo{}, ErrNoWorkers - } - - worker, done, err := m.getWorker(ctx, sealtasks.TTAddPiece, candidateWorkers) - if err != nil { - return abi.PieceInfo{}, xerrors.Errorf("scheduling worker: %w", err) - } - defer done() - - // TODO: select(candidateWorkers, ...) - // TODO: remove the sectorbuilder abstraction, pass path directly - return worker.AddPiece(ctx, sector, existingPieces, sz, r) + return out, err } func (m *Manager) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) { // TODO: also consider where the unsealed data sits - best, err := m.index.StorageBestAlloc(ctx, stores.FTCache|stores.FTSealed, true) + selector, err := newAllocSelector(ctx, m.index, stores.FTCache|stores.FTSealed) if err != nil { - return nil, xerrors.Errorf("finding path for sector sealing: %w", err) + return nil, xerrors.Errorf("creating path selector: %w", err) } - candidateWorkers, _ := m.getWorkersByPaths(sealtasks.TTPreCommit1, best) - if len(candidateWorkers) == 0 { - return nil, ErrNoWorkers - } + err = m.sched.Schedule(ctx, sealtasks.TTPreCommit1, selector, schedFetch(sector, stores.FTUnsealed, true), func(ctx context.Context, w Worker) error { + p, err := w.SealPreCommit1(ctx, sector, ticket, pieces) + if err != nil { + return err + } + out = p + return nil + }) - worker, done, err := m.getWorker(ctx, sealtasks.TTPreCommit1, candidateWorkers) - if err != nil { - return nil, xerrors.Errorf("scheduling worker: %w", err) - } - defer done() - - // TODO: select(candidateWorkers, ...) - // TODO: remove the sectorbuilder abstraction, pass path directly - return worker.SealPreCommit1(ctx, sector, ticket, pieces) + return out, err } -func (m *Manager) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.PreCommit1Out) (cids storage.SectorCids, err error) { - // TODO: allow workers to fetch the sectors - - best, err := m.index.StorageFindSector(ctx, sector, stores.FTCache|stores.FTSealed, true) +func (m *Manager) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.PreCommit1Out) (out storage.SectorCids, err error) { + selector, err := newExistingSelector(ctx, m.index, sector, stores.FTCache|stores.FTSealed, true) if err != nil { - return storage.SectorCids{}, xerrors.Errorf("finding path for sector sealing: %w", err) + return storage.SectorCids{}, xerrors.Errorf("creating path selector: %w", err) } - candidateWorkers, _ := m.getWorkersByPaths(sealtasks.TTPreCommit2, best) - if len(candidateWorkers) == 0 { - return storage.SectorCids{}, ErrNoWorkers - } - - worker, done, err := m.getWorker(ctx, sealtasks.TTPreCommit2, candidateWorkers) - if err != nil { - return storage.SectorCids{}, xerrors.Errorf("scheduling worker: %w", err) - } - defer done() - - // TODO: select(candidateWorkers, ...) - // TODO: remove the sectorbuilder abstraction, pass path directly - return worker.SealPreCommit2(ctx, sector, phase1Out) + err = m.sched.Schedule(ctx, sealtasks.TTPreCommit2, selector, schedFetch(sector, stores.FTCache|stores.FTSealed, true), func(ctx context.Context, w Worker) error { + p, err := w.SealPreCommit2(ctx, sector, phase1Out) + if err != nil { + return err + } + out = p + return nil + }) + return out, err } -func (m *Manager) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (output storage.Commit1Out, err error) { - best, err := m.index.StorageFindSector(ctx, sector, stores.FTCache|stores.FTSealed, true) +func (m *Manager) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (out storage.Commit1Out, err error) { + selector, err := newExistingSelector(ctx, m.index, sector, stores.FTCache|stores.FTSealed, false) if err != nil { - return nil, xerrors.Errorf("finding path for sector sealing: %w", err) - } - - candidateWorkers, _ := m.getWorkersByPaths(sealtasks.TTCommit1, best) - if len(candidateWorkers) == 0 { - return nil, ErrNoWorkers + return storage.Commit1Out{}, xerrors.Errorf("creating path selector: %w", err) } // TODO: Try very hard to execute on worker with access to the sectors - worker, done, err := m.getWorker(ctx, sealtasks.TTCommit1, candidateWorkers) - if err != nil { - return nil, xerrors.Errorf("scheduling worker: %w", err) - } - defer done() - - // TODO: select(candidateWorkers, ...) - // TODO: remove the sectorbuilder abstraction, pass path directly - return worker.SealCommit1(ctx, sector, ticket, seed, pieces, cids) + // (except, don't.. for now at least - we are using this step to bring data + // into 'provable' storage. Optimally we'd do that in commit2, in parallel + // with snark compute) + err = m.sched.Schedule(ctx, sealtasks.TTCommit1, selector, schedFetch(sector, stores.FTCache|stores.FTSealed, true), func(ctx context.Context, w Worker) error { + p, err := w.SealCommit1(ctx, sector, ticket, seed, pieces, cids) + if err != nil { + return err + } + out = p + return nil + }) + return out, err } -func (m *Manager) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.Commit1Out) (proof storage.Proof, err error) { - var candidateWorkers []WorkerID +func (m *Manager) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.Commit1Out) (out storage.Proof, err error) { + selector := newTaskSelector() - m.workersLk.Lock() - for id, worker := range m.workers { - tt, err := worker.w.TaskTypes(ctx) + err = m.sched.Schedule(ctx, sealtasks.TTCommit2, selector, schedNop, func(ctx context.Context, w Worker) error { + p, err := w.SealCommit2(ctx, sector, phase1Out) if err != nil { - log.Errorf("error getting supported worker task types: %+v", err) - continue + return err } - if _, ok := tt[sealtasks.TTCommit2]; !ok { - continue - } - candidateWorkers = append(candidateWorkers, id) - } - m.workersLk.Unlock() - if len(candidateWorkers) == 0 { - return nil, ErrNoWorkers - } + out = p + return nil + }) - worker, done, err := m.getWorker(ctx, sealtasks.TTCommit2, candidateWorkers) - if err != nil { - return nil, xerrors.Errorf("scheduling worker: %w", err) - } - defer done() - - return worker.SealCommit2(ctx, sector, phase1Out) + return out, err } func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID) error { - best, err := m.index.StorageFindSector(ctx, sector, stores.FTCache|stores.FTSealed|stores.FTUnsealed, true) + selector, err := newExistingSelector(ctx, m.index, sector, stores.FTCache|stores.FTSealed|stores.FTUnsealed, false) if err != nil { - return xerrors.Errorf("finding sealed sector: %w", err) + return xerrors.Errorf("creating path selector: %w", err) } - candidateWorkers, _ := m.getWorkersByPaths(sealtasks.TTFinalize, best) - if len(candidateWorkers) == 0 { - return ErrNoWorkers - } - - // TODO: Remove sector from sealing stores - // TODO: Move the sector to long-term storage - return m.workers[candidateWorkers[0]].w.FinalizeSector(ctx, sector) + return m.sched.Schedule(ctx, sealtasks.TTFinalize, selector, + schedFetch(sector, stores.FTCache|stores.FTSealed|stores.FTUnsealed, false), + func(ctx context.Context, w Worker) error { + return w.FinalizeSector(ctx, sector) + }) } func (m *Manager) StorageLocal(ctx context.Context) (map[stores.ID]string, error) { @@ -440,8 +394,7 @@ func (m *Manager) FsStat(ctx context.Context, id stores.ID) (stores.FsStat, erro } func (m *Manager) Close() error { - close(m.closing) - return nil + return m.sched.Close() } var _ SectorManager = &Manager{} diff --git a/resources.go b/resources.go index 2861a2be8..afe4c166c 100644 --- a/resources.go +++ b/resources.go @@ -23,12 +23,16 @@ type Resources struct { MinMemory uint64 // What Must be in RAM for decent perf MaxMemory uint64 // Memory required (swap + ram) - MultiThread bool - CanGPU bool + Threads int // -1 = multithread + CanGPU bool BaseMinMemory uint64 // What Must be in RAM for decent perf (shared between threads) } +func (r Resources) MultiThread() bool { + return r.Threads == -1 +} + const MaxCachingOverhead = 32 << 30 var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ @@ -37,7 +41,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ MaxMemory: 32 << 30, MinMemory: 32 << 30, - MultiThread: false, + Threads: 1, BaseMinMemory: 1 << 30, }, @@ -45,7 +49,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ MaxMemory: 1 << 30, MinMemory: 1 << 30, - MultiThread: false, + Threads: 1, BaseMinMemory: 1 << 30, }, @@ -53,7 +57,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ MaxMemory: 2 << 10, MinMemory: 2 << 10, - MultiThread: false, + Threads: 1, BaseMinMemory: 2 << 10, }, @@ -61,7 +65,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ MaxMemory: 8 << 20, MinMemory: 8 << 20, - MultiThread: false, + Threads: 1, BaseMinMemory: 8 << 20, }, @@ -71,7 +75,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ MaxMemory: 64 << 30, MinMemory: 32 << 30, - MultiThread: false, + Threads: 1, BaseMinMemory: 30 << 30, }, @@ -79,7 +83,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ MaxMemory: 3 << 29, // 1.5G MinMemory: 1 << 30, - MultiThread: false, + Threads: 1, BaseMinMemory: 1 << 30, }, @@ -87,7 +91,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ MaxMemory: 2 << 10, MinMemory: 2 << 10, - MultiThread: false, + Threads: 1, BaseMinMemory: 2 << 10, }, @@ -95,7 +99,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ MaxMemory: 8 << 20, MinMemory: 8 << 20, - MultiThread: false, + Threads: 1, BaseMinMemory: 8 << 20, }, @@ -105,7 +109,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ MaxMemory: 96 << 30, MinMemory: 64 << 30, - MultiThread: true, + Threads: -1, BaseMinMemory: 30 << 30, }, @@ -113,7 +117,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ MaxMemory: 3 << 29, // 1.5G MinMemory: 1 << 30, - MultiThread: true, + Threads: -1, BaseMinMemory: 1 << 30, }, @@ -121,7 +125,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ MaxMemory: 2 << 10, MinMemory: 2 << 10, - MultiThread: true, + Threads: -1, BaseMinMemory: 2 << 10, }, @@ -129,7 +133,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ MaxMemory: 8 << 20, MinMemory: 8 << 20, - MultiThread: true, + Threads: -1, BaseMinMemory: 8 << 20, }, @@ -139,7 +143,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ MaxMemory: 1 << 30, MinMemory: 1 << 30, - MultiThread: false, + Threads: 0, BaseMinMemory: 1 << 30, }, @@ -147,7 +151,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ MaxMemory: 1 << 30, MinMemory: 1 << 30, - MultiThread: false, + Threads: 0, BaseMinMemory: 1 << 30, }, @@ -155,7 +159,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ MaxMemory: 2 << 10, MinMemory: 2 << 10, - MultiThread: false, + Threads: 0, BaseMinMemory: 2 << 10, }, @@ -163,7 +167,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ MaxMemory: 8 << 20, MinMemory: 8 << 20, - MultiThread: false, + Threads: 0, BaseMinMemory: 8 << 20, }, @@ -173,8 +177,8 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ MaxMemory: 110 << 30, MinMemory: 60 << 30, - MultiThread: true, - CanGPU: true, + Threads: -1, + CanGPU: true, BaseMinMemory: 64 << 30, // params }, @@ -182,8 +186,8 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ MaxMemory: 3 << 29, // 1.5G MinMemory: 1 << 30, - MultiThread: false, // This is fine - CanGPU: true, + Threads: 1, // This is fine + CanGPU: true, BaseMinMemory: 10 << 30, }, @@ -191,8 +195,8 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ MaxMemory: 2 << 10, MinMemory: 2 << 10, - MultiThread: false, - CanGPU: true, + Threads: 1, + CanGPU: true, BaseMinMemory: 2 << 10, }, @@ -200,10 +204,48 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ MaxMemory: 8 << 20, MinMemory: 8 << 20, - MultiThread: false, - CanGPU: true, + Threads: 1, + CanGPU: true, BaseMinMemory: 8 << 20, }, }, + sealtasks.TTFetch: { + abi.RegisteredProof_StackedDRG32GiBSeal: Resources{ + MaxMemory: 1 << 20, + MinMemory: 1 << 20, + + Threads: 0, + CanGPU: false, + + BaseMinMemory: 0, + }, + abi.RegisteredProof_StackedDRG512MiBSeal: Resources{ + MaxMemory: 1 << 20, + MinMemory: 1 << 20, + + Threads: 0, + CanGPU: false, + + BaseMinMemory: 0, + }, + abi.RegisteredProof_StackedDRG2KiBSeal: Resources{ + MaxMemory: 1 << 20, + MinMemory: 1 << 20, + + Threads: 0, + CanGPU: false, + + BaseMinMemory: 0, + }, + abi.RegisteredProof_StackedDRG8MiBSeal: Resources{ + MaxMemory: 1 << 20, + MinMemory: 1 << 20, + + Threads: 0, + CanGPU: false, + + BaseMinMemory: 0, + }, + }, } diff --git a/sched.go b/sched.go index 0d07bbb96..1a2efb5e7 100644 --- a/sched.go +++ b/sched.go @@ -1,6 +1,12 @@ package sectorstorage import ( + "container/list" + "context" + "sort" + "sync" + + "github.com/hashicorp/go-multierror" "golang.org/x/xerrors" "github.com/filecoin-project/specs-actors/actors/abi" @@ -11,29 +17,95 @@ import ( const mib = 1 << 20 +type WorkerAction func(ctx context.Context, w Worker) error + +type WorkerSelector interface { + Ok(ctx context.Context, task sealtasks.TaskType, a *workerHandle) (bool, error) // true if worker is acceptable for performing a task + + Cmp(ctx context.Context, task sealtasks.TaskType, a, b *workerHandle) (bool, error) // true if a is preferred over b +} + +type scheduler struct { + spt abi.RegisteredProof + + workersLk sync.Mutex + nextWorker WorkerID + workers map[WorkerID]*workerHandle + + newWorkers chan *workerHandle + schedule chan *workerRequest + workerFree chan WorkerID + closing chan struct{} + + schedQueue *list.List // List[*workerRequest] +} + +func newScheduler(spt abi.RegisteredProof) *scheduler { + return &scheduler{ + spt: spt, + + nextWorker: 0, + workers: map[WorkerID]*workerHandle{}, + + newWorkers: make(chan *workerHandle), + schedule: make(chan *workerRequest), + workerFree: make(chan WorkerID), + closing: make(chan struct{}), + + schedQueue: list.New(), + } +} + +func (sh *scheduler) Schedule(ctx context.Context, taskType sealtasks.TaskType, sel WorkerSelector, prepare WorkerAction, work WorkerAction) error { + ret := make(chan workerResponse) + + select { + case sh.schedule <- &workerRequest{ + taskType: taskType, + sel: sel, + + prepare: prepare, + work: work, + + ret: ret, + ctx: ctx, + }: + case <-sh.closing: + return xerrors.New("closing") + case <-ctx.Done(): + return ctx.Err() + } + + select { + case resp := <-ret: + return resp.err + case <-sh.closing: + return xerrors.New("closing") + case <-ctx.Done(): + return ctx.Err() + } +} + type workerRequest struct { taskType sealtasks.TaskType - accept []WorkerID // ordered by preference + sel WorkerSelector - ret chan<- workerResponse - cancel <-chan struct{} + prepare WorkerAction + work WorkerAction + + ret chan<- workerResponse + ctx context.Context } type workerResponse struct { err error - - worker Worker - done func() } -func (r *workerRequest) respond(resp workerResponse) { +func (r *workerRequest) respond(err error) { select { - case r.ret <- resp: - case <-r.cancel: + case r.ret <- workerResponse{err: err}: + case <-r.ctx.Done(): log.Warnf("request got cancelled before we could respond") - if resp.done != nil { - resp.done() - } } } @@ -48,60 +120,56 @@ type workerHandle struct { cpuUse uint64 // 0 - free; 1+ - singlecore things } -func (m *Manager) runSched() { +func (sh *scheduler) runSched() { for { select { - case w := <-m.newWorkers: - m.schedNewWorker(w) - case req := <-m.schedule: - resp, err := m.maybeSchedRequest(req) + case w := <-sh.newWorkers: + sh.schedNewWorker(w) + case req := <-sh.schedule: + scheduled, err := sh.maybeSchedRequest(req) if err != nil { - req.respond(workerResponse{err: err}) + req.respond(err) + continue + } + if scheduled { continue } - if resp != nil { - req.respond(*resp) - continue - } - - m.schedQueue.PushBack(req) - case wid := <-m.workerFree: - m.onWorkerFreed(wid) - case <-m.closing: - m.schedClose() + sh.schedQueue.PushBack(req) + case wid := <-sh.workerFree: + sh.onWorkerFreed(wid) + case <-sh.closing: + sh.schedClose() return } } } -func (m *Manager) onWorkerFreed(wid WorkerID) { - for e := m.schedQueue.Front(); e != nil; e = e.Next() { +func (sh *scheduler) onWorkerFreed(wid WorkerID) { + for e := sh.schedQueue.Front(); e != nil; e = e.Next() { req := e.Value.(*workerRequest) - var ok bool - for _, id := range req.accept { - if id == wid { - ok = true - break - } + + ok, err := req.sel.Ok(req.ctx, req.taskType, sh.workers[wid]) + if err != nil { + log.Errorf("onWorkerFreed req.sel.Ok error: %+v", err) + continue } + if !ok { continue } - resp, err := m.maybeSchedRequest(req) + scheduled, err := sh.maybeSchedRequest(req) if err != nil { - req.respond(workerResponse{err: err}) + req.respond(err) continue } - if resp != nil { - req.respond(*resp) - + if scheduled { pe := e.Prev() - m.schedQueue.Remove(e) + sh.schedQueue.Remove(e) if pe == nil { - pe = m.schedQueue.Front() + pe = sh.schedQueue.Front() } if pe == nil { break @@ -112,44 +180,68 @@ func (m *Manager) onWorkerFreed(wid WorkerID) { } } -func (m *Manager) maybeSchedRequest(req *workerRequest) (*workerResponse, error) { - m.workersLk.Lock() - defer m.workersLk.Unlock() +func (sh *scheduler) maybeSchedRequest(req *workerRequest) (bool, error) { + sh.workersLk.Lock() + defer sh.workersLk.Unlock() tried := 0 + var acceptable []WorkerID + + for wid, worker := range sh.workers { + ok, err := req.sel.Ok(req.ctx, req.taskType, worker) + if err != nil { + return false, err + } - for i := len(req.accept) - 1; i >= 0; i-- { - id := req.accept[i] - w, ok := m.workers[id] if !ok { - log.Warnf("requested worker %d is not in scheduler", id) + continue } tried++ - canDo, err := m.canHandleRequest(id, w, req) + canDo, err := sh.canHandleRequest(wid, worker, req) if err != nil { - return nil, err + return false, err } if !canDo { continue } - return m.makeResponse(id, w, req), nil + acceptable = append(acceptable, wid) + } + + if len(acceptable) > 0 { + { + var serr error + + sort.SliceStable(acceptable, func(i, j int) bool { + r, err := req.sel.Cmp(req.ctx, req.taskType, sh.workers[acceptable[i]], sh.workers[acceptable[j]]) + if err != nil { + serr = multierror.Append(serr, err) + } + return r + }) + + if serr != nil { + return false, xerrors.Errorf("error(s) selecting best worker: %w", serr) + } + } + + return true, sh.assignWorker(acceptable[0], sh.workers[acceptable[0]], req) } if tried == 0 { - return nil, xerrors.New("maybeSchedRequest didn't find any good workers") + return false, xerrors.New("maybeSchedRequest didn't find any good workers") } - return nil, nil // put in waiting queue + return false, nil // put in waiting queue } -func (m *Manager) makeResponse(wid WorkerID, w *workerHandle, req *workerRequest) *workerResponse { - needRes := ResourceTable[req.taskType][m.scfg.SealProofType] +func (sh *scheduler) assignWorker(wid WorkerID, w *workerHandle, req *workerRequest) error { + needRes := ResourceTable[req.taskType][sh.spt] w.gpuUsed = needRes.CanGPU - if needRes.MultiThread { + if needRes.MultiThread() { w.cpuUse += w.info.Resources.CPUs } else { w.cpuUse++ @@ -158,17 +250,17 @@ func (m *Manager) makeResponse(wid WorkerID, w *workerHandle, req *workerRequest w.memUsedMin += needRes.MinMemory w.memUsedMax += needRes.MaxMemory - return &workerResponse{ - err: nil, - worker: w.w, - done: func() { - m.workersLk.Lock() + go func() { + var err error + + defer func() { + sh.workersLk.Lock() if needRes.CanGPU { w.gpuUsed = false } - if needRes.MultiThread { + if needRes.MultiThread() { w.cpuUse -= w.info.Resources.CPUs } else { w.cpuUse-- @@ -177,20 +269,35 @@ func (m *Manager) makeResponse(wid WorkerID, w *workerHandle, req *workerRequest w.memUsedMin -= needRes.MinMemory w.memUsedMax -= needRes.MaxMemory - m.workersLk.Unlock() + sh.workersLk.Unlock() select { - case m.workerFree <- wid: - case <-m.closing: + case sh.workerFree <- wid: + case <-sh.closing: } - }, - } + }() + + err = req.prepare(req.ctx, w.w) + if err == nil { + err = req.work(req.ctx, w.w) + } + + select { + case req.ret <- workerResponse{err: err}: + case <-req.ctx.Done(): + log.Warnf("request got cancelled before we could respond") + case <-sh.closing: + log.Warnf("scheduler closed while sending response") + } + }() + + return nil } -func (m *Manager) canHandleRequest(wid WorkerID, w *workerHandle, req *workerRequest) (bool, error) { - needRes, ok := ResourceTable[req.taskType][m.scfg.SealProofType] +func (sh *scheduler) canHandleRequest(wid WorkerID, w *workerHandle, req *workerRequest) (bool, error) { + needRes, ok := ResourceTable[req.taskType][sh.spt] if !ok { - return false, xerrors.Errorf("canHandleRequest: missing ResourceTable entry for %s/%d", req.taskType, m.scfg.SealProofType) + return false, xerrors.Errorf("canHandleRequest: missing ResourceTable entry for %s/%d", req.taskType, sh.spt) } res := w.info.Resources @@ -203,7 +310,7 @@ func (m *Manager) canHandleRequest(wid WorkerID, w *workerHandle, req *workerReq } maxNeedMem := res.MemReserved + w.memUsedMax + needRes.MaxMemory + needRes.BaseMinMemory - if m.scfg.SealProofType == abi.RegisteredProof_StackedDRG32GiBSeal { + if sh.spt == abi.RegisteredProof_StackedDRG32GiBSeal { maxNeedMem += MaxCachingOverhead } if maxNeedMem > res.MemSwap+res.MemPhysical { @@ -211,7 +318,7 @@ func (m *Manager) canHandleRequest(wid WorkerID, w *workerHandle, req *workerReq return false, nil } - if needRes.MultiThread { + if needRes.MultiThread() { if w.cpuUse > 0 { log.Debugf("sched: not scheduling on worker %d; multicore process needs %d threads, %d in use, target %d", wid, res.CPUs, w.cpuUse, res.CPUs) return false, nil @@ -228,22 +335,27 @@ func (m *Manager) canHandleRequest(wid WorkerID, w *workerHandle, req *workerReq return true, nil } -func (m *Manager) schedNewWorker(w *workerHandle) { - m.workersLk.Lock() - defer m.workersLk.Unlock() +func (sh *scheduler) schedNewWorker(w *workerHandle) { + sh.workersLk.Lock() + defer sh.workersLk.Unlock() - id := m.nextWorker - m.workers[id] = w - m.nextWorker++ + id := sh.nextWorker + sh.workers[id] = w + sh.nextWorker++ } -func (m *Manager) schedClose() { - m.workersLk.Lock() - defer m.workersLk.Unlock() +func (sh *scheduler) schedClose() { + sh.workersLk.Lock() + defer sh.workersLk.Unlock() - for i, w := range m.workers { + for i, w := range sh.workers { if err := w.w.Close(); err != nil { log.Errorf("closing worker %d: %+v", i, err) } } } + +func (sh *scheduler) Close() error { + close(sh.closing) + return nil +} diff --git a/stats.go b/stats.go index 492919460..84e6c5bb3 100644 --- a/stats.go +++ b/stats.go @@ -3,12 +3,12 @@ package sectorstorage import "github.com/filecoin-project/sector-storage/storiface" func (m *Manager) WorkerStats() map[uint64]storiface.WorkerStats { - m.workersLk.Lock() - defer m.workersLk.Unlock() + m.sched.workersLk.Lock() + defer m.sched.workersLk.Unlock() out := map[uint64]storiface.WorkerStats{} - for id, handle := range m.workers { + for id, handle := range m.sched.workers { out[uint64(id)] = storiface.WorkerStats{ Info: handle.info, MemUsedMin: handle.memUsedMin, From d399312786ca06ac48f23ea9abf03a33606867ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 27 Apr 2020 22:43:42 +0200 Subject: [PATCH 053/199] sched: virtual resource pool for prefetching sector data --- manager.go | 84 ++---------------------- sched.go | 150 +++++++++++++++++++++++++------------------ selector_alloc.go | 59 +++++++++++++++++ selector_existing.go | 60 +++++++++++++++++ selector_task.go | 46 +++++++++++++ stats.go | 8 +-- 6 files changed, 260 insertions(+), 147 deletions(-) create mode 100644 selector_alloc.go create mode 100644 selector_existing.go create mode 100644 selector_task.go diff --git a/manager.go b/manager.go index f125cdafd..f4408b6e0 100644 --- a/manager.go +++ b/manager.go @@ -152,8 +152,10 @@ func (m *Manager) AddWorker(ctx context.Context, w Worker) error { } m.sched.newWorkers <- &workerHandle{ - w: w, - info: info, + w: w, + info: info, + preparing: &activeResources{}, + active: &activeResources{}, } return nil } @@ -171,84 +173,6 @@ func (m *Manager) ReadPieceFromSealedSector(context.Context, abi.SectorID, ffiwr panic("implement me") } -/*func (m *Manager) getWorkersByPaths(task sealtasks.TaskType, inPaths []stores.StorageInfo) ([]WorkerID, map[WorkerID]stores.StorageInfo) { - m.workersLk.Lock() - defer m.workersLk.Unlock() - - var workers []WorkerID - paths := map[WorkerID]stores.StorageInfo{} - - for i, worker := range m.workers { - tt, err := worker.w.TaskTypes(context.TODO()) - if err != nil { - log.Errorf("error getting supported worker task types: %+v", err) - continue - } - if _, ok := tt[task]; !ok { - log.Debugf("dropping worker %d; task %s not supported (supports %v)", i, task, tt) - continue - } - - phs, err := worker.w.Paths(context.TODO()) - if err != nil { - log.Errorf("error getting worker paths: %+v", err) - continue - } - - // check if the worker has access to the path we selected - var st *stores.StorageInfo - for _, p := range phs { - for _, meta := range inPaths { - if p.ID == meta.ID { - if st != nil && st.Weight > p.Weight { - continue - } - - p := meta // copy - st = &p - } - } - } - if st == nil { - log.Debugf("skipping worker %d; doesn't have any of %v", i, inPaths) - log.Debugf("skipping worker %d; only has %v", i, phs) - continue - } - - paths[i] = *st - workers = append(workers, i) - } - - return workers, paths -} - -func (m *Manager) getWorker(ctx context.Context, taskType sealtasks.TaskType, accept []WorkerID) (Worker, func(), error) { - ret := make(chan workerResponse) - - select { - case m.schedule <- &workerRequest{ - taskType: taskType, - accept: accept, - - ctx: ctx.Done(), - ret: ret, - }: - case <-m.closing: - return nil, nil, xerrors.New("closing") - case <-ctx.Done(): - return nil, nil, ctx.Err() - } - - select { - case resp := <-ret: - return resp.worker, resp.done, resp.err - case <-m.closing: - return nil, nil, xerrors.New("closing") - case <-ctx.Done(): - return nil, nil, ctx.Err() - } -}*/ - func schedNop(context.Context, Worker) error { return nil } diff --git a/sched.go b/sched.go index 1a2efb5e7..78f2a6664 100644 --- a/sched.go +++ b/sched.go @@ -109,15 +109,22 @@ func (r *workerRequest) respond(err error) { } } +type activeResources struct { + memUsedMin uint64 + memUsedMax uint64 + gpuUsed bool + cpuUse uint64 + + cond *sync.Cond +} + type workerHandle struct { w Worker info storiface.WorkerInfo - memUsedMin uint64 - memUsedMax uint64 - gpuUsed bool - cpuUse uint64 // 0 - free; 1+ - singlecore things + preparing *activeResources + active *activeResources } func (sh *scheduler) runSched() { @@ -198,12 +205,8 @@ func (sh *scheduler) maybeSchedRequest(req *workerRequest) (bool, error) { } tried++ - canDo, err := sh.canHandleRequest(wid, worker, req) - if err != nil { - return false, err - } - - if !canDo { + needRes := ResourceTable[req.taskType][sh.spt] + if !canHandleRequest(needRes, sh.spt, wid, worker.info.Resources, worker.preparing) { continue } @@ -240,99 +243,120 @@ func (sh *scheduler) maybeSchedRequest(req *workerRequest) (bool, error) { func (sh *scheduler) assignWorker(wid WorkerID, w *workerHandle, req *workerRequest) error { needRes := ResourceTable[req.taskType][sh.spt] - w.gpuUsed = needRes.CanGPU - if needRes.MultiThread() { - w.cpuUse += w.info.Resources.CPUs - } else { - w.cpuUse++ - } - - w.memUsedMin += needRes.MinMemory - w.memUsedMax += needRes.MaxMemory + w.preparing.add(w.info.Resources, needRes) go func() { - var err error - - defer func() { - sh.workersLk.Lock() - - if needRes.CanGPU { - w.gpuUsed = false - } - - if needRes.MultiThread() { - w.cpuUse -= w.info.Resources.CPUs - } else { - w.cpuUse-- - } - - w.memUsedMin -= needRes.MinMemory - w.memUsedMax -= needRes.MaxMemory + err := req.prepare(req.ctx, w.w) + sh.workersLk.Lock() + err = w.active.withResources(sh.spt, wid, w.info.Resources, needRes, &sh.workersLk, func() error { + w.preparing.free(w.info.Resources, needRes) sh.workersLk.Unlock() + defer sh.workersLk.Lock() // we MUST return locked from this function select { case sh.workerFree <- wid: case <-sh.closing: } - }() - err = req.prepare(req.ctx, w.w) - if err == nil { err = req.work(req.ctx, w.w) - } - select { - case req.ret <- workerResponse{err: err}: - case <-req.ctx.Done(): - log.Warnf("request got cancelled before we could respond") - case <-sh.closing: - log.Warnf("scheduler closed while sending response") - } + select { + case req.ret <- workerResponse{err: err}: + case <-req.ctx.Done(): + log.Warnf("request got cancelled before we could respond") + case <-sh.closing: + log.Warnf("scheduler closed while sending response") + } + + return nil + }) + + sh.workersLk.Unlock() }() return nil } -func (sh *scheduler) canHandleRequest(wid WorkerID, w *workerHandle, req *workerRequest) (bool, error) { - needRes, ok := ResourceTable[req.taskType][sh.spt] - if !ok { - return false, xerrors.Errorf("canHandleRequest: missing ResourceTable entry for %s/%d", req.taskType, sh.spt) +func (a *activeResources) withResources(spt abi.RegisteredProof, id WorkerID, wr storiface.WorkerResources, r Resources, locker sync.Locker, cb func() error) error { + for !canHandleRequest(r, spt, id, wr, a) { + if a.cond == nil { + a.cond = sync.NewCond(locker) + } + a.cond.Wait() } - res := w.info.Resources + a.add(wr, r) + + err := cb() + + a.free(wr, r) + if a.cond != nil { + a.cond.Broadcast() + } + + return err +} + +func (a *activeResources) add(wr storiface.WorkerResources, r Resources) { + a.gpuUsed = r.CanGPU + if r.MultiThread() { + a.cpuUse += wr.CPUs + } else { + a.cpuUse += uint64(r.Threads) + } + + a.memUsedMin += r.MinMemory + a.memUsedMax += r.MaxMemory +} + +func (a *activeResources) free(wr storiface.WorkerResources, r Resources) { + if r.CanGPU { + a.gpuUsed = false + } + if r.MultiThread() { + a.cpuUse -= wr.CPUs + } else { + a.cpuUse -= uint64(r.Threads) + } + + a.memUsedMin -= r.MinMemory + a.memUsedMax -= r.MaxMemory +} + +func canHandleRequest(needRes Resources, spt abi.RegisteredProof, wid WorkerID, res storiface.WorkerResources, active *activeResources) bool { // TODO: dedupe needRes.BaseMinMemory per task type (don't add if that task is already running) - minNeedMem := res.MemReserved + w.memUsedMin + needRes.MinMemory + needRes.BaseMinMemory + minNeedMem := res.MemReserved + active.memUsedMin + needRes.MinMemory + needRes.BaseMinMemory if minNeedMem > res.MemPhysical { log.Debugf("sched: not scheduling on worker %d; not enough physical memory - need: %dM, have %dM", wid, minNeedMem/mib, res.MemPhysical/mib) - return false, nil + return false } - maxNeedMem := res.MemReserved + w.memUsedMax + needRes.MaxMemory + needRes.BaseMinMemory - if sh.spt == abi.RegisteredProof_StackedDRG32GiBSeal { + maxNeedMem := res.MemReserved + active.memUsedMax + needRes.MaxMemory + needRes.BaseMinMemory + if spt == abi.RegisteredProof_StackedDRG32GiBSeal { maxNeedMem += MaxCachingOverhead } if maxNeedMem > res.MemSwap+res.MemPhysical { log.Debugf("sched: not scheduling on worker %d; not enough virtual memory - need: %dM, have %dM", wid, maxNeedMem/mib, (res.MemSwap+res.MemPhysical)/mib) - return false, nil + return false } if needRes.MultiThread() { - if w.cpuUse > 0 { - log.Debugf("sched: not scheduling on worker %d; multicore process needs %d threads, %d in use, target %d", wid, res.CPUs, w.cpuUse, res.CPUs) - return false, nil + if active.cpuUse > 0 { + log.Debugf("sched: not scheduling on worker %d; multicore process needs %d threads, %d in use, target %d", wid, res.CPUs, active.cpuUse, res.CPUs) + return false } } if len(res.GPUs) > 0 && needRes.CanGPU { - if w.gpuUsed { + if active.gpuUsed { log.Debugf("sched: not scheduling on worker %d; GPU in use", wid) - return false, nil + return false } } - return true, nil + return true } func (sh *scheduler) schedNewWorker(w *workerHandle) { diff --git a/selector_alloc.go b/selector_alloc.go new file mode 100644 index 000000000..1ceab0ecb --- /dev/null +++ b/selector_alloc.go @@ -0,0 +1,59 @@ +package sectorstorage + +import ( + "context" + + "golang.org/x/xerrors" + + "github.com/filecoin-project/sector-storage/sealtasks" + "github.com/filecoin-project/sector-storage/stores" +) + +type allocSelector struct { + best []stores.StorageInfo +} + +func newAllocSelector(ctx context.Context, index stores.SectorIndex, alloc stores.SectorFileType) (*allocSelector, error) { + best, err := index.StorageBestAlloc(ctx, alloc, true) + if err != nil { + return nil, err + } + + return &allocSelector{ + best: best, + }, nil +} + +func (s *allocSelector) Ok(ctx context.Context, task sealtasks.TaskType, whnd *workerHandle) (bool, error) { + tasks, err := whnd.w.TaskTypes(ctx) + if err != nil { + return false, xerrors.Errorf("getting supported worker task types: %w", err) + } + if _, supported := tasks[task]; !supported { + return false, nil + } + + paths, err := whnd.w.Paths(ctx) + if err != nil { + return false, xerrors.Errorf("getting worker paths: %w", err) + } + + have := map[stores.ID]struct{}{} + for _, path := range paths { + have[path.ID] = struct{}{} + } + + for _, info := range s.best { + if _, ok := have[info.ID]; ok { + return true, nil + } + } + + return false, nil +} + +func (s *allocSelector) Cmp(ctx context.Context, task sealtasks.TaskType, a, b *workerHandle) (bool, error) { + return a.info.Hostname > b.info.Hostname, nil // TODO: Better strategy +} + +var _ WorkerSelector = &allocSelector{} diff --git a/selector_existing.go b/selector_existing.go new file mode 100644 index 000000000..eccdefbf2 --- /dev/null +++ b/selector_existing.go @@ -0,0 +1,60 @@ +package sectorstorage + +import ( + "context" + + "golang.org/x/xerrors" + + "github.com/filecoin-project/sector-storage/sealtasks" + "github.com/filecoin-project/sector-storage/stores" + "github.com/filecoin-project/specs-actors/actors/abi" +) + +type existingSelector struct { + best []stores.StorageInfo +} + +func newExistingSelector(ctx context.Context, index stores.SectorIndex, sector abi.SectorID, alloc stores.SectorFileType, allowFetch bool) (*existingSelector, error) { + best, err := index.StorageFindSector(ctx, sector, alloc, allowFetch) + if err != nil { + return nil, err + } + + return &existingSelector{ + best: best, + }, nil +} + +func (s *existingSelector) Ok(ctx context.Context, task sealtasks.TaskType, whnd *workerHandle) (bool, error) { + tasks, err := whnd.w.TaskTypes(ctx) + if err != nil { + return false, xerrors.Errorf("getting supported worker task types: %w", err) + } + if _, supported := tasks[task]; !supported { + return false, nil + } + + paths, err := whnd.w.Paths(ctx) + if err != nil { + return false, xerrors.Errorf("getting worker paths: %w", err) + } + + have := map[stores.ID]struct{}{} + for _, path := range paths { + have[path.ID] = struct{}{} + } + + for _, info := range s.best { + if _, ok := have[info.ID]; ok { + return true, nil + } + } + + return false, nil +} + +func (s *existingSelector) Cmp(ctx context.Context, task sealtasks.TaskType, a, b *workerHandle) (bool, error) { + return a.info.Hostname > b.info.Hostname, nil // TODO: Better strategy +} + +var _ WorkerSelector = &existingSelector{} diff --git a/selector_task.go b/selector_task.go new file mode 100644 index 000000000..f1f4b7770 --- /dev/null +++ b/selector_task.go @@ -0,0 +1,46 @@ +package sectorstorage + +import ( + "context" + + "golang.org/x/xerrors" + + "github.com/filecoin-project/sector-storage/sealtasks" + "github.com/filecoin-project/sector-storage/stores" +) + +type taskSelector struct { + best []stores.StorageInfo +} + +func newTaskSelector() *taskSelector { + return &taskSelector{} +} + +func (s *taskSelector) Ok(ctx context.Context, task sealtasks.TaskType, whnd *workerHandle) (bool, error) { + tasks, err := whnd.w.TaskTypes(ctx) + if err != nil { + return false, xerrors.Errorf("getting supported worker task types: %w", err) + } + _, supported := tasks[task] + + return supported, nil +} + +func (s *taskSelector) Cmp(ctx context.Context, task sealtasks.TaskType, a, b *workerHandle) (bool, error) { + atasks, err := a.w.TaskTypes(ctx) + if err != nil { + return false, xerrors.Errorf("getting supported worker task types: %w", err) + } + btasks, err := b.w.TaskTypes(ctx) + if err != nil { + return false, xerrors.Errorf("getting supported worker task types: %w", err) + } + if len(atasks) != len(btasks) { + return len(atasks) < len(btasks), nil // prefer workers which can do less + } + + return a.info.Hostname > a.info.Hostname, nil // TODO: Better fallback strategy +} + +var _ WorkerSelector = &allocSelector{} diff --git a/stats.go b/stats.go index 84e6c5bb3..dbbee07f3 100644 --- a/stats.go +++ b/stats.go @@ -11,10 +11,10 @@ func (m *Manager) WorkerStats() map[uint64]storiface.WorkerStats { for id, handle := range m.sched.workers { out[uint64(id)] = storiface.WorkerStats{ Info: handle.info, - MemUsedMin: handle.memUsedMin, - MemUsedMax: handle.memUsedMax, - GpuUsed: handle.gpuUsed, - CpuUse: handle.cpuUse, + MemUsedMin: handle.active.memUsedMin, + MemUsedMax: handle.active.memUsedMax, + GpuUsed: handle.active.gpuUsed, + CpuUse: handle.active.cpuUse, } } From d57877af31b8ba88f63390c456eac0dba8b15a47 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 27 Apr 2020 22:59:17 +0200 Subject: [PATCH 054/199] sched: Fix error handling in assignWorker --- sched.go | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/sched.go b/sched.go index 78f2a6664..2d988d082 100644 --- a/sched.go +++ b/sched.go @@ -247,8 +247,28 @@ func (sh *scheduler) assignWorker(wid WorkerID, w *workerHandle, req *workerRequ go func() { err := req.prepare(req.ctx, w.w) - sh.workersLk.Lock() + + if err != nil { + w.preparing.free(w.info.Resources, needRes) + sh.workersLk.Unlock() + + select { + case sh.workerFree <- wid: + case <-sh.closing: + log.Warnf("scheduler closed while sending response (prepare error: %+v)", err) + } + + select { + case req.ret <- workerResponse{err: err}: + case <-req.ctx.Done(): + log.Warnf("request got cancelled before we could respond (prepare error: %+v)", err) + case <-sh.closing: + log.Warnf("scheduler closed while sending response (prepare error: %+v)", err) + } + return + } + err = w.active.withResources(sh.spt, wid, w.info.Resources, needRes, &sh.workersLk, func() error { w.preparing.free(w.info.Resources, needRes) sh.workersLk.Unlock() From 5f1a27d52b305deaad707683c9247bafc948044e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Tue, 28 Apr 2020 12:31:08 +0200 Subject: [PATCH 055/199] sched: Address review --- sched.go | 5 +++++ selector_task.go | 4 ++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/sched.go b/sched.go index 2d988d082..ffaad211d 100644 --- a/sched.go +++ b/sched.go @@ -293,6 +293,11 @@ func (sh *scheduler) assignWorker(wid WorkerID, w *workerHandle, req *workerRequ }) sh.workersLk.Unlock() + + // This error should always be nil, since nothing is setting it, but just to be safe: + if err != nil { + log.Errorf("error executing worker (withResources): %+v", err) + } }() return nil diff --git a/selector_task.go b/selector_task.go index f1f4b7770..5e67ad665 100644 --- a/selector_task.go +++ b/selector_task.go @@ -27,7 +27,7 @@ func (s *taskSelector) Ok(ctx context.Context, task sealtasks.TaskType, whnd *wo return supported, nil } -func (s *taskSelector) Cmp(ctx context.Context, task sealtasks.TaskType, a, b *workerHandle) (bool, error) { +func (s *taskSelector) Cmp(ctx context.Context, _ sealtasks.TaskType, a, b *workerHandle) (bool, error) { atasks, err := a.w.TaskTypes(ctx) if err != nil { return false, xerrors.Errorf("getting supported worker task types: %w", err) @@ -40,7 +40,7 @@ func (s *taskSelector) Cmp(ctx context.Context, task sealtasks.TaskType, a, b *w return len(atasks) < len(btasks), nil // prefer workers which can do less } - return a.info.Hostname > a.info.Hostname, nil // TODO: Better fallback strategy + return a.info.Hostname > b.info.Hostname, nil // TODO: Better fallback strategy } var _ WorkerSelector = &allocSelector{} From 5ab39592cf80812f3d69420cf1893afd473c97a4 Mon Sep 17 00:00:00 2001 From: anorth <445306+anorth@users.noreply.github.com> Date: Wed, 29 Apr 2020 13:36:31 +1000 Subject: [PATCH 056/199] Upgrade to specs-actors v0.3.0 --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 6d23d9388..8caa9899c 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5 github.com/filecoin-project/go-paramfetch v0.0.1 github.com/filecoin-project/lotus v0.2.10 - github.com/filecoin-project/specs-actors v0.2.0 + github.com/filecoin-project/specs-actors v0.3.0 github.com/filecoin-project/specs-storage v0.0.0-20200417134612-61b2d91a6102 github.com/gorilla/mux v1.7.4 github.com/hashicorp/go-multierror v1.0.0 diff --git a/go.sum b/go.sum index f69462732..261afc91e 100644 --- a/go.sum +++ b/go.sum @@ -109,8 +109,8 @@ github.com/filecoin-project/lotus v0.2.10 h1:ijrj/nYdKu5GiMo9r1+Zcp2A4jKHSOMZ2WN github.com/filecoin-project/lotus v0.2.10/go.mod h1:om5PQA9ZT0lf16qI7Fz/ZGLn4LDCMqPC8ntZA9uncRE= github.com/filecoin-project/specs-actors v0.0.0-20200409043918-e569f4a2f504 h1:mwuAaqxKThl70+7FkGdFKVLdwaQZQ8XmscKdhSBBtnc= github.com/filecoin-project/specs-actors v0.0.0-20200409043918-e569f4a2f504/go.mod h1:mdJraXq5vMy0+/FqVQIrnNlpQ/Em6zeu06G/ltQ0/lA= -github.com/filecoin-project/specs-actors v0.2.0 h1:bKxloHLegeYJttIJbQjl4/tdsKOUtYtpiZsEfB4eOnI= -github.com/filecoin-project/specs-actors v0.2.0/go.mod h1:nQYnFbQ7Y0bHZyq6HDEuVlCPR+U3z5Q3wMOQ+2aiV+Y= +github.com/filecoin-project/specs-actors v0.3.0 h1:QxgAuTrZr5TPqjyprZk0nTYW5o0JWpzbb5v+4UHHvN0= +github.com/filecoin-project/specs-actors v0.3.0/go.mod h1:nQYnFbQ7Y0bHZyq6HDEuVlCPR+U3z5Q3wMOQ+2aiV+Y= github.com/filecoin-project/specs-storage v0.0.0-20200417134612-61b2d91a6102 h1:T3f/zkuvgtgqcXrb0NO3BicuveGOxxUAMPa/Yif2kuE= github.com/filecoin-project/specs-storage v0.0.0-20200417134612-61b2d91a6102/go.mod h1:xJ1/xl9+8zZeSSSFmDC3Wr6uusCTxyYPI0VeNVSFmPE= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= From a0af4d4d2b4cbdd94b7b4b8b45f64e841cfa2e17 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Wed, 29 Apr 2020 14:13:21 +0200 Subject: [PATCH 057/199] index: hold rlock in StorageList --- stores/index.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/stores/index.go b/stores/index.go index 12828b575..6f8b11002 100644 --- a/stores/index.go +++ b/stores/index.go @@ -63,6 +63,9 @@ func NewIndex() *Index { } func (i *Index) StorageList(ctx context.Context) (map[ID][]Decl, error) { + i.lk.RLock() + defer i.lk.RUnlock() + byID := map[ID]map[abi.SectorID]SectorFileType{} for id := range i.stores { From 9777ddd2b7f47cf235c751f75449ab2e20be88e8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Wed, 29 Apr 2020 16:03:44 +0200 Subject: [PATCH 058/199] stores: nicer error when sector was lost --- stores/index.go | 2 -- stores/remote.go | 6 +++++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/stores/index.go b/stores/index.go index 6f8b11002..c3271225e 100644 --- a/stores/index.go +++ b/stores/index.go @@ -277,11 +277,9 @@ func (i *Index) StorageBestAlloc(ctx context.Context, allocate SectorFileType, s for _, p := range i.stores { if sealing && !p.info.CanSeal { - log.Debugf("alloc: not considering %s; can't seal", p.info.ID) continue } if !sealing && !p.info.CanStore { - log.Debugf("alloc: not considering %s; can't store", p.info.ID) continue } diff --git a/stores/remote.go b/stores/remote.go index 90d62d91d..ed7ffdaeb 100644 --- a/stores/remote.go +++ b/stores/remote.go @@ -92,7 +92,11 @@ func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, fileType return "", "", "", nil, err } - sort.Slice(si, func(i, j int) bool { + if len(si) == 0 { + return "", "", "", nil, xerrors.Errorf("failed to acquire sector %v from remote(%d): not found", s, fileType) + } + + sort.Slice(si, func(i, j int) bool { return si[i].Weight < si[j].Weight }) From 38cf04b8887cddd3ecb1317323c5ec08d392536e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Wed, 29 Apr 2020 16:04:05 +0200 Subject: [PATCH 059/199] sched: prefer workers with lower utilization --- sched.go | 22 +++++++++++++++++++++- selector_alloc.go | 2 +- selector_existing.go | 2 +- selector_task.go | 2 +- 4 files changed, 24 insertions(+), 4 deletions(-) diff --git a/sched.go b/sched.go index ffaad211d..ad3c948dd 100644 --- a/sched.go +++ b/sched.go @@ -194,6 +194,8 @@ func (sh *scheduler) maybeSchedRequest(req *workerRequest) (bool, error) { tried := 0 var acceptable []WorkerID + needRes := ResourceTable[req.taskType][sh.spt] + for wid, worker := range sh.workers { ok, err := req.sel.Ok(req.ctx, req.taskType, worker) if err != nil { @@ -205,7 +207,6 @@ func (sh *scheduler) maybeSchedRequest(req *workerRequest) (bool, error) { } tried++ - needRes := ResourceTable[req.taskType][sh.spt] if !canHandleRequest(needRes, sh.spt, wid, worker.info.Resources, worker.preparing) { continue } @@ -384,6 +385,25 @@ func canHandleRequest(needRes Resources, spt abi.RegisteredProof, wid WorkerID, return true } +func (a *activeResources) utilization(wr storiface.WorkerResources) float64 { + var max float64 + + cpu := float64(a.cpuUse) / float64(wr.CPUs) + max = cpu + + memMin := float64(a.memUsedMin + wr.MemReserved) / float64(wr.MemPhysical) + if memMin > max { + max = memMin + } + + memMax := float64(a.memUsedMax + wr.MemReserved) / float64(wr.MemPhysical + wr.MemSwap) + if memMax > max { + max = memMax + } + + return max +} + func (sh *scheduler) schedNewWorker(w *workerHandle) { sh.workersLk.Lock() defer sh.workersLk.Unlock() diff --git a/selector_alloc.go b/selector_alloc.go index 1ceab0ecb..c7d06a7bc 100644 --- a/selector_alloc.go +++ b/selector_alloc.go @@ -53,7 +53,7 @@ func (s *allocSelector) Ok(ctx context.Context, task sealtasks.TaskType, whnd *w } func (s *allocSelector) Cmp(ctx context.Context, task sealtasks.TaskType, a, b *workerHandle) (bool, error) { - return a.info.Hostname > b.info.Hostname, nil // TODO: Better strategy + return a.active.utilization(a.info.Resources) < b.active.utilization(b.info.Resources), nil } var _ WorkerSelector = &allocSelector{} diff --git a/selector_existing.go b/selector_existing.go index eccdefbf2..46dd3278e 100644 --- a/selector_existing.go +++ b/selector_existing.go @@ -54,7 +54,7 @@ func (s *existingSelector) Ok(ctx context.Context, task sealtasks.TaskType, whnd } func (s *existingSelector) Cmp(ctx context.Context, task sealtasks.TaskType, a, b *workerHandle) (bool, error) { - return a.info.Hostname > b.info.Hostname, nil // TODO: Better strategy + return a.active.utilization(a.info.Resources) < b.active.utilization(b.info.Resources), nil } var _ WorkerSelector = &existingSelector{} diff --git a/selector_task.go b/selector_task.go index 5e67ad665..2f20f9a28 100644 --- a/selector_task.go +++ b/selector_task.go @@ -40,7 +40,7 @@ func (s *taskSelector) Cmp(ctx context.Context, _ sealtasks.TaskType, a, b *work return len(atasks) < len(btasks), nil // prefer workers which can do less } - return a.info.Hostname > b.info.Hostname, nil // TODO: Better fallback strategy + return a.active.utilization(a.info.Resources) < b.active.utilization(b.info.Resources), nil } var _ WorkerSelector = &allocSelector{} From dd237159424ca83ed7335fbec19a3057bc9c7adb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Wed, 29 Apr 2020 16:56:20 +0200 Subject: [PATCH 060/199] sched: add missing check for non-multicore thread availability --- sched.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/sched.go b/sched.go index ad3c948dd..99ff6ded2 100644 --- a/sched.go +++ b/sched.go @@ -373,6 +373,11 @@ func canHandleRequest(needRes Resources, spt abi.RegisteredProof, wid WorkerID, log.Debugf("sched: not scheduling on worker %d; multicore process needs %d threads, %d in use, target %d", wid, res.CPUs, active.cpuUse, res.CPUs) return false } + } else { + if active.cpuUse + uint64(needRes.Threads) > res.CPUs { + log.Debugf("sched: not scheduling on worker %d; not enough threads, need %d, %d in use, target %d", wid, needRes.Threads, active.cpuUse, res.CPUs) + return false + } } if len(res.GPUs) > 0 && needRes.CanGPU { From 7f1c9c89e735da5135aa27b56545d661b0299d41 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Wed, 29 Apr 2020 17:58:55 +0200 Subject: [PATCH 061/199] remote store: Handle parallel fetches --- stores/remote.go | 35 ++++++++++++++++++++++++++++++----- 1 file changed, 30 insertions(+), 5 deletions(-) diff --git a/stores/remote.go b/stores/remote.go index ed7ffdaeb..863bee083 100644 --- a/stores/remote.go +++ b/stores/remote.go @@ -27,9 +27,8 @@ type Remote struct { index SectorIndex auth http.Header - fetchLk sync.Mutex // TODO: this can be much smarter - // TODO: allow multiple parallel fetches - // (make sure to not fetch the same sector data twice) + fetchLk sync.Mutex + fetching map[abi.SectorID]chan struct{} } func NewRemote(local *Local, index SectorIndex, auth http.Header) *Remote { @@ -37,6 +36,8 @@ func NewRemote(local *Local, index SectorIndex, auth http.Header) *Remote { local: local, index: index, auth: auth, + + fetching: map[abi.SectorID]chan struct{}{}, } } @@ -45,8 +46,32 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, existing Sec return SectorPaths{}, SectorPaths{}, nil, xerrors.New("can't both find and allocate a sector") } - r.fetchLk.Lock() - defer r.fetchLk.Unlock() + for { + r.fetchLk.Lock() + + c, locked := r.fetching[s] + if !locked { + r.fetching[s] = make(chan struct{}) + r.fetchLk.Unlock() + break + } + + r.fetchLk.Unlock() + + select { + case <-c: + continue + case <-ctx.Done(): + return SectorPaths{}, SectorPaths{}, nil, ctx.Err() + } + } + + defer func() { + r.fetchLk.Lock() + close(r.fetching[s]) + delete(r.fetching, s) + r.fetchLk.Unlock() + }() paths, stores, done, err := r.local.AcquireSector(ctx, s, existing, allocate, sealing) if err != nil { From 1da6937a0fa16d91e6d220359369ab8154a83385 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 1 May 2020 17:29:27 +0200 Subject: [PATCH 062/199] sched: Try scheduling on newly connected workers --- sched.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/sched.go b/sched.go index 99ff6ded2..aaf8b9e26 100644 --- a/sched.go +++ b/sched.go @@ -131,7 +131,9 @@ func (sh *scheduler) runSched() { for { select { case w := <-sh.newWorkers: - sh.schedNewWorker(w) + wid := sh.schedNewWorker(w) + + sh.onWorkerFreed(wid) case req := <-sh.schedule: scheduled, err := sh.maybeSchedRequest(req) if err != nil { @@ -409,13 +411,15 @@ func (a *activeResources) utilization(wr storiface.WorkerResources) float64 { return max } -func (sh *scheduler) schedNewWorker(w *workerHandle) { +func (sh *scheduler) schedNewWorker(w *workerHandle) WorkerID { sh.workersLk.Lock() defer sh.workersLk.Unlock() id := sh.nextWorker sh.workers[id] = w sh.nextWorker++ + + return id } func (sh *scheduler) schedClose() { From f0569bd3c4f80cf6fe5927080b903b6763105c8a Mon Sep 17 00:00:00 2001 From: laser Date: Fri, 1 May 2020 09:18:00 -0700 Subject: [PATCH 063/199] remove lotus from dependency graph --- ffiwrapper/sealer_test.go | 11 +- go.mod | 8 +- go.sum | 477 -------------------------------------- 3 files changed, 13 insertions(+), 483 deletions(-) diff --git a/ffiwrapper/sealer_test.go b/ffiwrapper/sealer_test.go index f16e5e2d9..0f4918862 100644 --- a/ffiwrapper/sealer_test.go +++ b/ffiwrapper/sealer_test.go @@ -16,11 +16,9 @@ import ( "golang.org/x/xerrors" paramfetch "github.com/filecoin-project/go-paramfetch" + "github.com/filecoin-project/sector-storage/ffiwrapper/basicfs" "github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-storage/storage" - - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/sector-storage/ffiwrapper/basicfs" ) func init() { @@ -151,9 +149,12 @@ func post(t *testing.T, sealer *Sealer, seals ...seal) time.Time { } func getGrothParamFileAndVerifyingKeys(s abi.SectorSize) { - dat := build.ParametersJson() + dat, err := ioutil.ReadFile("../parameters.json") + if err != nil { + panic(err) + } - err := paramfetch.GetParams(dat, uint64(s)) + err = paramfetch.GetParams(dat, uint64(s)) if err != nil { panic(xerrors.Errorf("failed to acquire Groth parameters for 2KiB sectors: %w", err)) } diff --git a/go.mod b/go.mod index 8caa9899c..80fa7f46c 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,6 @@ require ( github.com/filecoin-project/filecoin-ffi v0.0.0-20200326153646-e899cc1dd072 github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5 github.com/filecoin-project/go-paramfetch v0.0.1 - github.com/filecoin-project/lotus v0.2.10 github.com/filecoin-project/specs-actors v0.3.0 github.com/filecoin-project/specs-storage v0.0.0-20200417134612-61b2d91a6102 github.com/gorilla/mux v1.7.4 @@ -16,8 +15,15 @@ require ( github.com/ipfs/go-ipfs-files v0.0.7 github.com/ipfs/go-log v1.0.3 github.com/ipfs/go-log/v2 v2.0.3 + github.com/mattn/go-isatty v0.0.9 // indirect github.com/mitchellh/go-homedir v1.1.0 go.opencensus.io v0.22.3 + go.uber.org/atomic v1.5.1 // indirect + go.uber.org/zap v1.13.0 // indirect + golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 // indirect + golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f // indirect + golang.org/x/sys v0.0.0-20200107162124-548cf772de50 // indirect + golang.org/x/tools v0.0.0-20200108195415-316d2f248479 // indirect golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 ) diff --git a/go.sum b/go.sum index 261afc91e..de87462da 100644 --- a/go.sum +++ b/go.sum @@ -1,112 +1,43 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -contrib.go.opencensus.io/exporter/jaeger v0.1.0/go.mod h1:VYianECmuFPwU37O699Vc1GOcy+y8kOsfaxHRImmjbA= -contrib.go.opencensus.io/exporter/prometheus v0.1.0/go.mod h1:cGFniUXGZlKRjzOyuZJ6mgB+PgBcCIa79kEKR8YCW+A= -github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= -github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= -github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0= -github.com/GeertJohan/go.rice v1.0.0 h1:KkI6O9uMaQU3VEKaj01ulavtF7o1fWT7+pk/4voiMLQ= -github.com/GeertJohan/go.rice v1.0.0/go.mod h1:eH6gbSOAUv07dQuZVnBmoDP8mgsM1rtixis4Tib9if0= -github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee/go.mod h1:W0GbEAA4uFNYOGG2cJpmFJ04E6SD1NLELPYZB57/7AY= -github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk= -github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= -github.com/Stebalien/go-bitfield v0.0.1/go.mod h1:GNjFpasyUVkHMsfEOk8EFLJ9syQ6SI+XWrX9Wf2XH0s= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= -github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= -github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= -github.com/btcsuite/btcd v0.0.0-20190605094302-a0d1e3e36d50/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= -github.com/btcsuite/btcd v0.0.0-20190629003639-c26ffa870fd8/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= -github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= -github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.2.1-0.20180108230905-e214231b295a/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 h1:HVTnpeuvF6Owjd5mniCL8DEXo7uYXdQEmOP4FJbV5tg= github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= -github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= -github.com/daaku/go.zipexe v1.0.0 h1:VSOgZtH418pH9L16hC/JrgSNJbbAL26pj7lmD1+CGdY= -github.com/daaku/go.zipexe v1.0.0/go.mod h1:z8IiR6TsVLEYKwXAoE/I+8ys/sDkgTzSL0CLnGVd57E= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= -github.com/dgraph-io/badger v1.6.0-rc1/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= -github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= -github.com/dgraph-io/badger/v2 v2.0.1-rc1.0.20200120142413-c3333a5a830e/go.mod h1:3KY8+bsP8wI0OEnQJAKpd4wIJW/Mm32yw2j/9FUVnIM= -github.com/dgraph-io/ristretto v0.0.2-0.20200115201040-8f368f2f2ab3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= -github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/elastic/go-sysinfo v1.3.0 h1:eb2XFGTMlSwG/yyU9Y8jVAYLIzU2sFzWXwo2gmetyrE= github.com/elastic/go-sysinfo v1.3.0/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= github.com/elastic/go-windows v1.0.0 h1:qLURgZFkkrYyTTkvYpsZIgf83AUsdIHfvlJaqaZ7aSY= github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.8.0 h1:5bzFgL+oy7JITMTxUPJ00n7VxmYd/PdMp5mHFX40/RY= github.com/fatih/color v1.8.0/go.mod h1:3l45GVGkyrnYNl9HoIjnp2NnNWvh6hLAqD8yTfGjnw8= -github.com/fd/go-nat v1.0.0/go.mod h1:BTBu/CKvMmOMUPkKVef1pngt2WFH/lg7E6yQnulfp6E= -github.com/filecoin-project/chain-validation v0.0.3/go.mod h1:NCEGFjcWRjb8akWFSOXvU6n2efkWIqAeOKU6o5WBGQw= -github.com/filecoin-project/go-address v0.0.0-20191219011437-af739c490b4f/go.mod h1:rCbpXPva2NKF9/J4X6sr7hbKBgQCxyFtRj7KOZqoIms= -github.com/filecoin-project/go-address v0.0.0-20200107215422-da8eea2842b5/go.mod h1:SAOwJoakQ8EPjwNIsiakIQKsoKdkcbx8U3IapgCg9R0= github.com/filecoin-project/go-address v0.0.2-0.20200218010043-eb9bb40ed5be h1:TooKBwR/g8jG0hZ3lqe9S5sy2vTUcLOZLlz3M5wGn2E= github.com/filecoin-project/go-address v0.0.2-0.20200218010043-eb9bb40ed5be/go.mod h1:SAOwJoakQ8EPjwNIsiakIQKsoKdkcbx8U3IapgCg9R0= -github.com/filecoin-project/go-amt-ipld v0.0.0-20191205011053-79efc22d6cdc/go.mod h1:KsFPWjF+UUYl6n9A+qbg4bjFgAOneicFZtDH/LQEX2U= -github.com/filecoin-project/go-amt-ipld/v2 v2.0.0/go.mod h1:PAZ5tvSfMfWE327osqFXKm7cBpCpBk2Nh0qKsJUmjjk= github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200131012142-05d80eeccc5e/go.mod h1:boRtQhzmxNocrMxOXo1NYn4oUc1NGvR8tEa79wApNXg= github.com/filecoin-project/go-bitfield v0.0.0-20200309034705-8c7ac40bd550 h1:aockulLU8Qjkdj4FQz53WQpNosAIYk8DxRediRLkE5c= github.com/filecoin-project/go-bitfield v0.0.0-20200309034705-8c7ac40bd550/go.mod h1:iodsLxOFZnqKtjj2zkgqzoGNrv6vUqj69AT/J8DKXEw= github.com/filecoin-project/go-bitfield v0.0.0-20200416002808-b3ee67ec9060 h1:/3qjGMn6ukXgZJHsIbuwGL7ipla8DOV3uHZDBJkBYfU= github.com/filecoin-project/go-bitfield v0.0.0-20200416002808-b3ee67ec9060/go.mod h1:iodsLxOFZnqKtjj2zkgqzoGNrv6vUqj69AT/J8DKXEw= -github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= -github.com/filecoin-project/go-data-transfer v0.0.0-20191219005021-4accf56bd2ce/go.mod h1:b14UWxhxVCAjrQUYvVGrQRRsjAh79wXYejw9RbUcAww= github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5 h1:yvQJCW9mmi9zy+51xA01Ea2X7/dL7r8eKDPuGUjRmbo= github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5/go.mod h1:JbkIgFF/Z9BDlvrJO1FuKkaWsH673/UdFaiVS6uIHlA= -github.com/filecoin-project/go-fil-markets v0.0.0-20200114015428-74d100f305f8/go.mod h1:c8NTjvFVy1Ud02mmGDjOiMeawY2t6ALfrrdvAB01FQc= -github.com/filecoin-project/go-paramfetch v0.0.0-20200102181131-b20d579f2878/go.mod h1:40kI2Gv16mwcRsHptI3OAV4nlOEU7wVDc4RgMylNFjU= github.com/filecoin-project/go-paramfetch v0.0.1 h1:gV7bs5YaqlgpGFMiLxInGK2L1FyCXUE0rimz4L7ghoE= github.com/filecoin-project/go-paramfetch v0.0.1/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= -github.com/filecoin-project/go-sectorbuilder v0.0.1/go.mod h1:3OZ4E3B2OuwhJjtxR4r7hPU9bCfB+A+hm4alLEsaeDc= -github.com/filecoin-project/go-sectorbuilder v0.0.2-0.20200203173614-42d67726bb62/go.mod h1:jNGVCDihkMFnraYVLH1xl4ceZQVxx/u4dOORrTKeRi0= -github.com/filecoin-project/go-statestore v0.1.0/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI= -github.com/filecoin-project/lotus v0.2.10 h1:ijrj/nYdKu5GiMo9r1+Zcp2A4jKHSOMZ2WNy2K/mtOE= -github.com/filecoin-project/lotus v0.2.10/go.mod h1:om5PQA9ZT0lf16qI7Fz/ZGLn4LDCMqPC8ntZA9uncRE= github.com/filecoin-project/specs-actors v0.0.0-20200409043918-e569f4a2f504 h1:mwuAaqxKThl70+7FkGdFKVLdwaQZQ8XmscKdhSBBtnc= github.com/filecoin-project/specs-actors v0.0.0-20200409043918-e569f4a2f504/go.mod h1:mdJraXq5vMy0+/FqVQIrnNlpQ/Em6zeu06G/ltQ0/lA= github.com/filecoin-project/specs-actors v0.3.0 h1:QxgAuTrZr5TPqjyprZk0nTYW5o0JWpzbb5v+4UHHvN0= @@ -114,129 +45,44 @@ github.com/filecoin-project/specs-actors v0.3.0/go.mod h1:nQYnFbQ7Y0bHZyq6HDEuVl github.com/filecoin-project/specs-storage v0.0.0-20200417134612-61b2d91a6102 h1:T3f/zkuvgtgqcXrb0NO3BicuveGOxxUAMPa/Yif2kuE= github.com/filecoin-project/specs-storage v0.0.0-20200417134612-61b2d91a6102/go.mod h1:xJ1/xl9+8zZeSSSFmDC3Wr6uusCTxyYPI0VeNVSFmPE= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1/go.mod h1:0eHX/BVySxPc6SE2mZRoppGq7qcEagxdmQnA3dzork8= -github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI= -github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f h1:KMlcu9X58lhTA/KrfX8Bi1LQSO4pzoVjTiL3h4Jk+Zk= github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc= github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= -github.com/gxed/pubsub v0.0.0-20180201040156-26ebdf44f824/go.mod h1:OiEWyHgK+CWrmOlVquHaIK1vhpUJydC9m0Je6mhaiNE= -github.com/hannahhoward/cbor-gen-for v0.0.0-20191216214420-3e450425c40c/go.mod h1:WVPCl0HO/0RAL5+vBH2GMxBomlxBF70MAS78+Lu1//k= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huin/goupnp v0.0.0-20180415215157-1395d1447324/go.mod h1:MZ2ZmwcBpvOoJ22IJsc7va19ZwoheaBk43rKg12SKag= -github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= -github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/influxdata/influxdb1-client v0.0.0-20190809212627-fc22c7df067e/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/ipfs/bbloom v0.0.1/go.mod h1:oqo8CVWsJFMOZqTglBG4wydCE4IQA/G2/SEofB0rjUI= -github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= -github.com/ipfs/go-bitswap v0.0.9/go.mod h1:kAPf5qgn2W2DrgAcscZ3HrM9qh4pH+X8Fkk3UPrwvis= -github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0= -github.com/ipfs/go-bitswap v0.1.2/go.mod h1:qxSWS4NXGs7jQ6zQvoPY3+NmOfHHG47mhkiLzBpJQIs= -github.com/ipfs/go-bitswap v0.1.3/go.mod h1:YEQlFy0kkxops5Vy+OxWdRSEZIoS7I7KDIwoa5Chkps= -github.com/ipfs/go-bitswap v0.1.8/go.mod h1:TOWoxllhccevbWFUR2N7B1MTSVVge1s6XSMiCSA4MzM= -github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc= github.com/ipfs/go-block-format v0.0.2 h1:qPDvcP19izTjU8rgo6p7gTXZlkMkF5bz5G3fqIsSCPE= github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= -github.com/ipfs/go-blockservice v0.0.7/go.mod h1:EOfb9k/Y878ZTRY/CH0x5+ATtaipfbRhbvNSdgc/7So= -github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M= -github.com/ipfs/go-blockservice v0.1.3-0.20190908200855-f22eea50656c/go.mod h1:t+411r7psEUhLueM8C7aPA7cxCclv4O3VsUVxt9kz2I= -github.com/ipfs/go-car v0.0.3-0.20191203022317-23b0a85fd1b1/go.mod h1:rmd887mJxQRDfndfDEY3Liyx8gQVyfFFRSHdsnDSAlk= -github.com/ipfs/go-car v0.0.3-0.20200121013634-f188c0e24291/go.mod h1:AG6sBpd2PWMccpAG7XLFBBQ/4rfBEtzUNeO2GSMesYk= github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= -github.com/ipfs/go-cid v0.0.4-0.20191112011718-79e75dffeb10/go.mod h1:/BYOuUoxkE+0f6tGzlzMvycuN+5l35VOR4Bpg2sCmds= github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M= github.com/ipfs/go-cid v0.0.5 h1:o0Ix8e/ql7Zb5UVUJEUfjsWCIY8t48++9lR8qi6oiJU= github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= -github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= -github.com/ipfs/go-datastore v0.0.5/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= -github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= -github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= -github.com/ipfs/go-datastore v0.3.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= -github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= -github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8= -github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaHzfGTzuE3s= -github.com/ipfs/go-ds-badger v0.0.7/go.mod h1:qt0/fWzZDoPW6jpQeqUjR5kBfhDNB65jd9YlmAvpQBk= -github.com/ipfs/go-ds-badger2 v0.0.0-20200123200730-d75eb2678a5d/go.mod h1:sTQFaWUoW0OvhXzfHnQ9j39L6fdlqDkptDYcpC1XrYE= -github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= -github.com/ipfs/go-ds-leveldb v0.1.0/go.mod h1:hqAW8y4bwX5LWcCtku2rFNX3vjDZCy5LZCg+cSZvYb8= -github.com/ipfs/go-filestore v0.0.2/go.mod h1:KnZ41qJsCt2OX2mxZS0xsK3Psr0/oB93HMMssLujjVc= -github.com/ipfs/go-fs-lock v0.0.1/go.mod h1:DNBekbboPKcxs1aukPSaOtFA3QfSdi5C855v0i9XJ8Y= -github.com/ipfs/go-graphsync v0.0.4/go.mod h1:6UACBjfOXEa8rQL3Q/JpZpWS0nZDCLx134WUkjrmFpQ= -github.com/ipfs/go-hamt-ipld v0.0.14-0.20191218031521-b2c774a54db1/go.mod h1:8yRx0xLUps1Xq8ZDnIwIVdQRp7JjA55gGvCiRHT91Vk= github.com/ipfs/go-hamt-ipld v0.0.15-0.20200131012125-dd88a59d3f2e/go.mod h1:9aQJu/i/TaRDW6jqB5U217dLIDopn50wxLdHXM2CTfE= -github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= -github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= -github.com/ipfs/go-ipfs-blockstore v0.1.1/go.mod h1:8gZOgIN5e+Xdg2YSGdwTTRbguSVjYyosIDRQCY8E9QM= -github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= -github.com/ipfs/go-ipfs-chunker v0.0.1/go.mod h1:tWewYK0we3+rMbOh7pPFGDyypCtvGcBFymgY4rSDLAw= -github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= -github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= -github.com/ipfs/go-ipfs-ds-help v0.0.1/go.mod h1:gtP9xRaZXqIQRh1HRpp595KbBEdgqWFxefeVKOV8sxo= -github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod h1:c8MwfHjtQjPoDyiy9cFquVtVHkO9b9Ob3FG91qJnWCM= -github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0= -github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= -github.com/ipfs/go-ipfs-files v0.0.4/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= github.com/ipfs/go-ipfs-files v0.0.7 h1:s5BRD12ndahqYifeH1S8Z73zqZhR+3IdKYAG9PiETs0= github.com/ipfs/go-ipfs-files v0.0.7/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs= -github.com/ipfs/go-ipfs-flags v0.0.1/go.mod h1:RnXBb9WV53GSfTrSDVK61NLTFKvWc60n+K9EgCDh+rA= -github.com/ipfs/go-ipfs-posinfo v0.0.1/go.mod h1:SwyeVP+jCwiDu0C313l/8jg6ZxM0qqtlt2a0vILTc1A= -github.com/ipfs/go-ipfs-pq v0.0.1/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= -github.com/ipfs/go-ipfs-routing v0.0.1/go.mod h1:k76lf20iKFxQTjcJokbPM9iBXVXVZhcOwc360N4nuKs= -github.com/ipfs/go-ipfs-routing v0.1.0/go.mod h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY= github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50= github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= -github.com/ipfs/go-ipld-cbor v0.0.2/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= github.com/ipfs/go-ipld-cbor v0.0.4 h1:Aw3KPOKXjvrm6VjwJvFf1F1ekR/BH3jdof3Bk7OTiSA= github.com/ipfs/go-ipld-cbor v0.0.4/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= @@ -246,38 +92,13 @@ github.com/ipfs/go-ipld-format v0.0.2 h1:OVAGlyYT6JPZ0pEfGntFPS40lfrDmaDbQwNHEY2 github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= github.com/ipfs/go-log v1.0.0/go.mod h1:JO7RzlMK6rA+CIxFMLOuB6Wf5b81GDiKElL7UPSIKjA= -github.com/ipfs/go-log v1.0.1/go.mod h1:HuWlQttfN6FWNHRhlY5yMk/lW7evQC0HHGOxEwMRR8I= github.com/ipfs/go-log v1.0.3 h1:Gg7SUYSZ7BrqaKMwM+hRgcAkKv4QLfzP4XPQt5Sx/OI= github.com/ipfs/go-log v1.0.3/go.mod h1:OsLySYkwIbiSUR/yBTdv1qPtcE4FW3WPWk/ewz9Ru+A= -github.com/ipfs/go-log/v2 v2.0.1/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= -github.com/ipfs/go-log/v2 v2.0.2/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= github.com/ipfs/go-log/v2 v2.0.3 h1:Q2gXcBoCALyLN/pUQlz1qgu0x3uFV6FzP9oXhpfyJpc= github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= -github.com/ipfs/go-merkledag v0.0.6/go.mod h1:QYPdnlvkOg7GnQRofu9XZimC5ZW5Wi3bKys/4GQQfto= -github.com/ipfs/go-merkledag v0.1.0/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= -github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= -github.com/ipfs/go-merkledag v0.2.4/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= -github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= -github.com/ipfs/go-path v0.0.7/go.mod h1:6KTKmeRnBXgqrTvzFrPV3CamxcgvXX/4z79tfAd2Sno= -github.com/ipfs/go-peertaskqueue v0.0.4/go.mod h1:03H8fhyeMfKNFWqzYEVyMbcPUeYrqP1MX6Kd+aN+rMQ= -github.com/ipfs/go-peertaskqueue v0.1.0/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= -github.com/ipfs/go-peertaskqueue v0.1.1/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= -github.com/ipfs/go-todocounter v0.0.1/go.mod h1:l5aErvQc8qKE2r7NDMjmq5UNAvuZy0rC8BHOplkWvZ4= -github.com/ipfs/go-unixfs v0.2.2-0.20190827150610-868af2e9e5cb/go.mod h1:IwAAgul1UQIcNZzKPYZWOCijryFBeCV79cNubPzol+k= -github.com/ipfs/go-verifcid v0.0.1/go.mod h1:5Hrva5KBeIog4A+UpqlaIU+DEstipcJYQQZc0g37pY0= -github.com/ipld/go-ipld-prime v0.0.1/go.mod h1:bDDSvVz7vaK12FNvMeRYnpRFkSUPNQOiCYQezMD/P3w= -github.com/ipld/go-ipld-prime v0.0.2-0.20191108012745-28a82f04c785/go.mod h1:bDDSvVz7vaK12FNvMeRYnpRFkSUPNQOiCYQezMD/P3w= -github.com/ipld/go-ipld-prime-proto v0.0.0-20191113031812-e32bd156a1e5/go.mod h1:gcvzoEDBjwycpXt3LBE061wT9f46szXGHAmj9uoP6fU= github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 h1:QG4CGBqCeuBo6aZlGAamSkxWdgWfZGeE49eUOWJPA4c= github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52/go.mod h1:fdg+/X9Gg4AsAIzWpEHwnqd+QY3b7lajxyjE1m4hkq4= -github.com/jackpal/gateway v1.0.4/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= -github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= -github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= -github.com/jbenet/go-cienv v0.0.0-20150120210510-1bb1476777ec/go.mod h1:rGaEvXB4uRSZMmzKNLoXvTu1sfx+1kv/DojUlPrSZGs= github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= -github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c/go.mod h1:sdx1xVM9UuLw1tXnhJWN3piypTUO3vCIHYmG15KE/dU= -github.com/jbenet/go-temp-err-catcher v0.0.0-20150120210811-aac704a3f4f2/go.mod h1:8GXXJV31xl8whumTzdZsTt3RnUIiPqzkyf7mxToRCMs= -github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY= github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -288,171 +109,25 @@ github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpR github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= -github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ= -github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= -github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= -github.com/libp2p/go-conn-security v0.0.1/go.mod h1:bGmu51N0KU9IEjX7kl2PQjgZa40JQWnayTvNMgD/vyk= -github.com/libp2p/go-conn-security-multistream v0.0.2/go.mod h1:nc9vud7inQ+d6SO0I/6dSWrdMnHnzZNHeyUQqrAJulE= -github.com/libp2p/go-conn-security-multistream v0.1.0/go.mod h1:aw6eD7LOsHEX7+2hJkDxw1MteijaVcI+/eP2/x3J1xc= -github.com/libp2p/go-eventbus v0.0.2/go.mod h1:Hr/yGlwxA/stuLnpMiu82lpNKpvRy3EaJxPu40XYOwk= -github.com/libp2p/go-eventbus v0.0.3/go.mod h1:Hr/yGlwxA/stuLnpMiu82lpNKpvRy3EaJxPu40XYOwk= -github.com/libp2p/go-eventbus v0.1.0/go.mod h1:vROgu5cs5T7cv7POWlWxBaVLxfSegC5UGQf8A2eEmx4= -github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZxBdp967ls1g+k8= github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= -github.com/libp2p/go-libp2p v0.0.30/go.mod h1:XWT8FGHlhptAv1+3V/+J5mEpzyui/5bvFsNuWYs611A= -github.com/libp2p/go-libp2p v0.1.0/go.mod h1:6D/2OBauqLUoqcADOJpn9WbKqvaM07tDw68qHM0BxUM= -github.com/libp2p/go-libp2p v0.1.1/go.mod h1:I00BRo1UuUSdpuc8Q2mN7yDF/oTUTRAX6JWpTiK9Rp8= -github.com/libp2p/go-libp2p v0.2.1/go.mod h1:HZbtEOrgZN4F1fGZVvkV+930Wx3DkqlpBlO8dIoZWds= -github.com/libp2p/go-libp2p v0.3.0/go.mod h1:J7DPB1+zB5VLc8v/kKSD8+u2cbyIGI0Dh/Pf3Wprt+0= -github.com/libp2p/go-libp2p v0.4.2/go.mod h1:MNmgUxUw5pMsdOzMlT0EE7oKjRasl+WyVwM0IBlpKgQ= -github.com/libp2p/go-libp2p-autonat v0.0.6/go.mod h1:uZneLdOkZHro35xIhpbtTzLlgYturpu4J5+0cZK3MqE= -github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= -github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/djNA3fdpCWloIudE= -github.com/libp2p/go-libp2p-blankhost v0.0.1/go.mod h1:Ibpbw/7cPPYwFb7PACIWdvxxv0t0XCCI10t7czjAjTc= -github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro= -github.com/libp2p/go-libp2p-blankhost v0.1.3/go.mod h1:KML1//wiKR8vuuJO0y3LUd1uLv+tlkGTAr3jC0S5cLg= -github.com/libp2p/go-libp2p-blankhost v0.1.4/go.mod h1:oJF0saYsAXQCSfDq254GMNmLNz6ZTHTOvtF4ZydUvwU= -github.com/libp2p/go-libp2p-circuit v0.0.9/go.mod h1:uU+IBvEQzCu953/ps7bYzC/D/R0Ho2A9LfKVVCatlqU= -github.com/libp2p/go-libp2p-circuit v0.1.0/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= -github.com/libp2p/go-libp2p-circuit v0.1.1/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= -github.com/libp2p/go-libp2p-circuit v0.1.4/go.mod h1:CY67BrEjKNDhdTk8UgBX1Y/H5c3xkAcs3gnksxY7osU= -github.com/libp2p/go-libp2p-connmgr v0.1.0/go.mod h1:wZxh8veAmU5qdrfJ0ZBLcU8oJe9L82ciVP/fl1VHjXk= -github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco= -github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco= -github.com/libp2p/go-libp2p-core v0.0.3/go.mod h1:j+YQMNz9WNSkNezXOsahp9kwZBKBvxLpKD316QWSJXE= -github.com/libp2p/go-libp2p-core v0.0.4/go.mod h1:jyuCQP356gzfCFtRKyvAbNkyeuxb7OlyhWZ3nls5d2I= -github.com/libp2p/go-libp2p-core v0.0.6/go.mod h1:0d9xmaYAVY5qmbp/fcgxHT3ZJsLjYeYPMJAUKpaCHrE= -github.com/libp2p/go-libp2p-core v0.0.9/go.mod h1:0d9xmaYAVY5qmbp/fcgxHT3ZJsLjYeYPMJAUKpaCHrE= -github.com/libp2p/go-libp2p-core v0.2.0/go.mod h1:X0eyB0Gy93v0DZtSYbEM7RnMChm9Uv3j7yRXjO77xSI= -github.com/libp2p/go-libp2p-core v0.2.2/go.mod h1:8fcwTbsG2B+lTgRJ1ICZtiM5GWCWZVoVrLaDRvIRng0= -github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g= github.com/libp2p/go-libp2p-core v0.3.0 h1:F7PqduvrztDtFsAa/bcheQ3azmNo+Nq7m8hQY5GiUW8= github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw= -github.com/libp2p/go-libp2p-crypto v0.0.1/go.mod h1:yJkNyDmO341d5wwXxDUGO0LykUVT72ImHNUqh5D/dBE= -github.com/libp2p/go-libp2p-crypto v0.0.2/go.mod h1:eETI5OUfBnvARGOHrJz2eWNyTUxEGZnBxMcbUjfIj4I= -github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI= -github.com/libp2p/go-libp2p-discovery v0.0.5/go.mod h1:YtF20GUxjgoKZ4zmXj8j3Nb2TUSBHFlOCetzYdbZL5I= -github.com/libp2p/go-libp2p-discovery v0.1.0/go.mod h1:4F/x+aldVHjHDHuX85x1zWoFTGElt8HnoDzwkFZm29g= -github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfxg97AEdo4GYBt6BadWg= -github.com/libp2p/go-libp2p-host v0.0.1/go.mod h1:qWd+H1yuU0m5CwzAkvbSjqKairayEHdR5MMl7Cwa7Go= -github.com/libp2p/go-libp2p-host v0.0.3/go.mod h1:Y/qPyA6C8j2coYyos1dfRm0I8+nvd4TGrDGt4tA7JR8= -github.com/libp2p/go-libp2p-interface-connmgr v0.0.1/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= -github.com/libp2p/go-libp2p-interface-connmgr v0.0.4/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= -github.com/libp2p/go-libp2p-interface-connmgr v0.0.5/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= -github.com/libp2p/go-libp2p-interface-pnet v0.0.1/go.mod h1:el9jHpQAXK5dnTpKA4yfCNBZXvrzdOU75zz+C6ryp3k= -github.com/libp2p/go-libp2p-kad-dht v0.1.1/go.mod h1:1kj2Rk5pX3/0RwqMm9AMNCT7DzcMHYhgDN5VTi+cY0M= -github.com/libp2p/go-libp2p-kbucket v0.2.0/go.mod h1:JNymBToym3QXKBMKGy3m29+xprg0EVr/GJFHxFEdgh8= -github.com/libp2p/go-libp2p-loggables v0.0.1/go.mod h1:lDipDlBNYbpyqyPX/KcoO+eq0sJYEVR2JgOexcivchg= -github.com/libp2p/go-libp2p-loggables v0.1.0/go.mod h1:EyumB2Y6PrYjr55Q3/tiJ/o3xoDasoRYM7nOzEpoa90= -github.com/libp2p/go-libp2p-metrics v0.0.1/go.mod h1:jQJ95SXXA/K1VZi13h52WZMa9ja78zjyy5rspMsC/08= -github.com/libp2p/go-libp2p-mplex v0.1.1/go.mod h1:KUQWpGkCzfV7UIpi8SKsAVxyBgz1c9R5EvxgnwLsb/I= -github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3g+OtR+EMMODbKo= -github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE= -github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY= -github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE= -github.com/libp2p/go-libp2p-net v0.0.1/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8EgNU9DrCcR8c= -github.com/libp2p/go-libp2p-net v0.0.2/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8EgNU9DrCcR8c= -github.com/libp2p/go-libp2p-netutil v0.0.1/go.mod h1:GdusFvujWZI9Vt0X5BKqwWWmZFxecf9Gt03cKxm2f/Q= -github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= -github.com/libp2p/go-libp2p-peer v0.0.1/go.mod h1:nXQvOBbwVqoP+T5Y5nCjeH4sP9IX/J0AMzcDUVruVoo= -github.com/libp2p/go-libp2p-peer v0.1.1/go.mod h1:jkF12jGB4Gk/IOo+yomm+7oLWxF278F7UnrYUQ1Q8es= -github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY= -github.com/libp2p/go-libp2p-peerstore v0.0.1/go.mod h1:RabLyPVJLuNQ+GFyoEkfi8H4Ti6k/HtZJ7YKgtSq+20= -github.com/libp2p/go-libp2p-peerstore v0.0.6/go.mod h1:RabLyPVJLuNQ+GFyoEkfi8H4Ti6k/HtZJ7YKgtSq+20= -github.com/libp2p/go-libp2p-peerstore v0.1.0/go.mod h1:2CeHkQsr8svp4fZ+Oi9ykN1HBb6u0MOvdJ7YIsmcwtY= -github.com/libp2p/go-libp2p-peerstore v0.1.2/go.mod h1:BJ9sHlm59/80oSkpWgr1MyY1ciXAXV397W6h1GH/uKI= -github.com/libp2p/go-libp2p-peerstore v0.1.3/go.mod h1:BJ9sHlm59/80oSkpWgr1MyY1ciXAXV397W6h1GH/uKI= -github.com/libp2p/go-libp2p-peerstore v0.1.4/go.mod h1:+4BDbDiiKf4PzpANZDAT+knVdLxvqh7hXOujessqdzs= -github.com/libp2p/go-libp2p-protocol v0.0.1/go.mod h1:Af9n4PiruirSDjHycM1QuiMi/1VZNHYcK8cLgFJLZ4s= -github.com/libp2p/go-libp2p-protocol v0.1.0/go.mod h1:KQPHpAabB57XQxGrXCNvbL6UEXfQqUgC/1adR2Xtflk= -github.com/libp2p/go-libp2p-pubsub v0.2.6/go.mod h1:5jEp7R3ItQ0pgcEMrPZYE9DQTg/H3CTc7Mu1j2G4Y5o= -github.com/libp2p/go-libp2p-quic-transport v0.1.1/go.mod h1:wqG/jzhF3Pu2NrhJEvE+IE0NTHNXslOPn9JQzyCAxzU= -github.com/libp2p/go-libp2p-record v0.0.1/go.mod h1:grzqg263Rug/sRex85QrDOLntdFAymLDLm7lxMgU79Q= -github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= -github.com/libp2p/go-libp2p-record v0.1.1/go.mod h1:VRgKajOyMVgP/F0L5g3kH7SVskp17vFi2xheb5uMJtg= -github.com/libp2p/go-libp2p-routing v0.0.1/go.mod h1:N51q3yTr4Zdr7V8Jt2JIktVU+3xBBylx1MZeVA6t1Ys= -github.com/libp2p/go-libp2p-routing v0.1.0/go.mod h1:zfLhI1RI8RLEzmEaaPwzonRvXeeSHddONWkcTcB54nE= -github.com/libp2p/go-libp2p-routing-helpers v0.1.0/go.mod h1:oUs0h39vNwYtYXnQWOTU5BaafbedSyWCCal3gqHuoOQ= -github.com/libp2p/go-libp2p-secio v0.0.3/go.mod h1:hS7HQ00MgLhRO/Wyu1bTX6ctJKhVpm+j2/S2A5UqYb0= -github.com/libp2p/go-libp2p-secio v0.1.0/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8= -github.com/libp2p/go-libp2p-secio v0.1.1/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8= -github.com/libp2p/go-libp2p-secio v0.2.0/go.mod h1:2JdZepB8J5V9mBp79BmwsaPQhRPNN2NrnB2lKQcdy6g= -github.com/libp2p/go-libp2p-secio v0.2.1/go.mod h1:cWtZpILJqkqrSkiYcDBh5lA3wbT2Q+hz3rJQq3iftD8= -github.com/libp2p/go-libp2p-swarm v0.0.6/go.mod h1:s5GZvzg9xXe8sbeESuFpjt8CJPTCa8mhEusweJqyFy8= -github.com/libp2p/go-libp2p-swarm v0.1.0/go.mod h1:wQVsCdjsuZoc730CgOvh5ox6K8evllckjebkdiY5ta4= -github.com/libp2p/go-libp2p-swarm v0.1.1/go.mod h1:4NVJaLwq/dr5kEq79Jo6pMin7ZFwLx73ln1FTefR91Q= -github.com/libp2p/go-libp2p-swarm v0.2.0/go.mod h1:x07b4zkMFo2EvgPV2bMTlNmdQc8i+74Jjio7xGvsTgU= -github.com/libp2p/go-libp2p-swarm v0.2.2/go.mod h1:fvmtQ0T1nErXym1/aa1uJEyN7JzaTNyBcHImCxRpPKU= -github.com/libp2p/go-libp2p-testing v0.0.1/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= -github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= -github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= -github.com/libp2p/go-libp2p-testing v0.0.4/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= -github.com/libp2p/go-libp2p-testing v0.1.0/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= -github.com/libp2p/go-libp2p-tls v0.1.0/go.mod h1:VZdoSWQDeNpIIAFJFv+6uqTqpnIIDHcqZQSTC/A1TT0= -github.com/libp2p/go-libp2p-transport v0.0.1/go.mod h1:UzbUs9X+PHOSw7S3ZmeOxfnwaQY5vGDzZmKPod3N3tk= -github.com/libp2p/go-libp2p-transport v0.0.4/go.mod h1:StoY3sx6IqsP6XKoabsPnHCwqKXWUMWU7Rfcsubee/A= -github.com/libp2p/go-libp2p-transport v0.0.5/go.mod h1:StoY3sx6IqsP6XKoabsPnHCwqKXWUMWU7Rfcsubee/A= -github.com/libp2p/go-libp2p-transport-upgrader v0.0.4/go.mod h1:RGq+tupk+oj7PzL2kn/m1w6YXxcIAYJYeI90h6BGgUc= -github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA= -github.com/libp2p/go-libp2p-yamux v0.1.2/go.mod h1:xUoV/RmYkg6BW/qGxA9XJyg+HzXFYkeXbnhjmnYzKp8= -github.com/libp2p/go-libp2p-yamux v0.1.3/go.mod h1:VGSQVrqkh6y4nm0189qqxMtvyBft44MOYYPpYKXiVt4= -github.com/libp2p/go-libp2p-yamux v0.2.0/go.mod h1:Db2gU+XfLpm6E4rG5uGCFX6uXA8MEXOxFcRoXUODaK8= -github.com/libp2p/go-libp2p-yamux v0.2.1/go.mod h1:1FBXiHDk1VyRM1C0aez2bCfHQ4vMZKkAQzZbkSQt5fI= -github.com/libp2p/go-maddr-filter v0.0.1/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= -github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= -github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M= -github.com/libp2p/go-mplex v0.0.3/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0= -github.com/libp2p/go-mplex v0.0.4/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0= -github.com/libp2p/go-mplex v0.1.0/go.mod h1:SXgmdki2kwCUlCCbfGLEgHjC4pFqhTp0ZoV6aiKgxDU= -github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= -github.com/libp2p/go-msgio v0.0.3/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= -github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= -github.com/libp2p/go-nat v0.0.3/go.mod h1:88nUEt0k0JD45Bk93NIwDqjlhiOwOoV36GchpcVc1yI= -github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/+KSDo= -github.com/libp2p/go-openssl v0.0.2/go.mod h1:v8Zw2ijCSWBQi8Pq5GAixw6DbFfa9u6VIYDXnvOXkc0= -github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-openssl v0.0.4 h1:d27YZvLoTyMhIN4njrkr8zMDOM4lfpHIp6A+TK9fovg= github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= -github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= -github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= -github.com/libp2p/go-stream-muxer v0.0.1/go.mod h1:bAo8x7YkSpadMTbtTaxGVHWUQsR/l5MEaHbKaliuT14= -github.com/libp2p/go-stream-muxer v0.1.0/go.mod h1:8JAVsjeRBCWwPoZeH0W1imLOcriqXJyFvB0mR4A04sQ= -github.com/libp2p/go-stream-muxer-multistream v0.1.1/go.mod h1:zmGdfkQ1AzOECIAcccoL8L//laqawOsO03zX8Sa+eGw= -github.com/libp2p/go-stream-muxer-multistream v0.2.0/go.mod h1:j9eyPol/LLRqT+GPLSxvimPhNph4sfYfMoDPd7HkzIc= -github.com/libp2p/go-tcp-transport v0.0.4/go.mod h1:+E8HvC8ezEVOxIo3V5vCK9l1y/19K427vCzQ+xHKH/o= -github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc= -github.com/libp2p/go-tcp-transport v0.1.1/go.mod h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY= -github.com/libp2p/go-testutil v0.0.1/go.mod h1:iAcJc/DKJQanJ5ws2V+u5ywdL2n12X1WbbEG+Jjy69I= -github.com/libp2p/go-testutil v0.1.0/go.mod h1:81b2n5HypcVyrCg/MJx4Wgfp/VHojytjVe/gLzZ2Ehc= -github.com/libp2p/go-ws-transport v0.0.5/go.mod h1:Qbl4BxPfXXhhd/o0wcrgoaItHqA9tnZjoFZnxykuaXU= -github.com/libp2p/go-ws-transport v0.1.0/go.mod h1:rjw1MG1LU9YDC6gzmwObkPd/Sqwhw7yT74kj3raBFuo= -github.com/libp2p/go-ws-transport v0.1.2/go.mod h1:dsh2Ld8F+XNmzpkaAijmg5Is+e9l6/1tK/6VFOdN69Y= -github.com/libp2p/go-yamux v1.2.1/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.2.2/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/lucas-clemente/quic-go v0.11.2/go.mod h1:PpMmPfPKO9nKJ/psF49ESTAGQSdfXxlg1otPbEB2nOw= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= -github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= @@ -463,136 +138,67 @@ github.com/mattn/go-isatty v0.0.9 h1:d5US/mDsogSGW37IV293h//ZFaeajb69h+EHFsv2xGg github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= -github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= -github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= -github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= -github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc= github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= -github.com/multiformats/go-multiaddr v0.0.1/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= -github.com/multiformats/go-multiaddr v0.0.2/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= -github.com/multiformats/go-multiaddr v0.0.4/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= -github.com/multiformats/go-multiaddr v0.1.0/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= -github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= github.com/multiformats/go-multiaddr v0.2.0 h1:lR52sFwcTCuQb6bTfnXF6zA2XfyYvyd+5a9qECv/J90= github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= -github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= -github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= -github.com/multiformats/go-multiaddr-dns v0.0.3/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= -github.com/multiformats/go-multiaddr-dns v0.2.0 h1:YWJoIDwLePniH7OU5hBnDZV6SWuvJqJ0YtN6pLeH9zA= -github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0= -github.com/multiformats/go-multiaddr-fmt v0.0.1/go.mod h1:aBYjqL4T/7j4Qx+R73XSv/8JsgnRFlf0w2KGLCmXl3Q= -github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= -github.com/multiformats/go-multiaddr-net v0.0.1/go.mod h1:nw6HSxNmCIQH27XPGBuX+d1tnvM7ihcFwHMSstNAVUU= -github.com/multiformats/go-multiaddr-net v0.1.0/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= -github.com/multiformats/go-multiaddr-net v0.1.1/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA= github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= -github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po= -github.com/multiformats/go-multihash v0.0.6/go.mod h1:XuKXPp8VHcTygube3OWZC+aZrA+H1IhmjoCDtJc7PXM= github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= -github.com/multiformats/go-multihash v0.0.9/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.13 h1:06x+mk/zj1FoMsgNejLpy6QTvJqlSt/BhLEy87zidlc= github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= -github.com/multiformats/go-multistream v0.0.1/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= -github.com/multiformats/go-multistream v0.0.4/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= -github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= -github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.5 h1:XVZwSo04Cs3j/jS0uAEPpT3JY6DzMcVLLoWOSnCxOjg= github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229/go.mod h1:0aYXnNPJ8l7uZxf45rWW1a/uME32OF0rhiYGNQ2oF2E= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.9.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.6.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/otiai10/copy v1.0.2/go.mod h1:c7RpqBkwMom4bYTSkLSym4VSJz/XtncWRAj/J4PEIMY= -github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= -github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= -github.com/polydawn/refmt v0.0.0-20190408063855-01bf1e26dd14/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1 h1:CskT+S6Ay54OwxBGB0R3Rsx4Muto6UnEYTyKJbyRIAI= github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a h1:hjZfReYVLbqFkAtr2us7vdy04YWz3LVAirzP7reh8+M= github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0 h1:c8R11WC8m7KNMkTv/0+Be8vvwo4I3/Ut9AC2FW8fX3U= github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shirou/gopsutil v2.18.12+incompatible h1:1eaJvGomDnH74/5cF4CTmTbLHAriGFsTZppLXDX93OM= -github.com/shirou/gopsutil v2.18.12+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w= github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa h1:E+gaaifzi2xF65PbDmuKI3PhLWY6G5opMLniFq8vmXA= github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU= -github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/smartystreets/goconvey v0.0.0-20190710185942-9d28bd7c0945/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 h1:WN9BUFbdyOsSH/XohnWpXOlq9NBD5sGAB2FciQMUEe8= github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= -github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0= github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU= github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -600,50 +206,18 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli/v2 v2.0.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= -github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436 h1:qOpVTI+BrstcjTZLm2Yz/3sOnqkzj3FQoh0g+E5s3Gc= github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830 h1:8kxMKmKzXXL4Ru1nyhvdms/JjWt+3YLpvRb/bAjO/y0= github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= -github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc/go.mod h1:r45hJU7yEoA81k6MWNhpMj/kms0n14dkzkxYHoB96UM= -github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba h1:X4n8JG2e2biEZZXdBKt9HX7DN3bYGFUqljqqy0DqgnY= -github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba/go.mod h1:CHQnYnQUEPydYCwuy8lmTHfGmdw9TKrhWV0xLx8l0oM= -github.com/whyrusleeping/cbor-gen v0.0.0-20190910031516-c1cbffdb01bb/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= -github.com/whyrusleeping/cbor-gen v0.0.0-20190917003517-d78d67427694/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= -github.com/whyrusleeping/cbor-gen v0.0.0-20191116002219-891f55cd449d/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= -github.com/whyrusleeping/cbor-gen v0.0.0-20191212224538-d370462a7e8a/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= -github.com/whyrusleeping/cbor-gen v0.0.0-20200121162646-b63bacf5eaf8/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= github.com/whyrusleeping/cbor-gen v0.0.0-20200206220010-03c9665e2a66 h1:LolR9FiEfQNn5U031bAhn/46po2JgWHKadYbcWFIJ+0= github.com/whyrusleeping/cbor-gen v0.0.0-20200206220010-03c9665e2a66/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e h1:JY8o/ebUUrCYetWmjRCNghxC59cOEaili83rxPRQCLw= github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= -github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= -github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= -github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f/go.mod h1:cZNvX9cFybI01GriPRMXDtczuvUhgbcYr9iCGaNlRv8= -github.com/whyrusleeping/go-smux-multiplex v3.0.16+incompatible/go.mod h1:34LEDbeKFZInPUrAG+bjuJmUXONGdEFW7XL0SpTY1y4= -github.com/whyrusleeping/go-smux-multistream v2.0.2+incompatible/go.mod h1:dRWHHvc4HDQSHh9gbKEBbUZ+f2Q8iZTPG3UOGYODxSQ= -github.com/whyrusleeping/go-smux-yamux v2.0.8+incompatible/go.mod h1:6qHUzBXUbB9MXmw3AUdB52L8sEb/hScCqOdW2kj/wuI= -github.com/whyrusleeping/mafmt v1.2.8/go.mod h1:faQJFPbLSxzD9xpA02ttW/tS9vZykNvXwGvqIpk20FA= -github.com/whyrusleeping/mdns v0.0.0-20180901202407-ef14215e6b30/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= -github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= -github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI= -github.com/whyrusleeping/pubsub v0.0.0-20131020042734-02de8aa2db3d/go.mod h1:g7ckxrjiFh8mi1AY7ox23PZD0g6QU/TxW3U3unX7I3A= -github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1:m2aV4LZI4Aez7dP5PMyVKEHhUyEJ/RjmPEDOpDvudHg= -github.com/whyrusleeping/yamux v1.1.5/go.mod h1:E8LnQQ8HKx5KD29HZFUwM1PxCOdPRzGwur1mcYhXcD8= github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -652,9 +226,6 @@ go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.5.1 h1:rsqfU5vBkVknbhUGbAUwQKR2H4ItV8tjJ+6kJX4cxHM= go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/dig v1.7.0/go.mod h1:z+dSd2TP9Usi48jL8M3v63iSBVkiwtVyMKxMZYYauPg= -go.uber.org/fx v1.9.0/go.mod h1:mFdUyAUuJ3w4jAckiKSKbldsxy1ojpAMJ+dVZg5Y0Aw= -go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= @@ -666,22 +237,12 @@ go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0 h1:nR6NoDBgAf67s68NhaXbsojM+2gxp3S1hWkHDl27pVU= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go4.org v0.0.0-20190218023631-ce4c26f7be8e/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= -go4.org v0.0.0-20190313082347-94abd6928b1d/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190225124518-7f87c0fbb88b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= @@ -689,33 +250,21 @@ golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/net v0.0.0-20180524181706-dfa909b99c79/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190611141213-3f473d35a33a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -725,67 +274,45 @@ golang.org/x/sys v0.0.0-20180202135801-37707fdb30a5/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190302025703-b6889370fb10/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190524122548-abf6ff778158/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190524152521-dbbf3f1254d4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190526052359-791d8a0f4d09/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae h1:QoJmnb9uyPCrH8GIg9uRLn4Ta45yhcQtpymCd0AavO8= golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200107162124-548cf772de50 h1:YvQ10rzcqWXLlJZ3XCUoO25savxmscf4+SC+ZqiCHhA= golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200108195415-316d2f248479 h1:csuS+MHeEA2eWhyjQCMaPMq4z1+/PohkBSjJZHSIbOE= golang.org/x/tools v0.0.0-20200108195415-316d2f248479/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= @@ -797,16 +324,12 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy gopkg.in/src-d/go-cli.v0 v0.0.0-20181105080154-d492247bbc0d/go.mod h1:z+K8VcOYVYcSwSjGebuDL6176A1XskgbtNl64NSg+n8= gopkg.in/src-d/go-log.v1 v1.0.1/go.mod h1:GN34hKP0g305ysm2/hctJ0Y8nWP3zxXXJ8GFabTyABE= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/urfave/cli.v2 v2.0.0-20180128182452-d3ae77c26ac8/go.mod h1:cKXr3E0k4aosgycml1b5z33BVV6hai1Kh7uDgFOkbcs= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M= howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= -launchpad.net/gocheck v0.0.0-20140225173054-000000000087/go.mod h1:hj7XX3B/0A+80Vse0e+BUHsHMTEhd0O4cpUHr/e/BUM= From 593d2fce21e001b4eb24bbc6591b518798bfeb63 Mon Sep 17 00:00:00 2001 From: laser Date: Fri, 1 May 2020 09:18:57 -0700 Subject: [PATCH 064/199] ignore build artifacts --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .gitignore diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..c90dde94c --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +.update-modules +.filecoin-build From a4ee6e27b9f83f7ea90ec048ae226c35a9b685a1 Mon Sep 17 00:00:00 2001 From: laser Date: Fri, 1 May 2020 09:31:34 -0700 Subject: [PATCH 065/199] ignore unused field --- selector_task.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/selector_task.go b/selector_task.go index 2f20f9a28..3298c9e5d 100644 --- a/selector_task.go +++ b/selector_task.go @@ -10,7 +10,7 @@ import ( ) type taskSelector struct { - best []stores.StorageInfo + best []stores.StorageInfo //nolint: unused, structcheck } func newTaskSelector() *taskSelector { From cb0bb31d06f8e2286f3f9f045a357bbe98714751 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 1 May 2020 20:00:17 +0200 Subject: [PATCH 066/199] sched: Handle closing workers --- localworker.go | 4 +++ manager.go | 3 ++ sched.go | 61 +++++++++++++++++++++++++++------ sched_watch.go | 93 ++++++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 150 insertions(+), 11 deletions(-) create mode 100644 sched_watch.go diff --git a/localworker.go b/localworker.go index f3f12e8c1..a92f01a89 100644 --- a/localworker.go +++ b/localworker.go @@ -211,6 +211,10 @@ func (l *LocalWorker) Info(context.Context) (storiface.WorkerInfo, error) { }, nil } +func (l *LocalWorker) Closing(ctx context.Context) (<-chan struct{}, error) { + return make(chan struct{}), nil +} + func (l *LocalWorker) Close() error { return nil } diff --git a/manager.go b/manager.go index f4408b6e0..065370ed6 100644 --- a/manager.go +++ b/manager.go @@ -37,6 +37,9 @@ type Worker interface { Info(context.Context) (storiface.WorkerInfo, error) + // returns channel signalling worker shutdown + Closing(context.Context) (<-chan struct{}, error) + Close() error } diff --git a/sched.go b/sched.go index aaf8b9e26..3822a8683 100644 --- a/sched.go +++ b/sched.go @@ -33,6 +33,10 @@ type scheduler struct { workers map[WorkerID]*workerHandle newWorkers chan *workerHandle + + watchClosing chan WorkerID + workerClosing chan WorkerID + schedule chan *workerRequest workerFree chan WorkerID closing chan struct{} @@ -47,10 +51,14 @@ func newScheduler(spt abi.RegisteredProof) *scheduler { nextWorker: 0, workers: map[WorkerID]*workerHandle{}, - newWorkers: make(chan *workerHandle), - schedule: make(chan *workerRequest), - workerFree: make(chan WorkerID), - closing: make(chan struct{}), + newWorkers: make(chan *workerHandle), + + watchClosing: make(chan WorkerID), + workerClosing: make(chan WorkerID), + + schedule: make(chan *workerRequest), + workerFree: make(chan WorkerID), + closing: make(chan struct{}), schedQueue: list.New(), } @@ -128,12 +136,14 @@ type workerHandle struct { } func (sh *scheduler) runSched() { + go sh.runWorkerWatcher() + for { select { case w := <-sh.newWorkers: - wid := sh.schedNewWorker(w) - - sh.onWorkerFreed(wid) + sh.schedNewWorker(w) + case wid := <-sh.workerClosing: + sh.schedDropWorker(wid) case req := <-sh.schedule: scheduled, err := sh.maybeSchedRequest(req) if err != nil { @@ -155,10 +165,18 @@ func (sh *scheduler) runSched() { } func (sh *scheduler) onWorkerFreed(wid WorkerID) { + sh.workersLk.Lock() + w, ok := sh.workers[wid] + sh.workersLk.Unlock() + if !ok { + log.Warnf("onWorkerFreed on invalid worker %d", wid) + return + } + for e := sh.schedQueue.Front(); e != nil; e = e.Next() { req := e.Value.(*workerRequest) - ok, err := req.sel.Ok(req.ctx, req.taskType, sh.workers[wid]) + ok, err := req.sel.Ok(req.ctx, req.taskType, w) if err != nil { log.Errorf("onWorkerFreed req.sel.Ok error: %+v", err) continue @@ -411,15 +429,36 @@ func (a *activeResources) utilization(wr storiface.WorkerResources) float64 { return max } -func (sh *scheduler) schedNewWorker(w *workerHandle) WorkerID { +func (sh *scheduler) schedNewWorker(w *workerHandle) { sh.workersLk.Lock() - defer sh.workersLk.Unlock() id := sh.nextWorker sh.workers[id] = w sh.nextWorker++ - return id + sh.workersLk.Unlock() + + select { + case sh.watchClosing <- id: + case <-sh.closing: + return + } + + sh.onWorkerFreed(id) +} + +func (sh *scheduler) schedDropWorker(wid WorkerID) { + sh.workersLk.Lock() + defer sh.workersLk.Unlock() + + w := sh.workers[wid] + delete(sh.workers, wid) + + go func() { + if err := w.w.Close(); err != nil { + log.Warnf("closing worker %d: %+v", err) + } + }() } func (sh *scheduler) schedClose() { diff --git a/sched_watch.go b/sched_watch.go new file mode 100644 index 000000000..c2716aae9 --- /dev/null +++ b/sched_watch.go @@ -0,0 +1,93 @@ +package sectorstorage + +import ( + "context" + "reflect" +) + +func (sh *scheduler) runWorkerWatcher() { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + nilch := reflect.ValueOf(new(chan struct{})).Elem() + + cases := []reflect.SelectCase{ + { + Dir: reflect.SelectRecv, + Chan: reflect.ValueOf(sh.closing), + }, + { + Dir: reflect.SelectRecv, + Chan: reflect.ValueOf(sh.watchClosing), + }, + } + + caseToWorker := map[int]WorkerID{} + + for { + n, rv, ok := reflect.Select(cases) + + switch { + case n == 0: // sh.closing + return + case n == 1: // sh.watchClosing + if !ok { + log.Errorf("watchClosing channel closed") + return + } + + wid, ok := rv.Interface().(WorkerID) + if !ok { + panic("got a non-WorkerID message") + } + + sh.workersLk.Lock() + workerClosing, err := sh.workers[wid].w.Closing(ctx) + sh.workersLk.Unlock() + if err != nil { + log.Errorf("getting worker closing channel: %+v", err) + select { + case sh.workerClosing <- wid: + case <-sh.closing: + return + } + + continue + } + + toSet := -1 + for i, sc := range cases { + if sc.Chan == nilch { + toSet = i + break + } + } + if toSet == -1 { + toSet = len(cases) + cases = append(cases, reflect.SelectCase{}) + } + + cases[toSet] = reflect.SelectCase{ + Dir: reflect.SelectRecv, + Chan: reflect.ValueOf(workerClosing), + } + + caseToWorker[toSet] = wid + default: + wid := caseToWorker[n] + + delete(caseToWorker, n) + cases[n] = reflect.SelectCase{ + Dir: reflect.SelectRecv, + Chan: nilch, + } + + log.Warnf("worker %d dropped", wid) + select { + case sh.workerClosing <- wid: + case <-sh.closing: + return + } + } + } +} From 780596477cfeb64fcab30f8f95699bc878d42024 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 1 May 2020 20:04:21 +0200 Subject: [PATCH 067/199] gofmt --- sched.go | 14 +++++++------- sched_watch.go | 4 ++-- stores/remote.go | 4 ++-- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/sched.go b/sched.go index 3822a8683..d7745da36 100644 --- a/sched.go +++ b/sched.go @@ -51,14 +51,14 @@ func newScheduler(spt abi.RegisteredProof) *scheduler { nextWorker: 0, workers: map[WorkerID]*workerHandle{}, - newWorkers: make(chan *workerHandle), + newWorkers: make(chan *workerHandle), watchClosing: make(chan WorkerID), workerClosing: make(chan WorkerID), - schedule: make(chan *workerRequest), - workerFree: make(chan WorkerID), - closing: make(chan struct{}), + schedule: make(chan *workerRequest), + workerFree: make(chan WorkerID), + closing: make(chan struct{}), schedQueue: list.New(), } @@ -394,7 +394,7 @@ func canHandleRequest(needRes Resources, spt abi.RegisteredProof, wid WorkerID, return false } } else { - if active.cpuUse + uint64(needRes.Threads) > res.CPUs { + if active.cpuUse+uint64(needRes.Threads) > res.CPUs { log.Debugf("sched: not scheduling on worker %d; not enough threads, need %d, %d in use, target %d", wid, needRes.Threads, active.cpuUse, res.CPUs) return false } @@ -416,12 +416,12 @@ func (a *activeResources) utilization(wr storiface.WorkerResources) float64 { cpu := float64(a.cpuUse) / float64(wr.CPUs) max = cpu - memMin := float64(a.memUsedMin + wr.MemReserved) / float64(wr.MemPhysical) + memMin := float64(a.memUsedMin+wr.MemReserved) / float64(wr.MemPhysical) if memMin > max { max = memMin } - memMax := float64(a.memUsedMax + wr.MemReserved) / float64(wr.MemPhysical + wr.MemSwap) + memMax := float64(a.memUsedMax+wr.MemReserved) / float64(wr.MemPhysical+wr.MemSwap) if memMax > max { max = memMax } diff --git a/sched_watch.go b/sched_watch.go index c2716aae9..214489083 100644 --- a/sched_watch.go +++ b/sched_watch.go @@ -68,7 +68,7 @@ func (sh *scheduler) runWorkerWatcher() { } cases[toSet] = reflect.SelectCase{ - Dir: reflect.SelectRecv, + Dir: reflect.SelectRecv, Chan: reflect.ValueOf(workerClosing), } @@ -78,7 +78,7 @@ func (sh *scheduler) runWorkerWatcher() { delete(caseToWorker, n) cases[n] = reflect.SelectCase{ - Dir: reflect.SelectRecv, + Dir: reflect.SelectRecv, Chan: nilch, } diff --git a/stores/remote.go b/stores/remote.go index 863bee083..a750d4841 100644 --- a/stores/remote.go +++ b/stores/remote.go @@ -27,7 +27,7 @@ type Remote struct { index SectorIndex auth http.Header - fetchLk sync.Mutex + fetchLk sync.Mutex fetching map[abi.SectorID]chan struct{} } @@ -121,7 +121,7 @@ func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, fileType return "", "", "", nil, xerrors.Errorf("failed to acquire sector %v from remote(%d): not found", s, fileType) } - sort.Slice(si, func(i, j int) bool { + sort.Slice(si, func(i, j int) bool { return si[i].Weight < si[j].Weight }) From 10f7750d806f349d9f7ec3aa838cd3035178bb95 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 8 May 2020 00:22:37 +0200 Subject: [PATCH 068/199] v26 resource table --- resources.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/resources.go b/resources.go index afe4c166c..0407816fa 100644 --- a/resources.go +++ b/resources.go @@ -73,7 +73,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ sealtasks.TTPreCommit1: { abi.RegisteredProof_StackedDRG32GiBSeal: Resources{ MaxMemory: 64 << 30, - MinMemory: 32 << 30, + MinMemory: 48 << 30, Threads: 1, @@ -106,10 +106,11 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ }, sealtasks.TTPreCommit2: { abi.RegisteredProof_StackedDRG32GiBSeal: Resources{ - MaxMemory: 96 << 30, - MinMemory: 64 << 30, + MaxMemory: 32 << 30, + MinMemory: 32 << 30, Threads: -1, + CanGPU: true, BaseMinMemory: 30 << 30, }, @@ -172,9 +173,9 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 8 << 20, }, }, - sealtasks.TTCommit2: { // TODO: Measure more accurately + sealtasks.TTCommit2: { abi.RegisteredProof_StackedDRG32GiBSeal: Resources{ - MaxMemory: 110 << 30, + MaxMemory: 130 << 30, MinMemory: 60 << 30, Threads: -1, From 1bbfb7cffda82fd46f2bf604939dba4d18fcff02 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 8 May 2020 00:22:58 +0200 Subject: [PATCH 069/199] Fix ffiwrapper unseal --- ffiwrapper/sealer_cgo.go | 65 ++++++++++++++++++++++++---------------- resources.go | 2 +- 2 files changed, 41 insertions(+), 26 deletions(-) diff --git a/ffiwrapper/sealer_cgo.go b/ffiwrapper/sealer_cgo.go index 6764c3d96..f90a6020e 100644 --- a/ffiwrapper/sealer_cgo.go +++ b/ffiwrapper/sealer_cgo.go @@ -108,33 +108,48 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie }, werr() } +type closerFunc func() error + +func (cf closerFunc) Close() error { + return cf() +} + func (sb *Sealer) ReadPieceFromSealedSector(ctx context.Context, sector abi.SectorID, offset UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealedCID cid.Cid) (io.ReadCloser, error) { - path, doneUnsealed, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTUnsealed, false) - if err != nil { - return nil, xerrors.Errorf("acquire unsealed sector path: %w", err) - } - defer doneUnsealed() - f, err := os.OpenFile(path.Unsealed, os.O_RDONLY, 0644) - if err == nil { - if _, err := f.Seek(int64(offset), io.SeekStart); err != nil { - return nil, xerrors.Errorf("seek: %w", err) + { + path, doneUnsealed, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTNone, false) + if err != nil { + return nil, xerrors.Errorf("acquire unsealed sector path: %w", err) } - lr := io.LimitReader(f, int64(size)) + f, err := os.OpenFile(path.Unsealed, os.O_RDONLY, 0644) + if err == nil { + if _, err := f.Seek(int64(offset), io.SeekStart); err != nil { + doneUnsealed() + return nil, xerrors.Errorf("seek: %w", err) + } - return &struct { - io.Reader - io.Closer - }{ - Reader: lr, - Closer: f, - }, nil - } - if !os.IsNotExist(err) { - return nil, err + lr := io.LimitReader(f, int64(size)) + + return &struct { + io.Reader + io.Closer + }{ + Reader: lr, + Closer: closerFunc(func() error { + doneUnsealed() + return f.Close() + }), + }, nil + } + + doneUnsealed() + + if !os.IsNotExist(err) { + return nil, err + } } - sealed, doneSealed, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed|stores.FTCache, 0, false) + paths, doneSealed, err := sb.sectors.AcquireSector(ctx, sector, stores.FTSealed|stores.FTCache, stores.FTUnsealed, false) if err != nil { return nil, xerrors.Errorf("acquire sealed/cache sector path: %w", err) } @@ -145,9 +160,9 @@ func (sb *Sealer) ReadPieceFromSealedSector(ctx context.Context, sector abi.Sect // remove last used one (or use whatever other cache policy makes sense)) err = ffi.Unseal( sb.sealProofType, - sealed.Cache, - sealed.Sealed, - path.Unsealed, + paths.Cache, + paths.Sealed, + paths.Unsealed, sector.Number, sector.Miner, ticket, @@ -157,7 +172,7 @@ func (sb *Sealer) ReadPieceFromSealedSector(ctx context.Context, sector abi.Sect return nil, xerrors.Errorf("unseal failed: %w", err) } - f, err = os.OpenFile(string(path.Unsealed), os.O_RDONLY, 0644) + f, err := os.OpenFile(paths.Unsealed, os.O_RDONLY, 0644) if err != nil { return nil, err } diff --git a/resources.go b/resources.go index 0407816fa..87058e80a 100644 --- a/resources.go +++ b/resources.go @@ -110,7 +110,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ MinMemory: 32 << 30, Threads: -1, - CanGPU: true, + CanGPU: true, BaseMinMemory: 30 << 30, }, From ced1d6e0875c5b535b346ef6f4be2ab1e3ec51b1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 8 May 2020 01:38:05 +0200 Subject: [PATCH 070/199] Priority queue for tasks --- request_queue.go | 36 ++++++++++++++++++++++++++++++++++++ request_queue_test.go | 30 ++++++++++++++++++++++++++++++ sched.go | 25 ++++++++++--------------- sealtasks/task.go | 14 ++++++++++++++ 4 files changed, 90 insertions(+), 15 deletions(-) create mode 100644 request_queue.go create mode 100644 request_queue_test.go diff --git a/request_queue.go b/request_queue.go new file mode 100644 index 000000000..09ca7ae3f --- /dev/null +++ b/request_queue.go @@ -0,0 +1,36 @@ +package sectorstorage + +import "container/heap" + +type requestQueue []*workerRequest + +func (q requestQueue) Len() int { return len(q) } + +func (q requestQueue) Less(i, j int) bool { + return q[i].taskType.Less(q[j].taskType) +} + +func (q requestQueue) Swap(i, j int) { + q[i], q[j] = q[j], q[i] + q[i].index = i + q[j].index = j +} + +func (q *requestQueue) Push(x interface{}) { + n := len(*q) + item := x.(*workerRequest) + item.index = n + *q = append(*q, item) +} + +func (q *requestQueue) Pop() interface{} { + old := *q + n := len(old) + item := old[n-1] + old[n-1] = nil // avoid memory leak + item.index = -1 // for safety + *q = old[0 : n-1] + return item +} + +var _ heap.Interface = &requestQueue{} diff --git a/request_queue_test.go b/request_queue_test.go new file mode 100644 index 000000000..9bf231e39 --- /dev/null +++ b/request_queue_test.go @@ -0,0 +1,30 @@ +package sectorstorage + +import ( + "container/heap" + "testing" + + "github.com/filecoin-project/sector-storage/sealtasks" +) + +func TestRequestQueue(t *testing.T) { + rq := &requestQueue{} + + heap.Push(rq, &workerRequest{taskType: sealtasks.TTAddPiece}) + heap.Push(rq, &workerRequest{taskType: sealtasks.TTPreCommit1}) + heap.Push(rq, &workerRequest{taskType: sealtasks.TTPreCommit2}) + heap.Push(rq, &workerRequest{taskType: sealtasks.TTPreCommit1}) + heap.Push(rq, &workerRequest{taskType: sealtasks.TTAddPiece}) + + pt := heap.Pop(rq).(*workerRequest) + + if pt.taskType != sealtasks.TTPreCommit2 { + t.Error("expected precommit2, got", pt.taskType) + } + + pt = heap.Pop(rq).(*workerRequest) + + if pt.taskType != sealtasks.TTPreCommit1 { + t.Error("expected precommit1, got", pt.taskType) + } +} diff --git a/sched.go b/sched.go index d7745da36..019febda4 100644 --- a/sched.go +++ b/sched.go @@ -1,7 +1,7 @@ package sectorstorage import ( - "container/list" + "container/heap" "context" "sort" "sync" @@ -41,7 +41,7 @@ type scheduler struct { workerFree chan WorkerID closing chan struct{} - schedQueue *list.List // List[*workerRequest] + schedQueue *requestQueue } func newScheduler(spt abi.RegisteredProof) *scheduler { @@ -60,7 +60,7 @@ func newScheduler(spt abi.RegisteredProof) *scheduler { workerFree: make(chan WorkerID), closing: make(chan struct{}), - schedQueue: list.New(), + schedQueue: &requestQueue{}, } } @@ -101,6 +101,8 @@ type workerRequest struct { prepare WorkerAction work WorkerAction + index int // The index of the item in the heap. + ret chan<- workerResponse ctx context.Context } @@ -154,7 +156,7 @@ func (sh *scheduler) runSched() { continue } - sh.schedQueue.PushBack(req) + heap.Push(sh.schedQueue, req) case wid := <-sh.workerFree: sh.onWorkerFreed(wid) case <-sh.closing: @@ -173,8 +175,8 @@ func (sh *scheduler) onWorkerFreed(wid WorkerID) { return } - for e := sh.schedQueue.Front(); e != nil; e = e.Next() { - req := e.Value.(*workerRequest) + for i := 0; i < sh.schedQueue.Len(); i++ { + req := (*sh.schedQueue)[i] ok, err := req.sel.Ok(req.ctx, req.taskType, w) if err != nil { @@ -193,15 +195,8 @@ func (sh *scheduler) onWorkerFreed(wid WorkerID) { } if scheduled { - pe := e.Prev() - sh.schedQueue.Remove(e) - if pe == nil { - pe = sh.schedQueue.Front() - } - if pe == nil { - break - } - e = pe + heap.Remove(sh.schedQueue, i) + i-- continue } } diff --git a/sealtasks/task.go b/sealtasks/task.go index 8fbe7a7b4..0a94d2c04 100644 --- a/sealtasks/task.go +++ b/sealtasks/task.go @@ -13,3 +13,17 @@ const ( TTFetch TaskType = "seal/v0/fetch" ) + +var order = map[TaskType]int{ + TTAddPiece: 7, + TTPreCommit1: 6, + TTPreCommit2: 5, + TTCommit2: 4, + TTCommit1: 3, + TTFetch: 2, + TTFinalize: 1, +} + +func (a TaskType) Less(b TaskType) bool { + return order[a] < order[b] +} From fc637552b77b70722d50e1b470ed7508b513a2cd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 8 May 2020 18:08:48 +0200 Subject: [PATCH 071/199] stores: Simple health reporting --- stores/index.go | 35 ++++++++++++++++++++++++++++++++++- stores/local.go | 37 +++++++++++++++++++++++++++++++++++++ 2 files changed, 71 insertions(+), 1 deletion(-) diff --git a/stores/index.go b/stores/index.go index c3271225e..f0ff22a01 100644 --- a/stores/index.go +++ b/stores/index.go @@ -6,6 +6,7 @@ import ( gopath "path" "sort" "sync" + "time" "golang.org/x/xerrors" @@ -13,6 +14,9 @@ import ( "github.com/filecoin-project/specs-actors/actors/abi/big" ) +var HeartBeatInterval = 10 * time.Second +var SkippedHeartbeatThresh = HeartBeatInterval * 5 + // ID identifies sector storage by UUID. One sector storage should map to one // filesystem, local or networked / shared by multiple machines type ID string @@ -24,12 +28,20 @@ type StorageInfo struct { CanSeal bool CanStore bool + + LastHeartbeat time.Time + HeartbeatErr error +} + +type HealthReport struct { + Stat FsStat + Err error } type SectorIndex interface { // part of storage-miner api StorageAttach(context.Context, StorageInfo, FsStat) error StorageInfo(context.Context, ID) (StorageInfo, error) - // TODO: StorageUpdateStats(FsStat) + StorageReportHealth(context.Context, ID, HealthReport) error StorageDeclareSector(ctx context.Context, storageId ID, s abi.SectorID, ft SectorFileType) error StorageDropSector(ctx context.Context, storageId ID, s abi.SectorID, ft SectorFileType) error @@ -46,6 +58,9 @@ type Decl struct { type storageEntry struct { info *StorageInfo fsi FsStat + + lastHeartbeat time.Time + heartbeatErr error } type Index struct { @@ -120,10 +135,28 @@ func (i *Index) StorageAttach(ctx context.Context, si StorageInfo, st FsStat) er i.stores[si.ID] = &storageEntry{ info: &si, fsi: st, + + lastHeartbeat: time.Now(), } return nil } +func (i *Index) StorageReportHealth(ctx context.Context, id ID, report HealthReport) error { + i.lk.Lock() + defer i.lk.Unlock() + + ent, ok := i.stores[id] + if !ok { + return xerrors.Errorf("health report for unknown storage: %s", id) + } + + ent.fsi = report.Stat + ent.heartbeatErr = report.Err + ent.lastHeartbeat = time.Now() + + return nil +} + func (i *Index) StorageDeclareSector(ctx context.Context, storageId ID, s abi.SectorID, ft SectorFileType) error { i.lk.Lock() defer i.lk.Unlock() diff --git a/stores/local.go b/stores/local.go index 281475b1c..673583c6a 100644 --- a/stores/local.go +++ b/stores/local.go @@ -5,9 +5,11 @@ import ( "encoding/json" "io/ioutil" "math/bits" + "math/rand" "os" "path/filepath" "sync" + "time" "golang.org/x/xerrors" @@ -155,9 +157,44 @@ func (st *Local) open(ctx context.Context) error { } } + go st.reportHealth(ctx) + return nil } +func (st *Local) reportHealth(ctx context.Context) { + // randomize interval by ~10% + interval := (HeartBeatInterval*100_000 + time.Duration(rand.Int63n(10_000))) / 100_000 + + for { + select { + case <-time.After(interval): + case <-ctx.Done(): + return + } + + st.localLk.RLock() + + toReport := map[ID]HealthReport{} + for id, p := range st.paths { + stat, err := Stat(p.local) + + toReport[id] = HealthReport{ + Stat: stat, + Err: err, + } + } + + st.localLk.RUnlock() + + for id, report := range toReport { + if err := st.index.StorageReportHealth(ctx, id, report); err != nil { + log.Warnf("error reporting storage health for %s: %+v", id, report) + } + } + } +} + func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, existing SectorFileType, allocate SectorFileType, sealing bool) (SectorPaths, SectorPaths, func(), error) { if existing|allocate != existing^allocate { return SectorPaths{}, SectorPaths{}, nil, xerrors.New("can't both find and allocate a sector") From e721b8910cbbaf80b8006f68a36b62af0ca4f1cc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 8 May 2020 18:54:06 +0200 Subject: [PATCH 072/199] stores: use heartbeat info in selecting alloc storage --- localworker.go | 4 ++-- resources.go | 13 ------------- roprov.go | 3 ++- sched.go | 6 +++--- selector_alloc.go | 22 +++++++++++++--------- selector_existing.go | 5 +++-- selector_task.go | 4 +++- stores/filetype.go | 35 +++++++++++++++++++++++++++++++++++ stores/http_handler.go | 4 +++- stores/index.go | 28 +++++++++++++++++++++++----- stores/interface.go | 4 ++-- stores/local.go | 12 ++++++------ stores/remote.go | 16 ++++++++-------- 13 files changed, 103 insertions(+), 53 deletions(-) diff --git a/localworker.go b/localworker.go index a92f01a89..969007d93 100644 --- a/localworker.go +++ b/localworker.go @@ -58,7 +58,7 @@ type localWorkerPathProvider struct { } func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, sealing bool) (stores.SectorPaths, func(), error) { - paths, storageIDs, done, err := l.w.storage.AcquireSector(ctx, sector, existing, allocate, sealing) + paths, storageIDs, done, err := l.w.storage.AcquireSector(ctx, sector, l.w.scfg.SealProofType, existing, allocate, sealing) if err != nil { return stores.SectorPaths{}, nil, err } @@ -163,7 +163,7 @@ func (l *LocalWorker) FinalizeSector(ctx context.Context, sector abi.SectorID) e return xerrors.Errorf("removing unsealed data: %w", err) } - if err := l.storage.MoveStorage(ctx, sector, stores.FTSealed|stores.FTCache); err != nil { + if err := l.storage.MoveStorage(ctx, sector, l.scfg.SealProofType, stores.FTSealed|stores.FTCache); err != nil { return xerrors.Errorf("moving sealed data to storage: %w", err) } diff --git a/resources.go b/resources.go index 87058e80a..23dcc2085 100644 --- a/resources.go +++ b/resources.go @@ -4,21 +4,8 @@ import ( "github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/sector-storage/sealtasks" - "github.com/filecoin-project/sector-storage/stores" ) -var FSOverheadSeal = map[stores.SectorFileType]int{ // 10x overheads - stores.FTUnsealed: 10, - stores.FTSealed: 10, - stores.FTCache: 70, // TODO: confirm for 32G -} - -var FsOverheadFinalized = map[stores.SectorFileType]int{ - stores.FTUnsealed: 10, - stores.FTSealed: 10, - stores.FTCache: 2, -} - type Resources struct { MinMemory uint64 // What Must be in RAM for decent perf MaxMemory uint64 // Memory required (swap + ram) diff --git a/roprov.go b/roprov.go index e6ec1e8f2..172cf7cf8 100644 --- a/roprov.go +++ b/roprov.go @@ -12,6 +12,7 @@ import ( type readonlyProvider struct { stor *stores.Local + spt abi.RegisteredProof } func (l *readonlyProvider) AcquireSector(ctx context.Context, id abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, sealing bool) (stores.SectorPaths, func(), error) { @@ -19,7 +20,7 @@ func (l *readonlyProvider) AcquireSector(ctx context.Context, id abi.SectorID, e return stores.SectorPaths{}, nil, xerrors.New("read-only storage") } - p, _, done, err := l.stor.AcquireSector(ctx, id, existing, allocate, sealing) + p, _, done, err := l.stor.AcquireSector(ctx, id, l.spt, existing, allocate, sealing) return p, done, err } diff --git a/sched.go b/sched.go index 019febda4..c48440757 100644 --- a/sched.go +++ b/sched.go @@ -20,7 +20,7 @@ const mib = 1 << 20 type WorkerAction func(ctx context.Context, w Worker) error type WorkerSelector interface { - Ok(ctx context.Context, task sealtasks.TaskType, a *workerHandle) (bool, error) // true if worker is acceptable for performing a task + Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredProof, a *workerHandle) (bool, error) // true if worker is acceptable for performing a task Cmp(ctx context.Context, task sealtasks.TaskType, a, b *workerHandle) (bool, error) // true if a is preferred over b } @@ -178,7 +178,7 @@ func (sh *scheduler) onWorkerFreed(wid WorkerID) { for i := 0; i < sh.schedQueue.Len(); i++ { req := (*sh.schedQueue)[i] - ok, err := req.sel.Ok(req.ctx, req.taskType, w) + ok, err := req.sel.Ok(req.ctx, req.taskType, sh.spt, w) if err != nil { log.Errorf("onWorkerFreed req.sel.Ok error: %+v", err) continue @@ -212,7 +212,7 @@ func (sh *scheduler) maybeSchedRequest(req *workerRequest) (bool, error) { needRes := ResourceTable[req.taskType][sh.spt] for wid, worker := range sh.workers { - ok, err := req.sel.Ok(req.ctx, req.taskType, worker) + ok, err := req.sel.Ok(req.ctx, req.taskType, sh.spt, worker) if err != nil { return false, err } diff --git a/selector_alloc.go b/selector_alloc.go index c7d06a7bc..0a7850424 100644 --- a/selector_alloc.go +++ b/selector_alloc.go @@ -5,26 +5,25 @@ import ( "golang.org/x/xerrors" + "github.com/filecoin-project/specs-actors/actors/abi" + "github.com/filecoin-project/sector-storage/sealtasks" "github.com/filecoin-project/sector-storage/stores" ) type allocSelector struct { - best []stores.StorageInfo + index stores.SectorIndex + alloc stores.SectorFileType } func newAllocSelector(ctx context.Context, index stores.SectorIndex, alloc stores.SectorFileType) (*allocSelector, error) { - best, err := index.StorageBestAlloc(ctx, alloc, true) - if err != nil { - return nil, err - } - return &allocSelector{ - best: best, + index: index, + alloc: alloc, }, nil } -func (s *allocSelector) Ok(ctx context.Context, task sealtasks.TaskType, whnd *workerHandle) (bool, error) { +func (s *allocSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredProof, whnd *workerHandle) (bool, error) { tasks, err := whnd.w.TaskTypes(ctx) if err != nil { return false, xerrors.Errorf("getting supported worker task types: %w", err) @@ -43,7 +42,12 @@ func (s *allocSelector) Ok(ctx context.Context, task sealtasks.TaskType, whnd *w have[path.ID] = struct{}{} } - for _, info := range s.best { + best, err := s.index.StorageBestAlloc(ctx, s.alloc, spt, true) + if err != nil { + return false, xerrors.Errorf("finding best alloc storage: %w", err) + } + + for _, info := range best { if _, ok := have[info.ID]; ok { return true, nil } diff --git a/selector_existing.go b/selector_existing.go index 46dd3278e..14e6dbefd 100644 --- a/selector_existing.go +++ b/selector_existing.go @@ -5,9 +5,10 @@ import ( "golang.org/x/xerrors" + "github.com/filecoin-project/specs-actors/actors/abi" + "github.com/filecoin-project/sector-storage/sealtasks" "github.com/filecoin-project/sector-storage/stores" - "github.com/filecoin-project/specs-actors/actors/abi" ) type existingSelector struct { @@ -25,7 +26,7 @@ func newExistingSelector(ctx context.Context, index stores.SectorIndex, sector a }, nil } -func (s *existingSelector) Ok(ctx context.Context, task sealtasks.TaskType, whnd *workerHandle) (bool, error) { +func (s *existingSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredProof, whnd *workerHandle) (bool, error) { tasks, err := whnd.w.TaskTypes(ctx) if err != nil { return false, xerrors.Errorf("getting supported worker task types: %w", err) diff --git a/selector_task.go b/selector_task.go index 3298c9e5d..d2cf73476 100644 --- a/selector_task.go +++ b/selector_task.go @@ -5,6 +5,8 @@ import ( "golang.org/x/xerrors" + "github.com/filecoin-project/specs-actors/actors/abi" + "github.com/filecoin-project/sector-storage/sealtasks" "github.com/filecoin-project/sector-storage/stores" ) @@ -17,7 +19,7 @@ func newTaskSelector() *taskSelector { return &taskSelector{} } -func (s *taskSelector) Ok(ctx context.Context, task sealtasks.TaskType, whnd *workerHandle) (bool, error) { +func (s *taskSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredProof, whnd *workerHandle) (bool, error) { tasks, err := whnd.w.TaskTypes(ctx) if err != nil { return false, xerrors.Errorf("getting supported worker task types: %w", err) diff --git a/stores/filetype.go b/stores/filetype.go index 784b5b71e..1810054d8 100644 --- a/stores/filetype.go +++ b/stores/filetype.go @@ -17,6 +17,18 @@ const ( FTNone SectorFileType = 0 ) +var FSOverheadSeal = map[SectorFileType]int{ // 10x overheads + FTUnsealed: 10, + FTSealed: 10, + FTCache: 70, // TODO: confirm for 32G +} + +var FsOverheadFinalized = map[SectorFileType]int{ + FTUnsealed: 10, + FTSealed: 10, + FTCache: 2, +} + type SectorFileType int func (t SectorFileType) String() string { @@ -36,6 +48,29 @@ func (t SectorFileType) Has(singleType SectorFileType) bool { return t&singleType == singleType } +func (t SectorFileType) SealSpaceUse(spt abi.RegisteredProof) (uint64, error) { + ssize, err := spt.SectorSize() + if err != nil { + return 0, xerrors.Errorf("getting sector size: %w", err) + } + + var need uint64 + for _, pathType := range PathTypes { + if !t.Has(pathType) { + continue + } + + oh, ok := FSOverheadSeal[pathType] + if !ok { + return 0, xerrors.Errorf("no seal overhead info for %s", pathType) + } + + need += uint64(oh) * uint64(ssize) / 10 + } + + return need, nil +} + type SectorPaths struct { Id abi.SectorID diff --git a/stores/http_handler.go b/stores/http_handler.go index b14dac54f..c39ca4510 100644 --- a/stores/http_handler.go +++ b/stores/http_handler.go @@ -68,7 +68,9 @@ func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Requ w.WriteHeader(500) return } - paths, _, done, err := handler.Local.AcquireSector(r.Context(), id, ft, FTNone, false) + + // passing 0 spt because we don't allocate anything + paths, _, done, err := handler.Local.AcquireSector(r.Context(), id, 0, ft, FTNone, false) if err != nil { log.Error("%+v", err) w.WriteHeader(500) diff --git a/stores/index.go b/stores/index.go index f0ff22a01..6659a4422 100644 --- a/stores/index.go +++ b/stores/index.go @@ -14,8 +14,8 @@ import ( "github.com/filecoin-project/specs-actors/actors/abi/big" ) -var HeartBeatInterval = 10 * time.Second -var SkippedHeartbeatThresh = HeartBeatInterval * 5 +var HeartbeatInterval = 10 * time.Second +var SkippedHeartbeatThresh = HeartbeatInterval * 5 // ID identifies sector storage by UUID. One sector storage should map to one // filesystem, local or networked / shared by multiple machines @@ -47,7 +47,7 @@ type SectorIndex interface { // part of storage-miner api StorageDropSector(ctx context.Context, storageId ID, s abi.SectorID, ft SectorFileType) error StorageFindSector(ctx context.Context, sector abi.SectorID, ft SectorFileType, allowFetch bool) ([]StorageInfo, error) - StorageBestAlloc(ctx context.Context, allocate SectorFileType, sealing bool) ([]StorageInfo, error) + StorageBestAlloc(ctx context.Context, allocate SectorFileType, spt abi.RegisteredProof, sealing bool) ([]StorageInfo, error) } type Decl struct { @@ -302,12 +302,17 @@ func (i *Index) StorageInfo(ctx context.Context, id ID) (StorageInfo, error) { return *si.info, nil } -func (i *Index) StorageBestAlloc(ctx context.Context, allocate SectorFileType, sealing bool) ([]StorageInfo, error) { +func (i *Index) StorageBestAlloc(ctx context.Context, allocate SectorFileType, spt abi.RegisteredProof, sealing bool) ([]StorageInfo, error) { i.lk.RLock() defer i.lk.RUnlock() var candidates []storageEntry + spaceReq, err := allocate.SealSpaceUse(spt) + if err != nil { + return nil, xerrors.Errorf("estimating required space: %w", err) + } + for _, p := range i.stores { if sealing && !p.info.CanSeal { continue @@ -316,7 +321,20 @@ func (i *Index) StorageBestAlloc(ctx context.Context, allocate SectorFileType, s continue } - // TODO: filter out of space + if spaceReq > p.fsi.Available { + log.Debugf("not allocating on %s, out of space (available: %d, need: %d)", p.info.ID, p.fsi.Available, spaceReq) + continue + } + + if time.Since(p.lastHeartbeat) > SkippedHeartbeatThresh { + log.Debugf("not allocating on %s, didn't receive heartbeats for %s", p.info.ID, time.Since(p.lastHeartbeat)) + continue + } + + if p.heartbeatErr != nil { + log.Debugf("not allocating on %s, heartbeat error: %s", p.info.ID, p.heartbeatErr) + continue + } candidates = append(candidates, *p) } diff --git a/stores/interface.go b/stores/interface.go index 556cd4dbf..4a1361904 100644 --- a/stores/interface.go +++ b/stores/interface.go @@ -10,11 +10,11 @@ import ( ) type Store interface { - AcquireSector(ctx context.Context, s abi.SectorID, existing SectorFileType, allocate SectorFileType, sealing bool) (paths SectorPaths, stores SectorPaths, done func(), err error) + AcquireSector(ctx context.Context, s abi.SectorID, spt abi.RegisteredProof, existing SectorFileType, allocate SectorFileType, sealing bool) (paths SectorPaths, stores SectorPaths, done func(), err error) Remove(ctx context.Context, s abi.SectorID, types SectorFileType) error // move sectors into storage - MoveStorage(ctx context.Context, s abi.SectorID, types SectorFileType) error + MoveStorage(ctx context.Context, s abi.SectorID, spt abi.RegisteredProof, types SectorFileType) error FsStat(ctx context.Context, id ID) (FsStat, error) } diff --git a/stores/local.go b/stores/local.go index 673583c6a..52ba0afbc 100644 --- a/stores/local.go +++ b/stores/local.go @@ -164,7 +164,7 @@ func (st *Local) open(ctx context.Context) error { func (st *Local) reportHealth(ctx context.Context) { // randomize interval by ~10% - interval := (HeartBeatInterval*100_000 + time.Duration(rand.Int63n(10_000))) / 100_000 + interval := (HeartbeatInterval*100_000 + time.Duration(rand.Int63n(10_000))) / 100_000 for { select { @@ -195,7 +195,7 @@ func (st *Local) reportHealth(ctx context.Context) { } } -func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, existing SectorFileType, allocate SectorFileType, sealing bool) (SectorPaths, SectorPaths, func(), error) { +func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, spt abi.RegisteredProof, existing SectorFileType, allocate SectorFileType, sealing bool) (SectorPaths, SectorPaths, func(), error) { if existing|allocate != existing^allocate { return SectorPaths{}, SectorPaths{}, nil, xerrors.New("can't both find and allocate a sector") } @@ -240,7 +240,7 @@ func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, existing S continue } - sis, err := st.index.StorageBestAlloc(ctx, fileType, sealing) + sis, err := st.index.StorageBestAlloc(ctx, fileType, spt, sealing) if err != nil { st.localLk.RUnlock() return SectorPaths{}, SectorPaths{}, nil, xerrors.Errorf("finding best storage for allocating : %w", err) @@ -352,14 +352,14 @@ func (st *Local) Remove(ctx context.Context, sid abi.SectorID, typ SectorFileTyp return nil } -func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, types SectorFileType) error { - dest, destIds, sdone, err := st.AcquireSector(ctx, s, FTNone, types, false) +func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, spt abi.RegisteredProof, types SectorFileType) error { + dest, destIds, sdone, err := st.AcquireSector(ctx, s, spt, FTNone, types, false) if err != nil { return xerrors.Errorf("acquire dest storage: %w", err) } defer sdone() - src, srcIds, ddone, err := st.AcquireSector(ctx, s, types, FTNone, false) + src, srcIds, ddone, err := st.AcquireSector(ctx, s, spt, types, FTNone, false) if err != nil { return xerrors.Errorf("acquire src storage: %w", err) } diff --git a/stores/remote.go b/stores/remote.go index a750d4841..c5d570ffa 100644 --- a/stores/remote.go +++ b/stores/remote.go @@ -41,7 +41,7 @@ func NewRemote(local *Local, index SectorIndex, auth http.Header) *Remote { } } -func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, existing SectorFileType, allocate SectorFileType, sealing bool) (SectorPaths, SectorPaths, func(), error) { +func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, spt abi.RegisteredProof, existing SectorFileType, allocate SectorFileType, sealing bool) (SectorPaths, SectorPaths, func(), error) { if existing|allocate != existing^allocate { return SectorPaths{}, SectorPaths{}, nil, xerrors.New("can't both find and allocate a sector") } @@ -73,7 +73,7 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, existing Sec r.fetchLk.Unlock() }() - paths, stores, done, err := r.local.AcquireSector(ctx, s, existing, allocate, sealing) + paths, stores, done, err := r.local.AcquireSector(ctx, s, spt, existing, allocate, sealing) if err != nil { return SectorPaths{}, SectorPaths{}, nil, xerrors.Errorf("local acquire error: %w", err) } @@ -87,7 +87,7 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, existing Sec continue } - ap, storageID, url, rdone, err := r.acquireFromRemote(ctx, s, fileType, sealing) + ap, storageID, url, rdone, err := r.acquireFromRemote(ctx, s, spt, fileType, sealing) if err != nil { done() return SectorPaths{}, SectorPaths{}, nil, err @@ -111,7 +111,7 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, existing Sec return paths, stores, done, nil } -func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, fileType SectorFileType, sealing bool) (string, ID, string, func(), error) { +func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, spt abi.RegisteredProof, fileType SectorFileType, sealing bool) (string, ID, string, func(), error) { si, err := r.index.StorageFindSector(ctx, s, fileType, false) if err != nil { return "", "", "", nil, err @@ -125,7 +125,7 @@ func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, fileType return si[i].Weight < si[j].Weight }) - apaths, ids, done, err := r.local.AcquireSector(ctx, s, FTNone, fileType, sealing) + apaths, ids, done, err := r.local.AcquireSector(ctx, s, spt, FTNone, fileType, sealing) if err != nil { return "", "", "", nil, xerrors.Errorf("allocate local sector for fetching: %w", err) } @@ -203,15 +203,15 @@ func (r *Remote) fetch(ctx context.Context, url, outname string) error { } } -func (r *Remote) MoveStorage(ctx context.Context, s abi.SectorID, types SectorFileType) error { +func (r *Remote) MoveStorage(ctx context.Context, s abi.SectorID, spt abi.RegisteredProof, types SectorFileType) error { // Make sure we have the data local - _, _, ddone, err := r.AcquireSector(ctx, s, types, FTNone, false) + _, _, ddone, err := r.AcquireSector(ctx, s, spt, types, FTNone, false) if err != nil { return xerrors.Errorf("acquire src storage (remote): %w", err) } ddone() - return r.local.MoveStorage(ctx, s, types) + return r.local.MoveStorage(ctx, s, spt, types) } func (r *Remote) Remove(ctx context.Context, sid abi.SectorID, typ SectorFileType) error { From 170cc0e275c5fa279100bf53f4de085a43534e12 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 8 May 2020 13:36:08 +0200 Subject: [PATCH 073/199] Add some basic tests --- go.mod | 2 + go.sum | 2 + manager_test.go | 139 +++++++++++++++++++++++++++++++++++++++++++++ mock/mock.go | 15 +---- mock/mock_test.go | 2 +- testworker_test.go | 104 +++++++++++++++++++++++++++++++++ 6 files changed, 249 insertions(+), 15 deletions(-) create mode 100644 manager_test.go create mode 100644 testworker_test.go diff --git a/go.mod b/go.mod index 80fa7f46c..d9a396d07 100644 --- a/go.mod +++ b/go.mod @@ -9,6 +9,7 @@ require ( github.com/filecoin-project/go-paramfetch v0.0.1 github.com/filecoin-project/specs-actors v0.3.0 github.com/filecoin-project/specs-storage v0.0.0-20200417134612-61b2d91a6102 + github.com/google/uuid v1.1.1 github.com/gorilla/mux v1.7.4 github.com/hashicorp/go-multierror v1.0.0 github.com/ipfs/go-cid v0.0.5 @@ -17,6 +18,7 @@ require ( github.com/ipfs/go-log/v2 v2.0.3 github.com/mattn/go-isatty v0.0.9 // indirect github.com/mitchellh/go-homedir v1.1.0 + github.com/stretchr/testify v1.4.0 go.opencensus.io v0.22.3 go.uber.org/atomic v1.5.1 // indirect go.uber.org/zap v1.13.0 // indirect diff --git a/go.sum b/go.sum index de87462da..25dfea911 100644 --- a/go.sum +++ b/go.sum @@ -57,6 +57,8 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f h1:KMlcu9X58lhTA/KrfX8Bi1LQSO4pzoVjTiL3h4Jk+Zk= diff --git a/manager_test.go b/manager_test.go new file mode 100644 index 000000000..f89989989 --- /dev/null +++ b/manager_test.go @@ -0,0 +1,139 @@ +package sectorstorage + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "github.com/filecoin-project/sector-storage/sealtasks" + logging "github.com/ipfs/go-log" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/google/uuid" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/specs-actors/actors/abi" + + "github.com/filecoin-project/sector-storage/ffiwrapper" + "github.com/filecoin-project/sector-storage/stores" +) + +type testStorage stores.StorageConfig + +func newTestStorage(t *testing.T) *testStorage { + tp, err := ioutil.TempDir(os.TempDir(), "sector-storage-test-") + require.NoError(t, err) + + { + b, err := json.MarshalIndent(&stores.LocalStorageMeta{ + ID: stores.ID(uuid.New().String()), + Weight: 1, + CanSeal: true, + CanStore: true, + }, "", " ") + require.NoError(t, err) + + err = ioutil.WriteFile(filepath.Join(tp, "sectorstore.json"), b, 0644) + require.NoError(t, err) + } + + return &testStorage{ + StoragePaths: []stores.LocalPath{ + {Path: tp}, + }, + } +} + +func (t testStorage) cleanup() { + for _, path := range t.StoragePaths { + if err := os.RemoveAll(path.Path); err != nil { + fmt.Println("Cleanup error:", err) + } + } +} + +func (t testStorage) GetStorage() (stores.StorageConfig, error) { + return stores.StorageConfig(t), nil +} + +func (t *testStorage) SetStorage(f func(*stores.StorageConfig)) error { + f((*stores.StorageConfig)(t)) + return nil +} + +var _ stores.LocalStorage = &testStorage{} + +func newTestMgr(ctx context.Context, t *testing.T) (*Manager, *stores.Local, *stores.Remote, *stores.Index) { + st := newTestStorage(t) + defer st.cleanup() + + si := stores.NewIndex() + cfg := &ffiwrapper.Config{ + SealProofType: abi.RegisteredProof_StackedDRG2KiBSeal, + } + + lstor, err := stores.NewLocal(ctx, st, si, nil) + require.NoError(t, err) + + prover, err := ffiwrapper.New(&readonlyProvider{stor: lstor}, cfg) + require.NoError(t, err) + + stor := stores.NewRemote(lstor, si, nil) + + m := &Manager{ + scfg: cfg, + + ls: st, + storage: stor, + localStore: lstor, + remoteHnd: &stores.FetchHandler{Local: lstor}, + index: si, + + sched: newScheduler(cfg.SealProofType), + + Prover: prover, + } + + go m.sched.runSched() + + return m, lstor, stor, si +} + +func TestSimple(t *testing.T) { + logging.SetAllLoggers(logging.LevelDebug) + + ctx := context.Background() + m, lstor, _, _ := newTestMgr(ctx, t) + + localTasks := []sealtasks.TaskType{ + sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFetch, + } + + err := m.AddWorker(ctx, newTestWorker(WorkerConfig{ + SealProof: abi.RegisteredProof_StackedDRG2KiBSeal, + TaskTypes: localTasks, + }, lstor)) + require.NoError(t, err) + + sid := abi.SectorID{Miner: 1000, Number: 1} + + pi, err := m.AddPiece(ctx, sid, nil, 1016, strings.NewReader(strings.Repeat("testthis", 127))) + require.NoError(t, err) + require.Equal(t, abi.PaddedPieceSize(1024), pi.Size) + + piz, err := m.AddPiece(ctx, sid, nil, 1016, bytes.NewReader(make([]byte, 1016)[:])) + require.NoError(t, err) + require.Equal(t, abi.PaddedPieceSize(1024), piz.Size) + + pieces := []abi.PieceInfo{pi, piz} + + ticket := abi.SealRandomness{9, 9, 9, 9, 9, 9, 9, 9} + + _, err = m.SealPreCommit1(ctx, sid, ticket, pieces) + require.NoError(t, err) + +} diff --git a/mock/mock.go b/mock/mock.go index 854d89870..1e3985be0 100644 --- a/mock/mock.go +++ b/mock/mock.go @@ -16,7 +16,6 @@ import ( logging "github.com/ipfs/go-log" "golang.org/x/xerrors" - "github.com/filecoin-project/sector-storage" "github.com/filecoin-project/sector-storage/ffiwrapper" ) @@ -26,7 +25,6 @@ type SectorMgr struct { sectors map[abi.SectorID]*sectorState sectorSize abi.SectorSize nextSectorID abi.SectorNumber - rateLimit chan struct{} proofType abi.RegisteredProof lk sync.Mutex @@ -34,7 +32,7 @@ type SectorMgr struct { type mockVerif struct{} -func NewMockSectorMgr(threads int, ssize abi.SectorSize) *SectorMgr { +func NewMockSectorMgr(ssize abi.SectorSize) *SectorMgr { rt, err := ffiwrapper.SealProofTypeFromSectorSize(ssize) if err != nil { panic(err) @@ -44,7 +42,6 @@ func NewMockSectorMgr(threads int, ssize abi.SectorSize) *SectorMgr { sectors: make(map[abi.SectorID]*sectorState), sectorSize: ssize, nextSectorID: 5, - rateLimit: make(chan struct{}, threads), proofType: rt, } } @@ -64,15 +61,6 @@ type sectorState struct { lk sync.Mutex } -func (mgr *SectorMgr) RateLimit() func() { - mgr.rateLimit <- struct{}{} - - // TODO: probably want to copy over rate limit code - return func() { - <-mgr.rateLimit - } -} - func (mgr *SectorMgr) NewSector(ctx context.Context, sector abi.SectorID) error { return nil } @@ -333,4 +321,3 @@ func (m mockVerif) GenerateWinningPoStSectorChallenge(ctx context.Context, proof var MockVerifier = mockVerif{} var _ ffiwrapper.Verifier = MockVerifier -var _ sectorstorage.SectorManager = &SectorMgr{} diff --git a/mock/mock_test.go b/mock/mock_test.go index 524e8d615..5f4b9c428 100644 --- a/mock/mock_test.go +++ b/mock/mock_test.go @@ -9,7 +9,7 @@ import ( ) func TestOpFinish(t *testing.T) { - sb := NewMockSectorMgr(1, 2048) + sb := NewMockSectorMgr(2048) sid, pieces, err := sb.StageFakeData(123) if err != nil { diff --git a/testworker_test.go b/testworker_test.go new file mode 100644 index 000000000..99fa4abec --- /dev/null +++ b/testworker_test.go @@ -0,0 +1,104 @@ +package sectorstorage + +import ( + "context" + + "github.com/filecoin-project/specs-actors/actors/abi" + "github.com/filecoin-project/specs-storage/storage" + + "github.com/filecoin-project/sector-storage/mock" + "github.com/filecoin-project/sector-storage/sealtasks" + "github.com/filecoin-project/sector-storage/stores" + "github.com/filecoin-project/sector-storage/storiface" +) + +type testWorker struct { + acceptTasks map[sealtasks.TaskType]struct{} + lstor *stores.Local + + mockSeal *mock.SectorMgr +} + +func newTestWorker(wcfg WorkerConfig, lstor *stores.Local) *testWorker { + ssize, err := wcfg.SealProof.SectorSize() + if err != nil { + panic(err) + } + + acceptTasks := map[sealtasks.TaskType]struct{}{} + for _, taskType := range wcfg.TaskTypes { + acceptTasks[taskType] = struct{}{} + } + + return &testWorker{ + acceptTasks: acceptTasks, + lstor: lstor, + + mockSeal: mock.NewMockSectorMgr(ssize), + } +} + +func (t *testWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) { + return t.mockSeal.SealPreCommit1(ctx, sector, ticket, pieces) +} + +func (t *testWorker) NewSector(ctx context.Context, sector abi.SectorID) error { + panic("implement me") +} + +func (t *testWorker) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) { + return t.mockSeal.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData) +} + +func (t *testWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storage.SectorCids, error) { + panic("implement me") +} + +func (t *testWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) { + panic("implement me") +} + +func (t *testWorker) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storage.Proof, error) { + panic("implement me") +} + +func (t *testWorker) FinalizeSector(ctx context.Context, sector abi.SectorID) error { + panic("implement me") +} + +func (t *testWorker) Fetch(ctx context.Context, id abi.SectorID, fileType stores.SectorFileType, b bool) error { + return nil +} + +func (t *testWorker) TaskTypes(ctx context.Context) (map[sealtasks.TaskType]struct{}, error) { + return t.acceptTasks, nil +} + +func (t *testWorker) Paths(ctx context.Context) ([]stores.StoragePath, error) { + return t.lstor.Local(ctx) +} + +func (t *testWorker) Info(ctx context.Context) (storiface.WorkerInfo, error) { + res := ResourceTable[sealtasks.TTPreCommit2][abi.RegisteredProof_StackedDRG2KiBSeal] + + return storiface.WorkerInfo{ + Hostname: "testworkerer", + Resources: storiface.WorkerResources{ + MemPhysical: res.MinMemory * 3, + MemSwap: 0, + MemReserved: res.MinMemory, + CPUs: 32, + GPUs: nil, + }, + }, nil +} + +func (t *testWorker) Closing(ctx context.Context) (<-chan struct{}, error) { + return ctx.Done(), nil +} + +func (t *testWorker) Close() error { + panic("implement me") +} + +var _ Worker = &testWorker{} From b24920c606d0da0b88f417eece6f89fefed0ad47 Mon Sep 17 00:00:00 2001 From: laser Date: Fri, 8 May 2020 13:05:01 -0700 Subject: [PATCH 074/199] update to v26 proofs --- .circleci/config.yml | 4 +- extern/filecoin-ffi | 2 +- parameters.json | 176 +++++++++++++++++++++++++------------------ 3 files changed, 106 insertions(+), 76 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index a7cb9a24b..339fd4d4d 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -22,7 +22,7 @@ commands: - restore_cache: name: Restore parameters cache keys: - - 'v24-2k-lotus-params' + - 'v26a-2k-lotus-params' paths: - /var/tmp/filecoin-proof-parameters/ - run: | @@ -33,7 +33,7 @@ commands: ./go-paramfetch 2048 "${DIR}/parameters.json" - save_cache: name: Save parameters cache - key: 'v24-2k-lotus-params' + key: 'v26a-2k-lotus-params' paths: - /var/tmp/filecoin-proof-parameters/ jobs: diff --git a/extern/filecoin-ffi b/extern/filecoin-ffi index 870251cd0..05b30afeb 160000 --- a/extern/filecoin-ffi +++ b/extern/filecoin-ffi @@ -1 +1 @@ -Subproject commit 870251cd04c54e7a3a08b714f3e71a9edec28445 +Subproject commit 05b30afeb00df254e72c0dadab8fa694dd68a4bc diff --git a/parameters.json b/parameters.json index 37ada4d6c..4ca3e6d2d 100644 --- a/parameters.json +++ b/parameters.json @@ -1,122 +1,152 @@ { - "v25-proof-of-spacetime-fallback-MerkleTree-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.params": { - "cid": "QmNUKXCEcjMRh8ayFG2X9RYUuc2SK5XRVsSVTqJmNWAgSp", - "digest": "fe10d43b607dd6687f30428476076ebb", + "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.params": { + "cid": "QmYkygifkXnrnsN4MJsjBFHTQJHx294CyikDgDK8nYxdGh", + "digest": "df3f30442a6d6b4192f5071fb17e820c", "sector_size": 2048 }, - "v25-proof-of-spacetime-fallback-MerkleTree-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.vk": { - "cid": "QmRyV1DvF57cSnnwUoocKbPiULoLdfnfWpVWi8BSsMN6KR", - "digest": "8aaca32ca9a1c6a431b99e695b443e69", + "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.vk": { + "cid": "QmdXyqbmy2bkJA9Kyhh6z25GrTCq48LwX6c1mxPsm54wi7", + "digest": "0bea3951abf9557a3569f68e52a30c6c", "sector_size": 2048 }, - "v25-proof-of-spacetime-fallback-MerkleTree-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.params": { - "cid": "QmTvwEyFVcjivKUX9AqZrC4mfjLSN2JJTucLJfNaWqCPmD", - "digest": "1cc1bf83c9e3d9b2d994ad2ec946a79f", + "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.params": { + "cid": "Qmf5XZZtP5VcYTf65MbKjLVabcS6cYMbr2rFShmfJzh5e5", + "digest": "655e6277638edc8c658094f6f0b33d54", "sector_size": 536870912 }, - "v25-proof-of-spacetime-fallback-MerkleTree-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.vk": { - "cid": "QmVfgowqdh3ruAHqQ8LA6L4VdSYwam5e8VmSEtZXBoAudC", - "digest": "377659f83c6714703b17828f603038fc", + "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.vk": { + "cid": "QmPuhdWnAXBks43emnkqi9FQzyU1gASKyz23zrD27BPGs8", + "digest": "57690e3a6a94c3f704802a674b34f36b", "sector_size": 536870912 }, - "v25-proof-of-spacetime-fallback-MerkleTree-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.params": { - "cid": "QmQ2HrKCWbtWQNNQiBj3BFE8QrqMyed8P5Vw5vyyzuSMsF", - "digest": "2e15ec3fbff51abf66d241252fb8babd", + "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.params": { + "cid": "QmPNVgTN7N5vDtD5u7ERMTLcvUtrKRBfYVUDr6uW3pKhX7", + "digest": "3d390654f58e603b896ac70c653f5676", "sector_size": 2048 }, - "v25-proof-of-spacetime-fallback-MerkleTree-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.vk": { - "cid": "QmVZRduda8L1AYsT3u3uk2kqiMnwm5Sx9D8pZbTVHAZG5i", - "digest": "11c74ae0068ca7e4a5fd8cb1eaf5b511", + "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.vk": { + "cid": "Qmbj61Zez7v5xA7nSCnmWbyLYznWJDWeusz7Yg8EcgVdoN", + "digest": "8c170a164743c39576a7f47a1b51e6f3", "sector_size": 2048 }, - "v25-proof-of-spacetime-fallback-MerkleTree-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.params": { - "cid": "QmPQkry7TXuE8nxHFAySp3X8qRXMYj2ArffoFxF2C1hYwf", - "digest": "526edf009176616771af4ba915eb5073", + "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.params": { + "cid": "QmRApb8RZoBK3cqicT7V3ydXg8yVvqPFMPrQNXP33aBihp", + "digest": "b1b58ff9a297b82885e8a7dfb035f83c", "sector_size": 8388608 }, - "v25-proof-of-spacetime-fallback-MerkleTree-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.vk": { - "cid": "QmT5bjrKBUpWEfaveWoPCu96EuHN2HuzbRzS9tSxttPCzw", - "digest": "c29e6b2927b8a28593f7c0c035b32cf5", + "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.vk": { + "cid": "QmcytF1dTdqMFoyXi931j1RgmGtLfR9LLLaBznRt1tPQyD", + "digest": "1a09e00c641f192f55af3433a028f050", "sector_size": 8388608 }, - "v25-proof-of-spacetime-fallback-MerkleTree-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.params": { - "cid": "QmXn1v64YTKLAH6yemhotr2dp1ZtjfspT328itKrMfnBW6", - "digest": "66459a78bd5e0225a19f140068620b7f", + "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.params": { + "cid": "QmPvr54tWaVeP4WnekivzUAJitTqsQfvikBvAHNEaDNQSw", + "digest": "9380e41368ed4083dbc922b290d3b786", "sector_size": 8388608 }, - "v25-proof-of-spacetime-fallback-MerkleTree-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.vk": { - "cid": "QmTax8iBqjyP3EMUSnkSoxpjxh7dWrpE5RbfN2FA4oUgc4", - "digest": "e482988346217c846cecd80dfffef35f", + "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.vk": { + "cid": "QmXyVLVDRCcxA9SjT7PeK8HFtyxZ2ZH3SHa8KoGLw8VGJt", + "digest": "f0731a7e20f90704bd38fc5d27882f6d", "sector_size": 8388608 }, - "v25-proof-of-spacetime-fallback-MerkleTree-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.params": { - "cid": "QmdVN2xTAJtKLrUdXfP7JjGpMGnZRmbDT8FHdkzxruRoLQ", - "digest": "4b27a62d2179523a2176ec7a1f2837be", + "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.params": { + "cid": "Qmf5f6ko3dqj7qauzXpZqxM9B2x2sL977K6gE2ppNwuJPv", + "digest": "273ebb8c896326b7c292bee8b775fd38", "sector_size": 536870912 }, - "v25-proof-of-spacetime-fallback-MerkleTree-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.vk": { - "cid": "QmakhHMzRBB85LLniDeRif71prLckqj7RHCc3NSgZsevQF", - "digest": "21271b25537a42e79247bd403e3ba37e", + "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.vk": { + "cid": "QmfP3MQe8koW63n5MkDENENVHxib78MJYYyZvbneCsuze8", + "digest": "3dd94da9da64e51b3445bc528d84e76d", "sector_size": 536870912 }, - "v25-proof-of-spacetime-fallback-MerkleTree-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.params": { - "cid": "QmZwPa4C5iUKPwGL7pkzZVNpn1Z9QkELneLAX4JFdRc7m5", - "digest": "263b3ee83cfff7c287900346742e363a", + "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.params": { + "cid": "QmYEeeCE8uT2bsVkxcqqUYeMmMEbe6rfmo8wQCv7jFHqqm", + "digest": "c947f2021304ed43b7216f7a8436e294", "sector_size": 34359738368 }, - "v25-proof-of-spacetime-fallback-MerkleTree-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.vk": { - "cid": "QmUVAe53gJ4eC7wmDG2K5WWEtTvfQJaAPBstEtfznJrPhR", - "digest": "e6bc2cb5808b6a5cde7b51bfe0543313", + "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.vk": { + "cid": "QmXB63ExriFjB4ywWnXTnFwCcLFfCeEP3h15qtL5i7F4aX", + "digest": "ab20d7b253e7e9a0d2ccdf7599ec8ec3", "sector_size": 34359738368 }, - "v25-proof-of-spacetime-fallback-MerkleTree-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.params": { - "cid": "QmXiiXheXvZV8rVkdDCFPdUYJVCNa67THGa7VgQRkqNojy", - "digest": "f031cdaf063c00baa637eae5e4b338c8", + "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.params": { + "cid": "QmW5Yxg3L1NSzuQVcRMHMbG3uvVoi4dTLzVaDpnEUPQpnA", + "digest": "079ba19645828ae42b22b0e3f4866e8d", "sector_size": 34359738368 }, - "v25-proof-of-spacetime-fallback-MerkleTree-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.vk": { - "cid": "QmXSzhELrQMBhJgYqpT8qTL9Piwti3eziCYt49EJ77368r", - "digest": "3f7f6e287a32083f131d4948e04e6e5b", + "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.vk": { + "cid": "QmQzZ5dJ11tcSBees38WX41tZLXS9BqpEti253m5QcnTNs", + "digest": "c76125a50a7de315165de359b5174ae4", "sector_size": 34359738368 }, - "v25-stacked-proof-of-replication-MerkleTree-Sha256Hasher-840969a6a9533823ecdc37310ef8c99d35991a2145300e10be0b883f1226a0f6.params": { - "cid": "QmbaFhfNtz6TuQdiC5oyL5rWSyUNQzcD68A6PT9mCTbvd7", - "digest": "c0cbe5bd951eb944557784a5a423fd18", + "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-2627e4006b67f99cef990c0a47d5426cb7ab0a0ad58fc1061547bf2d28b09def.params": { + "cid": "QmNk3wga1tS53FUu1QnkK8ehWA2cqpCnSEAPv3KLxdJxNa", + "digest": "421e4790c0b80e0107a7ff67acf14084", + "sector_size": 68719476736 + }, + "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-2627e4006b67f99cef990c0a47d5426cb7ab0a0ad58fc1061547bf2d28b09def.vk": { + "cid": "QmVQCHGsrUtbn9RjHs1e6GXfeXDW5m9w4ge48PSX3Z2as2", + "digest": "8b60e9cc1470a6729c687d6cf0a1f79c", + "sector_size": 68719476736 + }, + "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-b62098629d07946e9028127e70295ed996fe3ed25b0f9f88eb610a0ab4385a3c.params": { + "cid": "QmTL3VvydaMFWKvE5VzxjgKsJYgL9JMM4JVYNtQxdj9JK1", + "digest": "2685f31124b22ea6b2857e5a5e87ffa3", + "sector_size": 68719476736 + }, + "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-b62098629d07946e9028127e70295ed996fe3ed25b0f9f88eb610a0ab4385a3c.vk": { + "cid": "QmSVWbLqQYbUbbJyfsRMzEib2rfSqMtnPks1Nw22omcBQm", + "digest": "efe703cd2839597c7ca5c2a906b74296", + "sector_size": 68719476736 + }, + "v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-032d3138d22506ec0082ed72b2dcba18df18477904e35bafee82b3793b06832f.params": { + "cid": "QmU9dH31nZZUJnsogR4Ld4ySUcH6wm2RgmGiujwnqtbU6k", + "digest": "fcef8e87ae2afd7a28aae44347b804cf", "sector_size": 2048 }, - "v25-stacked-proof-of-replication-MerkleTree-Sha256Hasher-840969a6a9533823ecdc37310ef8c99d35991a2145300e10be0b883f1226a0f6.vk": { - "cid": "QmYfeAWeg7mKQJvoUCVatqa36WFbWYH2B9JMrJTorhJdUu", - "digest": "3ed77a85380eeacfea658fc4b1ad8b95", + "v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-032d3138d22506ec0082ed72b2dcba18df18477904e35bafee82b3793b06832f.vk": { + "cid": "QmdJ15DMGPooye5NaPcRfXUdHUDibcN7hKjbmTGuu1K4AQ", + "digest": "2ee2b3518229680db15161d4f582af37", "sector_size": 2048 }, - "v25-stacked-proof-of-replication-MerkleTree-Sha256Hasher-e3c3fd959a83bf60522a401dc3bf0e2d48f0e2172bcdf4c0cb3c39fa4deacd87.params": { - "cid": "QmYuGgnRHx9x4DAVtkGYGir8SDvRE17pUMH17riEpWguuN", - "digest": "b59249298e9d1bb9d25891b828e03c94", + "v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-6babf46ce344ae495d558e7770a585b2382d54f225af8ed0397b8be7c3fcd472.params": { + "cid": "QmZgtxcY3tMXXQxZTA7ZTUDXLVUnfxNcerXgeW4gG2NnfP", + "digest": "3273c7135cb75684248b475781b738ee", "sector_size": 536870912 }, - "v25-stacked-proof-of-replication-MerkleTree-Sha256Hasher-e3c3fd959a83bf60522a401dc3bf0e2d48f0e2172bcdf4c0cb3c39fa4deacd87.vk": { - "cid": "QmUE4Qhd3vUPMQwh1TPJkVxZVisxoLKj93ZDU3zfW7koc4", - "digest": "b4e3e2ea3eba88d2eba3d59472ef4094", + "v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-6babf46ce344ae495d558e7770a585b2382d54f225af8ed0397b8be7c3fcd472.vk": { + "cid": "QmSS6ZkAV2aGZcgKgdPpEEgihXF1ryZX8PSAZDWSoeL1d4", + "digest": "1519b5f61d9044a59f2bdc57537c094b", "sector_size": 536870912 }, - "v25-stacked-proof-of-replication-MerkleTree-Sha256Hasher-e4a49558d04647264048879511e843136e4488499e23bc442a341083a19ee79c.params": { - "cid": "QmePVNPMxzDuPF3mQaZ9Ld1hTGhResvGZgZ61NXy5cDQPK", - "digest": "0deb36662833379267609fc4e5f4176b", + "v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-ecd683648512ab1765faa2a5f14bab48f676e633467f0aa8aad4b55dcb0652bb.params": { + "cid": "QmQBGXeiNn6hVwbR6qFarQqiNGDdKk4h9ucfyvcXyfYz2N", + "digest": "7d5f896f435c38e93bcda6dd168d860b", "sector_size": 8388608 }, - "v25-stacked-proof-of-replication-MerkleTree-Sha256Hasher-e4a49558d04647264048879511e843136e4488499e23bc442a341083a19ee79c.vk": { - "cid": "QmWLpw8pLwuCGiUQGQiwuXTjKcvPwsaS573gQ6YPc67jVm", - "digest": "1618f598e3a5c26acee17540aa5cd536", + "v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-ecd683648512ab1765faa2a5f14bab48f676e633467f0aa8aad4b55dcb0652bb.vk": { + "cid": "QmPrZgBVGMckEAeu5eSJnLmiAwcPQjKjZe5ir6VaQ5AxKs", + "digest": "fe6d2de44580a0db5a4934688899b92f", "sector_size": 8388608 }, - "v25-stacked-proof-of-replication-MerkleTree-Sha256Hasher-8a0719d8b9de3605f89b084c73210dfe2a557407c6343f8d32640094f2c9d074.params": { - "cid": "QmdtfjaJpqE8pRt1cmceh8c2Qj8GNwrzmmSmckZr6VDAWR", - "digest": "18796da53b41f23e341d19ce7954f647", + "v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-0-sha256_hasher-82a357d2f2ca81dc61bb45f4a762807aedee1b0a53fd6c4e77b46a01bfef7820.params": { + "cid": "QmZL2cq45XJn5BFzagAZwgFmLrcM1W6CXoiEF9C5j5tjEF", + "digest": "acdfed9f0512bc85a01a9fb871d475d5", "sector_size": 34359738368 }, - "v25-stacked-proof-of-replication-MerkleTree-Sha256Hasher-8a0719d8b9de3605f89b084c73210dfe2a557407c6343f8d32640094f2c9d074.vk": { - "cid": "QmYF8Y17nHYAvbRA7NCQMs31VsBiMcAbwrViZwyT4Gvb8C", - "digest": "39d80879d4d7353e2ed5771670d97dfc", + "v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-0-sha256_hasher-82a357d2f2ca81dc61bb45f4a762807aedee1b0a53fd6c4e77b46a01bfef7820.vk": { + "cid": "QmQ4zB7nNa1tDYNifBkExRnZtwtxZw775iaqvVsZyRi6Q2", + "digest": "524a2f3e9d6826593caebc41bb545c40", "sector_size": 34359738368 + }, + "v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-2-sha256_hasher-96f1b4a04c5c51e4759bbf224bbc2ef5a42c7100f16ec0637123f16a845ddfb2.params": { + "cid": "QmY7DitNKXFeLQt9QoVQkfjM1EvRnprqUVxjmkTXkHDNka", + "digest": "f27271c0537ba65ade2ec045f8fbd069", + "sector_size": 68719476736 + }, + "v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-2-sha256_hasher-96f1b4a04c5c51e4759bbf224bbc2ef5a42c7100f16ec0637123f16a845ddfb2.vk": { + "cid": "QmUJsvoCuQ4LszPmeRVAkMYb5qY95ctz3UXKhu8xLzyFKo", + "digest": "576b292938c6c9d0a0e721bd867a543b", + "sector_size": 68719476736 } -} +} \ No newline at end of file From 845502f522df6247a3b1e7510604af9d7ad85d17 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 8 May 2020 22:32:34 +0200 Subject: [PATCH 075/199] Add entries for 64G sectors --- ffiwrapper/config.go | 2 ++ go.mod | 2 +- go.sum | 5 +++-- resources.go | 51 ++++++++++++++++++++++++++++++++++++++++++++ sched.go | 3 +++ 5 files changed, 60 insertions(+), 3 deletions(-) diff --git a/ffiwrapper/config.go b/ffiwrapper/config.go index be8e2833b..143283c0a 100644 --- a/ffiwrapper/config.go +++ b/ffiwrapper/config.go @@ -30,6 +30,8 @@ func SealProofTypeFromSectorSize(ssize abi.SectorSize) (abi.RegisteredProof, err return abi.RegisteredProof_StackedDRG512MiBSeal, nil case 32 << 30: return abi.RegisteredProof_StackedDRG32GiBSeal, nil + case 64 << 30: + return abi.RegisteredProof_StackedDRG64GiBSeal, nil default: return 0, xerrors.Errorf("unsupported sector size for miner: %v", ssize) } diff --git a/go.mod b/go.mod index d9a396d07..21d268986 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/filecoin-project/filecoin-ffi v0.0.0-20200326153646-e899cc1dd072 github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5 github.com/filecoin-project/go-paramfetch v0.0.1 - github.com/filecoin-project/specs-actors v0.3.0 + github.com/filecoin-project/specs-actors v0.4.1-0.20200508202406-42be6629284d github.com/filecoin-project/specs-storage v0.0.0-20200417134612-61b2d91a6102 github.com/google/uuid v1.1.1 github.com/gorilla/mux v1.7.4 diff --git a/go.sum b/go.sum index 25dfea911..ee12b7090 100644 --- a/go.sum +++ b/go.sum @@ -28,6 +28,7 @@ github.com/fatih/color v1.8.0/go.mod h1:3l45GVGkyrnYNl9HoIjnp2NnNWvh6hLAqD8yTfGj github.com/filecoin-project/go-address v0.0.2-0.20200218010043-eb9bb40ed5be h1:TooKBwR/g8jG0hZ3lqe9S5sy2vTUcLOZLlz3M5wGn2E= github.com/filecoin-project/go-address v0.0.2-0.20200218010043-eb9bb40ed5be/go.mod h1:SAOwJoakQ8EPjwNIsiakIQKsoKdkcbx8U3IapgCg9R0= github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200131012142-05d80eeccc5e/go.mod h1:boRtQhzmxNocrMxOXo1NYn4oUc1NGvR8tEa79wApNXg= +github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200424220931-6263827e49f2/go.mod h1:boRtQhzmxNocrMxOXo1NYn4oUc1NGvR8tEa79wApNXg= github.com/filecoin-project/go-bitfield v0.0.0-20200309034705-8c7ac40bd550 h1:aockulLU8Qjkdj4FQz53WQpNosAIYk8DxRediRLkE5c= github.com/filecoin-project/go-bitfield v0.0.0-20200309034705-8c7ac40bd550/go.mod h1:iodsLxOFZnqKtjj2zkgqzoGNrv6vUqj69AT/J8DKXEw= github.com/filecoin-project/go-bitfield v0.0.0-20200416002808-b3ee67ec9060 h1:/3qjGMn6ukXgZJHsIbuwGL7ipla8DOV3uHZDBJkBYfU= @@ -40,8 +41,8 @@ github.com/filecoin-project/go-paramfetch v0.0.1 h1:gV7bs5YaqlgpGFMiLxInGK2L1FyC github.com/filecoin-project/go-paramfetch v0.0.1/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= github.com/filecoin-project/specs-actors v0.0.0-20200409043918-e569f4a2f504 h1:mwuAaqxKThl70+7FkGdFKVLdwaQZQ8XmscKdhSBBtnc= github.com/filecoin-project/specs-actors v0.0.0-20200409043918-e569f4a2f504/go.mod h1:mdJraXq5vMy0+/FqVQIrnNlpQ/Em6zeu06G/ltQ0/lA= -github.com/filecoin-project/specs-actors v0.3.0 h1:QxgAuTrZr5TPqjyprZk0nTYW5o0JWpzbb5v+4UHHvN0= -github.com/filecoin-project/specs-actors v0.3.0/go.mod h1:nQYnFbQ7Y0bHZyq6HDEuVlCPR+U3z5Q3wMOQ+2aiV+Y= +github.com/filecoin-project/specs-actors v0.4.1-0.20200508202406-42be6629284d h1:vzuhvR+huV95QU+KSBCyQvLP6LUtwrPUyeUTzOx1B5I= +github.com/filecoin-project/specs-actors v0.4.1-0.20200508202406-42be6629284d/go.mod h1:UW3ft23q6VS8wQoNqLWjENsu9gu1uh6lxOd+H8cwhT8= github.com/filecoin-project/specs-storage v0.0.0-20200417134612-61b2d91a6102 h1:T3f/zkuvgtgqcXrb0NO3BicuveGOxxUAMPa/Yif2kuE= github.com/filecoin-project/specs-storage v0.0.0-20200417134612-61b2d91a6102/go.mod h1:xJ1/xl9+8zZeSSSFmDC3Wr6uusCTxyYPI0VeNVSFmPE= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= diff --git a/resources.go b/resources.go index 23dcc2085..1dcbbc97c 100644 --- a/resources.go +++ b/resources.go @@ -24,6 +24,14 @@ const MaxCachingOverhead = 32 << 30 var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ sealtasks.TTAddPiece: { + abi.RegisteredProof_StackedDRG64GiBSeal: Resources{ // This is probably a bit conservative + MaxMemory: 64 << 30, + MinMemory: 64 << 30, + + Threads: 1, + + BaseMinMemory: 1 << 30, + }, abi.RegisteredProof_StackedDRG32GiBSeal: Resources{ // This is probably a bit conservative MaxMemory: 32 << 30, MinMemory: 32 << 30, @@ -58,6 +66,14 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ }, }, sealtasks.TTPreCommit1: { + abi.RegisteredProof_StackedDRG64GiBSeal: Resources{ + MaxMemory: 128 << 30, + MinMemory: 96 << 30, + + Threads: 1, + + BaseMinMemory: 60 << 30, + }, abi.RegisteredProof_StackedDRG32GiBSeal: Resources{ MaxMemory: 64 << 30, MinMemory: 48 << 30, @@ -92,6 +108,15 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ }, }, sealtasks.TTPreCommit2: { + abi.RegisteredProof_StackedDRG64GiBSeal: Resources{ + MaxMemory: 64 << 30, + MinMemory: 64 << 30, + + Threads: -1, + CanGPU: true, + + BaseMinMemory: 60 << 30, + }, abi.RegisteredProof_StackedDRG32GiBSeal: Resources{ MaxMemory: 32 << 30, MinMemory: 32 << 30, @@ -127,6 +152,14 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ }, }, sealtasks.TTCommit1: { // Very short (~100ms), so params are very light + abi.RegisteredProof_StackedDRG64GiBSeal: Resources{ + MaxMemory: 1 << 30, + MinMemory: 1 << 30, + + Threads: 0, + + BaseMinMemory: 1 << 30, + }, abi.RegisteredProof_StackedDRG32GiBSeal: Resources{ MaxMemory: 1 << 30, MinMemory: 1 << 30, @@ -161,6 +194,15 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ }, }, sealtasks.TTCommit2: { + abi.RegisteredProof_StackedDRG64GiBSeal: Resources{ + MaxMemory: 260 << 30, // TODO: Confirm + MinMemory: 120 << 30, + + Threads: -1, + CanGPU: true, + + BaseMinMemory: 128 << 30, // params + }, abi.RegisteredProof_StackedDRG32GiBSeal: Resources{ MaxMemory: 130 << 30, MinMemory: 60 << 30, @@ -199,6 +241,15 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ }, }, sealtasks.TTFetch: { + abi.RegisteredProof_StackedDRG64GiBSeal: Resources{ + MaxMemory: 1 << 20, + MinMemory: 1 << 20, + + Threads: 0, + CanGPU: false, + + BaseMinMemory: 0, + }, abi.RegisteredProof_StackedDRG32GiBSeal: Resources{ MaxMemory: 1 << 20, MinMemory: 1 << 20, diff --git a/sched.go b/sched.go index c48440757..91b7f8fa6 100644 --- a/sched.go +++ b/sched.go @@ -378,6 +378,9 @@ func canHandleRequest(needRes Resources, spt abi.RegisteredProof, wid WorkerID, if spt == abi.RegisteredProof_StackedDRG32GiBSeal { maxNeedMem += MaxCachingOverhead } + if spt == abi.RegisteredProof_StackedDRG64GiBSeal { + maxNeedMem += MaxCachingOverhead * 2 // ewwrhmwh + } if maxNeedMem > res.MemSwap+res.MemPhysical { log.Debugf("sched: not scheduling on worker %d; not enough virtual memory - need: %dM, have %dM", wid, maxNeedMem/mib, (res.MemSwap+res.MemPhysical)/mib) return false From 2bc21be319830a55712032d6f0ae1aa6f0a7d3b8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Wed, 13 May 2020 20:45:14 +0200 Subject: [PATCH 076/199] Cleanup before retrying precommit1 --- go.sum | 1 + localworker.go | 13 ++++++++++++- stores/http_handler.go | 2 +- stores/interface.go | 2 +- stores/local.go | 4 ++-- stores/remote.go | 4 ++-- 6 files changed, 19 insertions(+), 7 deletions(-) diff --git a/go.sum b/go.sum index ee12b7090..22f75c198 100644 --- a/go.sum +++ b/go.sum @@ -41,6 +41,7 @@ github.com/filecoin-project/go-paramfetch v0.0.1 h1:gV7bs5YaqlgpGFMiLxInGK2L1FyC github.com/filecoin-project/go-paramfetch v0.0.1/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= github.com/filecoin-project/specs-actors v0.0.0-20200409043918-e569f4a2f504 h1:mwuAaqxKThl70+7FkGdFKVLdwaQZQ8XmscKdhSBBtnc= github.com/filecoin-project/specs-actors v0.0.0-20200409043918-e569f4a2f504/go.mod h1:mdJraXq5vMy0+/FqVQIrnNlpQ/Em6zeu06G/ltQ0/lA= +github.com/filecoin-project/specs-actors v0.3.0/go.mod h1:nQYnFbQ7Y0bHZyq6HDEuVlCPR+U3z5Q3wMOQ+2aiV+Y= github.com/filecoin-project/specs-actors v0.4.1-0.20200508202406-42be6629284d h1:vzuhvR+huV95QU+KSBCyQvLP6LUtwrPUyeUTzOx1B5I= github.com/filecoin-project/specs-actors v0.4.1-0.20200508202406-42be6629284d/go.mod h1:UW3ft23q6VS8wQoNqLWjENsu9gu1uh6lxOd+H8cwhT8= github.com/filecoin-project/specs-storage v0.0.0-20200417134612-61b2d91a6102 h1:T3f/zkuvgtgqcXrb0NO3BicuveGOxxUAMPa/Yif2kuE= diff --git a/localworker.go b/localworker.go index 969007d93..1c7e030e5 100644 --- a/localworker.go +++ b/localworker.go @@ -114,6 +114,17 @@ func (l *LocalWorker) Fetch(ctx context.Context, sector abi.SectorID, fileType s } func (l *LocalWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage2.PreCommit1Out, err error) { + { + // cleanup previous failed attempts if they exist + if err := l.storage.Remove(ctx, sector, stores.FTSealed, true); err != nil { + return nil, xerrors.Errorf("cleaning up sealed data: %w", err) + } + + if err := l.storage.Remove(ctx, sector, stores.FTCache, true); err != nil { + return nil, xerrors.Errorf("cleaning up cache data: %w", err) + } + } + sb, err := l.sb() if err != nil { return nil, err @@ -159,7 +170,7 @@ func (l *LocalWorker) FinalizeSector(ctx context.Context, sector abi.SectorID) e return xerrors.Errorf("finalizing sector: %w", err) } - if err := l.storage.Remove(ctx, sector, stores.FTUnsealed); err != nil { + if err := l.storage.Remove(ctx, sector, stores.FTUnsealed, true); err != nil { return xerrors.Errorf("removing unsealed data: %w", err) } diff --git a/stores/http_handler.go b/stores/http_handler.go index c39ca4510..2a3e85aef 100644 --- a/stores/http_handler.go +++ b/stores/http_handler.go @@ -131,7 +131,7 @@ func (handler *FetchHandler) remoteDeleteSector(w http.ResponseWriter, r *http.R return } - if err := handler.Remove(r.Context(), id, ft); err != nil { + if err := handler.Remove(r.Context(), id, ft, false); err != nil { log.Error("%+v", err) w.WriteHeader(500) return diff --git a/stores/interface.go b/stores/interface.go index 4a1361904..0735f7bf8 100644 --- a/stores/interface.go +++ b/stores/interface.go @@ -11,7 +11,7 @@ import ( type Store interface { AcquireSector(ctx context.Context, s abi.SectorID, spt abi.RegisteredProof, existing SectorFileType, allocate SectorFileType, sealing bool) (paths SectorPaths, stores SectorPaths, done func(), err error) - Remove(ctx context.Context, s abi.SectorID, types SectorFileType) error + Remove(ctx context.Context, s abi.SectorID, types SectorFileType, force bool) error // move sectors into storage MoveStorage(ctx context.Context, s abi.SectorID, spt abi.RegisteredProof, types SectorFileType) error diff --git a/stores/local.go b/stores/local.go index 52ba0afbc..16f333ac4 100644 --- a/stores/local.go +++ b/stores/local.go @@ -313,7 +313,7 @@ func (st *Local) Local(ctx context.Context) ([]StoragePath, error) { return out, nil } -func (st *Local) Remove(ctx context.Context, sid abi.SectorID, typ SectorFileType) error { +func (st *Local) Remove(ctx context.Context, sid abi.SectorID, typ SectorFileType, force bool) error { if bits.OnesCount(uint(typ)) != 1 { return xerrors.New("delete expects one file type") } @@ -323,7 +323,7 @@ func (st *Local) Remove(ctx context.Context, sid abi.SectorID, typ SectorFileTyp return xerrors.Errorf("finding existing sector %d(t:%d) failed: %w", sid, typ, err) } - if len(si) == 0 { + if len(si) == 0 && !force { return xerrors.Errorf("can't delete sector %v(%d), not found", sid, typ) } diff --git a/stores/remote.go b/stores/remote.go index c5d570ffa..325060747 100644 --- a/stores/remote.go +++ b/stores/remote.go @@ -214,12 +214,12 @@ func (r *Remote) MoveStorage(ctx context.Context, s abi.SectorID, spt abi.Regist return r.local.MoveStorage(ctx, s, spt, types) } -func (r *Remote) Remove(ctx context.Context, sid abi.SectorID, typ SectorFileType) error { +func (r *Remote) Remove(ctx context.Context, sid abi.SectorID, typ SectorFileType, force bool) error { if bits.OnesCount(uint(typ)) != 1 { return xerrors.New("delete expects one file type") } - if err := r.local.Remove(ctx, sid, typ); err != nil { + if err := r.local.Remove(ctx, sid, typ, force); err != nil { return xerrors.Errorf("remove from local: %w", err) } From 617d34b9de29491c87aea2637914f39d4f81e9f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 14 May 2020 01:56:21 +0200 Subject: [PATCH 077/199] sched: Take sector numbers into accout when scheduling work --- manager.go | 12 ++++++------ request_queue.go | 6 +++++- sched.go | 4 +++- 3 files changed, 14 insertions(+), 8 deletions(-) diff --git a/manager.go b/manager.go index 065370ed6..7c39c13ce 100644 --- a/manager.go +++ b/manager.go @@ -204,7 +204,7 @@ func (m *Manager) AddPiece(ctx context.Context, sector abi.SectorID, existingPie } var out abi.PieceInfo - err = m.sched.Schedule(ctx, sealtasks.TTAddPiece, selector, schedNop, func(ctx context.Context, w Worker) error { + err = m.sched.Schedule(ctx, sector, sealtasks.TTAddPiece, selector, schedNop, func(ctx context.Context, w Worker) error { p, err := w.AddPiece(ctx, sector, existingPieces, sz, r) if err != nil { return err @@ -224,7 +224,7 @@ func (m *Manager) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke return nil, xerrors.Errorf("creating path selector: %w", err) } - err = m.sched.Schedule(ctx, sealtasks.TTPreCommit1, selector, schedFetch(sector, stores.FTUnsealed, true), func(ctx context.Context, w Worker) error { + err = m.sched.Schedule(ctx, sector, sealtasks.TTPreCommit1, selector, schedFetch(sector, stores.FTUnsealed, true), func(ctx context.Context, w Worker) error { p, err := w.SealPreCommit1(ctx, sector, ticket, pieces) if err != nil { return err @@ -242,7 +242,7 @@ func (m *Manager) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase return storage.SectorCids{}, xerrors.Errorf("creating path selector: %w", err) } - err = m.sched.Schedule(ctx, sealtasks.TTPreCommit2, selector, schedFetch(sector, stores.FTCache|stores.FTSealed, true), func(ctx context.Context, w Worker) error { + err = m.sched.Schedule(ctx, sector, sealtasks.TTPreCommit2, selector, schedFetch(sector, stores.FTCache|stores.FTSealed, true), func(ctx context.Context, w Worker) error { p, err := w.SealPreCommit2(ctx, sector, phase1Out) if err != nil { return err @@ -263,7 +263,7 @@ func (m *Manager) SealCommit1(ctx context.Context, sector abi.SectorID, ticket a // (except, don't.. for now at least - we are using this step to bring data // into 'provable' storage. Optimally we'd do that in commit2, in parallel // with snark compute) - err = m.sched.Schedule(ctx, sealtasks.TTCommit1, selector, schedFetch(sector, stores.FTCache|stores.FTSealed, true), func(ctx context.Context, w Worker) error { + err = m.sched.Schedule(ctx, sector, sealtasks.TTCommit1, selector, schedFetch(sector, stores.FTCache|stores.FTSealed, true), func(ctx context.Context, w Worker) error { p, err := w.SealCommit1(ctx, sector, ticket, seed, pieces, cids) if err != nil { return err @@ -277,7 +277,7 @@ func (m *Manager) SealCommit1(ctx context.Context, sector abi.SectorID, ticket a func (m *Manager) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.Commit1Out) (out storage.Proof, err error) { selector := newTaskSelector() - err = m.sched.Schedule(ctx, sealtasks.TTCommit2, selector, schedNop, func(ctx context.Context, w Worker) error { + err = m.sched.Schedule(ctx, sector, sealtasks.TTCommit2, selector, schedNop, func(ctx context.Context, w Worker) error { p, err := w.SealCommit2(ctx, sector, phase1Out) if err != nil { return err @@ -295,7 +295,7 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID) error return xerrors.Errorf("creating path selector: %w", err) } - return m.sched.Schedule(ctx, sealtasks.TTFinalize, selector, + return m.sched.Schedule(ctx, sector, sealtasks.TTFinalize, selector, schedFetch(sector, stores.FTCache|stores.FTSealed|stores.FTUnsealed, false), func(ctx context.Context, w Worker) error { return w.FinalizeSector(ctx, sector) diff --git a/request_queue.go b/request_queue.go index 09ca7ae3f..e5b3fd234 100644 --- a/request_queue.go +++ b/request_queue.go @@ -7,7 +7,11 @@ type requestQueue []*workerRequest func (q requestQueue) Len() int { return len(q) } func (q requestQueue) Less(i, j int) bool { - return q[i].taskType.Less(q[j].taskType) + if q[i].taskType != q[j].taskType { + return q[i].taskType.Less(q[j].taskType) + } + + return q[i].sector.Number < q[j].sector.Number // optimize minerActor.NewSectors bitfield } func (q requestQueue) Swap(i, j int) { diff --git a/sched.go b/sched.go index 91b7f8fa6..ba41a013b 100644 --- a/sched.go +++ b/sched.go @@ -64,11 +64,12 @@ func newScheduler(spt abi.RegisteredProof) *scheduler { } } -func (sh *scheduler) Schedule(ctx context.Context, taskType sealtasks.TaskType, sel WorkerSelector, prepare WorkerAction, work WorkerAction) error { +func (sh *scheduler) Schedule(ctx context.Context, sector abi.SectorID, taskType sealtasks.TaskType, sel WorkerSelector, prepare WorkerAction, work WorkerAction) error { ret := make(chan workerResponse) select { case sh.schedule <- &workerRequest{ + sector: sector, taskType: taskType, sel: sel, @@ -95,6 +96,7 @@ func (sh *scheduler) Schedule(ctx context.Context, taskType sealtasks.TaskType, } type workerRequest struct { + sector abi.SectorID taskType sealtasks.TaskType sel WorkerSelector From 20817dc51db5e27771575d6d320aa2c0dbb6acc7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 15 May 2020 14:33:04 +0200 Subject: [PATCH 078/199] resources: relax memory requirements for commit2 --- resources.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/resources.go b/resources.go index 1dcbbc97c..23512b85c 100644 --- a/resources.go +++ b/resources.go @@ -196,7 +196,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ sealtasks.TTCommit2: { abi.RegisteredProof_StackedDRG64GiBSeal: Resources{ MaxMemory: 260 << 30, // TODO: Confirm - MinMemory: 120 << 30, + MinMemory: 60 << 30, Threads: -1, CanGPU: true, @@ -205,7 +205,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ }, abi.RegisteredProof_StackedDRG32GiBSeal: Resources{ MaxMemory: 130 << 30, - MinMemory: 60 << 30, + MinMemory: 30 << 30, Threads: -1, CanGPU: true, From 4065c94c1f301f35727019d86c0c2597ba8b6f13 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 14 May 2020 03:01:38 +0200 Subject: [PATCH 079/199] Scaffolding for UnsealRange --- manager.go | 60 +++++++++++++++++++++++++++++++++++++++++------ resources.go | 5 ++++ sealtasks/task.go | 20 +++++++++------- 3 files changed, 70 insertions(+), 15 deletions(-) diff --git a/manager.go b/manager.go index 065370ed6..40e5c5788 100644 --- a/manager.go +++ b/manager.go @@ -28,7 +28,10 @@ type URLs []string type Worker interface { ffiwrapper.StorageSealer + Fetch(context.Context, abi.SectorID, stores.SectorFileType, bool) error + UnsealPiece(context.Context, abi.SectorID, ffiwrapper.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error + ReadPiece(context.Context, io.Writer, abi.SectorID, ffiwrapper.UnpaddedByteIndex, abi.UnpaddedPieceSize) error TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) @@ -46,7 +49,7 @@ type Worker interface { type SectorManager interface { SectorSize() abi.SectorSize - ReadPieceFromSealedSector(context.Context, abi.SectorID, ffiwrapper.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (io.ReadCloser, error) + ReadPieceFromSealedSector(context.Context, io.Writer, abi.SectorID, ffiwrapper.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error ffiwrapper.StorageSealer storage.Prover @@ -73,6 +76,7 @@ type SealerConfig struct { AllowPreCommit1 bool AllowPreCommit2 bool AllowCommit bool + AllowUnseal bool } type StorageAuth http.Header @@ -107,7 +111,7 @@ func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg go m.sched.runSched() localTasks := []sealtasks.TaskType{ - sealtasks.TTAddPiece, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFetch, + sealtasks.TTAddPiece, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFetch, sealtasks.TTReadUnsealed, } if sc.AllowPreCommit1 { localTasks = append(localTasks, sealtasks.TTPreCommit1) @@ -118,6 +122,9 @@ func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg if sc.AllowCommit { localTasks = append(localTasks, sealtasks.TTCommit2) } + if sc.AllowUnseal { + localTasks = append(localTasks, sealtasks.TTUnseal) + } err = m.AddWorker(ctx, NewLocalWorker(WorkerConfig{ SealProof: cfg.SealProofType, @@ -172,10 +179,6 @@ func (m *Manager) SectorSize() abi.SectorSize { return sz } -func (m *Manager) ReadPieceFromSealedSector(context.Context, abi.SectorID, ffiwrapper.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (io.ReadCloser, error) { - panic("implement me") -} - func schedNop(context.Context, Worker) error { return nil } @@ -186,6 +189,49 @@ func schedFetch(sector abi.SectorID, ft stores.SectorFileType, sealing bool) fun } } +func (m *Manager) ReadPieceFromSealedSector(ctx context.Context, sink io.Writer, sector abi.SectorID, offset ffiwrapper.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) error { + best, err := m.index.StorageFindSector(ctx, sector, stores.FTUnsealed, false) + if err != nil { + return xerrors.Errorf("read piece: checking for already existing unsealed sector: %w", err) + } + + var selector WorkerSelector + if len(best) == 0 { // new + selector, err = newAllocSelector(ctx, m.index, stores.FTUnsealed) + } else { // append to existing + selector, err = newExistingSelector(ctx, m.index, sector, stores.FTUnsealed, false) + } + if err != nil { + return xerrors.Errorf("creating unsealPiece selector: %w", err) + } + + // TODO: Optimization: don't send unseal to a worker if the requested range is already unsealed + + // TODO!!!! make schedFetch COPY stores.FTSealed and stores.FTCache + // Moving those to a temp sealing storage may make PoSts fail + + err = m.sched.Schedule(ctx, sealtasks.TTUnseal, selector, schedFetch(sector, stores.FTUnsealed|stores.FTSealed|stores.FTCache, true), func(ctx context.Context, w Worker) error { + return w.UnsealPiece(ctx, sector, offset, size, ticket, unsealed) + }) + if err != nil { + return err + } + + selector, err = newExistingSelector(ctx, m.index, sector, stores.FTUnsealed, false) + if err != nil { + return xerrors.Errorf("creating readPiece selector: %w", err) + } + + err = m.sched.Schedule(ctx, sealtasks.TTReadUnsealed, selector, schedFetch(sector, stores.FTUnsealed, true), func(ctx context.Context, w Worker) error { + return w.ReadPiece(ctx, sink, sector, offset, size) + }) + if err != nil { + return xerrors.Errorf("reading piece from sealed sector: %w", err) + } + + return nil +} + func (m *Manager) NewSector(ctx context.Context, sector abi.SectorID) error { log.Warnf("stub NewSector") return nil @@ -196,7 +242,7 @@ func (m *Manager) AddPiece(ctx context.Context, sector abi.SectorID, existingPie var err error if len(existingPieces) == 0 { // new selector, err = newAllocSelector(ctx, m.index, stores.FTUnsealed) - } else { // append to existing + } else { // use existing selector, err = newExistingSelector(ctx, m.index, sector, stores.FTUnsealed, false) } if err != nil { diff --git a/resources.go b/resources.go index 23512b85c..46999a280 100644 --- a/resources.go +++ b/resources.go @@ -288,3 +288,8 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ }, }, } + +func init() { + ResourceTable[sealtasks.TTUnseal] = ResourceTable[sealtasks.TTPreCommit1] // TODO: measure accurately + ResourceTable[sealtasks.TTReadUnsealed] = ResourceTable[sealtasks.TTFetch] +} diff --git a/sealtasks/task.go b/sealtasks/task.go index 0a94d2c04..978107c85 100644 --- a/sealtasks/task.go +++ b/sealtasks/task.go @@ -11,17 +11,21 @@ const ( TTFinalize TaskType = "seal/v0/finalize" - TTFetch TaskType = "seal/v0/fetch" + TTFetch TaskType = "seal/v0/fetch" + TTUnseal TaskType = "seal/v0/unseal" + TTReadUnsealed TaskType = "seal/v0/unsealread" ) var order = map[TaskType]int{ - TTAddPiece: 7, - TTPreCommit1: 6, - TTPreCommit2: 5, - TTCommit2: 4, - TTCommit1: 3, - TTFetch: 2, - TTFinalize: 1, + TTAddPiece: 7, + TTPreCommit1: 6, + TTPreCommit2: 5, + TTCommit2: 4, + TTCommit1: 3, + TTFetch: 2, + TTFinalize: 1, + TTUnseal: 0, + TTReadUnsealed: 0, } func (a TaskType) Less(b TaskType) bool { From 450b0b8acb0ecc463b0cf15ad4cb4e935c3ba0ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Sat, 16 May 2020 23:03:29 +0200 Subject: [PATCH 080/199] very simple fault check function --- faults.go | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ manager.go | 1 + mock/mock.go | 4 ++++ resources.go | 6 +++--- 4 files changed, 56 insertions(+), 3 deletions(-) create mode 100644 faults.go diff --git a/faults.go b/faults.go new file mode 100644 index 000000000..670de8793 --- /dev/null +++ b/faults.go @@ -0,0 +1,48 @@ +package sectorstorage + +import ( + "context" + + "golang.org/x/xerrors" + + "github.com/filecoin-project/sector-storage/stores" + "github.com/filecoin-project/specs-actors/actors/abi" +) + +// TODO: Track things more actively +type FaultTracker interface { + CheckProvable(ctx context.Context, spt abi.RegisteredProof, sectors []abi.SectorID) ([]abi.SectorID, error) +} + +// Returns unprovable sectors +func (m *Manager) CheckProvable(ctx context.Context, spt abi.RegisteredProof, sectors []abi.SectorID) ([]abi.SectorID, error) { + var bad []abi.SectorID + + // TODO: More better checks + for _, sector := range sectors { + err := func() error { + lp, _, done, err := m.localStore.AcquireSector(ctx, sector, spt, stores.FTSealed|stores.FTCache, stores.FTNone, false) + if err != nil { + return xerrors.Errorf("acquire sector in checkProvable: %w", err) + } + defer done() + + if lp.Sealed == "" || lp.Cache == "" { + log.Warnw("CheckProvable Sector FAULT: cache an/or sealed paths not found", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache) + bad = append(bad, sector) + return nil + } + + // must be fine + + return nil + }() + if err != nil { + return nil, err + } + } + + return bad, nil +} + +var _ FaultTracker = &Manager{} diff --git a/manager.go b/manager.go index 065370ed6..629d672bd 100644 --- a/manager.go +++ b/manager.go @@ -50,6 +50,7 @@ type SectorManager interface { ffiwrapper.StorageSealer storage.Prover + FaultTracker } type WorkerID uint64 diff --git a/mock/mock.go b/mock/mock.go index 1e3985be0..05e424307 100644 --- a/mock/mock.go +++ b/mock/mock.go @@ -288,6 +288,10 @@ func (mgr *SectorMgr) FinalizeSector(context.Context, abi.SectorID) error { return nil } +func (mgr *SectorMgr) CheckProvable(context.Context, abi.RegisteredProof, []abi.SectorID) ([]abi.SectorID, error) { + return nil, nil +} + func (m mockVerif) VerifySeal(svi abi.SealVerifyInfo) (bool, error) { if len(svi.OnChain.Proof) != 32 { // Real ones are longer, but this should be fine return false, nil diff --git a/resources.go b/resources.go index 23512b85c..7c281eda0 100644 --- a/resources.go +++ b/resources.go @@ -201,16 +201,16 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ Threads: -1, CanGPU: true, - BaseMinMemory: 128 << 30, // params + BaseMinMemory: 64 << 30, // params }, abi.RegisteredProof_StackedDRG32GiBSeal: Resources{ - MaxMemory: 130 << 30, + MaxMemory: 150 << 30, // TODO: ~30G of this should really be BaseMaxMemory MinMemory: 30 << 30, Threads: -1, CanGPU: true, - BaseMinMemory: 64 << 30, // params + BaseMinMemory: 32 << 30, // params }, abi.RegisteredProof_StackedDRG512MiBSeal: Resources{ MaxMemory: 3 << 29, // 1.5G From f577c2120ce66e635f3a0c3671011eb143a7dc03 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 14 May 2020 17:35:38 +0200 Subject: [PATCH 081/199] Partial Files, use for sealing --- ffiwrapper/partialfile.go | 255 ++++++++++++++++++++++++++++++++++++++ ffiwrapper/sealer_cgo.go | 108 +++++++++++++--- ffiwrapper/sealer_test.go | 13 ++ go.mod | 2 + go.sum | 4 + localworker.go | 9 ++ manager.go | 4 +- 7 files changed, 377 insertions(+), 18 deletions(-) create mode 100644 ffiwrapper/partialfile.go diff --git a/ffiwrapper/partialfile.go b/ffiwrapper/partialfile.go new file mode 100644 index 000000000..256a416f3 --- /dev/null +++ b/ffiwrapper/partialfile.go @@ -0,0 +1,255 @@ +package ffiwrapper + +import ( + "encoding/binary" + "io" + "os" + + "github.com/detailyang/go-fallocate" + "golang.org/x/xerrors" + + rlepluslazy "github.com/filecoin-project/go-bitfield/rle" + "github.com/filecoin-project/specs-actors/actors/abi" +) + +const veryLargeRle = 1 << 20 + +// Sectors can be partially unsealed. We support this by appending a small +// trailer to each unsealed sector file containing an RLE+ marking which bytes +// in a sector are unsealed, and which are not (holes) + +// unsealed sector files internally have this structure +// [unpadded (raw) data][rle+][4B LE length fo the rle+ field] + +type partialFile struct { + maxPiece abi.UnpaddedPieceSize + + path string + allocated rlepluslazy.RLE + + file *os.File +} + +func writeTrailer(psz int64, w *os.File, r rlepluslazy.RunIterator) error { + trailer, err := rlepluslazy.EncodeRuns(r, nil) + if err != nil { + return xerrors.Errorf("encoding trailer: %w", err) + } + + if _, err := w.Seek(psz, io.SeekStart); err != nil { + return xerrors.Errorf("seek to trailer start: %w", err) + } + + rb, err := w.Write(trailer) + if err != nil { + return xerrors.Errorf("writing trailer data: %w", err) + } + + if err := binary.Write(w, binary.LittleEndian, uint32(len(trailer))); err != nil { + return xerrors.Errorf("writing trailer length: %w", err) + } + + return w.Truncate(psz + int64(rb) + 4) +} + +func createPartialFile(maxPieceSize abi.UnpaddedPieceSize, path string) (*partialFile, error) { + f, err := os.OpenFile(path, os.O_RDWR | os.O_CREATE, 0644) + if err != nil { + return nil, xerrors.Errorf("openning partial file '%s': %w", path, err) + } + + err = func() error { + err := fallocate.Fallocate(f, 0, int64(maxPieceSize)) + if err != nil { + return xerrors.Errorf("fallocate '%s': %w", path, err) + } + + if err := writeTrailer(int64(maxPieceSize), f, &rlepluslazy.RunSliceIterator{}); err != nil { + return xerrors.Errorf("writing trailer: %w", err) + } + + return nil + }() + if err != nil { + f.Close() + return nil, err + } + if err := f.Close(); err != nil { + return nil, xerrors.Errorf("close empty partial file: %w", err) + } + + return openPartialFile(maxPieceSize, path) +} + +func openPartialFile(maxPieceSize abi.UnpaddedPieceSize, path string) (*partialFile, error) { + f, err := os.OpenFile(path, os.O_RDWR, 0644) + if err != nil { + return nil, xerrors.Errorf("openning partial file '%s': %w", path, err) + } + + var rle rlepluslazy.RLE + err = func() error { + st, err := f.Stat() + if err != nil { + return xerrors.Errorf("stat '%s': %w", path, err) + } + if st.Size() < int64(maxPieceSize) { + return xerrors.Errorf("sector file '%s' was smaller than the sector size %d < %d", path, st.Size(), maxPieceSize) + } + // read trailer + var tlen [4]byte + _, err = f.ReadAt(tlen[:], st.Size() - int64(len(tlen))) + if err != nil { + return xerrors.Errorf("reading trailer length: %w", err) + } + + // sanity-check the length + trailerLen := binary.LittleEndian.Uint32(tlen[:]) + expectLen := int64(trailerLen) + int64(len(tlen)) + int64(maxPieceSize) + if expectLen != st.Size() { + return xerrors.Errorf("file '%d' has inconsistent length; has %d bytes; expected %d (%d trailer, %d sector data)", path, st.Size(), expectLen, int64(trailerLen) + int64(len(tlen)), maxPieceSize) + } + if trailerLen > veryLargeRle { + log.Warnf("Partial file '%s' has a VERY large trailer with %d bytes", path, trailerLen) + } + + trailerStart := st.Size() - int64(len(tlen)) - int64(trailerLen) + if trailerStart != int64(maxPieceSize) { + return xerrors.Errorf("expected sector size to equal trailer start index") + } + + trailerBytes := make([]byte, trailerLen) + _, err = f.ReadAt(trailerBytes, trailerStart) + if err != nil { + return xerrors.Errorf("reading trailer: %w", err) + } + + rle, err = rlepluslazy.FromBuf(trailerBytes) + if err != nil { + return xerrors.Errorf("decoding trailer: %w", err) + } + + it, err := rle.RunIterator() + if err != nil { + return xerrors.Errorf("getting trailer run iterator: %w", err) + } + + lastSet, err := rlepluslazy.LastIndex(it, true) + if err != nil { + return xerrors.Errorf("finding last set byte index: %w", err) + } + if lastSet > uint64(maxPieceSize) { + return xerrors.Errorf("last set byte at index higher than sector size: %d > %d", lastSet, maxPieceSize) + } + + return nil + }() + if err != nil { + f.Close() + return nil, err + } + + return &partialFile{ + maxPiece: maxPieceSize, + path: path, + allocated: rle, + file: f, + }, nil +} + +func (pf *partialFile) Close() error { + return pf.file.Close() +} + +func (pf *partialFile) Writer(offset abi.UnpaddedPieceSize, size abi.UnpaddedPieceSize) (io.Writer, error) { + if _, err := pf.file.Seek(int64(offset), io.SeekStart); err != nil { + return nil, xerrors.Errorf("seek piece start: %w", err) + } + + { + have, err := pf.allocated.RunIterator() + if err != nil { + return nil, err + } + + and, err := rlepluslazy.And(have, pieceRun(offset, size)) + if err != nil { + return nil, err + } + + c, err := rlepluslazy.Count(and) + if err != nil { + return nil, err + } + + if c > 0 { + log.Warnf("getting partial file writer overwriting %d allocated bytes", c) + } + } + + return pf.file, nil +} + +func (pf *partialFile) MarkAllocated(offset abi.UnpaddedPieceSize, size abi.UnpaddedPieceSize) error { + have, err := pf.allocated.RunIterator() + if err != nil { + return err + } + + ored, err := rlepluslazy.Or(have, pieceRun(offset, size)) + if err != nil { + return err + } + + if err := writeTrailer(int64(pf.maxPiece), pf.file, ored); err != nil { + return xerrors.Errorf("writing trailer: %w", err) + } + + return nil +} + +func (pf *partialFile) Reader(offset abi.UnpaddedPieceSize, size abi.UnpaddedPieceSize) (*os.File, error) { + if _, err := pf.file.Seek(int64(offset), io.SeekStart); err != nil { + return nil, xerrors.Errorf("seek piece start: %w", err) + } + + { + have, err := pf.allocated.RunIterator() + if err != nil { + return nil, err + } + + and, err := rlepluslazy.And(have, pieceRun(offset, size)) + if err != nil { + return nil, err + } + + c, err := rlepluslazy.Count(and) + if err != nil { + return nil, err + } + + if c != uint64(size) { + log.Warnf("getting partial file reader reading %d unallocated bytes", uint64(size) - c) + } + } + + return pf.file, nil +} + +func pieceRun(offset abi.UnpaddedPieceSize, size abi.UnpaddedPieceSize) rlepluslazy.RunIterator { + var runs []rlepluslazy.Run + if offset > 0 { + runs = append(runs, rlepluslazy.Run{ + Val: false, + Len: uint64(offset), + }) + } + + runs = append(runs, rlepluslazy.Run{ + Val: true, + Len: uint64(size), + }) + + return &rlepluslazy.RunSliceIterator{Runs: runs} +} diff --git a/ffiwrapper/sealer_cgo.go b/ffiwrapper/sealer_cgo.go index f90a6020e..900d728c7 100644 --- a/ffiwrapper/sealer_cgo.go +++ b/ffiwrapper/sealer_cgo.go @@ -7,6 +7,7 @@ import ( "io" "math/bits" "os" + "path/filepath" "github.com/ipfs/go-cid" "golang.org/x/xerrors" @@ -46,13 +47,20 @@ func (sb *Sealer) NewSector(ctx context.Context, sector abi.SectorID) error { } func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPieceSizes []abi.UnpaddedPieceSize, pieceSize abi.UnpaddedPieceSize, file storage.Data) (abi.PieceInfo, error) { - f, werr, err := toReadableFile(file, int64(pieceSize)) - if err != nil { - return abi.PieceInfo{}, err + var offset abi.UnpaddedPieceSize + for _, size := range existingPieceSizes { + offset += size } + maxPieceSize := abi.PaddedPieceSize(sb.ssize).Unpadded() + + if offset + pieceSize > maxPieceSize { + return abi.PieceInfo{}, xerrors.Errorf("can't add %d byte piece to sector %v with %d bytes of existing pieces", pieceSize, sector, offset) + } + + var err error var done func() - var stagedFile *os.File + var stagedFile *partialFile defer func() { if done != nil { @@ -73,9 +81,9 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie return abi.PieceInfo{}, xerrors.Errorf("acquire unsealed sector: %w", err) } - stagedFile, err = os.Create(stagedPath.Unsealed) + stagedFile, err = createPartialFile(maxPieceSize, stagedPath.Unsealed) if err != nil { - return abi.PieceInfo{}, xerrors.Errorf("opening sector file: %w", err) + return abi.PieceInfo{}, xerrors.Errorf("creating unsealed sector file: %w", err) } } else { stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, 0, true) @@ -83,24 +91,35 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie return abi.PieceInfo{}, xerrors.Errorf("acquire unsealed sector: %w", err) } - stagedFile, err = os.OpenFile(stagedPath.Unsealed, os.O_RDWR, 0644) + stagedFile, err = openPartialFile(maxPieceSize, stagedPath.Unsealed) if err != nil { - return abi.PieceInfo{}, xerrors.Errorf("opening sector file: %w", err) - } - - if _, err := stagedFile.Seek(0, io.SeekEnd); err != nil { - return abi.PieceInfo{}, xerrors.Errorf("seek end: %w", err) + return abi.PieceInfo{}, xerrors.Errorf("opening unsealed sector file: %w", err) } } - _, _, pieceCID, err := ffi.WriteWithAlignment(sb.sealProofType, f, pieceSize, stagedFile, existingPieceSizes) + w, err := stagedFile.Writer(offset, pieceSize) if err != nil { - return abi.PieceInfo{}, err + return abi.PieceInfo{}, xerrors.Errorf("getting partial file writer: %w", err) + } + pr := io.TeeReader(io.LimitReader(file, int64(pieceSize)), w) + prf, werr, err := toReadableFile(pr, int64(pieceSize)) + if err != nil { + return abi.PieceInfo{}, xerrors.Errorf("getting tee reader pipe: %w", err) } - if err := f.Close(); err != nil { + pieceCID, err := ffi.GeneratePieceCIDFromFile(sb.sealProofType, prf, pieceSize) + if err != nil { + return abi.PieceInfo{}, xerrors.Errorf("generating piece commitment: %w", err) + } + + if err := stagedFile.MarkAllocated(offset, pieceSize); err != nil { + return abi.PieceInfo{}, xerrors.Errorf("marking data range as allocated: %w", err) + } + + if err := stagedFile.Close(); err != nil { return abi.PieceInfo{}, err } + stagedFile = nil return abi.PieceInfo{ Size: pieceSize.Padded(), @@ -232,11 +251,22 @@ func (sb *Sealer) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke return nil, xerrors.Errorf("aggregated piece sizes don't match sector size: %d != %d (%d)", sum, ussize, int64(ussize-sum)) } + staged := filepath.Join(paths.Cache, "staged") + + if err := sb.rewriteAsPadded(paths.Unsealed, staged); err != nil { + return nil, xerrors.Errorf("rewriting sector as padded: %w", err) + } + defer func() { + if err := os.Remove(staged); err != nil { + log.Warnf("Removing staged sector file(%v): %s", sector, err) + } + }() + // TODO: context cancellation respect p1o, err := ffi.SealPreCommitPhase1( sb.sealProofType, paths.Cache, - paths.Unsealed, + staged, paths.Sealed, sector.Number, sector.Miner, @@ -249,6 +279,52 @@ func (sb *Sealer) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke return p1o, nil } +func (sb *Sealer) rewriteAsPadded(unsealed string, staged string) error { + maxPieceSize := abi.PaddedPieceSize(sb.ssize).Unpadded() + + pf, err := openPartialFile(maxPieceSize, unsealed) + if err != nil { + return xerrors.Errorf("opening unsealed file: %w", err) + } + + upr, err := pf.Reader(0, maxPieceSize) + if err != nil { + pf.Close() + return err + } + + st, err := os.OpenFile(staged, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + pf.Close() + return xerrors.Errorf("openning staged file: %w", err) + } + + // OPTIMIZATION: upr is a file, so it could be passed straight to + // WriteWithAlignment IF it wouldn't care about the trailer + lupr, werr, err := toReadableFile(io.LimitReader(upr, int64(maxPieceSize)), int64(maxPieceSize)) + if err != nil { + return err + } + + _, _, _, err = ffi.WriteWithAlignment(sb.sealProofType, lupr, maxPieceSize, st, nil) + if err != nil { + pf.Close() + st.Close() + return xerrors.Errorf("write with alignment: %w", err) + } + + if err := st.Close(); err != nil { + pf.Close() + return err + } + + if err := pf.Close(); err != nil { + return err + } + + return werr() +} + func (sb *Sealer) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.PreCommit1Out) (storage.SectorCids, error) { paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTSealed|stores.FTCache, 0, true) if err != nil { diff --git a/ffiwrapper/sealer_test.go b/ffiwrapper/sealer_test.go index 0f4918862..2fd1a5f32 100644 --- a/ffiwrapper/sealer_test.go +++ b/ffiwrapper/sealer_test.go @@ -1,8 +1,10 @@ package ffiwrapper import ( + "bytes" "context" "fmt" + ffi "github.com/filecoin-project/filecoin-ffi" "io" "io/ioutil" "math/rand" @@ -351,3 +353,14 @@ func TestSealAndVerify2(t *testing.T) { post(t, sb, s1, s2) } + +func TestScribbles(t *testing.T) { + rf, w, _ := toReadableFile(bytes.NewReader(bytes.Repeat([]byte{0xff, 0}, 127)), 254) + defer w() + + tf, _ := ioutil.TempFile("/tmp/", "scrb-") + + fmt.Println(tf.Name()) + + fmt.Println(ffi.WriteWithAlignment(abi.RegisteredProof_StackedDRG2KiBSeal, rf, 254, tf, nil)) +} diff --git a/go.mod b/go.mod index 21d268986..fe4a255ef 100644 --- a/go.mod +++ b/go.mod @@ -3,8 +3,10 @@ module github.com/filecoin-project/sector-storage go 1.13 require ( + github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e github.com/elastic/go-sysinfo v1.3.0 github.com/filecoin-project/filecoin-ffi v0.0.0-20200326153646-e899cc1dd072 + github.com/filecoin-project/go-bitfield v0.0.2-0.20200518131841-989ba5ae71af github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5 github.com/filecoin-project/go-paramfetch v0.0.1 github.com/filecoin-project/specs-actors v0.4.1-0.20200508202406-42be6629284d diff --git a/go.sum b/go.sum index 22f75c198..1538984aa 100644 --- a/go.sum +++ b/go.sum @@ -19,6 +19,8 @@ github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e h1:lj77EKYUpYXTd8CD/+QMIf8b6OIOTsfEBSXiAzuEHTU= +github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e/go.mod h1:3ZQK6DMPSz/QZ73jlWxBtUhNA8xZx7LzUFSq/OfP8vk= github.com/elastic/go-sysinfo v1.3.0 h1:eb2XFGTMlSwG/yyU9Y8jVAYLIzU2sFzWXwo2gmetyrE= github.com/elastic/go-sysinfo v1.3.0/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= github.com/elastic/go-windows v1.0.0 h1:qLURgZFkkrYyTTkvYpsZIgf83AUsdIHfvlJaqaZ7aSY= @@ -33,6 +35,8 @@ github.com/filecoin-project/go-bitfield v0.0.0-20200309034705-8c7ac40bd550 h1:ao github.com/filecoin-project/go-bitfield v0.0.0-20200309034705-8c7ac40bd550/go.mod h1:iodsLxOFZnqKtjj2zkgqzoGNrv6vUqj69AT/J8DKXEw= github.com/filecoin-project/go-bitfield v0.0.0-20200416002808-b3ee67ec9060 h1:/3qjGMn6ukXgZJHsIbuwGL7ipla8DOV3uHZDBJkBYfU= github.com/filecoin-project/go-bitfield v0.0.0-20200416002808-b3ee67ec9060/go.mod h1:iodsLxOFZnqKtjj2zkgqzoGNrv6vUqj69AT/J8DKXEw= +github.com/filecoin-project/go-bitfield v0.0.2-0.20200518131841-989ba5ae71af h1:g34Sk2coFzyNUv61ZLQ+yyS4Fm8aJCqEaZMKf8Dv6Hs= +github.com/filecoin-project/go-bitfield v0.0.2-0.20200518131841-989ba5ae71af/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5 h1:yvQJCW9mmi9zy+51xA01Ea2X7/dL7r8eKDPuGUjRmbo= diff --git a/localworker.go b/localworker.go index 1c7e030e5..bc4499e5b 100644 --- a/localworker.go +++ b/localworker.go @@ -7,6 +7,7 @@ import ( "runtime" "github.com/elastic/go-sysinfo" + "github.com/ipfs/go-cid" "golang.org/x/xerrors" ffi "github.com/filecoin-project/filecoin-ffi" @@ -181,6 +182,14 @@ func (l *LocalWorker) FinalizeSector(ctx context.Context, sector abi.SectorID) e return nil } +func (l *LocalWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index ffiwrapper.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) error { + panic("implement me") +} + +func (l *LocalWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index ffiwrapper.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error { + panic("implement me") +} + func (l *LocalWorker) TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) { return l.acceptTasks, nil } diff --git a/manager.go b/manager.go index 40e5c5788..284ff272a 100644 --- a/manager.go +++ b/manager.go @@ -49,7 +49,7 @@ type Worker interface { type SectorManager interface { SectorSize() abi.SectorSize - ReadPieceFromSealedSector(context.Context, io.Writer, abi.SectorID, ffiwrapper.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error + ReadPiece(context.Context, io.Writer, abi.SectorID, ffiwrapper.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error ffiwrapper.StorageSealer storage.Prover @@ -189,7 +189,7 @@ func schedFetch(sector abi.SectorID, ft stores.SectorFileType, sealing bool) fun } } -func (m *Manager) ReadPieceFromSealedSector(ctx context.Context, sink io.Writer, sector abi.SectorID, offset ffiwrapper.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) error { +func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.SectorID, offset ffiwrapper.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) error { best, err := m.index.StorageFindSector(ctx, sector, stores.FTUnsealed, false) if err != nil { return xerrors.Errorf("read piece: checking for already existing unsealed sector: %w", err) From 86f3c0916e355c4f6fb6549e19c4fcb7a6b33b60 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Tue, 19 May 2020 00:08:11 +0200 Subject: [PATCH 082/199] ffiwrapper: UnsealPiece --- ffiwrapper/partialfile.go | 20 ++-- ffiwrapper/sealer_cgo.go | 187 +++++++++++++++++++++++++++++++++++++- ffiwrapper/types.go | 5 +- go.mod | 2 +- go.sum | 2 + stores/remote.go | 3 +- 6 files changed, 202 insertions(+), 17 deletions(-) diff --git a/ffiwrapper/partialfile.go b/ffiwrapper/partialfile.go index 256a416f3..a5b8f2548 100644 --- a/ffiwrapper/partialfile.go +++ b/ffiwrapper/partialfile.go @@ -53,7 +53,7 @@ func writeTrailer(psz int64, w *os.File, r rlepluslazy.RunIterator) error { } func createPartialFile(maxPieceSize abi.UnpaddedPieceSize, path string) (*partialFile, error) { - f, err := os.OpenFile(path, os.O_RDWR | os.O_CREATE, 0644) + f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644) if err != nil { return nil, xerrors.Errorf("openning partial file '%s': %w", path, err) } @@ -98,7 +98,7 @@ func openPartialFile(maxPieceSize abi.UnpaddedPieceSize, path string) (*partialF } // read trailer var tlen [4]byte - _, err = f.ReadAt(tlen[:], st.Size() - int64(len(tlen))) + _, err = f.ReadAt(tlen[:], st.Size()-int64(len(tlen))) if err != nil { return xerrors.Errorf("reading trailer length: %w", err) } @@ -107,7 +107,7 @@ func openPartialFile(maxPieceSize abi.UnpaddedPieceSize, path string) (*partialF trailerLen := binary.LittleEndian.Uint32(tlen[:]) expectLen := int64(trailerLen) + int64(len(tlen)) + int64(maxPieceSize) if expectLen != st.Size() { - return xerrors.Errorf("file '%d' has inconsistent length; has %d bytes; expected %d (%d trailer, %d sector data)", path, st.Size(), expectLen, int64(trailerLen) + int64(len(tlen)), maxPieceSize) + return xerrors.Errorf("file '%d' has inconsistent length; has %d bytes; expected %d (%d trailer, %d sector data)", path, st.Size(), expectLen, int64(trailerLen)+int64(len(tlen)), maxPieceSize) } if trailerLen > veryLargeRle { log.Warnf("Partial file '%s' has a VERY large trailer with %d bytes", path, trailerLen) @@ -161,7 +161,7 @@ func (pf *partialFile) Close() error { return pf.file.Close() } -func (pf *partialFile) Writer(offset abi.UnpaddedPieceSize, size abi.UnpaddedPieceSize) (io.Writer, error) { +func (pf *partialFile) Writer(offset UnpaddedByteIndex, size abi.UnpaddedPieceSize) (io.Writer, error) { if _, err := pf.file.Seek(int64(offset), io.SeekStart); err != nil { return nil, xerrors.Errorf("seek piece start: %w", err) } @@ -190,7 +190,7 @@ func (pf *partialFile) Writer(offset abi.UnpaddedPieceSize, size abi.UnpaddedPie return pf.file, nil } -func (pf *partialFile) MarkAllocated(offset abi.UnpaddedPieceSize, size abi.UnpaddedPieceSize) error { +func (pf *partialFile) MarkAllocated(offset UnpaddedByteIndex, size abi.UnpaddedPieceSize) error { have, err := pf.allocated.RunIterator() if err != nil { return err @@ -208,7 +208,7 @@ func (pf *partialFile) MarkAllocated(offset abi.UnpaddedPieceSize, size abi.Unpa return nil } -func (pf *partialFile) Reader(offset abi.UnpaddedPieceSize, size abi.UnpaddedPieceSize) (*os.File, error) { +func (pf *partialFile) Reader(offset UnpaddedByteIndex, size abi.UnpaddedPieceSize) (*os.File, error) { if _, err := pf.file.Seek(int64(offset), io.SeekStart); err != nil { return nil, xerrors.Errorf("seek piece start: %w", err) } @@ -230,14 +230,18 @@ func (pf *partialFile) Reader(offset abi.UnpaddedPieceSize, size abi.UnpaddedPie } if c != uint64(size) { - log.Warnf("getting partial file reader reading %d unallocated bytes", uint64(size) - c) + log.Warnf("getting partial file reader reading %d unallocated bytes", uint64(size)-c) } } return pf.file, nil } -func pieceRun(offset abi.UnpaddedPieceSize, size abi.UnpaddedPieceSize) rlepluslazy.RunIterator { +func (pf *partialFile) Allocated() (rlepluslazy.RunIterator, error) { + return pf.allocated.RunIterator() +} + +func pieceRun(offset UnpaddedByteIndex, size abi.UnpaddedPieceSize) rlepluslazy.RunIterator { var runs []rlepluslazy.Run if offset > 0 { runs = append(runs, rlepluslazy.Run{ diff --git a/ffiwrapper/sealer_cgo.go b/ffiwrapper/sealer_cgo.go index 900d728c7..0ed665f2d 100644 --- a/ffiwrapper/sealer_cgo.go +++ b/ffiwrapper/sealer_cgo.go @@ -5,9 +5,11 @@ package ffiwrapper import ( "context" "io" + "io/ioutil" "math/bits" "os" "path/filepath" + "syscall" "github.com/ipfs/go-cid" "golang.org/x/xerrors" @@ -17,6 +19,7 @@ import ( "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/sector-storage/stores" + "github.com/filecoin-project/sector-storage/storiface" "github.com/filecoin-project/sector-storage/zerocomm" ) @@ -54,7 +57,7 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie maxPieceSize := abi.PaddedPieceSize(sb.ssize).Unpadded() - if offset + pieceSize > maxPieceSize { + if offset+pieceSize > maxPieceSize { return abi.PieceInfo{}, xerrors.Errorf("can't add %d byte piece to sector %v with %d bytes of existing pieces", pieceSize, sector, offset) } @@ -97,7 +100,7 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie } } - w, err := stagedFile.Writer(offset, pieceSize) + w, err := stagedFile.Writer(UnpaddedByteIndex(offset), pieceSize) if err != nil { return abi.PieceInfo{}, xerrors.Errorf("getting partial file writer: %w", err) } @@ -112,7 +115,7 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie return abi.PieceInfo{}, xerrors.Errorf("generating piece commitment: %w", err) } - if err := stagedFile.MarkAllocated(offset, pieceSize); err != nil { + if err := stagedFile.MarkAllocated(UnpaddedByteIndex(offset), pieceSize); err != nil { return abi.PieceInfo{}, xerrors.Errorf("marking data range as allocated: %w", err) } @@ -133,6 +136,184 @@ func (cf closerFunc) Close() error { return cf() } +func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) error { + maxPieceSize := abi.PaddedPieceSize(sb.ssize).Unpadded() + + // try finding existing + unsealedPath, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTNone, false) + var pf *partialFile + + switch { + case xerrors.Is(err, storiface.ErrSectorNotFound): + unsealedPath, done, err = sb.sectors.AcquireSector(ctx, sector, stores.FTNone, stores.FTUnsealed, false) + if err != nil { + return xerrors.Errorf("acquire unsealed sector path (allocate): %w", err) + } + defer done() + + pf, err = createPartialFile(maxPieceSize, unsealedPath.Unsealed) + if err != nil { + return xerrors.Errorf("create unsealed file: %w", err) + } + + case err == nil: + defer done() + + pf, err = openPartialFile(maxPieceSize, unsealedPath.Unsealed) + if err != nil { + return xerrors.Errorf("opening partial file: %w", err) + } + default: + return xerrors.Errorf("acquire unsealed sector path (existing): %w", err) + } + defer pf.Close() + + allocated, err := pf.Allocated() + if err != nil { + return xerrors.Errorf("getting bitruns of allocated data: %w", err) + } + + toUnseal, err := computeUnsealRanges(allocated, offset, size) + if err != nil { + return xerrors.Errorf("computing unseal ranges: %w", err) + } + + if !toUnseal.HasNext() { + return nil + } + + srcPaths, srcDone, err := sb.sectors.AcquireSector(ctx, sector, stores.FTCache|stores.FTSealed, stores.FTNone, false) + if err != nil { + return xerrors.Errorf("acquire sealed sector paths: %w", err) + } + defer srcDone() + + var at, nextat uint64 + for { + piece, err := toUnseal.NextRun() + if err != nil { + return xerrors.Errorf("getting next range to unseal: %w", err) + } + + at = nextat + nextat += piece.Len + + if !piece.Val { + continue + } + + out, err := pf.Writer(offset, size) + if err != nil { + return xerrors.Errorf("getting partial file writer: %w", err) + } + + // + outpipe, err := ioutil.TempFile(os.TempDir(), "sector-storage-unseal-") + if err != nil { + return xerrors.Errorf("creating temp pipe file: %w", err) + } + var outpath string + var perr error + outWait := make(chan struct{}) + + { + outpath = outpipe.Name() + if err := outpipe.Close(); err != nil { + return xerrors.Errorf("close pipe temp: %w", err) + } + if err := os.Remove(outpath); err != nil { + return xerrors.Errorf("rm pipe temp: %w", err) + } + + // TODO: Make UnsealRange write to an FD + if err := syscall.Mkfifo(outpath, 0600); err != nil { + return xerrors.Errorf("mk temp fifo: %w", err) + } + + outpipe, err = os.OpenFile(outpath, os.O_RDONLY, 0600) + if err != nil { + return xerrors.Errorf("open temp pipe: %w", err) + } + + go func() { + defer close(outWait) + defer os.Remove(outpath) + defer outpipe.Close() + + _, perr = io.CopyN(out, outpipe, int64(size)) + }() + } + // + + // TODO: This may be possible to do in parallel + err = ffi.UnsealRange(sb.sealProofType, + srcPaths.Cache, + srcPaths.Sealed, + outpath, + sector.Number, + sector.Miner, + randomness, + cid, + at, + piece.Len) + if err != nil { + return xerrors.Errorf("unseal range: %w", err) + } + + select { + case <-outWait: + case <-ctx.Done(): + return ctx.Err() + } + + if perr != nil { + return xerrors.Errorf("piping output to unsealed file: %w", perr) + } + + if err := pf.MarkAllocated(UnpaddedByteIndex(at), abi.UnpaddedPieceSize(piece.Len)); err != nil { + return xerrors.Errorf("marking unsealed range as allocated: %w", err) + } + + if !toUnseal.HasNext() { + break + } + } + + return nil +} + +func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, offset UnpaddedByteIndex, size abi.UnpaddedPieceSize) error { + path, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTNone, false) + if err != nil { + return xerrors.Errorf("acquire unsealed sector path: %w", err) + } + defer done() + + maxPieceSize := abi.PaddedPieceSize(sb.ssize).Unpadded() + + pf, err := openPartialFile(maxPieceSize, path.Unsealed) + if xerrors.Is(err, os.ErrNotExist) { + return xerrors.Errorf("opening partial file: %w", err) + } + + f, err := pf.Reader(offset, size) + if err != nil { + pf.Close() + return xerrors.Errorf("getting partial file reader: %w", err) + } + + if _, err := io.CopyN(writer, f, int64(size)); err != nil { + pf.Close() + return xerrors.Errorf("reading unsealed file: %w", err) + } + + if err := pf.Close(); err != nil { + return xerrors.Errorf("closing partial file: %w", err) + } + + return nil +} + func (sb *Sealer) ReadPieceFromSealedSector(ctx context.Context, sector abi.SectorID, offset UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealedCID cid.Cid) (io.ReadCloser, error) { { path, doneUnsealed, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTNone, false) diff --git a/ffiwrapper/types.go b/ffiwrapper/types.go index 98612175d..932dc3c42 100644 --- a/ffiwrapper/types.go +++ b/ffiwrapper/types.go @@ -2,7 +2,6 @@ package ffiwrapper import ( "context" - "errors" "io" "github.com/ipfs/go-cid" @@ -41,10 +40,8 @@ type Verifier interface { GenerateWinningPoStSectorChallenge(context.Context, abi.RegisteredProof, abi.ActorID, abi.PoStRandomness, uint64) ([]uint64, error) } -var ErrSectorNotFound = errors.New("sector not found") - type SectorProvider interface { - // * returns ErrSectorNotFound if a requested existing sector doesn't exist + // * returns storiface.ErrSectorNotFound if a requested existing sector doesn't exist // * returns an error when allocate is set, and existing isn't, and the sector exists AcquireSector(ctx context.Context, id abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, sealing bool) (stores.SectorPaths, func(), error) } diff --git a/go.mod b/go.mod index fe4a255ef..429911583 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e github.com/elastic/go-sysinfo v1.3.0 github.com/filecoin-project/filecoin-ffi v0.0.0-20200326153646-e899cc1dd072 - github.com/filecoin-project/go-bitfield v0.0.2-0.20200518131841-989ba5ae71af + github.com/filecoin-project/go-bitfield v0.0.2-0.20200518150651-562fdb554b6e github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5 github.com/filecoin-project/go-paramfetch v0.0.1 github.com/filecoin-project/specs-actors v0.4.1-0.20200508202406-42be6629284d diff --git a/go.sum b/go.sum index 1538984aa..fcff267f9 100644 --- a/go.sum +++ b/go.sum @@ -37,6 +37,8 @@ github.com/filecoin-project/go-bitfield v0.0.0-20200416002808-b3ee67ec9060 h1:/3 github.com/filecoin-project/go-bitfield v0.0.0-20200416002808-b3ee67ec9060/go.mod h1:iodsLxOFZnqKtjj2zkgqzoGNrv6vUqj69AT/J8DKXEw= github.com/filecoin-project/go-bitfield v0.0.2-0.20200518131841-989ba5ae71af h1:g34Sk2coFzyNUv61ZLQ+yyS4Fm8aJCqEaZMKf8Dv6Hs= github.com/filecoin-project/go-bitfield v0.0.2-0.20200518131841-989ba5ae71af/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY= +github.com/filecoin-project/go-bitfield v0.0.2-0.20200518150651-562fdb554b6e h1:gkG/7G+iKy4He+IiQNeQn+nndFznb/vCoOR8iRQsm60= +github.com/filecoin-project/go-bitfield v0.0.2-0.20200518150651-562fdb554b6e/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5 h1:yvQJCW9mmi9zy+51xA01Ea2X7/dL7r8eKDPuGUjRmbo= diff --git a/stores/remote.go b/stores/remote.go index 325060747..151c0ed2f 100644 --- a/stores/remote.go +++ b/stores/remote.go @@ -19,6 +19,7 @@ import ( "github.com/filecoin-project/specs-actors/actors/abi" + "github.com/filecoin-project/sector-storage/storiface" "github.com/filecoin-project/sector-storage/tarutil" ) @@ -118,7 +119,7 @@ func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, spt abi. } if len(si) == 0 { - return "", "", "", nil, xerrors.Errorf("failed to acquire sector %v from remote(%d): not found", s, fileType) + return "", "", "", nil, xerrors.Errorf("failed to acquire sector %v from remote(%d): %w", s, fileType, storiface.ErrSectorNotFound) } sort.Slice(si, func(i, j int) bool { From 78c0b8f9858eeadde16e3614df6541811220c378 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Tue, 19 May 2020 01:03:42 +0200 Subject: [PATCH 083/199] ffiwrapper: Test unsealing --- ffiwrapper/basicfs/fs.go | 12 ++++++- ffiwrapper/sealer_cgo.go | 15 ++++---- ffiwrapper/sealer_test.go | 75 ++++++++++++++++++++++++++++++++++++--- 3 files changed, 89 insertions(+), 13 deletions(-) diff --git a/ffiwrapper/basicfs/fs.go b/ffiwrapper/basicfs/fs.go index e7e755a77..41ec8d4b4 100644 --- a/ffiwrapper/basicfs/fs.go +++ b/ffiwrapper/basicfs/fs.go @@ -9,6 +9,7 @@ import ( "github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/sector-storage/stores" + "github.com/filecoin-project/sector-storage/storiface" ) type sectorFile struct { @@ -63,13 +64,22 @@ func (b *Provider) AcquireSector(ctx context.Context, id abi.SectorID, existing return stores.SectorPaths{}, nil, ctx.Err() } + path := filepath.Join(b.Root, fileType.String(), stores.SectorName(id)) + prevDone := done done = func() { prevDone() <-ch } - stores.SetPathByType(&out, fileType, filepath.Join(b.Root, fileType.String(), stores.SectorName(id))) + if !allocate.Has(fileType) { + if _, err := os.Stat(path); os.IsNotExist(err) { + done() + return stores.SectorPaths{}, nil, storiface.ErrSectorNotFound + } + } + + stores.SetPathByType(&out, fileType, path) } return out, done, nil diff --git a/ffiwrapper/sealer_cgo.go b/ffiwrapper/sealer_cgo.go index 0ed665f2d..879b04afb 100644 --- a/ffiwrapper/sealer_cgo.go +++ b/ffiwrapper/sealer_cgo.go @@ -136,7 +136,7 @@ func (cf closerFunc) Close() error { return cf() } -func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) error { +func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error { maxPieceSize := abi.PaddedPieceSize(sb.ssize).Unpadded() // try finding existing @@ -230,14 +230,15 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset U return xerrors.Errorf("mk temp fifo: %w", err) } - outpipe, err = os.OpenFile(outpath, os.O_RDONLY, 0600) - if err != nil { - return xerrors.Errorf("open temp pipe: %w", err) - } - go func() { defer close(outWait) defer os.Remove(outpath) + + outpipe, err = os.OpenFile(outpath, os.O_RDONLY, 0600) + if err != nil { + perr = xerrors.Errorf("open temp pipe: %w", err) + return + } defer outpipe.Close() _, perr = io.CopyN(out, outpipe, int64(size)) @@ -253,7 +254,7 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset U sector.Number, sector.Miner, randomness, - cid, + commd, at, piece.Len) if err != nil { diff --git a/ffiwrapper/sealer_test.go b/ffiwrapper/sealer_test.go index 2fd1a5f32..c39d0ade0 100644 --- a/ffiwrapper/sealer_test.go +++ b/ffiwrapper/sealer_test.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "fmt" - ffi "github.com/filecoin-project/filecoin-ffi" "io" "io/ioutil" "math/rand" @@ -17,18 +16,22 @@ import ( logging "github.com/ipfs/go-log" "golang.org/x/xerrors" + ffi "github.com/filecoin-project/filecoin-ffi" paramfetch "github.com/filecoin-project/go-paramfetch" - "github.com/filecoin-project/sector-storage/ffiwrapper/basicfs" "github.com/filecoin-project/specs-actors/actors/abi" + + "github.com/filecoin-project/sector-storage/ffiwrapper/basicfs" + "github.com/filecoin-project/sector-storage/stores" "github.com/filecoin-project/specs-storage/storage" ) func init() { - logging.SetLogLevel("*", "INFO") //nolint: errcheck + logging.SetLogLevel("*", "DEBUG") //nolint: errcheck } var sectorSize = abi.SectorSize(2048) var sealProofType = abi.RegisteredProof_StackedDRG2KiBSeal +var sealRand = abi.SealRandomness{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2} type seal struct { id abi.SectorID @@ -37,18 +40,22 @@ type seal struct { ticket abi.SealRandomness } +func data(sn abi.SectorNumber, dlen abi.UnpaddedPieceSize) io.Reader { + return io.LimitReader(rand.New(rand.NewSource(42+int64(sn))), int64(dlen)) +} + func (s *seal) precommit(t *testing.T, sb *Sealer, id abi.SectorID, done func()) { defer done() dlen := abi.PaddedPieceSize(sectorSize).Unpadded() var err error - r := io.LimitReader(rand.New(rand.NewSource(42+int64(id.Number))), int64(dlen)) + r := data(id.Number, dlen) s.pi, err = sb.AddPiece(context.TODO(), id, []abi.UnpaddedPieceSize{}, dlen, r) if err != nil { t.Fatalf("%+v", err) } - s.ticket = abi.SealRandomness{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2} + s.ticket = sealRand p1, err := sb.SealPreCommit1(context.TODO(), id, s.ticket, []abi.PieceInfo{s.pi}) if err != nil { @@ -95,6 +102,62 @@ func (s *seal) commit(t *testing.T, sb *Sealer, done func()) { } } +func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si abi.SectorID, done func()) { + defer done() + + var b bytes.Buffer + err := sb.ReadPiece(context.TODO(), &b, si, 0, 1016) + if err != nil { + t.Fatal(err) + } + + expect, _ := ioutil.ReadAll(data(si.Number, 1016)) + if !bytes.Equal(b.Bytes(), expect) { + t.Fatal("read wrong bytes") + } + + p, sd, err := sp.AcquireSector(context.TODO(), si, stores.FTUnsealed, stores.FTNone, false) + if err != nil { + t.Fatal(err) + } + if err := os.Remove(p.Unsealed); err != nil { + t.Fatal(err) + } + sd() + + err = sb.ReadPiece(context.TODO(), &b, si, 0, 1016) + if err == nil { + t.Fatal("HOW?!") + } + log.Info("this is what we expect: ", err) + + if err := sb.UnsealPiece(context.TODO(), si, 0, 1016, sealRand, s.cids.Unsealed); err != nil { + t.Fatal(err) + } + + b.Reset() + err = sb.ReadPiece(context.TODO(), &b, si, 0, 1016) + if err != nil { + t.Fatal(err) + } + + expect, _ = ioutil.ReadAll(data(si.Number, 1016)) + if !bytes.Equal(b.Bytes(), expect) { + t.Fatal("read wrong bytes") + } + + b.Reset() + err = sb.ReadPiece(context.TODO(), &b, si, 0, 2032) + if err != nil { + t.Fatal(err) + } + + expect = append(expect, bytes.Repeat([]byte{0}, 1016)...) + if !bytes.Equal(b.Bytes(), expect) { + t.Fatal("read wrong bytes") + } +} + func post(t *testing.T, sealer *Sealer, seals ...seal) time.Time { /*randomness := abi.PoStRandomness{0, 9, 2, 7, 6, 5, 4, 3, 2, 1, 0, 9, 8, 7, 6, 45, 3, 2, 1, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9, 7} @@ -232,6 +295,8 @@ func TestSealAndVerify(t *testing.T) { t.Fatalf("%+v", err) } + s.unseal(t, sb, sp, si, func() {}) + fmt.Printf("PreCommit: %s\n", precommit.Sub(start).String()) fmt.Printf("Commit: %s\n", commit.Sub(precommit).String()) fmt.Printf("GenCandidates: %s\n", genCandidiates.Sub(commit).String()) From 05bc399afc851319c501720a0bcb099466356354 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Tue, 19 May 2020 18:09:36 +0200 Subject: [PATCH 084/199] Update testworker --- go.sum | 2 -- testworker_test.go | 12 ++++++++++++ 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/go.sum b/go.sum index fcff267f9..dc3e46be2 100644 --- a/go.sum +++ b/go.sum @@ -35,8 +35,6 @@ github.com/filecoin-project/go-bitfield v0.0.0-20200309034705-8c7ac40bd550 h1:ao github.com/filecoin-project/go-bitfield v0.0.0-20200309034705-8c7ac40bd550/go.mod h1:iodsLxOFZnqKtjj2zkgqzoGNrv6vUqj69AT/J8DKXEw= github.com/filecoin-project/go-bitfield v0.0.0-20200416002808-b3ee67ec9060 h1:/3qjGMn6ukXgZJHsIbuwGL7ipla8DOV3uHZDBJkBYfU= github.com/filecoin-project/go-bitfield v0.0.0-20200416002808-b3ee67ec9060/go.mod h1:iodsLxOFZnqKtjj2zkgqzoGNrv6vUqj69AT/J8DKXEw= -github.com/filecoin-project/go-bitfield v0.0.2-0.20200518131841-989ba5ae71af h1:g34Sk2coFzyNUv61ZLQ+yyS4Fm8aJCqEaZMKf8Dv6Hs= -github.com/filecoin-project/go-bitfield v0.0.2-0.20200518131841-989ba5ae71af/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY= github.com/filecoin-project/go-bitfield v0.0.2-0.20200518150651-562fdb554b6e h1:gkG/7G+iKy4He+IiQNeQn+nndFznb/vCoOR8iRQsm60= github.com/filecoin-project/go-bitfield v0.0.2-0.20200518150651-562fdb554b6e/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus= diff --git a/testworker_test.go b/testworker_test.go index 99fa4abec..cb9a82a2c 100644 --- a/testworker_test.go +++ b/testworker_test.go @@ -2,10 +2,14 @@ package sectorstorage import ( "context" + "io" + + "github.com/ipfs/go-cid" "github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-storage/storage" + "github.com/filecoin-project/sector-storage/ffiwrapper" "github.com/filecoin-project/sector-storage/mock" "github.com/filecoin-project/sector-storage/sealtasks" "github.com/filecoin-project/sector-storage/stores" @@ -46,6 +50,14 @@ func (t *testWorker) NewSector(ctx context.Context, sector abi.SectorID) error { panic("implement me") } +func (t *testWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index ffiwrapper.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) error { + panic("implement me") +} + +func (t *testWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index ffiwrapper.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error { + panic("implement me") +} + func (t *testWorker) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) { return t.mockSeal.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData) } From 8f70192bf3f4adcb10b6f80d01dbef197bdbf205 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Tue, 19 May 2020 18:11:56 +0200 Subject: [PATCH 085/199] Missing stuff --- ffiwrapper/unseal_ranges.go | 24 ++++++++++++++++++++++++ storiface/ffi.go | 5 +++++ 2 files changed, 29 insertions(+) create mode 100644 ffiwrapper/unseal_ranges.go create mode 100644 storiface/ffi.go diff --git a/ffiwrapper/unseal_ranges.go b/ffiwrapper/unseal_ranges.go new file mode 100644 index 000000000..873ac45d0 --- /dev/null +++ b/ffiwrapper/unseal_ranges.go @@ -0,0 +1,24 @@ +package ffiwrapper + +import ( + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-bitfield/rle" + "github.com/filecoin-project/specs-actors/actors/abi" +) + +// merge gaps between ranges which are close to each other +// TODO: more benchmarking to come up with more optimal number +const mergeGaps = 32 << 20 + +// TODO const expandRuns = 16 << 20 // unseal more than requested for future requests + +func computeUnsealRanges(unsealed rlepluslazy.RunIterator, offset UnpaddedByteIndex, size abi.UnpaddedPieceSize) (rlepluslazy.RunIterator, error) { + todo := pieceRun(offset, size) + todo, err := rlepluslazy.Subtract(todo, unsealed) + if err != nil { + return nil, xerrors.Errorf("compute todo-unsealed: %w", err) + } + + return rlepluslazy.JoinClose(todo, mergeGaps) +} diff --git a/storiface/ffi.go b/storiface/ffi.go new file mode 100644 index 000000000..354b1e9c4 --- /dev/null +++ b/storiface/ffi.go @@ -0,0 +1,5 @@ +package storiface + +import "errors" + +var ErrSectorNotFound = errors.New("sector not found") From f9e495ac4960693e717536d943cd1a8555fd9852 Mon Sep 17 00:00:00 2001 From: Ignacio Hagopian Date: Tue, 19 May 2020 21:27:21 -0300 Subject: [PATCH 086/199] mock: add faked post Signed-off-by: Ignacio Hagopian --- mock/mock.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/mock/mock.go b/mock/mock.go index 05e424307..b21b82d34 100644 --- a/mock/mock.go +++ b/mock/mock.go @@ -247,11 +247,11 @@ func AddOpFinish(ctx context.Context) (context.Context, func()) { } func (mgr *SectorMgr) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, error) { - panic("implement me") + return []abi.PoStProof{}, nil } func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, error) { - panic("implement me") + return []abi.PoStProof{}, nil } func (mgr *SectorMgr) ReadPieceFromSealedSector(ctx context.Context, sectorID abi.SectorID, offset ffiwrapper.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, commD cid.Cid) (io.ReadCloser, error) { @@ -307,11 +307,11 @@ func (m mockVerif) VerifySeal(svi abi.SealVerifyInfo) (bool, error) { } func (m mockVerif) VerifyWinningPoSt(ctx context.Context, info abi.WinningPoStVerifyInfo) (bool, error) { - panic("implement me") + return true, nil } func (m mockVerif) VerifyWindowPoSt(ctx context.Context, info abi.WindowPoStVerifyInfo) (bool, error) { - panic("implement me") + return true, nil } func (m mockVerif) GenerateDataCommitment(pt abi.RegisteredProof, pieces []abi.PieceInfo) (cid.Cid, error) { From 806f7b33aabc244fe38f34b0efdeeca24d36c149 Mon Sep 17 00:00:00 2001 From: Ignacio Hagopian Date: Wed, 20 May 2020 11:16:45 -0300 Subject: [PATCH 087/199] produce more valid post proof Signed-off-by: Ignacio Hagopian --- mock/mock.go | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/mock/mock.go b/mock/mock.go index b21b82d34..3a3c8bb5f 100644 --- a/mock/mock.go +++ b/mock/mock.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "io/ioutil" + "math" "math/rand" "sync" @@ -247,11 +248,24 @@ func AddOpFinish(ctx context.Context) (context.Context, func()) { } func (mgr *SectorMgr) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, error) { - return []abi.PoStProof{}, nil + return generateFakePoSt(sectorInfo), nil } func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, error) { - return []abi.PoStProof{}, nil + return generateFakePoSt(sectorInfo), nil +} + +func generateFakePoSt(sectorInfo []abi.SectorInfo) []abi.PoStProof { + se, err := sectorInfo[0].RegisteredProof.WindowPoStPartitionSectors() + if err != nil { + panic(err) + } + return []abi.PoStProof{ + { + RegisteredProof: sectorInfo[0].RegisteredProof, + ProofBytes: make([]byte, 192*int(math.Ceil(float64(len(sectorInfo))/float64(se)))), + }, + } } func (mgr *SectorMgr) ReadPieceFromSealedSector(ctx context.Context, sectorID abi.SectorID, offset ffiwrapper.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, commD cid.Cid) (io.ReadCloser, error) { From 33673a30c7fa07a565f06992ad7d298dfa5a9922 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Wed, 20 May 2020 18:36:46 +0200 Subject: [PATCH 088/199] Wire up unsealing logic, track primary sector copies --- ffiwrapper/sealer_cgo.go | 78 ----------------------------- ffiwrapper/types.go | 3 +- localworker.go | 35 ++++++++++--- manager.go | 30 ++++++----- manager_test.go | 4 ++ roprov.go | 2 +- selector_existing.go | 2 +- stores/http_handler.go | 2 +- stores/index.go | 87 +++++++++++++++++++++++--------- stores/interface.go | 18 ++++++- stores/local.go | 104 ++++++++++++++++++++++++++++----------- stores/local_test.go | 91 ++++++++++++++++++++++++++++++++++ stores/remote.go | 29 +++++++---- testworker_test.go | 2 +- 14 files changed, 322 insertions(+), 165 deletions(-) create mode 100644 stores/local_test.go diff --git a/ffiwrapper/sealer_cgo.go b/ffiwrapper/sealer_cgo.go index 879b04afb..1ffc10b72 100644 --- a/ffiwrapper/sealer_cgo.go +++ b/ffiwrapper/sealer_cgo.go @@ -315,84 +315,6 @@ func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector abi.Se return nil } -func (sb *Sealer) ReadPieceFromSealedSector(ctx context.Context, sector abi.SectorID, offset UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealedCID cid.Cid) (io.ReadCloser, error) { - { - path, doneUnsealed, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTNone, false) - if err != nil { - return nil, xerrors.Errorf("acquire unsealed sector path: %w", err) - } - - f, err := os.OpenFile(path.Unsealed, os.O_RDONLY, 0644) - if err == nil { - if _, err := f.Seek(int64(offset), io.SeekStart); err != nil { - doneUnsealed() - return nil, xerrors.Errorf("seek: %w", err) - } - - lr := io.LimitReader(f, int64(size)) - - return &struct { - io.Reader - io.Closer - }{ - Reader: lr, - Closer: closerFunc(func() error { - doneUnsealed() - return f.Close() - }), - }, nil - } - - doneUnsealed() - - if !os.IsNotExist(err) { - return nil, err - } - } - - paths, doneSealed, err := sb.sectors.AcquireSector(ctx, sector, stores.FTSealed|stores.FTCache, stores.FTUnsealed, false) - if err != nil { - return nil, xerrors.Errorf("acquire sealed/cache sector path: %w", err) - } - defer doneSealed() - - // TODO: GC for those - // (Probably configurable count of sectors to be kept unsealed, and just - // remove last used one (or use whatever other cache policy makes sense)) - err = ffi.Unseal( - sb.sealProofType, - paths.Cache, - paths.Sealed, - paths.Unsealed, - sector.Number, - sector.Miner, - ticket, - unsealedCID, - ) - if err != nil { - return nil, xerrors.Errorf("unseal failed: %w", err) - } - - f, err := os.OpenFile(paths.Unsealed, os.O_RDONLY, 0644) - if err != nil { - return nil, err - } - - if _, err := f.Seek(int64(offset), io.SeekStart); err != nil { - return nil, xerrors.Errorf("seek: %w", err) - } - - lr := io.LimitReader(f, int64(size)) - - return &struct { - io.Reader - io.Closer - }{ - Reader: lr, - Closer: f, - }, nil -} - func (sb *Sealer) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) { paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTSealed|stores.FTCache, true) if err != nil { diff --git a/ffiwrapper/types.go b/ffiwrapper/types.go index 932dc3c42..06c07b715 100644 --- a/ffiwrapper/types.go +++ b/ffiwrapper/types.go @@ -29,7 +29,8 @@ type Storage interface { storage.Prover StorageSealer - ReadPieceFromSealedSector(context.Context, abi.SectorID, UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (io.ReadCloser, error) + UnsealPiece(ctx context.Context, sector abi.SectorID, offset UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error + ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, offset UnpaddedByteIndex, size abi.UnpaddedPieceSize) error } type Verifier interface { diff --git a/localworker.go b/localworker.go index bc4499e5b..a01623bc1 100644 --- a/localworker.go +++ b/localworker.go @@ -56,10 +56,11 @@ func NewLocalWorker(wcfg WorkerConfig, store stores.Store, local *stores.Local, type localWorkerPathProvider struct { w *LocalWorker + op stores.AcquireMode } func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, sealing bool) (stores.SectorPaths, func(), error) { - paths, storageIDs, done, err := l.w.storage.AcquireSector(ctx, sector, l.w.scfg.SealProofType, existing, allocate, sealing) + paths, storageIDs, done, err := l.w.storage.AcquireSector(ctx, sector, l.w.scfg.SealProofType, existing, allocate, stores.PathType(sealing), l.op) if err != nil { return stores.SectorPaths{}, nil, err } @@ -76,7 +77,7 @@ func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector abi. sid := stores.PathByType(storageIDs, fileType) - if err := l.w.sindex.StorageDeclareSector(ctx, stores.ID(sid), sector, fileType); err != nil { + if err := l.w.sindex.StorageDeclareSector(ctx, stores.ID(sid), sector, fileType, l.op == stores.AcquireMove); err != nil { log.Errorf("declare sector error: %+v", err) } } @@ -105,8 +106,8 @@ func (l *LocalWorker) AddPiece(ctx context.Context, sector abi.SectorID, epcs [] return sb.AddPiece(ctx, sector, epcs, sz, r) } -func (l *LocalWorker) Fetch(ctx context.Context, sector abi.SectorID, fileType stores.SectorFileType, sealing bool) error { - _, done, err := (&localWorkerPathProvider{w: l}).AcquireSector(ctx, sector, fileType, stores.FTNone, sealing) +func (l *LocalWorker) Fetch(ctx context.Context, sector abi.SectorID, fileType stores.SectorFileType, sealing bool, am stores.AcquireMode) error { + _, done, err := (&localWorkerPathProvider{w: l, op: am}).AcquireSector(ctx, sector, fileType, stores.FTNone, sealing) if err != nil { return err } @@ -182,12 +183,30 @@ func (l *LocalWorker) FinalizeSector(ctx context.Context, sector abi.SectorID) e return nil } -func (l *LocalWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index ffiwrapper.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) error { - panic("implement me") +func (l *LocalWorker) UnsealPiece(ctx context.Context, sector abi.SectorID, index ffiwrapper.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) error { + sb, err := l.sb() + if err != nil { + return err + } + + if err := sb.UnsealPiece(ctx, sector, index, size, randomness, cid); err != nil { + return xerrors.Errorf("unsealing sector: %w", err) + } + + if err := l.storage.RemoveCopies(ctx, sector, stores.FTSealed | stores.FTCache); err != nil { + return xerrors.Errorf("removing source data: %w", err) + } + + return nil } -func (l *LocalWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index ffiwrapper.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error { - panic("implement me") +func (l *LocalWorker) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, index ffiwrapper.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error { + sb, err := l.sb() + if err != nil { + return err + } + + return sb.ReadPiece(ctx, writer, sector, index, size) } func (l *LocalWorker) TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) { diff --git a/manager.go b/manager.go index 284ff272a..7450c130f 100644 --- a/manager.go +++ b/manager.go @@ -29,7 +29,7 @@ type URLs []string type Worker interface { ffiwrapper.StorageSealer - Fetch(context.Context, abi.SectorID, stores.SectorFileType, bool) error + Fetch(ctx context.Context, s abi.SectorID, ft stores.SectorFileType, sealing bool, am stores.AcquireMode) error UnsealPiece(context.Context, abi.SectorID, ffiwrapper.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error ReadPiece(context.Context, io.Writer, abi.SectorID, ffiwrapper.UnpaddedByteIndex, abi.UnpaddedPieceSize) error @@ -183,9 +183,9 @@ func schedNop(context.Context, Worker) error { return nil } -func schedFetch(sector abi.SectorID, ft stores.SectorFileType, sealing bool) func(context.Context, Worker) error { +func schedFetch(sector abi.SectorID, ft stores.SectorFileType, sealing bool, am stores.AcquireMode) func(context.Context, Worker) error { return func(ctx context.Context, worker Worker) error { - return worker.Fetch(ctx, sector, ft, sealing) + return worker.Fetch(ctx, sector, ft, sealing, am) } } @@ -207,10 +207,18 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect // TODO: Optimization: don't send unseal to a worker if the requested range is already unsealed - // TODO!!!! make schedFetch COPY stores.FTSealed and stores.FTCache - // Moving those to a temp sealing storage may make PoSts fail + unsealFetch := func(ctx context.Context, worker Worker) error { + if err := worker.Fetch(ctx, sector, stores.FTSealed|stores.FTCache, true, stores.AcquireCopy); err != nil { + return xerrors.Errorf("copy sealed/cache sector data: %w", err) + } - err = m.sched.Schedule(ctx, sealtasks.TTUnseal, selector, schedFetch(sector, stores.FTUnsealed|stores.FTSealed|stores.FTCache, true), func(ctx context.Context, w Worker) error { + if err := worker.Fetch(ctx, sector, stores.FTUnsealed, true, stores.AcquireMove); err != nil { + return xerrors.Errorf("copy unsealed sector data: %w", err) + } + return nil + } + + err = m.sched.Schedule(ctx, sealtasks.TTUnseal, selector, unsealFetch, func(ctx context.Context, w Worker) error { return w.UnsealPiece(ctx, sector, offset, size, ticket, unsealed) }) if err != nil { @@ -222,7 +230,7 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect return xerrors.Errorf("creating readPiece selector: %w", err) } - err = m.sched.Schedule(ctx, sealtasks.TTReadUnsealed, selector, schedFetch(sector, stores.FTUnsealed, true), func(ctx context.Context, w Worker) error { + err = m.sched.Schedule(ctx, sealtasks.TTReadUnsealed, selector, schedFetch(sector, stores.FTUnsealed, true, stores.AcquireMove), func(ctx context.Context, w Worker) error { return w.ReadPiece(ctx, sink, sector, offset, size) }) if err != nil { @@ -270,7 +278,7 @@ func (m *Manager) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke return nil, xerrors.Errorf("creating path selector: %w", err) } - err = m.sched.Schedule(ctx, sealtasks.TTPreCommit1, selector, schedFetch(sector, stores.FTUnsealed, true), func(ctx context.Context, w Worker) error { + err = m.sched.Schedule(ctx, sealtasks.TTPreCommit1, selector, schedFetch(sector, stores.FTUnsealed, true, stores.AcquireMove), func(ctx context.Context, w Worker) error { p, err := w.SealPreCommit1(ctx, sector, ticket, pieces) if err != nil { return err @@ -288,7 +296,7 @@ func (m *Manager) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase return storage.SectorCids{}, xerrors.Errorf("creating path selector: %w", err) } - err = m.sched.Schedule(ctx, sealtasks.TTPreCommit2, selector, schedFetch(sector, stores.FTCache|stores.FTSealed, true), func(ctx context.Context, w Worker) error { + err = m.sched.Schedule(ctx, sealtasks.TTPreCommit2, selector, schedFetch(sector, stores.FTCache|stores.FTSealed, true, stores.AcquireMove), func(ctx context.Context, w Worker) error { p, err := w.SealPreCommit2(ctx, sector, phase1Out) if err != nil { return err @@ -309,7 +317,7 @@ func (m *Manager) SealCommit1(ctx context.Context, sector abi.SectorID, ticket a // (except, don't.. for now at least - we are using this step to bring data // into 'provable' storage. Optimally we'd do that in commit2, in parallel // with snark compute) - err = m.sched.Schedule(ctx, sealtasks.TTCommit1, selector, schedFetch(sector, stores.FTCache|stores.FTSealed, true), func(ctx context.Context, w Worker) error { + err = m.sched.Schedule(ctx, sealtasks.TTCommit1, selector, schedFetch(sector, stores.FTCache|stores.FTSealed, true, stores.AcquireMove), func(ctx context.Context, w Worker) error { p, err := w.SealCommit1(ctx, sector, ticket, seed, pieces, cids) if err != nil { return err @@ -342,7 +350,7 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID) error } return m.sched.Schedule(ctx, sealtasks.TTFinalize, selector, - schedFetch(sector, stores.FTCache|stores.FTSealed|stores.FTUnsealed, false), + schedFetch(sector, stores.FTCache|stores.FTSealed|stores.FTUnsealed, false, stores.AcquireMove), func(ctx context.Context, w Worker) error { return w.FinalizeSector(ctx, sector) }) diff --git a/manager_test.go b/manager_test.go index f89989989..165ecf280 100644 --- a/manager_test.go +++ b/manager_test.go @@ -65,6 +65,10 @@ func (t *testStorage) SetStorage(f func(*stores.StorageConfig)) error { return nil } +func (t *testStorage) Stat(path string) (stores.FsStat, error) { + return stores.Stat(path) +} + var _ stores.LocalStorage = &testStorage{} func newTestMgr(ctx context.Context, t *testing.T) (*Manager, *stores.Local, *stores.Remote, *stores.Index) { diff --git a/roprov.go b/roprov.go index 172cf7cf8..aba6bb5d9 100644 --- a/roprov.go +++ b/roprov.go @@ -20,7 +20,7 @@ func (l *readonlyProvider) AcquireSector(ctx context.Context, id abi.SectorID, e return stores.SectorPaths{}, nil, xerrors.New("read-only storage") } - p, _, done, err := l.stor.AcquireSector(ctx, id, l.spt, existing, allocate, sealing) + p, _, done, err := l.stor.AcquireSector(ctx, id, l.spt, existing, allocate, stores.PathType(sealing), stores.AcquireMove) return p, done, err } diff --git a/selector_existing.go b/selector_existing.go index 14e6dbefd..bba48b965 100644 --- a/selector_existing.go +++ b/selector_existing.go @@ -12,7 +12,7 @@ import ( ) type existingSelector struct { - best []stores.StorageInfo + best []stores.SectorStorageInfo } func newExistingSelector(ctx context.Context, index stores.SectorIndex, sector abi.SectorID, alloc stores.SectorFileType, allowFetch bool) (*existingSelector, error) { diff --git a/stores/http_handler.go b/stores/http_handler.go index 2a3e85aef..7e2330dbd 100644 --- a/stores/http_handler.go +++ b/stores/http_handler.go @@ -70,7 +70,7 @@ func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Requ } // passing 0 spt because we don't allocate anything - paths, _, done, err := handler.Local.AcquireSector(r.Context(), id, 0, ft, FTNone, false) + paths, _, done, err := handler.Local.AcquireSector(r.Context(), id, 0, ft, FTNone, false, AcquireMove) if err != nil { log.Error("%+v", err) w.WriteHeader(500) diff --git a/stores/index.go b/stores/index.go index 6659a4422..0dafd87ed 100644 --- a/stores/index.go +++ b/stores/index.go @@ -38,16 +38,27 @@ type HealthReport struct { Err error } +type SectorStorageInfo struct { + ID ID + URLs []string // TODO: Support non-http transports + Weight uint64 + + CanSeal bool + CanStore bool + + Primary bool +} + type SectorIndex interface { // part of storage-miner api StorageAttach(context.Context, StorageInfo, FsStat) error StorageInfo(context.Context, ID) (StorageInfo, error) StorageReportHealth(context.Context, ID, HealthReport) error - StorageDeclareSector(ctx context.Context, storageId ID, s abi.SectorID, ft SectorFileType) error + StorageDeclareSector(ctx context.Context, storageId ID, s abi.SectorID, ft SectorFileType, primary bool) error StorageDropSector(ctx context.Context, storageId ID, s abi.SectorID, ft SectorFileType) error - StorageFindSector(ctx context.Context, sector abi.SectorID, ft SectorFileType, allowFetch bool) ([]StorageInfo, error) + StorageFindSector(ctx context.Context, sector abi.SectorID, ft SectorFileType, allowFetch bool) ([]SectorStorageInfo, error) - StorageBestAlloc(ctx context.Context, allocate SectorFileType, spt abi.RegisteredProof, sealing bool) ([]StorageInfo, error) + StorageBestAlloc(ctx context.Context, allocate SectorFileType, spt abi.RegisteredProof, pathType PathType) ([]StorageInfo, error) } type Decl struct { @@ -55,6 +66,11 @@ type Decl struct { SectorFileType } +type declMeta struct { + storage ID + primary bool +} + type storageEntry struct { info *StorageInfo fsi FsStat @@ -66,13 +82,13 @@ type storageEntry struct { type Index struct { lk sync.RWMutex - sectors map[Decl][]ID + sectors map[Decl][]*declMeta stores map[ID]*storageEntry } func NewIndex() *Index { return &Index{ - sectors: map[Decl][]ID{}, + sectors: map[Decl][]*declMeta{}, stores: map[ID]*storageEntry{}, } } @@ -88,7 +104,7 @@ func (i *Index) StorageList(ctx context.Context) (map[ID][]Decl, error) { } for decl, ids := range i.sectors { for _, id := range ids { - byID[id][decl.SectorID] |= decl.SectorFileType + byID[id.storage][decl.SectorID] |= decl.SectorFileType } } @@ -157,10 +173,11 @@ func (i *Index) StorageReportHealth(ctx context.Context, id ID, report HealthRep return nil } -func (i *Index) StorageDeclareSector(ctx context.Context, storageId ID, s abi.SectorID, ft SectorFileType) error { +func (i *Index) StorageDeclareSector(ctx context.Context, storageId ID, s abi.SectorID, ft SectorFileType, primary bool) error { i.lk.Lock() defer i.lk.Unlock() +loop: for _, fileType := range PathTypes { if fileType&ft == 0 { continue @@ -169,13 +186,20 @@ func (i *Index) StorageDeclareSector(ctx context.Context, storageId ID, s abi.Se d := Decl{s, fileType} for _, sid := range i.sectors[d] { - if sid == storageId { - log.Warnf("sector %v redeclared in %s", s, storageId) - return nil + if sid.storage == storageId { + if !sid.primary && primary { + sid.primary = true + } else { + log.Warnf("sector %v redeclared in %s", s, storageId) + } + continue loop } } - i.sectors[d] = append(i.sectors[d], storageId) + i.sectors[d] = append(i.sectors[d], &declMeta{ + storage: storageId, + primary: primary, + }) } return nil @@ -196,9 +220,9 @@ func (i *Index) StorageDropSector(ctx context.Context, storageId ID, s abi.Secto return nil } - rewritten := make([]ID, 0, len(i.sectors[d])-1) + rewritten := make([]*declMeta, 0, len(i.sectors[d])-1) for _, sid := range i.sectors[d] { - if sid == storageId { + if sid.storage == storageId { continue } @@ -215,11 +239,12 @@ func (i *Index) StorageDropSector(ctx context.Context, storageId ID, s abi.Secto return nil } -func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft SectorFileType, allowFetch bool) ([]StorageInfo, error) { +func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft SectorFileType, allowFetch bool) ([]SectorStorageInfo, error) { i.lk.RLock() defer i.lk.RUnlock() storageIDs := map[ID]uint64{} + isprimary := map[ID]bool{} for _, pathType := range PathTypes { if ft&pathType == 0 { @@ -227,11 +252,12 @@ func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft Sector } for _, id := range i.sectors[Decl{s, pathType}] { - storageIDs[id]++ + storageIDs[id.storage]++ + isprimary[id.storage] = isprimary[id.storage] || id.primary } } - out := make([]StorageInfo, 0, len(storageIDs)) + out := make([]SectorStorageInfo, 0, len(storageIDs)) for id, n := range storageIDs { st, ok := i.stores[id] @@ -251,12 +277,15 @@ func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft Sector urls[k] = rl.String() } - out = append(out, StorageInfo{ + out = append(out, SectorStorageInfo{ ID: id, URLs: urls, Weight: st.info.Weight * n, // storage with more sector types is better + CanSeal: st.info.CanSeal, CanStore: st.info.CanStore, + + Primary: isprimary[id], }) } @@ -277,12 +306,15 @@ func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft Sector urls[k] = rl.String() } - out = append(out, StorageInfo{ + out = append(out, SectorStorageInfo{ ID: id, URLs: urls, Weight: st.info.Weight * 0, // TODO: something better than just '0' + CanSeal: st.info.CanSeal, CanStore: st.info.CanStore, + + Primary: false, }) } } @@ -302,7 +334,7 @@ func (i *Index) StorageInfo(ctx context.Context, id ID) (StorageInfo, error) { return *si.info, nil } -func (i *Index) StorageBestAlloc(ctx context.Context, allocate SectorFileType, spt abi.RegisteredProof, sealing bool) ([]StorageInfo, error) { +func (i *Index) StorageBestAlloc(ctx context.Context, allocate SectorFileType, spt abi.RegisteredProof, pathType PathType) ([]StorageInfo, error) { i.lk.RLock() defer i.lk.RUnlock() @@ -314,10 +346,10 @@ func (i *Index) StorageBestAlloc(ctx context.Context, allocate SectorFileType, s } for _, p := range i.stores { - if sealing && !p.info.CanSeal { + if (pathType == PathSealing) && !p.info.CanSeal { continue } - if !sealing && !p.info.CanStore { + if (pathType == PathStorage) && !p.info.CanStore { continue } @@ -362,10 +394,19 @@ func (i *Index) FindSector(id abi.SectorID, typ SectorFileType) ([]ID, error) { i.lk.RLock() defer i.lk.RUnlock() - return i.sectors[Decl{ + f, ok := i.sectors[Decl{ SectorID: id, SectorFileType: typ, - }], nil + }] + if !ok { + return nil, nil + } + out := make([]ID, 0, len(f)) + for _, meta := range f { + out = append(out, meta.storage) + } + + return out, nil } var _ SectorIndex = &Index{} diff --git a/stores/interface.go b/stores/interface.go index 0735f7bf8..a818406a5 100644 --- a/stores/interface.go +++ b/stores/interface.go @@ -9,10 +9,26 @@ import ( "github.com/filecoin-project/specs-actors/actors/abi" ) +type PathType bool +const ( + PathStorage = false + PathSealing = true +) + +type AcquireMode string +const ( + AcquireMove = "move" + AcquireCopy = "copy" +) + type Store interface { - AcquireSector(ctx context.Context, s abi.SectorID, spt abi.RegisteredProof, existing SectorFileType, allocate SectorFileType, sealing bool) (paths SectorPaths, stores SectorPaths, done func(), err error) + AcquireSector(ctx context.Context, s abi.SectorID, spt abi.RegisteredProof, existing SectorFileType, allocate SectorFileType, sealing PathType, op AcquireMode) (paths SectorPaths, stores SectorPaths, done func(), err error) Remove(ctx context.Context, s abi.SectorID, types SectorFileType, force bool) error + // like remove, but doesn't remove the primary sector copy, nor the last + // non-primary copy if there no primary copies + RemoveCopies(ctx context.Context, s abi.SectorID, types SectorFileType) error + // move sectors into storage MoveStorage(ctx context.Context, s abi.SectorID, spt abi.RegisteredProof, types SectorFileType) error diff --git a/stores/local.go b/stores/local.go index 16f333ac4..9c0dc4477 100644 --- a/stores/local.go +++ b/stores/local.go @@ -47,6 +47,8 @@ type LocalPath struct { type LocalStorage interface { GetStorage() (StorageConfig, error) SetStorage(func(*StorageConfig)) error + + Stat(path string) (FsStat, error) } const MetaFile = "sectorstore.json" @@ -98,7 +100,7 @@ func (st *Local) OpenPath(ctx context.Context, p string) error { local: p, } - fst, err := Stat(p) + fst, err := st.localStorage.Stat(p) if err != nil { return err } @@ -133,7 +135,7 @@ func (st *Local) OpenPath(ctx context.Context, p string) error { return xerrors.Errorf("parse sector id %s: %w", ent.Name(), err) } - if err := st.index.StorageDeclareSector(ctx, meta.ID, sid, t); err != nil { + if err := st.index.StorageDeclareSector(ctx, meta.ID, sid, t, meta.CanStore); err != nil { return xerrors.Errorf("declare sector %d(t:%d) -> %s: %w", sid, t, meta.ID, err) } } @@ -177,7 +179,7 @@ func (st *Local) reportHealth(ctx context.Context) { toReport := map[ID]HealthReport{} for id, p := range st.paths { - stat, err := Stat(p.local) + stat, err := st.localStorage.Stat(p.local) toReport[id] = HealthReport{ Stat: stat, @@ -195,7 +197,7 @@ func (st *Local) reportHealth(ctx context.Context) { } } -func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, spt abi.RegisteredProof, existing SectorFileType, allocate SectorFileType, sealing bool) (SectorPaths, SectorPaths, func(), error) { +func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, spt abi.RegisteredProof, existing SectorFileType, allocate SectorFileType, pathType PathType, op AcquireMode) (SectorPaths, SectorPaths, func(), error) { if existing|allocate != existing^allocate { return SectorPaths{}, SectorPaths{}, nil, xerrors.New("can't both find and allocate a sector") } @@ -240,7 +242,7 @@ func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, spt abi.Re continue } - sis, err := st.index.StorageBestAlloc(ctx, fileType, spt, sealing) + sis, err := st.index.StorageBestAlloc(ctx, fileType, spt, pathType) if err != nil { st.localLk.RUnlock() return SectorPaths{}, SectorPaths{}, nil, xerrors.Errorf("finding best storage for allocating : %w", err) @@ -259,11 +261,11 @@ func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, spt abi.Re continue } - if sealing && !si.CanSeal { + if (pathType == PathSealing) && !si.CanSeal { continue } - if !sealing && !si.CanStore { + if (pathType == PathStorage) && !si.CanStore { continue } @@ -328,38 +330,82 @@ func (st *Local) Remove(ctx context.Context, sid abi.SectorID, typ SectorFileTyp } for _, info := range si { - p, ok := st.paths[info.ID] - if !ok { - continue - } - - if p.local == "" { // TODO: can that even be the case? - continue - } - - if err := st.index.StorageDropSector(ctx, info.ID, sid, typ); err != nil { - return xerrors.Errorf("dropping sector from index: %w", err) - } - - spath := filepath.Join(p.local, typ.String(), SectorName(sid)) - log.Infof("remove %s", spath) - - if err := os.RemoveAll(spath); err != nil { - log.Errorf("removing sector (%v) from %s: %+v", sid, spath, err) + if err := st.removeSector(ctx, sid, typ, info.ID); err != nil { + return err } } return nil } +func (st *Local) RemoveCopies(ctx context.Context, sid abi.SectorID, typ SectorFileType) error { + if bits.OnesCount(uint(typ)) != 1 { + return xerrors.New("delete expects one file type") + } + + si, err := st.index.StorageFindSector(ctx, sid, typ, false) + if err != nil { + return xerrors.Errorf("finding existing sector %d(t:%d) failed: %w", sid, typ, err) + } + + var hasPrimary bool + for _, info := range si { + if info.Primary { + hasPrimary = true + break + } + } + + if !hasPrimary { + log.Warnf("RemoveCopies: no primary copies of sector %v (%s), not removing anything", sid, typ) + return nil + } + + for _, info := range si { + if info.Primary { + continue + } + + if err := st.removeSector(ctx, sid, typ, info.ID); err != nil { + return err + } + } + + return nil +} + +func (st *Local) removeSector(ctx context.Context, sid abi.SectorID, typ SectorFileType, storage ID) error { + p, ok := st.paths[storage] + if !ok { + return nil + } + + if p.local == "" { // TODO: can that even be the case? + return nil + } + + if err := st.index.StorageDropSector(ctx, storage, sid, typ); err != nil { + return xerrors.Errorf("dropping sector from index: %w", err) + } + + spath := filepath.Join(p.local, typ.String(), SectorName(sid)) + log.Infof("remove %s", spath) + + if err := os.RemoveAll(spath); err != nil { + log.Errorf("removing sector (%v) from %s: %+v", sid, spath, err) + } + + return nil +} + func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, spt abi.RegisteredProof, types SectorFileType) error { - dest, destIds, sdone, err := st.AcquireSector(ctx, s, spt, FTNone, types, false) + dest, destIds, sdone, err := st.AcquireSector(ctx, s, spt, FTNone, types, false, AcquireMove) if err != nil { return xerrors.Errorf("acquire dest storage: %w", err) } defer sdone() - src, srcIds, ddone, err := st.AcquireSector(ctx, s, spt, types, FTNone, false) + src, srcIds, ddone, err := st.AcquireSector(ctx, s, spt, types, FTNone, false, AcquireMove) if err != nil { return xerrors.Errorf("acquire src storage: %w", err) } @@ -401,7 +447,7 @@ func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, spt abi.Regist return xerrors.Errorf("moving sector %v(%d): %w", s, fileType, err) } - if err := st.index.StorageDeclareSector(ctx, ID(PathByType(destIds, fileType)), s, fileType); err != nil { + if err := st.index.StorageDeclareSector(ctx, ID(PathByType(destIds, fileType)), s, fileType, true); err != nil { return xerrors.Errorf("declare sector %d(t:%d) -> %s: %w", s, fileType, ID(PathByType(destIds, fileType)), err) } } @@ -420,7 +466,7 @@ func (st *Local) FsStat(ctx context.Context, id ID) (FsStat, error) { return FsStat{}, errPathNotFound } - return Stat(p.local) + return st.localStorage.Stat(p.local) } var _ Store = &Local{} diff --git a/stores/local_test.go b/stores/local_test.go new file mode 100644 index 000000000..d98c59182 --- /dev/null +++ b/stores/local_test.go @@ -0,0 +1,91 @@ +package stores + +import ( + "context" + "encoding/json" + "github.com/google/uuid" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" +) + +const pathSize = 16 << 20 + +type TestingLocalStorage struct { + root string + c StorageConfig +} + +func (t *TestingLocalStorage) GetStorage() (StorageConfig, error) { + return t.c, nil +} + +func (t *TestingLocalStorage) SetStorage(f func(*StorageConfig)) error { + f(&t.c) + return nil +} + +func (t *TestingLocalStorage) Stat(path string) (FsStat, error) { + return FsStat{ + Capacity: pathSize, + Available: pathSize, + Used: 0, + }, nil +} + +func (t *TestingLocalStorage) init(subpath string) error { + path := filepath.Join(t.root, subpath) + if err := os.Mkdir(path, 0755); err != nil { + return err + } + + metaFile := filepath.Join(path, MetaFile) + + meta := &LocalStorageMeta{ + ID: ID(uuid.New().String()), + Weight: 1, + CanSeal: true, + CanStore: true, + } + + mb, err := json.MarshalIndent(meta, "", " ") + if err != nil { + return err + } + + if err := ioutil.WriteFile(metaFile, mb, 0644); err != nil { + return err + } + + return nil +} + + +var _ LocalStorage = &TestingLocalStorage{} + +func TestLocalStorage(t *testing.T) { + ctx := context.TODO() + + root, err := ioutil.TempDir("", "sector-storage-teststorage-") + require.NoError(t, err) + + tstor := &TestingLocalStorage{ + root: root, + } + + index := NewIndex() + + st, err := NewLocal(ctx, tstor, index, nil) + require.NoError(t, err) + + p1 := "1" + require.NoError(t, tstor.init("1")) + + err = st.OpenPath(ctx, filepath.Join(tstor.root, p1)) + require.NoError(t, err) + + // TODO: put more things here +} diff --git a/stores/remote.go b/stores/remote.go index 151c0ed2f..e510d71d1 100644 --- a/stores/remote.go +++ b/stores/remote.go @@ -32,6 +32,14 @@ type Remote struct { fetching map[abi.SectorID]chan struct{} } +func (r *Remote) RemoveCopies(ctx context.Context, s abi.SectorID, types SectorFileType) error { + // TODO: do this on remotes too + // (not that we really need to do that since it's always called by the + // worker which pulled the copy) + + return r.local.RemoveCopies(ctx, s, types) +} + func NewRemote(local *Local, index SectorIndex, auth http.Header) *Remote { return &Remote{ local: local, @@ -42,7 +50,7 @@ func NewRemote(local *Local, index SectorIndex, auth http.Header) *Remote { } } -func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, spt abi.RegisteredProof, existing SectorFileType, allocate SectorFileType, sealing bool) (SectorPaths, SectorPaths, func(), error) { +func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, spt abi.RegisteredProof, existing SectorFileType, allocate SectorFileType, pathType PathType, op AcquireMode) (SectorPaths, SectorPaths, func(), error) { if existing|allocate != existing^allocate { return SectorPaths{}, SectorPaths{}, nil, xerrors.New("can't both find and allocate a sector") } @@ -74,7 +82,7 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, spt abi.Regi r.fetchLk.Unlock() }() - paths, stores, done, err := r.local.AcquireSector(ctx, s, spt, existing, allocate, sealing) + paths, stores, done, err := r.local.AcquireSector(ctx, s, spt, existing, allocate, pathType, op) if err != nil { return SectorPaths{}, SectorPaths{}, nil, xerrors.Errorf("local acquire error: %w", err) } @@ -88,7 +96,7 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, spt abi.Regi continue } - ap, storageID, url, rdone, err := r.acquireFromRemote(ctx, s, spt, fileType, sealing) + ap, storageID, url, rdone, err := r.acquireFromRemote(ctx, s, spt, fileType, pathType, op) if err != nil { done() return SectorPaths{}, SectorPaths{}, nil, err @@ -98,21 +106,22 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, spt abi.Regi SetPathByType(&paths, fileType, ap) SetPathByType(&stores, fileType, string(storageID)) - if err := r.index.StorageDeclareSector(ctx, storageID, s, fileType); err != nil { + if err := r.index.StorageDeclareSector(ctx, storageID, s, fileType, op == AcquireMove); err != nil { log.Warnf("declaring sector %v in %s failed: %+v", s, storageID, err) continue } - // TODO: some way to allow having duplicated sectors in the system for perf - if err := r.deleteFromRemote(ctx, url); err != nil { - log.Warnf("deleting sector %v from %s (delete %s): %+v", s, storageID, url, err) + if op == AcquireMove { + if err := r.deleteFromRemote(ctx, url); err != nil { + log.Warnf("deleting sector %v from %s (delete %s): %+v", s, storageID, url, err) + } } } return paths, stores, done, nil } -func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, spt abi.RegisteredProof, fileType SectorFileType, sealing bool) (string, ID, string, func(), error) { +func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, spt abi.RegisteredProof, fileType SectorFileType, pathType PathType, op AcquireMode) (string, ID, string, func(), error) { si, err := r.index.StorageFindSector(ctx, s, fileType, false) if err != nil { return "", "", "", nil, err @@ -126,7 +135,7 @@ func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, spt abi. return si[i].Weight < si[j].Weight }) - apaths, ids, done, err := r.local.AcquireSector(ctx, s, spt, FTNone, fileType, sealing) + apaths, ids, done, err := r.local.AcquireSector(ctx, s, spt, FTNone, fileType, pathType, op) if err != nil { return "", "", "", nil, xerrors.Errorf("allocate local sector for fetching: %w", err) } @@ -206,7 +215,7 @@ func (r *Remote) fetch(ctx context.Context, url, outname string) error { func (r *Remote) MoveStorage(ctx context.Context, s abi.SectorID, spt abi.RegisteredProof, types SectorFileType) error { // Make sure we have the data local - _, _, ddone, err := r.AcquireSector(ctx, s, spt, types, FTNone, false) + _, _, ddone, err := r.AcquireSector(ctx, s, spt, types, FTNone, PathStorage, AcquireMove) if err != nil { return xerrors.Errorf("acquire src storage (remote): %w", err) } diff --git a/testworker_test.go b/testworker_test.go index cb9a82a2c..68d70c838 100644 --- a/testworker_test.go +++ b/testworker_test.go @@ -78,7 +78,7 @@ func (t *testWorker) FinalizeSector(ctx context.Context, sector abi.SectorID) er panic("implement me") } -func (t *testWorker) Fetch(ctx context.Context, id abi.SectorID, fileType stores.SectorFileType, b bool) error { +func (t *testWorker) Fetch(ctx context.Context, id abi.SectorID, fileType stores.SectorFileType, b bool, am stores.AcquireMode) error { return nil } From a59ca7536a9517c8e16edd27d126e7742fc6b7c7 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Thu, 21 May 2020 18:19:46 -0700 Subject: [PATCH 089/199] update to latest filecoin-ffi --- extern/filecoin-ffi | 2 +- ffiwrapper/sealer_test.go | 11 ++++------- go.mod | 2 +- go.sum | 4 ++++ mock/mock.go | 6 +++--- 5 files changed, 13 insertions(+), 12 deletions(-) diff --git a/extern/filecoin-ffi b/extern/filecoin-ffi index 05b30afeb..6109b6ad2 160000 --- a/extern/filecoin-ffi +++ b/extern/filecoin-ffi @@ -1 +1 @@ -Subproject commit 05b30afeb00df254e72c0dadab8fa694dd68a4bc +Subproject commit 6109b6ad2fa9968941c206161dd01ac059011d4e diff --git a/ffiwrapper/sealer_test.go b/ffiwrapper/sealer_test.go index 0f4918862..09ba1fba7 100644 --- a/ffiwrapper/sealer_test.go +++ b/ffiwrapper/sealer_test.go @@ -73,13 +73,10 @@ func (s *seal) commit(t *testing.T, sb *Sealer, done func()) { } ok, err := ProofVerifier.VerifySeal(abi.SealVerifyInfo{ - SectorID: s.id, - OnChain: abi.OnChainSealVerifyInfo{ - SealedCID: s.cids.Sealed, - RegisteredProof: sealProofType, - Proof: proof, - SectorNumber: s.id.Number, - }, + SectorID: s.id, + SealedCID: s.cids.Sealed, + RegisteredProof: sealProofType, + Proof: proof, Randomness: s.ticket, InteractiveRandomness: seed, UnsealedCID: s.cids.Unsealed, diff --git a/go.mod b/go.mod index 21d268986..4f9d943e3 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/filecoin-project/filecoin-ffi v0.0.0-20200326153646-e899cc1dd072 github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5 github.com/filecoin-project/go-paramfetch v0.0.1 - github.com/filecoin-project/specs-actors v0.4.1-0.20200508202406-42be6629284d + github.com/filecoin-project/specs-actors v0.5.4-0.20200521014528-0df536f7e461 github.com/filecoin-project/specs-storage v0.0.0-20200417134612-61b2d91a6102 github.com/google/uuid v1.1.1 github.com/gorilla/mux v1.7.4 diff --git a/go.sum b/go.sum index 22f75c198..e0f7a0f83 100644 --- a/go.sum +++ b/go.sum @@ -33,6 +33,8 @@ github.com/filecoin-project/go-bitfield v0.0.0-20200309034705-8c7ac40bd550 h1:ao github.com/filecoin-project/go-bitfield v0.0.0-20200309034705-8c7ac40bd550/go.mod h1:iodsLxOFZnqKtjj2zkgqzoGNrv6vUqj69AT/J8DKXEw= github.com/filecoin-project/go-bitfield v0.0.0-20200416002808-b3ee67ec9060 h1:/3qjGMn6ukXgZJHsIbuwGL7ipla8DOV3uHZDBJkBYfU= github.com/filecoin-project/go-bitfield v0.0.0-20200416002808-b3ee67ec9060/go.mod h1:iodsLxOFZnqKtjj2zkgqzoGNrv6vUqj69AT/J8DKXEw= +github.com/filecoin-project/go-bitfield v0.0.1 h1:Xg/JnrqqE77aJVKdbEyR04n9FZQWhwrN+buDgQCVpZU= +github.com/filecoin-project/go-bitfield v0.0.1/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5 h1:yvQJCW9mmi9zy+51xA01Ea2X7/dL7r8eKDPuGUjRmbo= @@ -44,6 +46,8 @@ github.com/filecoin-project/specs-actors v0.0.0-20200409043918-e569f4a2f504/go.m github.com/filecoin-project/specs-actors v0.3.0/go.mod h1:nQYnFbQ7Y0bHZyq6HDEuVlCPR+U3z5Q3wMOQ+2aiV+Y= github.com/filecoin-project/specs-actors v0.4.1-0.20200508202406-42be6629284d h1:vzuhvR+huV95QU+KSBCyQvLP6LUtwrPUyeUTzOx1B5I= github.com/filecoin-project/specs-actors v0.4.1-0.20200508202406-42be6629284d/go.mod h1:UW3ft23q6VS8wQoNqLWjENsu9gu1uh6lxOd+H8cwhT8= +github.com/filecoin-project/specs-actors v0.5.4-0.20200521014528-0df536f7e461 h1:yyAoJ9dNboljDWj0uBdJEbdaTak/YhkjYUQt0GzlY0A= +github.com/filecoin-project/specs-actors v0.5.4-0.20200521014528-0df536f7e461/go.mod h1:r5btrNzZD0oBkEz1pohv80gSCXQnqGrD0kYwOTiExyE= github.com/filecoin-project/specs-storage v0.0.0-20200417134612-61b2d91a6102 h1:T3f/zkuvgtgqcXrb0NO3BicuveGOxxUAMPa/Yif2kuE= github.com/filecoin-project/specs-storage v0.0.0-20200417134612-61b2d91a6102/go.mod h1:xJ1/xl9+8zZeSSSFmDC3Wr6uusCTxyYPI0VeNVSFmPE= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= diff --git a/mock/mock.go b/mock/mock.go index 3a3c8bb5f..43a2087e3 100644 --- a/mock/mock.go +++ b/mock/mock.go @@ -307,12 +307,12 @@ func (mgr *SectorMgr) CheckProvable(context.Context, abi.RegisteredProof, []abi. } func (m mockVerif) VerifySeal(svi abi.SealVerifyInfo) (bool, error) { - if len(svi.OnChain.Proof) != 32 { // Real ones are longer, but this should be fine + if len(svi.Proof) != 32 { // Real ones are longer, but this should be fine return false, nil } - for i, b := range svi.OnChain.Proof { - if b != svi.UnsealedCID.Bytes()[i]+svi.OnChain.SealedCID.Bytes()[31-i]-svi.InteractiveRandomness[i]*svi.Randomness[i] { + for i, b := range svi.Proof { + if b != svi.UnsealedCID.Bytes()[i]+svi.SealedCID.Bytes()[31-i]-svi.InteractiveRandomness[i]*svi.Randomness[i] { return false, nil } } From e06cfd24cf46a15b8fcbbe52b4917e410bcd7fb6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Tue, 26 May 2020 10:19:42 +0200 Subject: [PATCH 090/199] mock: Update unseal method --- mock/mock.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/mock/mock.go b/mock/mock.go index 3a3c8bb5f..6fad6bc20 100644 --- a/mock/mock.go +++ b/mock/mock.go @@ -5,7 +5,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "math" "math/rand" "sync" @@ -268,11 +267,13 @@ func generateFakePoSt(sectorInfo []abi.SectorInfo) []abi.PoStProof { } } -func (mgr *SectorMgr) ReadPieceFromSealedSector(ctx context.Context, sectorID abi.SectorID, offset ffiwrapper.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, commD cid.Cid) (io.ReadCloser, error) { +func (mgr *SectorMgr) ReadPiece(ctx context.Context, w io.Writer, sectorID abi.SectorID, offset ffiwrapper.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, c cid.Cid) error { if len(mgr.sectors[sectorID].pieces) > 1 { panic("implme") } - return ioutil.NopCloser(io.LimitReader(bytes.NewReader(mgr.sectors[sectorID].pieces[0].Bytes()[offset:]), int64(size))), nil + + _, err := io.CopyN(w, bytes.NewReader(mgr.sectors[sectorID].pieces[0].Bytes()[offset:]), int64(size)) + return err } func (mgr *SectorMgr) StageFakeData(mid abi.ActorID) (abi.SectorID, []abi.PieceInfo, error) { From 793d332002b17eda79a1cc38a3aedb04efde9bc6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Tue, 26 May 2020 10:25:17 +0200 Subject: [PATCH 091/199] Move UnpaddedByteIndex from FFI wrapper --- ffiwrapper/partialfile.go | 10 ++++++---- ffiwrapper/sealer_cgo.go | 10 +++++----- ffiwrapper/types.go | 7 +++---- ffiwrapper/unseal_ranges.go | 4 +++- localworker.go | 4 ++-- manager.go | 8 ++++---- mock/mock.go | 3 ++- storiface/ffi.go | 2 ++ testworker_test.go | 5 ++--- 9 files changed, 29 insertions(+), 24 deletions(-) diff --git a/ffiwrapper/partialfile.go b/ffiwrapper/partialfile.go index a5b8f2548..a278e7347 100644 --- a/ffiwrapper/partialfile.go +++ b/ffiwrapper/partialfile.go @@ -10,6 +10,8 @@ import ( rlepluslazy "github.com/filecoin-project/go-bitfield/rle" "github.com/filecoin-project/specs-actors/actors/abi" + + "github.com/filecoin-project/sector-storage/storiface" ) const veryLargeRle = 1 << 20 @@ -161,7 +163,7 @@ func (pf *partialFile) Close() error { return pf.file.Close() } -func (pf *partialFile) Writer(offset UnpaddedByteIndex, size abi.UnpaddedPieceSize) (io.Writer, error) { +func (pf *partialFile) Writer(offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (io.Writer, error) { if _, err := pf.file.Seek(int64(offset), io.SeekStart); err != nil { return nil, xerrors.Errorf("seek piece start: %w", err) } @@ -190,7 +192,7 @@ func (pf *partialFile) Writer(offset UnpaddedByteIndex, size abi.UnpaddedPieceSi return pf.file, nil } -func (pf *partialFile) MarkAllocated(offset UnpaddedByteIndex, size abi.UnpaddedPieceSize) error { +func (pf *partialFile) MarkAllocated(offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error { have, err := pf.allocated.RunIterator() if err != nil { return err @@ -208,7 +210,7 @@ func (pf *partialFile) MarkAllocated(offset UnpaddedByteIndex, size abi.Unpadded return nil } -func (pf *partialFile) Reader(offset UnpaddedByteIndex, size abi.UnpaddedPieceSize) (*os.File, error) { +func (pf *partialFile) Reader(offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (*os.File, error) { if _, err := pf.file.Seek(int64(offset), io.SeekStart); err != nil { return nil, xerrors.Errorf("seek piece start: %w", err) } @@ -241,7 +243,7 @@ func (pf *partialFile) Allocated() (rlepluslazy.RunIterator, error) { return pf.allocated.RunIterator() } -func pieceRun(offset UnpaddedByteIndex, size abi.UnpaddedPieceSize) rlepluslazy.RunIterator { +func pieceRun(offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) rlepluslazy.RunIterator { var runs []rlepluslazy.Run if offset > 0 { runs = append(runs, rlepluslazy.Run{ diff --git a/ffiwrapper/sealer_cgo.go b/ffiwrapper/sealer_cgo.go index 1ffc10b72..382d3853f 100644 --- a/ffiwrapper/sealer_cgo.go +++ b/ffiwrapper/sealer_cgo.go @@ -100,7 +100,7 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie } } - w, err := stagedFile.Writer(UnpaddedByteIndex(offset), pieceSize) + w, err := stagedFile.Writer(storiface.UnpaddedByteIndex(offset), pieceSize) if err != nil { return abi.PieceInfo{}, xerrors.Errorf("getting partial file writer: %w", err) } @@ -115,7 +115,7 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie return abi.PieceInfo{}, xerrors.Errorf("generating piece commitment: %w", err) } - if err := stagedFile.MarkAllocated(UnpaddedByteIndex(offset), pieceSize); err != nil { + if err := stagedFile.MarkAllocated(storiface.UnpaddedByteIndex(offset), pieceSize); err != nil { return abi.PieceInfo{}, xerrors.Errorf("marking data range as allocated: %w", err) } @@ -136,7 +136,7 @@ func (cf closerFunc) Close() error { return cf() } -func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error { +func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error { maxPieceSize := abi.PaddedPieceSize(sb.ssize).Unpadded() // try finding existing @@ -271,7 +271,7 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset U return xerrors.Errorf("piping output to unsealed file: %w", perr) } - if err := pf.MarkAllocated(UnpaddedByteIndex(at), abi.UnpaddedPieceSize(piece.Len)); err != nil { + if err := pf.MarkAllocated(storiface.UnpaddedByteIndex(at), abi.UnpaddedPieceSize(piece.Len)); err != nil { return xerrors.Errorf("marking unsealed range as allocated: %w", err) } @@ -283,7 +283,7 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset U return nil } -func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, offset UnpaddedByteIndex, size abi.UnpaddedPieceSize) error { +func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error { path, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTNone, false) if err != nil { return xerrors.Errorf("acquire unsealed sector path: %w", err) diff --git a/ffiwrapper/types.go b/ffiwrapper/types.go index 06c07b715..cf211056f 100644 --- a/ffiwrapper/types.go +++ b/ffiwrapper/types.go @@ -11,10 +11,9 @@ import ( "github.com/filecoin-project/sector-storage/ffiwrapper/basicfs" "github.com/filecoin-project/sector-storage/stores" + "github.com/filecoin-project/sector-storage/storiface" ) -type UnpaddedByteIndex uint64 - type Validator interface { CanCommit(sector stores.SectorPaths) (bool, error) CanProve(sector stores.SectorPaths) (bool, error) @@ -29,8 +28,8 @@ type Storage interface { storage.Prover StorageSealer - UnsealPiece(ctx context.Context, sector abi.SectorID, offset UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error - ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, offset UnpaddedByteIndex, size abi.UnpaddedPieceSize) error + UnsealPiece(ctx context.Context, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error + ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error } type Verifier interface { diff --git a/ffiwrapper/unseal_ranges.go b/ffiwrapper/unseal_ranges.go index 873ac45d0..522b58138 100644 --- a/ffiwrapper/unseal_ranges.go +++ b/ffiwrapper/unseal_ranges.go @@ -5,6 +5,8 @@ import ( "github.com/filecoin-project/go-bitfield/rle" "github.com/filecoin-project/specs-actors/actors/abi" + + "github.com/filecoin-project/sector-storage/storiface" ) // merge gaps between ranges which are close to each other @@ -13,7 +15,7 @@ const mergeGaps = 32 << 20 // TODO const expandRuns = 16 << 20 // unseal more than requested for future requests -func computeUnsealRanges(unsealed rlepluslazy.RunIterator, offset UnpaddedByteIndex, size abi.UnpaddedPieceSize) (rlepluslazy.RunIterator, error) { +func computeUnsealRanges(unsealed rlepluslazy.RunIterator, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (rlepluslazy.RunIterator, error) { todo := pieceRun(offset, size) todo, err := rlepluslazy.Subtract(todo, unsealed) if err != nil { diff --git a/localworker.go b/localworker.go index a01623bc1..710bd47fb 100644 --- a/localworker.go +++ b/localworker.go @@ -183,7 +183,7 @@ func (l *LocalWorker) FinalizeSector(ctx context.Context, sector abi.SectorID) e return nil } -func (l *LocalWorker) UnsealPiece(ctx context.Context, sector abi.SectorID, index ffiwrapper.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) error { +func (l *LocalWorker) UnsealPiece(ctx context.Context, sector abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) error { sb, err := l.sb() if err != nil { return err @@ -200,7 +200,7 @@ func (l *LocalWorker) UnsealPiece(ctx context.Context, sector abi.SectorID, inde return nil } -func (l *LocalWorker) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, index ffiwrapper.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error { +func (l *LocalWorker) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error { sb, err := l.sb() if err != nil { return err diff --git a/manager.go b/manager.go index 7f1cc4a4d..daa522c92 100644 --- a/manager.go +++ b/manager.go @@ -30,8 +30,8 @@ type Worker interface { ffiwrapper.StorageSealer Fetch(ctx context.Context, s abi.SectorID, ft stores.SectorFileType, sealing bool, am stores.AcquireMode) error - UnsealPiece(context.Context, abi.SectorID, ffiwrapper.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error - ReadPiece(context.Context, io.Writer, abi.SectorID, ffiwrapper.UnpaddedByteIndex, abi.UnpaddedPieceSize) error + UnsealPiece(context.Context, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error + ReadPiece(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) error TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) @@ -49,7 +49,7 @@ type Worker interface { type SectorManager interface { SectorSize() abi.SectorSize - ReadPiece(context.Context, io.Writer, abi.SectorID, ffiwrapper.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error + ReadPiece(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error ffiwrapper.StorageSealer storage.Prover @@ -190,7 +190,7 @@ func schedFetch(sector abi.SectorID, ft stores.SectorFileType, sealing bool, am } } -func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.SectorID, offset ffiwrapper.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) error { +func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) error { best, err := m.index.StorageFindSector(ctx, sector, stores.FTUnsealed, false) if err != nil { return xerrors.Errorf("read piece: checking for already existing unsealed sector: %w", err) diff --git a/mock/mock.go b/mock/mock.go index 6fad6bc20..403a1cbf6 100644 --- a/mock/mock.go +++ b/mock/mock.go @@ -17,6 +17,7 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/sector-storage/ffiwrapper" + "github.com/filecoin-project/sector-storage/storiface" ) var log = logging.Logger("sbmock") @@ -267,7 +268,7 @@ func generateFakePoSt(sectorInfo []abi.SectorInfo) []abi.PoStProof { } } -func (mgr *SectorMgr) ReadPiece(ctx context.Context, w io.Writer, sectorID abi.SectorID, offset ffiwrapper.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, c cid.Cid) error { +func (mgr *SectorMgr) ReadPiece(ctx context.Context, w io.Writer, sectorID abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, c cid.Cid) error { if len(mgr.sectors[sectorID].pieces) > 1 { panic("implme") } diff --git a/storiface/ffi.go b/storiface/ffi.go index 354b1e9c4..6821f9b35 100644 --- a/storiface/ffi.go +++ b/storiface/ffi.go @@ -3,3 +3,5 @@ package storiface import "errors" var ErrSectorNotFound = errors.New("sector not found") + +type UnpaddedByteIndex uint64 diff --git a/testworker_test.go b/testworker_test.go index 68d70c838..e61cf96ba 100644 --- a/testworker_test.go +++ b/testworker_test.go @@ -9,7 +9,6 @@ import ( "github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-storage/storage" - "github.com/filecoin-project/sector-storage/ffiwrapper" "github.com/filecoin-project/sector-storage/mock" "github.com/filecoin-project/sector-storage/sealtasks" "github.com/filecoin-project/sector-storage/stores" @@ -50,11 +49,11 @@ func (t *testWorker) NewSector(ctx context.Context, sector abi.SectorID) error { panic("implement me") } -func (t *testWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index ffiwrapper.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) error { +func (t *testWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) error { panic("implement me") } -func (t *testWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index ffiwrapper.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error { +func (t *testWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error { panic("implement me") } From 6fe92ff13c6534eae95e78b365b96a299d558a5d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Tue, 26 May 2020 10:25:29 +0200 Subject: [PATCH 092/199] go fmt --- localworker.go | 4 ++-- stores/index.go | 12 ++++++------ stores/interface.go | 2 ++ stores/local_test.go | 3 +-- 4 files changed, 11 insertions(+), 10 deletions(-) diff --git a/localworker.go b/localworker.go index 710bd47fb..694e87276 100644 --- a/localworker.go +++ b/localworker.go @@ -55,7 +55,7 @@ func NewLocalWorker(wcfg WorkerConfig, store stores.Store, local *stores.Local, } type localWorkerPathProvider struct { - w *LocalWorker + w *LocalWorker op stores.AcquireMode } @@ -193,7 +193,7 @@ func (l *LocalWorker) UnsealPiece(ctx context.Context, sector abi.SectorID, inde return xerrors.Errorf("unsealing sector: %w", err) } - if err := l.storage.RemoveCopies(ctx, sector, stores.FTSealed | stores.FTCache); err != nil { + if err := l.storage.RemoveCopies(ctx, sector, stores.FTSealed|stores.FTCache); err != nil { return xerrors.Errorf("removing source data: %w", err) } diff --git a/stores/index.go b/stores/index.go index 0dafd87ed..e1e35875d 100644 --- a/stores/index.go +++ b/stores/index.go @@ -278,9 +278,9 @@ func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft Sector } out = append(out, SectorStorageInfo{ - ID: id, - URLs: urls, - Weight: st.info.Weight * n, // storage with more sector types is better + ID: id, + URLs: urls, + Weight: st.info.Weight * n, // storage with more sector types is better CanSeal: st.info.CanSeal, CanStore: st.info.CanStore, @@ -307,9 +307,9 @@ func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft Sector } out = append(out, SectorStorageInfo{ - ID: id, - URLs: urls, - Weight: st.info.Weight * 0, // TODO: something better than just '0' + ID: id, + URLs: urls, + Weight: st.info.Weight * 0, // TODO: something better than just '0' CanSeal: st.info.CanSeal, CanStore: st.info.CanStore, diff --git a/stores/interface.go b/stores/interface.go index a818406a5..01ac2bffe 100644 --- a/stores/interface.go +++ b/stores/interface.go @@ -10,12 +10,14 @@ import ( ) type PathType bool + const ( PathStorage = false PathSealing = true ) type AcquireMode string + const ( AcquireMove = "move" AcquireCopy = "copy" diff --git a/stores/local_test.go b/stores/local_test.go index d98c59182..8e654d725 100644 --- a/stores/local_test.go +++ b/stores/local_test.go @@ -16,7 +16,7 @@ const pathSize = 16 << 20 type TestingLocalStorage struct { root string - c StorageConfig + c StorageConfig } func (t *TestingLocalStorage) GetStorage() (StorageConfig, error) { @@ -63,7 +63,6 @@ func (t *TestingLocalStorage) init(subpath string) error { return nil } - var _ LocalStorage = &TestingLocalStorage{} func TestLocalStorage(t *testing.T) { From 65f04da920f9139a802825469dfd0373b2a3bf5f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Tue, 26 May 2020 16:39:25 +0200 Subject: [PATCH 093/199] mock: Update interface --- ffiwrapper/sealer_test.go | 12 ------------ mock/mock.go | 12 +++++++++--- 2 files changed, 9 insertions(+), 15 deletions(-) diff --git a/ffiwrapper/sealer_test.go b/ffiwrapper/sealer_test.go index c39d0ade0..73eaa2652 100644 --- a/ffiwrapper/sealer_test.go +++ b/ffiwrapper/sealer_test.go @@ -16,7 +16,6 @@ import ( logging "github.com/ipfs/go-log" "golang.org/x/xerrors" - ffi "github.com/filecoin-project/filecoin-ffi" paramfetch "github.com/filecoin-project/go-paramfetch" "github.com/filecoin-project/specs-actors/actors/abi" @@ -418,14 +417,3 @@ func TestSealAndVerify2(t *testing.T) { post(t, sb, s1, s2) } - -func TestScribbles(t *testing.T) { - rf, w, _ := toReadableFile(bytes.NewReader(bytes.Repeat([]byte{0xff, 0}, 127)), 254) - defer w() - - tf, _ := ioutil.TempFile("/tmp/", "scrb-") - - fmt.Println(tf.Name()) - - fmt.Println(ffi.WriteWithAlignment(abi.RegisteredProof_StackedDRG2KiBSeal, rf, 254, tf, nil)) -} diff --git a/mock/mock.go b/mock/mock.go index 403a1cbf6..53777434c 100644 --- a/mock/mock.go +++ b/mock/mock.go @@ -24,6 +24,7 @@ var log = logging.Logger("sbmock") type SectorMgr struct { sectors map[abi.SectorID]*sectorState + pieces map[cid.Cid][]byte sectorSize abi.SectorSize nextSectorID abi.SectorNumber proofType abi.RegisteredProof @@ -41,6 +42,7 @@ func NewMockSectorMgr(ssize abi.SectorSize) *SectorMgr { return &SectorMgr{ sectors: make(map[abi.SectorID]*sectorState), + pieces: map[cid.Cid][]byte{}, sectorSize: ssize, nextSectorID: 5, proofType: rt, @@ -80,13 +82,17 @@ func (mgr *SectorMgr) AddPiece(ctx context.Context, sectorId abi.SectorID, exist ss.lk.Lock() defer ss.lk.Unlock() - c, err := ffiwrapper.GeneratePieceCIDFromFile(mgr.proofType, r, size) + var b bytes.Buffer + tr := io.TeeReader(r, &b) + + c, err := ffiwrapper.GeneratePieceCIDFromFile(mgr.proofType, tr, size) if err != nil { return abi.PieceInfo{}, xerrors.Errorf("failed to generate piece cid: %w", err) } log.Warn("Generated Piece CID: ", c) + mgr.pieces[c] = b.Bytes() ss.pieces = append(ss.pieces, c) return abi.PieceInfo{ Size: size.Padded(), @@ -269,11 +275,11 @@ func generateFakePoSt(sectorInfo []abi.SectorInfo) []abi.PoStProof { } func (mgr *SectorMgr) ReadPiece(ctx context.Context, w io.Writer, sectorID abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, c cid.Cid) error { - if len(mgr.sectors[sectorID].pieces) > 1 { + if len(mgr.sectors[sectorID].pieces) > 1 || offset != 0 { panic("implme") } - _, err := io.CopyN(w, bytes.NewReader(mgr.sectors[sectorID].pieces[0].Bytes()[offset:]), int64(size)) + _, err := io.CopyN(w, bytes.NewReader(mgr.pieces[mgr.sectors[sectorID].pieces[0]]), int64(size)) return err } From 9690e84722a3ebb5d070e2cbf7edf4af617ec3e1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Tue, 26 May 2020 18:36:19 +0200 Subject: [PATCH 094/199] mod tidy --- go.sum | 5 ----- 1 file changed, 5 deletions(-) diff --git a/go.sum b/go.sum index e0f7a0f83..4c461ae4e 100644 --- a/go.sum +++ b/go.sum @@ -31,8 +31,6 @@ github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200131012142-05d80eeccc5e/ github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200424220931-6263827e49f2/go.mod h1:boRtQhzmxNocrMxOXo1NYn4oUc1NGvR8tEa79wApNXg= github.com/filecoin-project/go-bitfield v0.0.0-20200309034705-8c7ac40bd550 h1:aockulLU8Qjkdj4FQz53WQpNosAIYk8DxRediRLkE5c= github.com/filecoin-project/go-bitfield v0.0.0-20200309034705-8c7ac40bd550/go.mod h1:iodsLxOFZnqKtjj2zkgqzoGNrv6vUqj69AT/J8DKXEw= -github.com/filecoin-project/go-bitfield v0.0.0-20200416002808-b3ee67ec9060 h1:/3qjGMn6ukXgZJHsIbuwGL7ipla8DOV3uHZDBJkBYfU= -github.com/filecoin-project/go-bitfield v0.0.0-20200416002808-b3ee67ec9060/go.mod h1:iodsLxOFZnqKtjj2zkgqzoGNrv6vUqj69AT/J8DKXEw= github.com/filecoin-project/go-bitfield v0.0.1 h1:Xg/JnrqqE77aJVKdbEyR04n9FZQWhwrN+buDgQCVpZU= github.com/filecoin-project/go-bitfield v0.0.1/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus= @@ -43,9 +41,6 @@ github.com/filecoin-project/go-paramfetch v0.0.1 h1:gV7bs5YaqlgpGFMiLxInGK2L1FyC github.com/filecoin-project/go-paramfetch v0.0.1/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= github.com/filecoin-project/specs-actors v0.0.0-20200409043918-e569f4a2f504 h1:mwuAaqxKThl70+7FkGdFKVLdwaQZQ8XmscKdhSBBtnc= github.com/filecoin-project/specs-actors v0.0.0-20200409043918-e569f4a2f504/go.mod h1:mdJraXq5vMy0+/FqVQIrnNlpQ/Em6zeu06G/ltQ0/lA= -github.com/filecoin-project/specs-actors v0.3.0/go.mod h1:nQYnFbQ7Y0bHZyq6HDEuVlCPR+U3z5Q3wMOQ+2aiV+Y= -github.com/filecoin-project/specs-actors v0.4.1-0.20200508202406-42be6629284d h1:vzuhvR+huV95QU+KSBCyQvLP6LUtwrPUyeUTzOx1B5I= -github.com/filecoin-project/specs-actors v0.4.1-0.20200508202406-42be6629284d/go.mod h1:UW3ft23q6VS8wQoNqLWjENsu9gu1uh6lxOd+H8cwhT8= github.com/filecoin-project/specs-actors v0.5.4-0.20200521014528-0df536f7e461 h1:yyAoJ9dNboljDWj0uBdJEbdaTak/YhkjYUQt0GzlY0A= github.com/filecoin-project/specs-actors v0.5.4-0.20200521014528-0df536f7e461/go.mod h1:r5btrNzZD0oBkEz1pohv80gSCXQnqGrD0kYwOTiExyE= github.com/filecoin-project/specs-storage v0.0.0-20200417134612-61b2d91a6102 h1:T3f/zkuvgtgqcXrb0NO3BicuveGOxxUAMPa/Yif2kuE= From e3d3887a0b2b2c18b4c5e6f26c56b1dc34d3e009 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Tue, 26 May 2020 21:08:51 +0200 Subject: [PATCH 095/199] Address review --- ffiwrapper/partialfile.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/ffiwrapper/partialfile.go b/ffiwrapper/partialfile.go index a278e7347..e7f89302e 100644 --- a/ffiwrapper/partialfile.go +++ b/ffiwrapper/partialfile.go @@ -32,13 +32,14 @@ type partialFile struct { file *os.File } -func writeTrailer(psz int64, w *os.File, r rlepluslazy.RunIterator) error { +func writeTrailer(maxPieceSize int64, w *os.File, r rlepluslazy.RunIterator) error { trailer, err := rlepluslazy.EncodeRuns(r, nil) if err != nil { return xerrors.Errorf("encoding trailer: %w", err) } - if _, err := w.Seek(psz, io.SeekStart); err != nil { + // maxPieceSize == unpadded(sectorSize) == trailer start + if _, err := w.Seek(maxPieceSize, io.SeekStart); err != nil { return xerrors.Errorf("seek to trailer start: %w", err) } @@ -51,7 +52,7 @@ func writeTrailer(psz int64, w *os.File, r rlepluslazy.RunIterator) error { return xerrors.Errorf("writing trailer length: %w", err) } - return w.Truncate(psz + int64(rb) + 4) + return w.Truncate(maxPieceSize + int64(rb) + 4) } func createPartialFile(maxPieceSize abi.UnpaddedPieceSize, path string) (*partialFile, error) { From 4db83513482586d42f848b8e8b1b27cafa1e7bde Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 28 May 2020 19:15:15 +0200 Subject: [PATCH 096/199] fr32 utils --- ffiwrapper/files.go | 2 +- ffiwrapper/sealer_cgo.go | 6 +- ffiwrapper/sealer_test.go | 18 ++- fr32/fr32.go | 138 +++++++++++++++++++++ fr32/fr32_test.go | 248 ++++++++++++++++++++++++++++++++++++++ 5 files changed, 407 insertions(+), 5 deletions(-) create mode 100644 fr32/fr32.go create mode 100644 fr32/fr32_test.go diff --git a/ffiwrapper/files.go b/ffiwrapper/files.go index 30e4a6803..a13776d2d 100644 --- a/ffiwrapper/files.go +++ b/ffiwrapper/files.go @@ -8,7 +8,7 @@ import ( "golang.org/x/xerrors" ) -func toReadableFile(r io.Reader, n int64) (*os.File, func() error, error) { +func ToReadableFile(r io.Reader, n int64) (*os.File, func() error, error) { f, ok := r.(*os.File) if ok { return f, func() error { return nil }, nil diff --git a/ffiwrapper/sealer_cgo.go b/ffiwrapper/sealer_cgo.go index 382d3853f..b9b7975ab 100644 --- a/ffiwrapper/sealer_cgo.go +++ b/ffiwrapper/sealer_cgo.go @@ -105,7 +105,7 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie return abi.PieceInfo{}, xerrors.Errorf("getting partial file writer: %w", err) } pr := io.TeeReader(io.LimitReader(file, int64(pieceSize)), w) - prf, werr, err := toReadableFile(pr, int64(pieceSize)) + prf, werr, err := ToReadableFile(pr, int64(pieceSize)) if err != nil { return abi.PieceInfo{}, xerrors.Errorf("getting tee reader pipe: %w", err) } @@ -405,7 +405,7 @@ func (sb *Sealer) rewriteAsPadded(unsealed string, staged string) error { // OPTIMIZATION: upr is a file, so it could be passed straight to // WriteWithAlignment IF it wouldn't care about the trailer - lupr, werr, err := toReadableFile(io.LimitReader(upr, int64(maxPieceSize)), int64(maxPieceSize)) + lupr, werr, err := ToReadableFile(io.LimitReader(upr, int64(maxPieceSize)), int64(maxPieceSize)) if err != nil { return err } @@ -489,7 +489,7 @@ func (sb *Sealer) FinalizeSector(ctx context.Context, sector abi.SectorID) error } func GeneratePieceCIDFromFile(proofType abi.RegisteredProof, piece io.Reader, pieceSize abi.UnpaddedPieceSize) (cid.Cid, error) { - f, werr, err := toReadableFile(piece, int64(pieceSize)) + f, werr, err := ToReadableFile(piece, int64(pieceSize)) if err != nil { return cid.Undef, err } diff --git a/ffiwrapper/sealer_test.go b/ffiwrapper/sealer_test.go index 9c73b2496..9af563dc3 100644 --- a/ffiwrapper/sealer_test.go +++ b/ffiwrapper/sealer_test.go @@ -16,12 +16,13 @@ import ( logging "github.com/ipfs/go-log" "golang.org/x/xerrors" + ffi "github.com/filecoin-project/filecoin-ffi" paramfetch "github.com/filecoin-project/go-paramfetch" "github.com/filecoin-project/specs-actors/actors/abi" + "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/sector-storage/ffiwrapper/basicfs" "github.com/filecoin-project/sector-storage/stores" - "github.com/filecoin-project/specs-storage/storage" ) func init() { @@ -414,3 +415,18 @@ func TestSealAndVerify2(t *testing.T) { post(t, sb, s1, s2) } + +func BenchmarkWriteWithAlignment(b *testing.B) { + bt := abi.UnpaddedPieceSize(2 * 127 * 1024 * 1024) + b.SetBytes(int64(bt)) + + for i := 0; i < b.N; i++ { + b.StopTimer() + rf, w, _ := ToReadableFile(bytes.NewReader(bytes.Repeat([]byte{0xff, 0}, int(bt/2))), int64(bt)) + tf, _ := ioutil.TempFile("/tmp/", "scrb-") + b.StartTimer() + + ffi.WriteWithAlignment(abi.RegisteredProof_StackedDRG2KiBSeal, rf, bt, tf, nil) + w() + } +} diff --git a/fr32/fr32.go b/fr32/fr32.go new file mode 100644 index 000000000..cb2dcaed6 --- /dev/null +++ b/fr32/fr32.go @@ -0,0 +1,138 @@ +package fr32 + +import ( + "math/bits" + "runtime" + "sync" + + "github.com/filecoin-project/specs-actors/actors/abi" +) + +var mtTresh = 32 << 20 + +func mt(in, out []byte, padLen int, op func(in, out []byte)) { + threads := padLen / mtTresh + if threads > runtime.NumCPU() { + threads = 1 << (32 - bits.LeadingZeros32(uint32(runtime.NumCPU()))) + } + threadBytes := abi.PaddedPieceSize(padLen / threads) + + var wg sync.WaitGroup + wg.Add(threads) + + for i := 0; i < threads; i++ { + go func(thread int) { + defer wg.Done() + + start := threadBytes * abi.PaddedPieceSize(thread) + end := start + threadBytes + + op(in[start.Unpadded():end.Unpadded()], out[start:end]) + }(i) + } + wg.Wait() +} + +// Assumes len(in)%127==0 and len(out)%128==0 +func Pad(in, out []byte) { + if len(out) > mtTresh { + mt(in, out, len(out), Pad) + return + } + + chunks := len(out) / 128 + for chunk := 0; chunk < chunks; chunk++ { + inOff := chunk * 127 + outOff := chunk * 128 + + copy(out[outOff:outOff+31], in[inOff:inOff+31]) + + t := in[inOff+31] >> 6 + out[outOff+31] = in[inOff+31] & 0x3f + var v byte + + for i := 32; i < 64; i++ { + v = in[inOff+i] + out[outOff+i] = (v << 2) | t + t = v >> 6 + } + + t = v >> 4 + out[outOff+63] &= 0x3f + + for i := 64; i < 96; i++ { + v = in[inOff+i] + out[outOff+i] = (v << 4) | t + t = v >> 4 + } + + t = v >> 2 + out[outOff+95] &= 0x3f + + for i := 96; i < 127; i++ { + v = in[inOff+i] + out[outOff+i] = (v << 6) | t + t = v >> 2 + } + + out[outOff+127] = t & 0x3f + } +} + +// Assumes len(in)%128==0 and len(out)%127==0 +func Unpad(in []byte, out []byte) { + if len(out) > mtTresh { + mt(in, out, len(in), Unpad) + return + } + + chunks := len(in) / 128 + for chunk := 0; chunk < chunks; chunk++ { + inOffNext := chunk*128 + 1 + outOff := chunk * 127 + + at := in[chunk*128] + + for i := 0; i < 32; i++ { + next := in[i+inOffNext] + + out[outOff+i] = at + //out[i] |= next << 8 + + at = next + } + + out[outOff+31] |= at << 6 + + for i := 32; i < 64; i++ { + next := in[i+inOffNext] + + out[outOff+i] = at >> 2 + out[outOff+i] |= next << 6 + + at = next + } + + out[outOff+63] ^= (at << 6) ^ (at << 4) + + for i := 64; i < 96; i++ { + next := in[i+inOffNext] + + out[outOff+i] = at >> 4 + out[outOff+i] |= next << 4 + + at = next + } + + out[outOff+95] ^= (at << 4) ^ (at << 2) + + for i := 96; i < 127; i++ { + next := in[i+inOffNext] + + out[outOff+i] = at >> 6 + out[outOff+i] |= next << 2 + + at = next + } + } +} diff --git a/fr32/fr32_test.go b/fr32/fr32_test.go new file mode 100644 index 000000000..df500035d --- /dev/null +++ b/fr32/fr32_test.go @@ -0,0 +1,248 @@ +package fr32 + +import ( + "bytes" + "io" + "io/ioutil" + "math/rand" + "os" + "testing" + + ffi "github.com/filecoin-project/filecoin-ffi" + "github.com/filecoin-project/sector-storage/ffiwrapper" + "github.com/filecoin-project/specs-actors/actors/abi" + "github.com/stretchr/testify/require" +) + +func padFFI(buf []byte) []byte { + rf, w, _ := ffiwrapper.ToReadableFile(bytes.NewReader(buf), int64(len(buf))) + tf, _ := ioutil.TempFile("/tmp/", "scrb-") + + _, _, _, err := ffi.WriteWithAlignment(abi.RegisteredProof_StackedDRG32GiBSeal, rf, abi.UnpaddedPieceSize(len(buf)), tf, nil) + if err != nil { + panic(err) + } + if err := w(); err != nil { + panic(err) + } + + if _, err := tf.Seek(io.SeekStart, 0); err != nil { + panic(err) + } + + padded, err := ioutil.ReadAll(tf) + if err != nil { + panic(err) + } + + if err := tf.Close(); err != nil { + panic(err) + } + + if err := os.Remove(tf.Name()); err != nil { + panic(err) + } + + return padded +} + +func TestPadChunkFFI(t *testing.T) { + testByteChunk := func(b byte) func(*testing.T) { + return func(t *testing.T) { + var buf [128]byte + copy(buf[:], bytes.Repeat([]byte{b}, 127)) + + Pad(buf[:], buf[:]) + + expect := padFFI(bytes.Repeat([]byte{b}, 127)) + + require.Equal(t, expect, buf[:]) + } + } + + t.Run("ones", testByteChunk(0xff)) + t.Run("lsb1", testByteChunk(0x01)) + t.Run("msb1", testByteChunk(0x80)) + t.Run("zero", testByteChunk(0x0)) + t.Run("mid", testByteChunk(0x3c)) +} + +func TestPadChunkRandEqFFI(t *testing.T) { + for i := 0; i < 200; i++ { + var input [127]byte + rand.Read(input[:]) + + var buf [128]byte + + Pad(input[:], buf[:]) + + expect := padFFI(input[:]) + + require.Equal(t, expect, buf[:]) + } +} + +func TestRoundtrip(t *testing.T) { + testByteChunk := func(b byte) func(*testing.T) { + return func(t *testing.T) { + var buf [128]byte + input := bytes.Repeat([]byte{0x01}, 127) + + Pad(input, buf[:]) + + var out [127]byte + Unpad(buf[:], out[:]) + + require.Equal(t, input, out[:]) + } + } + + t.Run("ones", testByteChunk(0xff)) + t.Run("lsb1", testByteChunk(0x01)) + t.Run("msb1", testByteChunk(0x80)) + t.Run("zero", testByteChunk(0x0)) + t.Run("mid", testByteChunk(0x3c)) +} + +func TestRoundtripChunkRand(t *testing.T) { + for i := 0; i < 200; i++ { + var input [127]byte + rand.Read(input[:]) + + var buf [128]byte + copy(buf[:], input[:]) + + Pad(buf[:], buf[:]) + + var out [127]byte + Unpad(buf[:], out[:]) + + require.Equal(t, input[:], out[:]) + } +} + +func TestRoundtrip16MRand(t *testing.T) { + up := abi.PaddedPieceSize(16 << 20).Unpadded() + + input := make([]byte, up) + rand.Read(input[:]) + + buf := make([]byte, 16<<20) + + Pad(input, buf) + + out := make([]byte, up) + Unpad(buf, out) + + require.Equal(t, input, out) + + ffi := padFFI(input) + require.Equal(t, ffi, buf) +} + +func BenchmarkPadChunk(b *testing.B) { + var buf [128]byte + in := bytes.Repeat([]byte{0xff}, 127) + + b.SetBytes(127) + + for i := 0; i < b.N; i++ { + Pad(in, buf[:]) + } +} + +func BenchmarkChunkRoundtrip(b *testing.B) { + var buf [128]byte + copy(buf[:], bytes.Repeat([]byte{0xff}, 127)) + var out [127]byte + + b.SetBytes(127) + + for i := 0; i < b.N; i++ { + Pad(buf[:], buf[:]) + Unpad(buf[:], out[:]) + } +} + +func BenchmarkUnpadChunk(b *testing.B) { + var buf [128]byte + copy(buf[:], bytes.Repeat([]byte{0xff}, 127)) + + Pad(buf[:], buf[:]) + var out [127]byte + + b.SetBytes(127) + b.ReportAllocs() + + bs := buf[:] + + for i := 0; i < b.N; i++ { + Unpad(bs, out[:]) + } +} + +func BenchmarkUnpad16MChunk(b *testing.B) { + up := abi.PaddedPieceSize(16 << 20).Unpadded() + + var buf [16 << 20]byte + + Pad(bytes.Repeat([]byte{0xff}, int(up)), buf[:]) + var out [16 << 20]byte + + b.SetBytes(16 << 20) + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + Unpad(buf[:], out[:]) + } +} + +func BenchmarkPad16MChunk(b *testing.B) { + up := abi.PaddedPieceSize(16 << 20).Unpadded() + + var buf [16 << 20]byte + + in := bytes.Repeat([]byte{0xff}, int(up)) + + b.SetBytes(16 << 20) + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + Pad(in, buf[:]) + } +} + +func BenchmarkPad1GChunk(b *testing.B) { + up := abi.PaddedPieceSize(1 << 30).Unpadded() + + var buf [1 << 30]byte + + in := bytes.Repeat([]byte{0xff}, int(up)) + + b.SetBytes(1 << 30) + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + Pad(in, buf[:]) + } +} + +func BenchmarkUnpad1GChunk(b *testing.B) { + up := abi.PaddedPieceSize(1 << 30).Unpadded() + + var buf [1 << 30]byte + + Pad(bytes.Repeat([]byte{0xff}, int(up)), buf[:]) + var out [1 << 30]byte + + b.SetBytes(1 << 30) + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + Unpad(buf[:], out[:]) + } +} From d38296a5536ccb92f3ad00adca506f8109929a04 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 28 May 2020 19:49:25 +0200 Subject: [PATCH 097/199] fr32: More testing --- fr32/fr32.go | 21 +++++++++++-- fr32/fr32_ffi_cmp_test.go | 66 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 84 insertions(+), 3 deletions(-) create mode 100644 fr32/fr32_ffi_cmp_test.go diff --git a/fr32/fr32.go b/fr32/fr32.go index cb2dcaed6..06579dd0d 100644 --- a/fr32/fr32.go +++ b/fr32/fr32.go @@ -10,7 +10,7 @@ import ( var mtTresh = 32 << 20 -func mt(in, out []byte, padLen int, op func(in, out []byte)) { +func mt(in, out []byte, padLen int, op func(unpadded, padded []byte)) { threads := padLen / mtTresh if threads > runtime.NumCPU() { threads = 1 << (32 - bits.LeadingZeros32(uint32(runtime.NumCPU()))) @@ -35,6 +35,15 @@ func mt(in, out []byte, padLen int, op func(in, out []byte)) { // Assumes len(in)%127==0 and len(out)%128==0 func Pad(in, out []byte) { + if len(out) > mtTresh { + mt(in, out, len(out), pad) + return + } + + pad(in, out) +} + +func pad(in, out []byte) { if len(out) > mtTresh { mt(in, out, len(out), Pad) return @@ -79,13 +88,19 @@ func Pad(in, out []byte) { } } + + // Assumes len(in)%128==0 and len(out)%127==0 func Unpad(in []byte, out []byte) { - if len(out) > mtTresh { - mt(in, out, len(in), Unpad) + if len(in) > mtTresh { + mt(out, in, len(in), unpad) return } + unpad(out, in) +} + +func unpad(out, in []byte) { chunks := len(in) / 128 for chunk := 0; chunk < chunks; chunk++ { inOffNext := chunk*128 + 1 diff --git a/fr32/fr32_ffi_cmp_test.go b/fr32/fr32_ffi_cmp_test.go new file mode 100644 index 000000000..24bb24d49 --- /dev/null +++ b/fr32/fr32_ffi_cmp_test.go @@ -0,0 +1,66 @@ +package fr32 + +import ( + "bytes" + "io" + "io/ioutil" + "os" + "testing" + + "github.com/stretchr/testify/require" + + ffi "github.com/filecoin-project/filecoin-ffi" + "github.com/filecoin-project/specs-actors/actors/abi" + + "github.com/filecoin-project/sector-storage/ffiwrapper" +) + +func TestWriteTwoPcs(t *testing.T) { + tf, _ := ioutil.TempFile("/tmp/", "scrb-") + + paddedSize := abi.PaddedPieceSize(16 << 20) + n := 2 + + var rawBytes []byte + + for i := 0; i < n; i++ { + buf := bytes.Repeat([]byte{0xab * byte(i)}, int(paddedSize.Unpadded())) + rawBytes = append(rawBytes, buf...) + + rf, w, _ := ffiwrapper.ToReadableFile(bytes.NewReader(buf), int64(len(buf))) + + _, _, _, err := ffi.WriteWithAlignment(abi.RegisteredProof_StackedDRG32GiBSeal, rf, abi.UnpaddedPieceSize(len(buf)), tf, nil) + if err != nil { + panic(err) + } + if err := w(); err != nil { + panic(err) + } + } + + if _, err := tf.Seek(io.SeekStart, 0); err != nil { + panic(err) + } + + ffiBytes, err := ioutil.ReadAll(tf) + if err != nil { + panic(err) + } + + if err := tf.Close(); err != nil { + panic(err) + } + + if err := os.Remove(tf.Name()); err != nil { + panic(err) + } + + outBytes := make([]byte, int(paddedSize) * n) + Pad(rawBytes, outBytes) + require.Equal(t, ffiBytes, outBytes) + + unpadBytes := make([]byte, int(paddedSize.Unpadded()) * n) + Unpad(ffiBytes, unpadBytes) + require.Equal(t, rawBytes, unpadBytes) +} + From 55867ab48b7037b800c5838f7cce1c462dba0c68 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 29 May 2020 00:17:23 +0200 Subject: [PATCH 098/199] fr32: io.Reader utils --- fr32/fr32.go | 33 +++++++---- fr32/fr32_ffi_cmp_test.go | 5 +- fr32/readers.go | 114 ++++++++++++++++++++++++++++++++++++++ fr32/readers_test.go | 53 ++++++++++++++++++ 4 files changed, 190 insertions(+), 15 deletions(-) create mode 100644 fr32/readers.go create mode 100644 fr32/readers_test.go diff --git a/fr32/fr32.go b/fr32/fr32.go index 06579dd0d..20d158b3d 100644 --- a/fr32/fr32.go +++ b/fr32/fr32.go @@ -8,19 +8,30 @@ import ( "github.com/filecoin-project/specs-actors/actors/abi" ) -var mtTresh = 32 << 20 +var mtTresh = uint64(32 << 20) -func mt(in, out []byte, padLen int, op func(unpadded, padded []byte)) { - threads := padLen / mtTresh - if threads > runtime.NumCPU() { +func mtChunkCount(usz abi.PaddedPieceSize) uint64 { + threads := (uint64(usz)) / mtTresh + if threads > uint64(runtime.NumCPU()) { threads = 1 << (32 - bits.LeadingZeros32(uint32(runtime.NumCPU()))) } - threadBytes := abi.PaddedPieceSize(padLen / threads) + if threads == 0 { + return 1 + } + if threads > 64 { + return 64 // avoid too large buffers + } + return threads +} + +func mt(in, out []byte, padLen int, op func(unpadded, padded []byte)) { + threads := mtChunkCount(abi.PaddedPieceSize(padLen)) + threadBytes := abi.PaddedPieceSize(padLen / int(threads)) var wg sync.WaitGroup - wg.Add(threads) + wg.Add(int(threads)) - for i := 0; i < threads; i++ { + for i := 0; i < int(threads); i++ { go func(thread int) { defer wg.Done() @@ -35,7 +46,7 @@ func mt(in, out []byte, padLen int, op func(unpadded, padded []byte)) { // Assumes len(in)%127==0 and len(out)%128==0 func Pad(in, out []byte) { - if len(out) > mtTresh { + if len(out) > int(mtTresh) { mt(in, out, len(out), pad) return } @@ -44,7 +55,7 @@ func Pad(in, out []byte) { } func pad(in, out []byte) { - if len(out) > mtTresh { + if len(out) > int(mtTresh) { mt(in, out, len(out), Pad) return } @@ -88,11 +99,9 @@ func pad(in, out []byte) { } } - - // Assumes len(in)%128==0 and len(out)%127==0 func Unpad(in []byte, out []byte) { - if len(in) > mtTresh { + if len(in) > int(mtTresh) { mt(out, in, len(in), unpad) return } diff --git a/fr32/fr32_ffi_cmp_test.go b/fr32/fr32_ffi_cmp_test.go index 24bb24d49..ece13051d 100644 --- a/fr32/fr32_ffi_cmp_test.go +++ b/fr32/fr32_ffi_cmp_test.go @@ -55,12 +55,11 @@ func TestWriteTwoPcs(t *testing.T) { panic(err) } - outBytes := make([]byte, int(paddedSize) * n) + outBytes := make([]byte, int(paddedSize)*n) Pad(rawBytes, outBytes) require.Equal(t, ffiBytes, outBytes) - unpadBytes := make([]byte, int(paddedSize.Unpadded()) * n) + unpadBytes := make([]byte, int(paddedSize.Unpadded())*n) Unpad(ffiBytes, unpadBytes) require.Equal(t, rawBytes, unpadBytes) } - diff --git a/fr32/readers.go b/fr32/readers.go new file mode 100644 index 000000000..b7fdc3843 --- /dev/null +++ b/fr32/readers.go @@ -0,0 +1,114 @@ +package fr32 + +import ( + "io" + "math/bits" + + "golang.org/x/xerrors" + + "github.com/filecoin-project/specs-actors/actors/abi" +) + +type padReader struct { + src io.Reader + + left uint64 + work []byte +} + +func NewPadReader(src io.Reader, sz abi.UnpaddedPieceSize) (io.Reader, error) { + if err := sz.Validate(); err != nil { + return nil, xerrors.Errorf("bad piece size: %w", err) + } + + buf := make([]byte, mtTresh*mtChunkCount(sz.Padded())) + + return &padReader{ + src: src, + + left: uint64(sz.Padded()), + work: buf, + }, nil +} + +func (r *padReader) Read(out []byte) (int, error) { + if r.left == 0 { + return 0, io.EOF + } + + outTwoPow := 1 << (63 - bits.LeadingZeros64(uint64(len(out)))) + + if err := abi.PaddedPieceSize(outTwoPow).Validate(); err != nil { + return 0, xerrors.Errorf("output must be of valid padded piece size: %w", err) + } + + todo := abi.PaddedPieceSize(outTwoPow).Unpadded() + if r.left < uint64(todo.Padded()) { + todo = abi.PaddedPieceSize(1 << (63 - bits.LeadingZeros64(r.left))).Unpadded() + } + + r.left -= uint64(todo.Padded()) + + n, err := r.src.Read(r.work[:todo]) + if err != nil && err != io.EOF { + return n, err + } + + Pad(r.work[:todo], out[:todo.Padded()]) + + return int(todo.Padded()), err +} + +type unpadReader struct { + src io.Reader + + left uint64 + work []byte +} + +func NewUnpadReader(src io.Reader, sz abi.PaddedPieceSize) (io.Reader, error) { + if err := sz.Validate(); err != nil { + return nil, xerrors.Errorf("bad piece size: %w", err) + } + + buf := make([]byte, mtTresh*mtChunkCount(sz)) + + return &unpadReader{ + src: src, + + left: uint64(sz), + work: buf, + }, nil +} + +func (r *unpadReader) Read(out []byte) (int, error) { + if r.left == 0 { + return 0, io.EOF + } + + outTwoPow := 1 << (63 - bits.LeadingZeros64(uint64(len(out)))) + + if err := abi.PaddedPieceSize(outTwoPow).Validate(); err != nil { + return 0, xerrors.Errorf("output must be of valid padded piece size: %w", err) + } + + todo := abi.PaddedPieceSize(outTwoPow) + if r.left < uint64(todo) { + todo = abi.PaddedPieceSize(1 << (63 - bits.LeadingZeros64(r.left))) + } + + r.left -= uint64(todo) + + n, err := r.src.Read(r.work[:todo]) + if err != nil && err != io.EOF { + return n, err + } + + if n != int(todo) { + return 0, xerrors.Errorf("didn't read enough: %w", err) + } + + Unpad(r.work[:todo], out[:todo.Unpadded()]) + + return int(todo.Unpadded()), err +} diff --git a/fr32/readers_test.go b/fr32/readers_test.go new file mode 100644 index 000000000..b987e8287 --- /dev/null +++ b/fr32/readers_test.go @@ -0,0 +1,53 @@ +package fr32 + +import ( + "bytes" + "io/ioutil" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/specs-actors/actors/abi" +) + +func TestPadReader(t *testing.T) { + ps := abi.PaddedPieceSize(64 << 20).Unpadded() + + raw := bytes.Repeat([]byte{0x55}, int(ps)) + + r, err := NewPadReader(bytes.NewReader(raw), ps) + if err != nil { + t.Fatal(err) + } + + readerPadded, err := ioutil.ReadAll(r) + if err != nil { + t.Fatal(err) + } + + padOut := make([]byte, ps.Padded()) + Pad(raw, padOut) + + require.Equal(t, padOut, readerPadded) +} + +func TestUnpadReader(t *testing.T) { + ps := abi.PaddedPieceSize(64 << 20).Unpadded() + + raw := bytes.Repeat([]byte{0x77}, int(ps)) + + padOut := make([]byte, ps.Padded()) + Pad(raw, padOut) + + r, err := NewUnpadReader(bytes.NewReader(padOut), ps.Padded()) + if err != nil { + t.Fatal(err) + } + + readered, err := ioutil.ReadAll(r) + if err != nil { + t.Fatal(err) + } + + require.Equal(t, raw, readered) +} From 2a70ff3cf3ed7a78a55c375728bc2985239041af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 29 May 2020 01:33:00 +0200 Subject: [PATCH 099/199] wireup fr32 padding to the unsealed file --- ffiwrapper/partialfile.go | 14 ++++---- ffiwrapper/sealer_cgo.go | 64 ++++++++++++++++++++++++------------- ffiwrapper/unseal_ranges.go | 2 +- fr32/fr32.go | 5 --- fr32/readers.go | 37 ++++++++++++++++++++- storiface/ffi.go | 12 ++++++- 6 files changed, 96 insertions(+), 38 deletions(-) diff --git a/ffiwrapper/partialfile.go b/ffiwrapper/partialfile.go index e7f89302e..094448e89 100644 --- a/ffiwrapper/partialfile.go +++ b/ffiwrapper/partialfile.go @@ -24,7 +24,7 @@ const veryLargeRle = 1 << 20 // [unpadded (raw) data][rle+][4B LE length fo the rle+ field] type partialFile struct { - maxPiece abi.UnpaddedPieceSize + maxPiece abi.PaddedPieceSize path string allocated rlepluslazy.RLE @@ -55,7 +55,7 @@ func writeTrailer(maxPieceSize int64, w *os.File, r rlepluslazy.RunIterator) err return w.Truncate(maxPieceSize + int64(rb) + 4) } -func createPartialFile(maxPieceSize abi.UnpaddedPieceSize, path string) (*partialFile, error) { +func createPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*partialFile, error) { f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644) if err != nil { return nil, xerrors.Errorf("openning partial file '%s': %w", path, err) @@ -84,7 +84,7 @@ func createPartialFile(maxPieceSize abi.UnpaddedPieceSize, path string) (*partia return openPartialFile(maxPieceSize, path) } -func openPartialFile(maxPieceSize abi.UnpaddedPieceSize, path string) (*partialFile, error) { +func openPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*partialFile, error) { f, err := os.OpenFile(path, os.O_RDWR, 0644) if err != nil { return nil, xerrors.Errorf("openning partial file '%s': %w", path, err) @@ -164,7 +164,7 @@ func (pf *partialFile) Close() error { return pf.file.Close() } -func (pf *partialFile) Writer(offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (io.Writer, error) { +func (pf *partialFile) Writer(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) (io.Writer, error) { if _, err := pf.file.Seek(int64(offset), io.SeekStart); err != nil { return nil, xerrors.Errorf("seek piece start: %w", err) } @@ -193,7 +193,7 @@ func (pf *partialFile) Writer(offset storiface.UnpaddedByteIndex, size abi.Unpad return pf.file, nil } -func (pf *partialFile) MarkAllocated(offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error { +func (pf *partialFile) MarkAllocated(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) error { have, err := pf.allocated.RunIterator() if err != nil { return err @@ -211,7 +211,7 @@ func (pf *partialFile) MarkAllocated(offset storiface.UnpaddedByteIndex, size ab return nil } -func (pf *partialFile) Reader(offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (*os.File, error) { +func (pf *partialFile) Reader(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) (*os.File, error) { if _, err := pf.file.Seek(int64(offset), io.SeekStart); err != nil { return nil, xerrors.Errorf("seek piece start: %w", err) } @@ -244,7 +244,7 @@ func (pf *partialFile) Allocated() (rlepluslazy.RunIterator, error) { return pf.allocated.RunIterator() } -func pieceRun(offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) rlepluslazy.RunIterator { +func pieceRun(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) rlepluslazy.RunIterator { var runs []rlepluslazy.Run if offset > 0 { runs = append(runs, rlepluslazy.Run{ diff --git a/ffiwrapper/sealer_cgo.go b/ffiwrapper/sealer_cgo.go index b9b7975ab..aace82c44 100644 --- a/ffiwrapper/sealer_cgo.go +++ b/ffiwrapper/sealer_cgo.go @@ -8,7 +8,6 @@ import ( "io/ioutil" "math/bits" "os" - "path/filepath" "syscall" "github.com/ipfs/go-cid" @@ -18,6 +17,7 @@ import ( "github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-storage/storage" + "github.com/filecoin-project/sector-storage/fr32" "github.com/filecoin-project/sector-storage/stores" "github.com/filecoin-project/sector-storage/storiface" "github.com/filecoin-project/sector-storage/zerocomm" @@ -55,9 +55,9 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie offset += size } - maxPieceSize := abi.PaddedPieceSize(sb.ssize).Unpadded() + maxPieceSize := abi.PaddedPieceSize(sb.ssize) - if offset+pieceSize > maxPieceSize { + if offset.Padded()+pieceSize.Padded() > maxPieceSize { return abi.PieceInfo{}, xerrors.Errorf("can't add %d byte piece to sector %v with %d bytes of existing pieces", pieceSize, sector, offset) } @@ -100,10 +100,16 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie } } - w, err := stagedFile.Writer(storiface.UnpaddedByteIndex(offset), pieceSize) + w, err := stagedFile.Writer(storiface.UnpaddedByteIndex(offset).Padded(), pieceSize.Padded()) if err != nil { return abi.PieceInfo{}, xerrors.Errorf("getting partial file writer: %w", err) } + + w, err = fr32.NewPadWriter(w, pieceSize) + if err != nil { + return abi.PieceInfo{}, xerrors.Errorf("creating padded reader: %w", err) + } + pr := io.TeeReader(io.LimitReader(file, int64(pieceSize)), w) prf, werr, err := ToReadableFile(pr, int64(pieceSize)) if err != nil { @@ -115,7 +121,7 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie return abi.PieceInfo{}, xerrors.Errorf("generating piece commitment: %w", err) } - if err := stagedFile.MarkAllocated(storiface.UnpaddedByteIndex(offset), pieceSize); err != nil { + if err := stagedFile.MarkAllocated(storiface.UnpaddedByteIndex(offset).Padded(), pieceSize.Padded()); err != nil { return abi.PieceInfo{}, xerrors.Errorf("marking data range as allocated: %w", err) } @@ -137,7 +143,7 @@ func (cf closerFunc) Close() error { } func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error { - maxPieceSize := abi.PaddedPieceSize(sb.ssize).Unpadded() + maxPieceSize := abi.PaddedPieceSize(sb.ssize) // try finding existing unsealedPath, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTNone, false) @@ -188,7 +194,7 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s } defer srcDone() - var at, nextat uint64 + var at, nextat abi.PaddedPieceSize for { piece, err := toUnseal.NextRun() if err != nil { @@ -196,13 +202,13 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s } at = nextat - nextat += piece.Len + nextat += abi.PaddedPieceSize(piece.Len) if !piece.Val { continue } - out, err := pf.Writer(offset, size) + out, err := pf.Writer(offset.Padded(), size.Padded()) if err != nil { return xerrors.Errorf("getting partial file writer: %w", err) } @@ -241,7 +247,13 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s } defer outpipe.Close() - _, perr = io.CopyN(out, outpipe, int64(size)) + padreader, err := fr32.NewPadReader(outpipe, abi.PaddedPieceSize(piece.Len).Unpadded()) + if err != nil { + perr = xerrors.Errorf("creating new padded reader: %w", err) + return + } + + _, perr = io.CopyN(out, padreader, int64(size)) }() } // @@ -255,8 +267,8 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s sector.Miner, randomness, commd, - at, - piece.Len) + uint64(at.Unpadded()), + uint64(abi.PaddedPieceSize(piece.Len).Unpadded())) if err != nil { return xerrors.Errorf("unseal range: %w", err) } @@ -271,7 +283,7 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s return xerrors.Errorf("piping output to unsealed file: %w", perr) } - if err := pf.MarkAllocated(storiface.UnpaddedByteIndex(at), abi.UnpaddedPieceSize(piece.Len)); err != nil { + if err := pf.MarkAllocated(storiface.PaddedByteIndex(at), abi.PaddedPieceSize(piece.Len)); err != nil { return xerrors.Errorf("marking unsealed range as allocated: %w", err) } @@ -290,20 +302,25 @@ func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector abi.Se } defer done() - maxPieceSize := abi.PaddedPieceSize(sb.ssize).Unpadded() + maxPieceSize := abi.PaddedPieceSize(sb.ssize) pf, err := openPartialFile(maxPieceSize, path.Unsealed) if xerrors.Is(err, os.ErrNotExist) { return xerrors.Errorf("opening partial file: %w", err) } - f, err := pf.Reader(offset, size) + f, err := pf.Reader(offset.Padded(), size.Padded()) if err != nil { pf.Close() return xerrors.Errorf("getting partial file reader: %w", err) } - if _, err := io.CopyN(writer, f, int64(size)); err != nil { + upr, err := fr32.NewUnpadReader(f, size.Padded()) + if err != nil { + return xerrors.Errorf("creating unpadded reader: %w", err) + } + + if _, err := io.CopyN(writer, upr, int64(size)); err != nil { pf.Close() return xerrors.Errorf("reading unsealed file: %w", err) } @@ -355,22 +372,22 @@ func (sb *Sealer) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke return nil, xerrors.Errorf("aggregated piece sizes don't match sector size: %d != %d (%d)", sum, ussize, int64(ussize-sum)) } - staged := filepath.Join(paths.Cache, "staged") + //staged := filepath.Join(paths.Cache, "staged") - if err := sb.rewriteAsPadded(paths.Unsealed, staged); err != nil { + /*if err := sb.rewriteAsPadded(paths.Unsealed, staged); err != nil { return nil, xerrors.Errorf("rewriting sector as padded: %w", err) - } - defer func() { + }*/ + /*defer func() { if err := os.Remove(staged); err != nil { log.Warnf("Removing staged sector file(%v): %s", sector, err) } }() - + */ // TODO: context cancellation respect p1o, err := ffi.SealPreCommitPhase1( sb.sealProofType, paths.Cache, - staged, + paths.Unsealed, paths.Sealed, sector.Number, sector.Miner, @@ -383,6 +400,7 @@ func (sb *Sealer) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke return p1o, nil } +/* func (sb *Sealer) rewriteAsPadded(unsealed string, staged string) error { maxPieceSize := abi.PaddedPieceSize(sb.ssize).Unpadded() @@ -427,7 +445,7 @@ func (sb *Sealer) rewriteAsPadded(unsealed string, staged string) error { } return werr() -} +}*/ func (sb *Sealer) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.PreCommit1Out) (storage.SectorCids, error) { paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTSealed|stores.FTCache, 0, true) diff --git a/ffiwrapper/unseal_ranges.go b/ffiwrapper/unseal_ranges.go index 522b58138..0bc7b52df 100644 --- a/ffiwrapper/unseal_ranges.go +++ b/ffiwrapper/unseal_ranges.go @@ -16,7 +16,7 @@ const mergeGaps = 32 << 20 // TODO const expandRuns = 16 << 20 // unseal more than requested for future requests func computeUnsealRanges(unsealed rlepluslazy.RunIterator, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (rlepluslazy.RunIterator, error) { - todo := pieceRun(offset, size) + todo := pieceRun(offset.Padded(), size.Padded()) todo, err := rlepluslazy.Subtract(todo, unsealed) if err != nil { return nil, xerrors.Errorf("compute todo-unsealed: %w", err) diff --git a/fr32/fr32.go b/fr32/fr32.go index 20d158b3d..ab47311d6 100644 --- a/fr32/fr32.go +++ b/fr32/fr32.go @@ -55,11 +55,6 @@ func Pad(in, out []byte) { } func pad(in, out []byte) { - if len(out) > int(mtTresh) { - mt(in, out, len(out), Pad) - return - } - chunks := len(out) / 128 for chunk := 0; chunk < chunks; chunk++ { inOff := chunk * 127 diff --git a/fr32/readers.go b/fr32/readers.go index b7fdc3843..4e056b0e7 100644 --- a/fr32/readers.go +++ b/fr32/readers.go @@ -59,6 +59,39 @@ func (r *padReader) Read(out []byte) (int, error) { return int(todo.Padded()), err } +func NewPadWriter(dst io.Writer, sz abi.UnpaddedPieceSize) (io.Writer, error) { + if err := sz.Validate(); err != nil { + return nil, xerrors.Errorf("bad piece size: %w", err) + } + + buf := make([]byte, mtTresh*mtChunkCount(sz.Padded())) + + // TODO: Real writer + r, w := io.Pipe() + + pr, err := NewPadReader(r, sz) + if err != nil { + return nil, err + } + + go func() { + for { + n, err := pr.Read(buf) + if err != nil && err != io.EOF { + r.CloseWithError(err) + return + } + + if _, err := dst.Write(buf[:n]); err != nil { + r.CloseWithError(err) + return + } + } + }() + + return w, err +} + type unpadReader struct { src io.Reader @@ -86,7 +119,9 @@ func (r *unpadReader) Read(out []byte) (int, error) { return 0, io.EOF } - outTwoPow := 1 << (63 - bits.LeadingZeros64(uint64(len(out)))) + chunks := len(out) / 127 + + outTwoPow := 1 << (63 - bits.LeadingZeros64(uint64(chunks*128))) if err := abi.PaddedPieceSize(outTwoPow).Validate(); err != nil { return 0, xerrors.Errorf("output must be of valid padded piece size: %w", err) diff --git a/storiface/ffi.go b/storiface/ffi.go index 6821f9b35..6e16018f0 100644 --- a/storiface/ffi.go +++ b/storiface/ffi.go @@ -1,7 +1,17 @@ package storiface -import "errors" +import ( + "errors" + + "github.com/filecoin-project/specs-actors/actors/abi" +) var ErrSectorNotFound = errors.New("sector not found") type UnpaddedByteIndex uint64 + +func (i UnpaddedByteIndex) Padded() PaddedByteIndex { + return PaddedByteIndex(abi.UnpaddedPieceSize(i).Padded()) +} + +type PaddedByteIndex uint64 From 3b698db1277f58a7d8a5875e4c4df718794b1bc7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 29 May 2020 17:21:10 +0200 Subject: [PATCH 100/199] fr32: real PadWriter --- ffiwrapper/sealer_cgo.go | 19 +++++-- ffiwrapper/sealer_test.go | 8 +-- fr32/fr32.go | 8 +-- fr32/readers.go | 104 +++++++++++++++++++++++++------------- fr32/utils.go | 31 ++++++++++++ 5 files changed, 124 insertions(+), 46 deletions(-) create mode 100644 fr32/utils.go diff --git a/ffiwrapper/sealer_cgo.go b/ffiwrapper/sealer_cgo.go index aace82c44..d7c03ff58 100644 --- a/ffiwrapper/sealer_cgo.go +++ b/ffiwrapper/sealer_cgo.go @@ -3,11 +3,13 @@ package ffiwrapper import ( + "bufio" "context" "io" "io/ioutil" "math/bits" "os" + "runtime" "syscall" "github.com/ipfs/go-cid" @@ -105,12 +107,12 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie return abi.PieceInfo{}, xerrors.Errorf("getting partial file writer: %w", err) } - w, err = fr32.NewPadWriter(w, pieceSize) + pw, err := fr32.NewPadWriter(w) if err != nil { return abi.PieceInfo{}, xerrors.Errorf("creating padded reader: %w", err) } - pr := io.TeeReader(io.LimitReader(file, int64(pieceSize)), w) + pr := io.TeeReader(io.LimitReader(file, int64(pieceSize)), pw) prf, werr, err := ToReadableFile(pr, int64(pieceSize)) if err != nil { return abi.PieceInfo{}, xerrors.Errorf("getting tee reader pipe: %w", err) @@ -121,6 +123,10 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie return abi.PieceInfo{}, xerrors.Errorf("generating piece commitment: %w", err) } + if err := pw.Close(); err != nil { + return abi.PieceInfo{}, xerrors.Errorf("closing padded writer: %w", err) + } + if err := stagedFile.MarkAllocated(storiface.UnpaddedByteIndex(offset).Padded(), pieceSize.Padded()); err != nil { return abi.PieceInfo{}, xerrors.Errorf("marking data range as allocated: %w", err) } @@ -253,7 +259,14 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s return } - _, perr = io.CopyN(out, padreader, int64(size)) + bsize := uint64(size.Padded()) + if bsize > uint64(runtime.NumCPU())*fr32.MTTresh { + bsize = uint64(runtime.NumCPU()) * fr32.MTTresh + } + + padreader = bufio.NewReaderSize(padreader, int(bsize)) + + _, perr = io.CopyN(out, padreader, int64(size.Padded())) }() } // diff --git a/ffiwrapper/sealer_test.go b/ffiwrapper/sealer_test.go index 9af563dc3..fdc7db5c3 100644 --- a/ffiwrapper/sealer_test.go +++ b/ffiwrapper/sealer_test.go @@ -14,6 +14,7 @@ import ( "time" logging "github.com/ipfs/go-log" + "github.com/stretchr/testify/require" "golang.org/x/xerrors" ffi "github.com/filecoin-project/filecoin-ffi" @@ -29,8 +30,9 @@ func init() { logging.SetLogLevel("*", "DEBUG") //nolint: errcheck } -var sectorSize = abi.SectorSize(2048) var sealProofType = abi.RegisteredProof_StackedDRG2KiBSeal +var sectorSize, _ = sealProofType.SectorSize() + var sealRand = abi.SealRandomness{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2} type seal struct { @@ -139,9 +141,7 @@ func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si abi.Sec } expect, _ = ioutil.ReadAll(data(si.Number, 1016)) - if !bytes.Equal(b.Bytes(), expect) { - t.Fatal("read wrong bytes") - } + require.Equal(t, expect, b.Bytes()) b.Reset() err = sb.ReadPiece(context.TODO(), &b, si, 0, 2032) diff --git a/fr32/fr32.go b/fr32/fr32.go index ab47311d6..08ecb767c 100644 --- a/fr32/fr32.go +++ b/fr32/fr32.go @@ -8,10 +8,10 @@ import ( "github.com/filecoin-project/specs-actors/actors/abi" ) -var mtTresh = uint64(32 << 20) +var MTTresh = uint64(32 << 20) func mtChunkCount(usz abi.PaddedPieceSize) uint64 { - threads := (uint64(usz)) / mtTresh + threads := (uint64(usz)) / MTTresh if threads > uint64(runtime.NumCPU()) { threads = 1 << (32 - bits.LeadingZeros32(uint32(runtime.NumCPU()))) } @@ -46,7 +46,7 @@ func mt(in, out []byte, padLen int, op func(unpadded, padded []byte)) { // Assumes len(in)%127==0 and len(out)%128==0 func Pad(in, out []byte) { - if len(out) > int(mtTresh) { + if len(out) > int(MTTresh) { mt(in, out, len(out), pad) return } @@ -96,7 +96,7 @@ func pad(in, out []byte) { // Assumes len(in)%128==0 and len(out)%127==0 func Unpad(in []byte, out []byte) { - if len(in) > int(mtTresh) { + if len(in) > int(MTTresh) { mt(out, in, len(in), unpad) return } diff --git a/fr32/readers.go b/fr32/readers.go index 4e056b0e7..f974f2cd1 100644 --- a/fr32/readers.go +++ b/fr32/readers.go @@ -21,7 +21,7 @@ func NewPadReader(src io.Reader, sz abi.UnpaddedPieceSize) (io.Reader, error) { return nil, xerrors.Errorf("bad piece size: %w", err) } - buf := make([]byte, mtTresh*mtChunkCount(sz.Padded())) + buf := make([]byte, MTTresh*mtChunkCount(sz.Padded())) return &padReader{ src: src, @@ -59,39 +59,6 @@ func (r *padReader) Read(out []byte) (int, error) { return int(todo.Padded()), err } -func NewPadWriter(dst io.Writer, sz abi.UnpaddedPieceSize) (io.Writer, error) { - if err := sz.Validate(); err != nil { - return nil, xerrors.Errorf("bad piece size: %w", err) - } - - buf := make([]byte, mtTresh*mtChunkCount(sz.Padded())) - - // TODO: Real writer - r, w := io.Pipe() - - pr, err := NewPadReader(r, sz) - if err != nil { - return nil, err - } - - go func() { - for { - n, err := pr.Read(buf) - if err != nil && err != io.EOF { - r.CloseWithError(err) - return - } - - if _, err := dst.Write(buf[:n]); err != nil { - r.CloseWithError(err) - return - } - } - }() - - return w, err -} - type unpadReader struct { src io.Reader @@ -104,7 +71,7 @@ func NewUnpadReader(src io.Reader, sz abi.PaddedPieceSize) (io.Reader, error) { return nil, xerrors.Errorf("bad piece size: %w", err) } - buf := make([]byte, mtTresh*mtChunkCount(sz)) + buf := make([]byte, MTTresh*mtChunkCount(sz)) return &unpadReader{ src: src, @@ -147,3 +114,70 @@ func (r *unpadReader) Read(out []byte) (int, error) { return int(todo.Unpadded()), err } + +type padWriter struct { + dst io.Writer + + stash []byte + work []byte +} + +func NewPadWriter(dst io.Writer) (io.WriteCloser, error) { + return &padWriter{ + dst: dst, + }, nil +} + +func (w *padWriter) Write(p []byte) (int, error) { + in := p + + if len(p)+len(w.stash) < 127 { + w.stash = append(w.stash, p...) + return len(p), nil + } + + if len(w.stash) != 0 { + in = append(w.stash, in...) + } + + for { + pieces := subPieces(abi.UnpaddedPieceSize(len(in))) + biggest := pieces[len(pieces)-1] + + if abi.PaddedPieceSize(cap(w.work)) < biggest.Padded() { + w.work = make([]byte, 0, biggest.Padded()) + } + + Pad(in[:int(biggest)], w.work[:int(biggest.Padded())]) + + n, err := w.dst.Write(w.work[:int(biggest.Padded())]) + if err != nil { + return int(abi.PaddedPieceSize(n).Unpadded()), err + } + + in = in[biggest:] + + if len(in) < 127 { + if cap(w.stash) < len(in) { + w.stash = make([]byte, 0, len(in)) + } + w.stash = w.stash[:len(in)] + copy(w.stash, in) + + return len(p), nil + } + } +} + +func (w *padWriter) Close() error { + if len(w.stash) > 0 { + return xerrors.Errorf("still have %d unprocessed bytes", len(w.stash)) + } + + // allow gc + w.stash = nil + w.work = nil + w.dst = nil + + return nil +} diff --git a/fr32/utils.go b/fr32/utils.go new file mode 100644 index 000000000..9f4093c40 --- /dev/null +++ b/fr32/utils.go @@ -0,0 +1,31 @@ +package fr32 + +import ( + "math/bits" + + "github.com/filecoin-project/specs-actors/actors/abi" +) + +func subPieces(in abi.UnpaddedPieceSize) []abi.UnpaddedPieceSize { + // Convert to in-sector bytes for easier math: + // + // (we convert to sector bytes as they are nice round binary numbers) + + w := uint64(in.Padded()) + + out := make([]abi.UnpaddedPieceSize, bits.OnesCount64(w)) + for i := range out { + // Extract the next lowest non-zero bit + next := bits.TrailingZeros64(w) + psize := uint64(1) << next + // e.g: if the number is 0b010100, psize will be 0b000100 + + // set that bit to 0 by XORing it, so the next iteration looks at the + // next bit + w ^= psize + + // Add the piece size to the list of pieces we need to create + out[i] = abi.PaddedPieceSize(psize).Unpadded() + } + return out +} From 2184f21dd25d4f11beb35a94766e232dfcd63142 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 29 May 2020 18:57:44 +0200 Subject: [PATCH 101/199] make unseal actually work in lotus --- localworker.go | 6 +++++- manager.go | 6 ++++-- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/localworker.go b/localworker.go index 694e87276..31d357bb0 100644 --- a/localworker.go +++ b/localworker.go @@ -193,7 +193,11 @@ func (l *LocalWorker) UnsealPiece(ctx context.Context, sector abi.SectorID, inde return xerrors.Errorf("unsealing sector: %w", err) } - if err := l.storage.RemoveCopies(ctx, sector, stores.FTSealed|stores.FTCache); err != nil { + if err := l.storage.RemoveCopies(ctx, sector, stores.FTSealed); err != nil { + return xerrors.Errorf("removing source data: %w", err) + } + + if err := l.storage.RemoveCopies(ctx, sector, stores.FTCache); err != nil { return xerrors.Errorf("removing source data: %w", err) } diff --git a/manager.go b/manager.go index daa522c92..07484e041 100644 --- a/manager.go +++ b/manager.go @@ -213,8 +213,10 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect return xerrors.Errorf("copy sealed/cache sector data: %w", err) } - if err := worker.Fetch(ctx, sector, stores.FTUnsealed, true, stores.AcquireMove); err != nil { - return xerrors.Errorf("copy unsealed sector data: %w", err) + if len(best) > 0 { + if err := worker.Fetch(ctx, sector, stores.FTUnsealed, true, stores.AcquireMove); err != nil { + return xerrors.Errorf("copy unsealed sector data: %w", err) + } } return nil } From 8b9dbc22e527cd97b9880f1b5f4447e43203ff3d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 29 May 2020 19:06:44 +0200 Subject: [PATCH 102/199] fr32: Fix tests --- fr32/fr32_ffi_cmp_test.go | 7 ++++--- fr32/fr32_test.go | 44 ++++++++++++++++++++------------------- fr32/readers_test.go | 12 ++++++----- 3 files changed, 34 insertions(+), 29 deletions(-) diff --git a/fr32/fr32_ffi_cmp_test.go b/fr32/fr32_ffi_cmp_test.go index ece13051d..c2ae04a96 100644 --- a/fr32/fr32_ffi_cmp_test.go +++ b/fr32/fr32_ffi_cmp_test.go @@ -1,7 +1,8 @@ -package fr32 +package fr32_test import ( "bytes" + "github.com/filecoin-project/sector-storage/fr32" "io" "io/ioutil" "os" @@ -56,10 +57,10 @@ func TestWriteTwoPcs(t *testing.T) { } outBytes := make([]byte, int(paddedSize)*n) - Pad(rawBytes, outBytes) + fr32.Pad(rawBytes, outBytes) require.Equal(t, ffiBytes, outBytes) unpadBytes := make([]byte, int(paddedSize.Unpadded())*n) - Unpad(ffiBytes, unpadBytes) + fr32.Unpad(ffiBytes, unpadBytes) require.Equal(t, rawBytes, unpadBytes) } diff --git a/fr32/fr32_test.go b/fr32/fr32_test.go index df500035d..a41c9f7ab 100644 --- a/fr32/fr32_test.go +++ b/fr32/fr32_test.go @@ -1,4 +1,4 @@ -package fr32 +package fr32_test import ( "bytes" @@ -9,9 +9,11 @@ import ( "testing" ffi "github.com/filecoin-project/filecoin-ffi" - "github.com/filecoin-project/sector-storage/ffiwrapper" "github.com/filecoin-project/specs-actors/actors/abi" "github.com/stretchr/testify/require" + + "github.com/filecoin-project/sector-storage/ffiwrapper" + "github.com/filecoin-project/sector-storage/fr32" ) func padFFI(buf []byte) []byte { @@ -52,7 +54,7 @@ func TestPadChunkFFI(t *testing.T) { var buf [128]byte copy(buf[:], bytes.Repeat([]byte{b}, 127)) - Pad(buf[:], buf[:]) + fr32.Pad(buf[:], buf[:]) expect := padFFI(bytes.Repeat([]byte{b}, 127)) @@ -74,7 +76,7 @@ func TestPadChunkRandEqFFI(t *testing.T) { var buf [128]byte - Pad(input[:], buf[:]) + fr32.Pad(input[:], buf[:]) expect := padFFI(input[:]) @@ -88,10 +90,10 @@ func TestRoundtrip(t *testing.T) { var buf [128]byte input := bytes.Repeat([]byte{0x01}, 127) - Pad(input, buf[:]) + fr32.Pad(input, buf[:]) var out [127]byte - Unpad(buf[:], out[:]) + fr32.Unpad(buf[:], out[:]) require.Equal(t, input, out[:]) } @@ -112,10 +114,10 @@ func TestRoundtripChunkRand(t *testing.T) { var buf [128]byte copy(buf[:], input[:]) - Pad(buf[:], buf[:]) + fr32.Pad(buf[:], buf[:]) var out [127]byte - Unpad(buf[:], out[:]) + fr32.Unpad(buf[:], out[:]) require.Equal(t, input[:], out[:]) } @@ -129,10 +131,10 @@ func TestRoundtrip16MRand(t *testing.T) { buf := make([]byte, 16<<20) - Pad(input, buf) + fr32.Pad(input, buf) out := make([]byte, up) - Unpad(buf, out) + fr32.Unpad(buf, out) require.Equal(t, input, out) @@ -147,7 +149,7 @@ func BenchmarkPadChunk(b *testing.B) { b.SetBytes(127) for i := 0; i < b.N; i++ { - Pad(in, buf[:]) + fr32.Pad(in, buf[:]) } } @@ -159,8 +161,8 @@ func BenchmarkChunkRoundtrip(b *testing.B) { b.SetBytes(127) for i := 0; i < b.N; i++ { - Pad(buf[:], buf[:]) - Unpad(buf[:], out[:]) + fr32.Pad(buf[:], buf[:]) + fr32.Unpad(buf[:], out[:]) } } @@ -168,7 +170,7 @@ func BenchmarkUnpadChunk(b *testing.B) { var buf [128]byte copy(buf[:], bytes.Repeat([]byte{0xff}, 127)) - Pad(buf[:], buf[:]) + fr32.Pad(buf[:], buf[:]) var out [127]byte b.SetBytes(127) @@ -177,7 +179,7 @@ func BenchmarkUnpadChunk(b *testing.B) { bs := buf[:] for i := 0; i < b.N; i++ { - Unpad(bs, out[:]) + fr32.Unpad(bs, out[:]) } } @@ -186,7 +188,7 @@ func BenchmarkUnpad16MChunk(b *testing.B) { var buf [16 << 20]byte - Pad(bytes.Repeat([]byte{0xff}, int(up)), buf[:]) + fr32.Pad(bytes.Repeat([]byte{0xff}, int(up)), buf[:]) var out [16 << 20]byte b.SetBytes(16 << 20) @@ -194,7 +196,7 @@ func BenchmarkUnpad16MChunk(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - Unpad(buf[:], out[:]) + fr32.Unpad(buf[:], out[:]) } } @@ -210,7 +212,7 @@ func BenchmarkPad16MChunk(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - Pad(in, buf[:]) + fr32.Pad(in, buf[:]) } } @@ -226,7 +228,7 @@ func BenchmarkPad1GChunk(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - Pad(in, buf[:]) + fr32.Pad(in, buf[:]) } } @@ -235,7 +237,7 @@ func BenchmarkUnpad1GChunk(b *testing.B) { var buf [1 << 30]byte - Pad(bytes.Repeat([]byte{0xff}, int(up)), buf[:]) + fr32.Pad(bytes.Repeat([]byte{0xff}, int(up)), buf[:]) var out [1 << 30]byte b.SetBytes(1 << 30) @@ -243,6 +245,6 @@ func BenchmarkUnpad1GChunk(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - Unpad(buf[:], out[:]) + fr32.Unpad(buf[:], out[:]) } } diff --git a/fr32/readers_test.go b/fr32/readers_test.go index b987e8287..83ef14353 100644 --- a/fr32/readers_test.go +++ b/fr32/readers_test.go @@ -1,4 +1,4 @@ -package fr32 +package fr32_test import ( "bytes" @@ -8,6 +8,8 @@ import ( "github.com/stretchr/testify/require" "github.com/filecoin-project/specs-actors/actors/abi" + + "github.com/filecoin-project/sector-storage/fr32" ) func TestPadReader(t *testing.T) { @@ -15,7 +17,7 @@ func TestPadReader(t *testing.T) { raw := bytes.Repeat([]byte{0x55}, int(ps)) - r, err := NewPadReader(bytes.NewReader(raw), ps) + r, err := fr32.NewPadReader(bytes.NewReader(raw), ps) if err != nil { t.Fatal(err) } @@ -26,7 +28,7 @@ func TestPadReader(t *testing.T) { } padOut := make([]byte, ps.Padded()) - Pad(raw, padOut) + fr32.Pad(raw, padOut) require.Equal(t, padOut, readerPadded) } @@ -37,9 +39,9 @@ func TestUnpadReader(t *testing.T) { raw := bytes.Repeat([]byte{0x77}, int(ps)) padOut := make([]byte, ps.Padded()) - Pad(raw, padOut) + fr32.Pad(raw, padOut) - r, err := NewUnpadReader(bytes.NewReader(padOut), ps.Padded()) + r, err := fr32.NewUnpadReader(bytes.NewReader(padOut), ps.Padded()) if err != nil { t.Fatal(err) } From 9e7eb3feefdb8f402c0ebd2d5138e76eb68aa342 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 29 May 2020 19:40:49 +0200 Subject: [PATCH 103/199] Remove dead commented code --- ffiwrapper/sealer_cgo.go | 58 ---------------------------------------- 1 file changed, 58 deletions(-) diff --git a/ffiwrapper/sealer_cgo.go b/ffiwrapper/sealer_cgo.go index d7c03ff58..ce06bc847 100644 --- a/ffiwrapper/sealer_cgo.go +++ b/ffiwrapper/sealer_cgo.go @@ -385,17 +385,6 @@ func (sb *Sealer) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke return nil, xerrors.Errorf("aggregated piece sizes don't match sector size: %d != %d (%d)", sum, ussize, int64(ussize-sum)) } - //staged := filepath.Join(paths.Cache, "staged") - - /*if err := sb.rewriteAsPadded(paths.Unsealed, staged); err != nil { - return nil, xerrors.Errorf("rewriting sector as padded: %w", err) - }*/ - /*defer func() { - if err := os.Remove(staged); err != nil { - log.Warnf("Removing staged sector file(%v): %s", sector, err) - } - }() - */ // TODO: context cancellation respect p1o, err := ffi.SealPreCommitPhase1( sb.sealProofType, @@ -413,53 +402,6 @@ func (sb *Sealer) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke return p1o, nil } -/* -func (sb *Sealer) rewriteAsPadded(unsealed string, staged string) error { - maxPieceSize := abi.PaddedPieceSize(sb.ssize).Unpadded() - - pf, err := openPartialFile(maxPieceSize, unsealed) - if err != nil { - return xerrors.Errorf("opening unsealed file: %w", err) - } - - upr, err := pf.Reader(0, maxPieceSize) - if err != nil { - pf.Close() - return err - } - - st, err := os.OpenFile(staged, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - pf.Close() - return xerrors.Errorf("openning staged file: %w", err) - } - - // OPTIMIZATION: upr is a file, so it could be passed straight to - // WriteWithAlignment IF it wouldn't care about the trailer - lupr, werr, err := ToReadableFile(io.LimitReader(upr, int64(maxPieceSize)), int64(maxPieceSize)) - if err != nil { - return err - } - - _, _, _, err = ffi.WriteWithAlignment(sb.sealProofType, lupr, maxPieceSize, st, nil) - if err != nil { - pf.Close() - st.Close() - return xerrors.Errorf("write with alignment: %w", err) - } - - if err := st.Close(); err != nil { - pf.Close() - return err - } - - if err := pf.Close(); err != nil { - return err - } - - return werr() -}*/ - func (sb *Sealer) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.PreCommit1Out) (storage.SectorCids, error) { paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTSealed|stores.FTCache, 0, true) if err != nil { From d9d3ccf6c6a178564639e5eee00659e7757c1500 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Wed, 3 Jun 2020 21:21:27 +0200 Subject: [PATCH 104/199] index: Implement basic global sector locking system --- manager.go | 7 +- stores/filetype.go | 10 +++ stores/index.go | 11 ++- stores/index_locks.go | 127 ++++++++++++++++++++++++++++ stores/index_locks_test.go | 168 +++++++++++++++++++++++++++++++++++++ stores/index_locks_util.go | 49 +++++++++++ 6 files changed, 366 insertions(+), 6 deletions(-) create mode 100644 stores/index_locks.go create mode 100644 stores/index_locks_test.go create mode 100644 stores/index_locks_util.go diff --git a/manager.go b/manager.go index 07484e041..8302427c7 100644 --- a/manager.go +++ b/manager.go @@ -311,15 +311,14 @@ func (m *Manager) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase } func (m *Manager) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (out storage.Commit1Out, err error) { + // NOTE: We set allowFetch to false in so that we always execute on a worker + // with direct access to the data. We want to do that because this step is + // generally very cheap / fast, and transferring data is not worth the effort selector, err := newExistingSelector(ctx, m.index, sector, stores.FTCache|stores.FTSealed, false) if err != nil { return storage.Commit1Out{}, xerrors.Errorf("creating path selector: %w", err) } - // TODO: Try very hard to execute on worker with access to the sectors - // (except, don't.. for now at least - we are using this step to bring data - // into 'provable' storage. Optimally we'd do that in commit2, in parallel - // with snark compute) err = m.sched.Schedule(ctx, sector, sealtasks.TTCommit1, selector, schedFetch(sector, stores.FTCache|stores.FTSealed, true, stores.AcquireMove), func(ctx context.Context, w Worker) error { p, err := w.SealCommit1(ctx, sector, ticket, seed, pieces, cids) if err != nil { diff --git a/stores/filetype.go b/stores/filetype.go index 1810054d8..e3cc4042c 100644 --- a/stores/filetype.go +++ b/stores/filetype.go @@ -11,6 +11,8 @@ const ( FTUnsealed SectorFileType = 1 << iota FTSealed FTCache + + FileTypes = iota ) const ( @@ -71,6 +73,14 @@ func (t SectorFileType) SealSpaceUse(spt abi.RegisteredProof) (uint64, error) { return need, nil } +func (t SectorFileType) All() (out [FileTypes]bool) { + for i := range out { + out[i] = t & (1 << i) > 0 + } + + return out +} + type SectorPaths struct { Id abi.SectorID diff --git a/stores/index.go b/stores/index.go index e1e35875d..c6856ef8e 100644 --- a/stores/index.go +++ b/stores/index.go @@ -59,6 +59,9 @@ type SectorIndex interface { // part of storage-miner api StorageFindSector(ctx context.Context, sector abi.SectorID, ft SectorFileType, allowFetch bool) ([]SectorStorageInfo, error) StorageBestAlloc(ctx context.Context, allocate SectorFileType, spt abi.RegisteredProof, pathType PathType) ([]StorageInfo, error) + + // atomically acquire locks on all sector file types. close ctx to unlock + StorageLock(ctx context.Context, sector abi.SectorID, read SectorFileType, write SectorFileType) error } type Decl struct { @@ -80,6 +83,7 @@ type storageEntry struct { } type Index struct { + *indexLocks lk sync.RWMutex sectors map[Decl][]*declMeta @@ -88,8 +92,11 @@ type Index struct { func NewIndex() *Index { return &Index{ - sectors: map[Decl][]*declMeta{}, - stores: map[ID]*storageEntry{}, + indexLocks: &indexLocks{ + locks: map[abi.SectorID]*sectorLock{}, + }, + sectors: map[Decl][]*declMeta{}, + stores: map[ID]*storageEntry{}, } } diff --git a/stores/index_locks.go b/stores/index_locks.go new file mode 100644 index 000000000..f7770d5e5 --- /dev/null +++ b/stores/index_locks.go @@ -0,0 +1,127 @@ +package stores + +import ( + "context" + "sync" + + "golang.org/x/xerrors" + + "github.com/filecoin-project/specs-actors/actors/abi" +) + +type sectorLock struct { + lk sync.Mutex + notif *ctxCond + + r [FileTypes]uint + w SectorFileType + + refs uint // access with indexLocks.lk +} + +func (l *sectorLock) canLock(read SectorFileType, write SectorFileType) bool { + for i, b := range write.All() { + if b && l.r[i] > 0 { + return false + } + } + + return l.w & (read | write) == 0 +} + +func (l *sectorLock) tryLock(read SectorFileType, write SectorFileType) bool { + if !l.canLock(read, write) { + return false + } + + for i, set := range read.All() { + if set { + l.r[i]++ + } + } + + l.w |= write + + return true +} + +func (l *sectorLock) lock(ctx context.Context, read SectorFileType, write SectorFileType) error { + l.lk.Lock() + defer l.lk.Unlock() + + for { + if l.tryLock(read, write) { + return nil + } + + if err := l.notif.Wait(ctx); err != nil { + return err + } + } +} + +func (l *sectorLock) unlock(read SectorFileType, write SectorFileType) { + l.lk.Lock() + defer l.lk.Unlock() + + for i, set := range read.All() { + if set { + l.r[i]-- + } + } + + l.w &= ^write + + l.notif.Broadcast() +} + +type indexLocks struct { + lk sync.Mutex + + locks map[abi.SectorID]*sectorLock +} + +func (i *indexLocks) StorageLock(ctx context.Context, sector abi.SectorID, read SectorFileType, write SectorFileType) error { + if read | write == 0 { + return nil + } + + if read | write > (1 << FileTypes) - 1 { + return xerrors.Errorf("unknown file types specified") + } + + i.lk.Lock() + slk, ok := i.locks[sector] + if !ok { + slk = §orLock{} + slk.notif = newCtxCond(&slk.lk) + i.locks[sector] = slk + } + + slk.refs++ + + i.lk.Unlock() + + if err := slk.lock(ctx, read, write); err != nil { + return err + } + + go func() { + // TODO: we can avoid this goroutine with a bit of creativity and reflect + + <-ctx.Done() + i.lk.Lock() + + slk.unlock(read, write) + slk.refs-- + + if slk.refs == 0 { + delete(i.locks, sector) + } + + i.lk.Unlock() + }() + + return nil +} + diff --git a/stores/index_locks_test.go b/stores/index_locks_test.go new file mode 100644 index 000000000..aeeddd137 --- /dev/null +++ b/stores/index_locks_test.go @@ -0,0 +1,168 @@ +package stores + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/specs-actors/actors/abi" +) + +var aSector = abi.SectorID{ + Miner: 2, + Number: 9000, +} + +func TestCanLock(t *testing.T) { + lk := sectorLock{ + r: [FileTypes]uint{}, + w: FTNone, + } + + require.Equal(t, true, lk.canLock(FTUnsealed, FTNone)) + require.Equal(t, true, lk.canLock(FTNone, FTUnsealed)) + + ftAll := FTUnsealed | FTSealed | FTCache + + require.Equal(t, true, lk.canLock(ftAll, FTNone)) + require.Equal(t, true, lk.canLock(FTNone, ftAll)) + + lk.r[0] = 1 // unsealed read taken + + require.Equal(t, true, lk.canLock(FTUnsealed, FTNone)) + require.Equal(t, false, lk.canLock(FTNone, FTUnsealed)) + + require.Equal(t, true, lk.canLock(ftAll, FTNone)) + require.Equal(t, false, lk.canLock(FTNone, ftAll)) + + require.Equal(t, true, lk.canLock(FTNone, FTSealed | FTCache)) + require.Equal(t, true, lk.canLock(FTUnsealed, FTSealed | FTCache)) + + lk.r[0] = 0 + + lk.w = FTSealed + + require.Equal(t, true, lk.canLock(FTUnsealed, FTNone)) + require.Equal(t, true, lk.canLock(FTNone, FTUnsealed)) + + require.Equal(t, false, lk.canLock(FTSealed, FTNone)) + require.Equal(t, false, lk.canLock(FTNone, FTSealed)) + + require.Equal(t, false, lk.canLock(ftAll, FTNone)) + require.Equal(t, false, lk.canLock(FTNone, ftAll)) +} + +func TestIndexLocksSeq(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + + ilk := &indexLocks{ + locks: map[abi.SectorID]*sectorLock{}, + } + + require.NoError(t, ilk.StorageLock(ctx, aSector, FTNone, FTUnsealed)) + cancel() + + ctx, cancel = context.WithTimeout(context.Background(), time.Second) + require.NoError(t, ilk.StorageLock(ctx, aSector, FTNone, FTUnsealed)) + cancel() + + require.NoError(t, ilk.StorageLock(ctx, aSector, FTNone, FTUnsealed)) + cancel() + + ctx, cancel = context.WithTimeout(context.Background(), time.Second) + require.NoError(t, ilk.StorageLock(ctx, aSector, FTUnsealed, FTNone)) + cancel() + + require.NoError(t, ilk.StorageLock(ctx, aSector, FTNone, FTUnsealed)) + cancel() + + ctx, cancel = context.WithTimeout(context.Background(), time.Second) + require.NoError(t, ilk.StorageLock(ctx, aSector, FTNone, FTUnsealed)) + cancel() +} + +func TestIndexLocksBlockOn(t *testing.T) { + test := func(r1 SectorFileType, w1 SectorFileType, r2 SectorFileType, w2 SectorFileType) func(t *testing.T) { + return func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + + ilk := &indexLocks{ + locks: map[abi.SectorID]*sectorLock{}, + } + + require.NoError(t, ilk.StorageLock(ctx, aSector, r1, w1)) + + sch := make(chan struct{}) + go func() { + ctx, cancel := context.WithCancel(context.Background()) + + sch <- struct{}{} + + require.NoError(t, ilk.StorageLock(ctx, aSector, r2, w2)) + cancel() + + sch <- struct{}{} + }() + + <-sch + + select { + case <-sch: + t.Fatal("that shouldn't happen") + case <-time.After(40 * time.Millisecond): + } + + cancel() + + select { + case <-sch: + case <-time.After(time.Second): + t.Fatal("timed out") + } + } + } + + t.Run("readBlocksWrite", test(FTUnsealed, FTNone, FTNone, FTUnsealed)) + t.Run("writeBlocksRead", test(FTNone, FTUnsealed, FTUnsealed, FTNone)) + t.Run("writeBlocksWrite", test(FTNone, FTUnsealed, FTNone, FTUnsealed)) +} + +func TestIndexLocksBlockWonR(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + + ilk := &indexLocks{ + locks: map[abi.SectorID]*sectorLock{}, + } + + require.NoError(t, ilk.StorageLock(ctx, aSector, FTUnsealed, FTNone)) + + sch := make(chan struct{}) + go func() { + ctx, cancel := context.WithCancel(context.Background()) + + sch <- struct{}{} + + require.NoError(t, ilk.StorageLock(ctx, aSector, FTNone, FTUnsealed)) + cancel() + + sch <- struct{}{} + }() + + <-sch + + select { + case <-sch: + t.Fatal("that shouldn't happen") + case <-time.After(40 * time.Millisecond): + } + + cancel() + + select { + case <-sch: + case <-time.After(time.Second): + t.Fatal("timed out") + } +} diff --git a/stores/index_locks_util.go b/stores/index_locks_util.go new file mode 100644 index 000000000..5e4ab6ab2 --- /dev/null +++ b/stores/index_locks_util.go @@ -0,0 +1,49 @@ +package stores + +import ( + "context" + "sync" +) + +// like sync.Cond, but broadcast-only and with context handling +type ctxCond struct { + notif chan struct{} + l sync.Locker + + lk sync.Mutex +} + +func newCtxCond(l sync.Locker) *ctxCond { + return &ctxCond{ + l: l, + } +} + +func (c *ctxCond) Broadcast() { + c.lk.Lock() + if c.notif != nil { + close(c.notif) + c.notif = nil + } + c.lk.Unlock() +} + +func (c *ctxCond) Wait(ctx context.Context) error { + c.lk.Lock() + if c.notif == nil { + c.notif = make(chan struct{}) + } + + wait := c.notif + c.lk.Unlock() + + c.l.Unlock() + defer c.l.Lock() + + select { + case <-wait: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} From a39bc94c58c48fc9ddf48337bf4675009d84509b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Wed, 3 Jun 2020 22:00:34 +0200 Subject: [PATCH 105/199] Integrate index locks into manager --- manager.go | 44 +++++++++++++++++++++++++++++++++++++- roprov.go | 15 ++++++++++--- stores/filetype.go | 2 +- stores/index.go | 4 ++-- stores/index_locks.go | 9 ++++---- stores/index_locks_test.go | 10 +++++---- stores/index_locks_util.go | 2 +- 7 files changed, 69 insertions(+), 17 deletions(-) diff --git a/manager.go b/manager.go index 8302427c7..580bd6fa5 100644 --- a/manager.go +++ b/manager.go @@ -88,7 +88,7 @@ func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg return nil, err } - prover, err := ffiwrapper.New(&readonlyProvider{stor: lstor}, cfg) + prover, err := ffiwrapper.New(&readonlyProvider{stor: lstor, index: si}, cfg) if err != nil { return nil, xerrors.Errorf("creating prover instance: %w", err) } @@ -191,6 +191,13 @@ func schedFetch(sector abi.SectorID, ft stores.SectorFileType, sealing bool, am } func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + if err := m.index.StorageLock(ctx, sector, stores.FTSealed|stores.FTCache, stores.FTUnsealed); err != nil { + return xerrors.Errorf("acquiring sector lock: %w", err) + } + best, err := m.index.StorageFindSector(ctx, sector, stores.FTUnsealed, false) if err != nil { return xerrors.Errorf("read piece: checking for already existing unsealed sector: %w", err) @@ -249,6 +256,13 @@ func (m *Manager) NewSector(ctx context.Context, sector abi.SectorID) error { } func (m *Manager) AddPiece(ctx context.Context, sector abi.SectorID, existingPieces []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + if err := m.index.StorageLock(ctx, sector, stores.FTNone, stores.FTUnsealed); err != nil { + return abi.PieceInfo{}, xerrors.Errorf("acquiring sector lock: %w", err) + } + var selector WorkerSelector var err error if len(existingPieces) == 0 { // new @@ -274,6 +288,13 @@ func (m *Manager) AddPiece(ctx context.Context, sector abi.SectorID, existingPie } func (m *Manager) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + if err := m.index.StorageLock(ctx, sector, stores.FTUnsealed, stores.FTSealed|stores.FTCache); err != nil { + return nil, xerrors.Errorf("acquiring sector lock: %w", err) + } + // TODO: also consider where the unsealed data sits selector, err := newAllocSelector(ctx, m.index, stores.FTCache|stores.FTSealed) @@ -294,6 +315,13 @@ func (m *Manager) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke } func (m *Manager) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.PreCommit1Out) (out storage.SectorCids, err error) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + if err := m.index.StorageLock(ctx, sector, stores.FTSealed, stores.FTCache); err != nil { + return storage.SectorCids{}, xerrors.Errorf("acquiring sector lock: %w", err) + } + selector, err := newExistingSelector(ctx, m.index, sector, stores.FTCache|stores.FTSealed, true) if err != nil { return storage.SectorCids{}, xerrors.Errorf("creating path selector: %w", err) @@ -311,6 +339,13 @@ func (m *Manager) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase } func (m *Manager) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (out storage.Commit1Out, err error) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + if err := m.index.StorageLock(ctx, sector, stores.FTSealed, stores.FTCache); err != nil { + return storage.Commit1Out{}, xerrors.Errorf("acquiring sector lock: %w", err) + } + // NOTE: We set allowFetch to false in so that we always execute on a worker // with direct access to the data. We want to do that because this step is // generally very cheap / fast, and transferring data is not worth the effort @@ -346,6 +381,13 @@ func (m *Manager) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Ou } func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + if err := m.index.StorageLock(ctx, sector, stores.FTSealed, stores.FTUnsealed|stores.FTCache); err != nil { + return xerrors.Errorf("acquiring sector lock: %w", err) + } + selector, err := newExistingSelector(ctx, m.index, sector, stores.FTCache|stores.FTSealed|stores.FTUnsealed, false) if err != nil { return xerrors.Errorf("creating path selector: %w", err) diff --git a/roprov.go b/roprov.go index aba6bb5d9..0e8950478 100644 --- a/roprov.go +++ b/roprov.go @@ -11,8 +11,9 @@ import ( ) type readonlyProvider struct { - stor *stores.Local - spt abi.RegisteredProof + index stores.SectorIndex + stor *stores.Local + spt abi.RegisteredProof } func (l *readonlyProvider) AcquireSector(ctx context.Context, id abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, sealing bool) (stores.SectorPaths, func(), error) { @@ -20,7 +21,15 @@ func (l *readonlyProvider) AcquireSector(ctx context.Context, id abi.SectorID, e return stores.SectorPaths{}, nil, xerrors.New("read-only storage") } + ctx, cancel := context.WithCancel(ctx) + if err := l.index.StorageLock(ctx, id, existing, stores.FTNone); err != nil { + return stores.SectorPaths{}, nil, xerrors.Errorf("acquiring sector lock: %w", err) + } + p, _, done, err := l.stor.AcquireSector(ctx, id, l.spt, existing, allocate, stores.PathType(sealing), stores.AcquireMove) - return p, done, err + return p, func() { + cancel() + done() + }, err } diff --git a/stores/filetype.go b/stores/filetype.go index e3cc4042c..cfb9db38a 100644 --- a/stores/filetype.go +++ b/stores/filetype.go @@ -75,7 +75,7 @@ func (t SectorFileType) SealSpaceUse(spt abi.RegisteredProof) (uint64, error) { func (t SectorFileType) All() (out [FileTypes]bool) { for i := range out { - out[i] = t & (1 << i) > 0 + out[i] = t&(1< 0 } return out diff --git a/stores/index.go b/stores/index.go index c6856ef8e..6ef346367 100644 --- a/stores/index.go +++ b/stores/index.go @@ -95,8 +95,8 @@ func NewIndex() *Index { indexLocks: &indexLocks{ locks: map[abi.SectorID]*sectorLock{}, }, - sectors: map[Decl][]*declMeta{}, - stores: map[ID]*storageEntry{}, + sectors: map[Decl][]*declMeta{}, + stores: map[ID]*storageEntry{}, } } diff --git a/stores/index_locks.go b/stores/index_locks.go index f7770d5e5..0bce92a22 100644 --- a/stores/index_locks.go +++ b/stores/index_locks.go @@ -10,7 +10,7 @@ import ( ) type sectorLock struct { - lk sync.Mutex + lk sync.Mutex notif *ctxCond r [FileTypes]uint @@ -26,7 +26,7 @@ func (l *sectorLock) canLock(read SectorFileType, write SectorFileType) bool { } } - return l.w & (read | write) == 0 + return l.w&(read|write) == 0 } func (l *sectorLock) tryLock(read SectorFileType, write SectorFileType) bool { @@ -82,11 +82,11 @@ type indexLocks struct { } func (i *indexLocks) StorageLock(ctx context.Context, sector abi.SectorID, read SectorFileType, write SectorFileType) error { - if read | write == 0 { + if read|write == 0 { return nil } - if read | write > (1 << FileTypes) - 1 { + if read|write > (1< Date: Wed, 3 Jun 2020 23:44:59 +0200 Subject: [PATCH 106/199] Allow FinalizeSector on all nodes --- localworker.go | 4 ++++ manager.go | 45 +++++++++++++++++++++++++++++++++------------ selector_alloc.go | 6 ++++-- 3 files changed, 41 insertions(+), 14 deletions(-) diff --git a/localworker.go b/localworker.go index 31d357bb0..52a1c7cb0 100644 --- a/localworker.go +++ b/localworker.go @@ -176,6 +176,10 @@ func (l *LocalWorker) FinalizeSector(ctx context.Context, sector abi.SectorID) e return xerrors.Errorf("removing unsealed data: %w", err) } + return nil +} + +func (l *LocalWorker) MoveStorage(ctx context.Context, sector abi.SectorID) error { if err := l.storage.MoveStorage(ctx, sector, l.scfg.SealProofType, stores.FTSealed|stores.FTCache); err != nil { return xerrors.Errorf("moving sealed data to storage: %w", err) } diff --git a/manager.go b/manager.go index 580bd6fa5..3c50c877f 100644 --- a/manager.go +++ b/manager.go @@ -29,6 +29,8 @@ type URLs []string type Worker interface { ffiwrapper.StorageSealer + MoveStorage(ctx context.Context, sector abi.SectorID) error + Fetch(ctx context.Context, s abi.SectorID, ft stores.SectorFileType, sealing bool, am stores.AcquireMode) error UnsealPiece(context.Context, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error ReadPiece(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) error @@ -184,9 +186,9 @@ func schedNop(context.Context, Worker) error { return nil } -func schedFetch(sector abi.SectorID, ft stores.SectorFileType, sealing bool, am stores.AcquireMode) func(context.Context, Worker) error { +func schedFetch(sector abi.SectorID, ft stores.SectorFileType, sealing stores.PathType, am stores.AcquireMode) func(context.Context, Worker) error { return func(ctx context.Context, worker Worker) error { - return worker.Fetch(ctx, sector, ft, sealing, am) + return worker.Fetch(ctx, sector, ft, bool(sealing), am) } } @@ -205,7 +207,7 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect var selector WorkerSelector if len(best) == 0 { // new - selector, err = newAllocSelector(ctx, m.index, stores.FTUnsealed) + selector, err = newAllocSelector(ctx, m.index, stores.FTUnsealed, stores.PathSealing) } else { // append to existing selector, err = newExistingSelector(ctx, m.index, sector, stores.FTUnsealed, false) } @@ -240,7 +242,7 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect return xerrors.Errorf("creating readPiece selector: %w", err) } - err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, schedFetch(sector, stores.FTUnsealed, true, stores.AcquireMove), func(ctx context.Context, w Worker) error { + err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, schedFetch(sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error { return w.ReadPiece(ctx, sink, sector, offset, size) }) if err != nil { @@ -266,7 +268,7 @@ func (m *Manager) AddPiece(ctx context.Context, sector abi.SectorID, existingPie var selector WorkerSelector var err error if len(existingPieces) == 0 { // new - selector, err = newAllocSelector(ctx, m.index, stores.FTUnsealed) + selector, err = newAllocSelector(ctx, m.index, stores.FTUnsealed, stores.PathSealing) } else { // use existing selector, err = newExistingSelector(ctx, m.index, sector, stores.FTUnsealed, false) } @@ -297,12 +299,12 @@ func (m *Manager) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke // TODO: also consider where the unsealed data sits - selector, err := newAllocSelector(ctx, m.index, stores.FTCache|stores.FTSealed) + selector, err := newAllocSelector(ctx, m.index, stores.FTCache|stores.FTSealed, stores.PathSealing) if err != nil { return nil, xerrors.Errorf("creating path selector: %w", err) } - err = m.sched.Schedule(ctx, sector, sealtasks.TTPreCommit1, selector, schedFetch(sector, stores.FTUnsealed, true, stores.AcquireMove), func(ctx context.Context, w Worker) error { + err = m.sched.Schedule(ctx, sector, sealtasks.TTPreCommit1, selector, schedFetch(sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error { p, err := w.SealPreCommit1(ctx, sector, ticket, pieces) if err != nil { return err @@ -327,7 +329,7 @@ func (m *Manager) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase return storage.SectorCids{}, xerrors.Errorf("creating path selector: %w", err) } - err = m.sched.Schedule(ctx, sector, sealtasks.TTPreCommit2, selector, schedFetch(sector, stores.FTCache|stores.FTSealed, true, stores.AcquireMove), func(ctx context.Context, w Worker) error { + err = m.sched.Schedule(ctx, sector, sealtasks.TTPreCommit2, selector, schedFetch(sector, stores.FTCache|stores.FTSealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error { p, err := w.SealPreCommit2(ctx, sector, phase1Out) if err != nil { return err @@ -354,7 +356,7 @@ func (m *Manager) SealCommit1(ctx context.Context, sector abi.SectorID, ticket a return storage.Commit1Out{}, xerrors.Errorf("creating path selector: %w", err) } - err = m.sched.Schedule(ctx, sector, sealtasks.TTCommit1, selector, schedFetch(sector, stores.FTCache|stores.FTSealed, true, stores.AcquireMove), func(ctx context.Context, w Worker) error { + err = m.sched.Schedule(ctx, sector, sealtasks.TTCommit1, selector, schedFetch(sector, stores.FTCache|stores.FTSealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error { p, err := w.SealCommit1(ctx, sector, ticket, seed, pieces, cids) if err != nil { return err @@ -384,7 +386,7 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID) error ctx, cancel := context.WithCancel(ctx) defer cancel() - if err := m.index.StorageLock(ctx, sector, stores.FTSealed, stores.FTUnsealed|stores.FTCache); err != nil { + if err := m.index.StorageLock(ctx, sector, stores.FTNone, stores.FTSealed|stores.FTUnsealed|stores.FTCache); err != nil { return xerrors.Errorf("acquiring sector lock: %w", err) } @@ -393,11 +395,30 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID) error return xerrors.Errorf("creating path selector: %w", err) } - return m.sched.Schedule(ctx, sector, sealtasks.TTFinalize, selector, - schedFetch(sector, stores.FTCache|stores.FTSealed|stores.FTUnsealed, false, stores.AcquireMove), + err = m.sched.Schedule(ctx, sector, sealtasks.TTFinalize, selector, + schedFetch(sector, stores.FTCache|stores.FTSealed|stores.FTUnsealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error { return w.FinalizeSector(ctx, sector) }) + if err != nil { + return err + } + + fetchSel, err := newAllocSelector(ctx, m.index, stores.FTCache|stores.FTSealed, stores.PathStorage) + if err != nil { + return xerrors.Errorf("creating fetchSel: %w", err) + } + + err = m.sched.Schedule(ctx, sector, sealtasks.TTFetch, fetchSel, + schedFetch(sector, stores.FTCache|stores.FTSealed, stores.PathStorage, stores.AcquireMove), + func(ctx context.Context, w Worker) error { + return w.MoveStorage(ctx, sector) + }) + if err != nil { + return xerrors.Errorf("moving sector to storage: %w", err) + } + + return nil } func (m *Manager) StorageLocal(ctx context.Context) (map[stores.ID]string, error) { diff --git a/selector_alloc.go b/selector_alloc.go index 0a7850424..543020ef1 100644 --- a/selector_alloc.go +++ b/selector_alloc.go @@ -14,12 +14,14 @@ import ( type allocSelector struct { index stores.SectorIndex alloc stores.SectorFileType + ptype stores.PathType } -func newAllocSelector(ctx context.Context, index stores.SectorIndex, alloc stores.SectorFileType) (*allocSelector, error) { +func newAllocSelector(ctx context.Context, index stores.SectorIndex, alloc stores.SectorFileType, ptype stores.PathType) (*allocSelector, error) { return &allocSelector{ index: index, alloc: alloc, + ptype: ptype, }, nil } @@ -42,7 +44,7 @@ func (s *allocSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi have[path.ID] = struct{}{} } - best, err := s.index.StorageBestAlloc(ctx, s.alloc, spt, true) + best, err := s.index.StorageBestAlloc(ctx, s.alloc, spt, s.ptype) if err != nil { return false, xerrors.Errorf("finding best alloc storage: %w", err) } From aac3c448a47cbc83b9e6f6c02d01bebc561a7bce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 4 Jun 2020 21:00:16 +0200 Subject: [PATCH 107/199] Drop AcquireSector locks --- faults.go | 5 +++-- localworker.go | 4 +--- roprov.go | 7 ++----- stores/http_handler.go | 5 +++-- stores/interface.go | 2 +- stores/local.go | 17 ++++++++--------- stores/remote.go | 43 ++++++++++++++++-------------------------- 7 files changed, 34 insertions(+), 49 deletions(-) diff --git a/faults.go b/faults.go index 1f5475259..75143d55e 100644 --- a/faults.go +++ b/faults.go @@ -19,13 +19,14 @@ func (m *Manager) CheckProvable(ctx context.Context, spt abi.RegisteredProof, se var bad []abi.SectorID // TODO: More better checks + // TODO: This should live in sector-storage + // TODO: Use proper locking for _, sector := range sectors { err := func() error { - lp, _, done, err := m.localStore.AcquireSector(ctx, sector, spt, stores.FTSealed|stores.FTCache, stores.FTNone, false, stores.AcquireMove) + lp, _, err := m.localStore.AcquireSector(ctx, sector, spt, stores.FTSealed|stores.FTCache, stores.FTNone, false, stores.AcquireMove) if err != nil { return xerrors.Errorf("acquire sector in checkProvable: %w", err) } - defer done() if lp.Sealed == "" || lp.Cache == "" { log.Warnw("CheckProvable Sector FAULT: cache an/or sealed paths not found", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache) diff --git a/localworker.go b/localworker.go index 52a1c7cb0..7e2e9ca26 100644 --- a/localworker.go +++ b/localworker.go @@ -60,7 +60,7 @@ type localWorkerPathProvider struct { } func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, sealing bool) (stores.SectorPaths, func(), error) { - paths, storageIDs, done, err := l.w.storage.AcquireSector(ctx, sector, l.w.scfg.SealProofType, existing, allocate, stores.PathType(sealing), l.op) + paths, storageIDs, err := l.w.storage.AcquireSector(ctx, sector, l.w.scfg.SealProofType, existing, allocate, stores.PathType(sealing), l.op) if err != nil { return stores.SectorPaths{}, nil, err } @@ -68,8 +68,6 @@ func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector abi. log.Debugf("acquired sector %d (e:%d; a:%d): %v", sector, existing, allocate, paths) return paths, func() { - done() - for _, fileType := range pathTypes { if fileType&allocate == 0 { continue diff --git a/roprov.go b/roprov.go index 0e8950478..aa94521a3 100644 --- a/roprov.go +++ b/roprov.go @@ -26,10 +26,7 @@ func (l *readonlyProvider) AcquireSector(ctx context.Context, id abi.SectorID, e return stores.SectorPaths{}, nil, xerrors.Errorf("acquiring sector lock: %w", err) } - p, _, done, err := l.stor.AcquireSector(ctx, id, l.spt, existing, allocate, stores.PathType(sealing), stores.AcquireMove) + p, _, err := l.stor.AcquireSector(ctx, id, l.spt, existing, allocate, stores.PathType(sealing), stores.AcquireMove) - return p, func() { - cancel() - done() - }, err + return p, cancel, err } diff --git a/stores/http_handler.go b/stores/http_handler.go index 7e2330dbd..60f8a41c5 100644 --- a/stores/http_handler.go +++ b/stores/http_handler.go @@ -69,14 +69,15 @@ func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Requ return } + // The caller has a lock on this sector already, no need to get one here + // passing 0 spt because we don't allocate anything - paths, _, done, err := handler.Local.AcquireSector(r.Context(), id, 0, ft, FTNone, false, AcquireMove) + paths, _, err := handler.Local.AcquireSector(r.Context(), id, 0, ft, FTNone, false, AcquireMove) if err != nil { log.Error("%+v", err) w.WriteHeader(500) return } - defer done() path := PathByType(paths, ft) if path == "" { diff --git a/stores/interface.go b/stores/interface.go index 01ac2bffe..c400019aa 100644 --- a/stores/interface.go +++ b/stores/interface.go @@ -24,7 +24,7 @@ const ( ) type Store interface { - AcquireSector(ctx context.Context, s abi.SectorID, spt abi.RegisteredProof, existing SectorFileType, allocate SectorFileType, sealing PathType, op AcquireMode) (paths SectorPaths, stores SectorPaths, done func(), err error) + AcquireSector(ctx context.Context, s abi.SectorID, spt abi.RegisteredProof, existing SectorFileType, allocate SectorFileType, sealing PathType, op AcquireMode) (paths SectorPaths, stores SectorPaths, err error) Remove(ctx context.Context, s abi.SectorID, types SectorFileType, force bool) error // like remove, but doesn't remove the primary sector copy, nor the last diff --git a/stores/local.go b/stores/local.go index 9c0dc4477..bf11a3418 100644 --- a/stores/local.go +++ b/stores/local.go @@ -197,12 +197,13 @@ func (st *Local) reportHealth(ctx context.Context) { } } -func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, spt abi.RegisteredProof, existing SectorFileType, allocate SectorFileType, pathType PathType, op AcquireMode) (SectorPaths, SectorPaths, func(), error) { +func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, spt abi.RegisteredProof, existing SectorFileType, allocate SectorFileType, pathType PathType, op AcquireMode) (SectorPaths, SectorPaths, error) { if existing|allocate != existing^allocate { - return SectorPaths{}, SectorPaths{}, nil, xerrors.New("can't both find and allocate a sector") + return SectorPaths{}, SectorPaths{}, xerrors.New("can't both find and allocate a sector") } st.localLk.RLock() + defer st.localLk.RUnlock() var out SectorPaths var storageIDs SectorPaths @@ -245,7 +246,7 @@ func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, spt abi.Re sis, err := st.index.StorageBestAlloc(ctx, fileType, spt, pathType) if err != nil { st.localLk.RUnlock() - return SectorPaths{}, SectorPaths{}, nil, xerrors.Errorf("finding best storage for allocating : %w", err) + return SectorPaths{}, SectorPaths{}, xerrors.Errorf("finding best storage for allocating : %w", err) } var best string @@ -277,7 +278,7 @@ func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, spt abi.Re if best == "" { st.localLk.RUnlock() - return SectorPaths{}, SectorPaths{}, nil, xerrors.Errorf("couldn't find a suitable path for a sector") + return SectorPaths{}, SectorPaths{}, xerrors.Errorf("couldn't find a suitable path for a sector") } SetPathByType(&out, fileType, best) @@ -285,7 +286,7 @@ func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, spt abi.Re allocate ^= fileType } - return out, storageIDs, st.localLk.RUnlock, nil + return out, storageIDs, nil } func (st *Local) Local(ctx context.Context) ([]StoragePath, error) { @@ -399,17 +400,15 @@ func (st *Local) removeSector(ctx context.Context, sid abi.SectorID, typ SectorF } func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, spt abi.RegisteredProof, types SectorFileType) error { - dest, destIds, sdone, err := st.AcquireSector(ctx, s, spt, FTNone, types, false, AcquireMove) + dest, destIds, err := st.AcquireSector(ctx, s, spt, FTNone, types, false, AcquireMove) if err != nil { return xerrors.Errorf("acquire dest storage: %w", err) } - defer sdone() - src, srcIds, ddone, err := st.AcquireSector(ctx, s, spt, types, FTNone, false, AcquireMove) + src, srcIds, err := st.AcquireSector(ctx, s, spt, types, FTNone, false, AcquireMove) if err != nil { return xerrors.Errorf("acquire src storage: %w", err) } - defer ddone() for _, fileType := range PathTypes { if fileType&types == 0 { diff --git a/stores/remote.go b/stores/remote.go index e510d71d1..be1ebf1d2 100644 --- a/stores/remote.go +++ b/stores/remote.go @@ -50,9 +50,9 @@ func NewRemote(local *Local, index SectorIndex, auth http.Header) *Remote { } } -func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, spt abi.RegisteredProof, existing SectorFileType, allocate SectorFileType, pathType PathType, op AcquireMode) (SectorPaths, SectorPaths, func(), error) { +func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, spt abi.RegisteredProof, existing SectorFileType, allocate SectorFileType, pathType PathType, op AcquireMode) (SectorPaths, SectorPaths, error) { if existing|allocate != existing^allocate { - return SectorPaths{}, SectorPaths{}, nil, xerrors.New("can't both find and allocate a sector") + return SectorPaths{}, SectorPaths{}, xerrors.New("can't both find and allocate a sector") } for { @@ -71,7 +71,7 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, spt abi.Regi case <-c: continue case <-ctx.Done(): - return SectorPaths{}, SectorPaths{}, nil, ctx.Err() + return SectorPaths{}, SectorPaths{}, ctx.Err() } } @@ -82,9 +82,9 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, spt abi.Regi r.fetchLk.Unlock() }() - paths, stores, done, err := r.local.AcquireSector(ctx, s, spt, existing, allocate, pathType, op) + paths, stores, err := r.local.AcquireSector(ctx, s, spt, existing, allocate, pathType, op) if err != nil { - return SectorPaths{}, SectorPaths{}, nil, xerrors.Errorf("local acquire error: %w", err) + return SectorPaths{}, SectorPaths{}, xerrors.Errorf("local acquire error: %w", err) } for _, fileType := range PathTypes { @@ -96,13 +96,11 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, spt abi.Regi continue } - ap, storageID, url, rdone, err := r.acquireFromRemote(ctx, s, spt, fileType, pathType, op) + ap, storageID, url, err := r.acquireFromRemote(ctx, s, spt, fileType, pathType, op) if err != nil { - done() - return SectorPaths{}, SectorPaths{}, nil, err + return SectorPaths{}, SectorPaths{}, err } - done = mergeDone(done, rdone) SetPathByType(&paths, fileType, ap) SetPathByType(&stores, fileType, string(storageID)) @@ -118,26 +116,26 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, spt abi.Regi } } - return paths, stores, done, nil + return paths, stores, nil } -func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, spt abi.RegisteredProof, fileType SectorFileType, pathType PathType, op AcquireMode) (string, ID, string, func(), error) { +func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, spt abi.RegisteredProof, fileType SectorFileType, pathType PathType, op AcquireMode) (string, ID, string, error) { si, err := r.index.StorageFindSector(ctx, s, fileType, false) if err != nil { - return "", "", "", nil, err + return "", "", "", err } if len(si) == 0 { - return "", "", "", nil, xerrors.Errorf("failed to acquire sector %v from remote(%d): %w", s, fileType, storiface.ErrSectorNotFound) + return "", "", "", xerrors.Errorf("failed to acquire sector %v from remote(%d): %w", s, fileType, storiface.ErrSectorNotFound) } sort.Slice(si, func(i, j int) bool { return si[i].Weight < si[j].Weight }) - apaths, ids, done, err := r.local.AcquireSector(ctx, s, spt, FTNone, fileType, pathType, op) + apaths, ids, err := r.local.AcquireSector(ctx, s, spt, FTNone, fileType, pathType, op) if err != nil { - return "", "", "", nil, xerrors.Errorf("allocate local sector for fetching: %w", err) + return "", "", "", xerrors.Errorf("allocate local sector for fetching: %w", err) } dest := PathByType(apaths, fileType) storageID := PathByType(ids, fileType) @@ -156,12 +154,11 @@ func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, spt abi. if merr != nil { log.Warnw("acquireFromRemote encountered errors when fetching sector from remote", "errors", merr) } - return dest, ID(storageID), url, done, nil + return dest, ID(storageID), url, nil } } - done() - return "", "", "", nil, xerrors.Errorf("failed to acquire sector %v from remote (tried %v): %w", s, si, merr) + return "", "", "", xerrors.Errorf("failed to acquire sector %v from remote (tried %v): %w", s, si, merr) } func (r *Remote) fetch(ctx context.Context, url, outname string) error { @@ -215,11 +212,10 @@ func (r *Remote) fetch(ctx context.Context, url, outname string) error { func (r *Remote) MoveStorage(ctx context.Context, s abi.SectorID, spt abi.RegisteredProof, types SectorFileType) error { // Make sure we have the data local - _, _, ddone, err := r.AcquireSector(ctx, s, spt, types, FTNone, PathStorage, AcquireMove) + _, _, err := r.AcquireSector(ctx, s, spt, types, FTNone, PathStorage, AcquireMove) if err != nil { return xerrors.Errorf("acquire src storage (remote): %w", err) } - ddone() return r.local.MoveStorage(ctx, s, spt, types) } @@ -336,11 +332,4 @@ func (r *Remote) FsStat(ctx context.Context, id ID) (FsStat, error) { return out, nil } -func mergeDone(a func(), b func()) func() { - return func() { - a() - b() - } -} - var _ Store = &Remote{} From b3cde66bd29548abadfdee607f3180ef134d3648 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 4 Jun 2020 21:15:15 +0200 Subject: [PATCH 108/199] Recover from half-successful FinalizeSector --- manager.go | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/manager.go b/manager.go index 3c50c877f..d2815b2ee 100644 --- a/manager.go +++ b/manager.go @@ -390,13 +390,25 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID) error return xerrors.Errorf("acquiring sector lock: %w", err) } - selector, err := newExistingSelector(ctx, m.index, sector, stores.FTCache|stores.FTSealed|stores.FTUnsealed, false) + unsealed := stores.FTUnsealed + { + unsealedStores, err := m.index.StorageFindSector(ctx, sector, stores.FTUnsealed, false) + if err != nil { + return xerrors.Errorf("finding unsealed sector: %w", err) + } + + if len(unsealedStores) == 0 { // Is some edge-cases unsealed sector may not exist already, that's fine + unsealed = stores.FTNone + } + } + + selector, err := newExistingSelector(ctx, m.index, sector, stores.FTCache|stores.FTSealed|unsealed, false) if err != nil { return xerrors.Errorf("creating path selector: %w", err) } err = m.sched.Schedule(ctx, sector, sealtasks.TTFinalize, selector, - schedFetch(sector, stores.FTCache|stores.FTSealed|stores.FTUnsealed, stores.PathSealing, stores.AcquireMove), + schedFetch(sector, stores.FTCache|stores.FTSealed|unsealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error { return w.FinalizeSector(ctx, sector) }) From 05d703e53bc717a644b045221dd8692e115f6e66 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 4 Jun 2020 21:19:15 +0200 Subject: [PATCH 109/199] Fix testworker --- testworker_test.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/testworker_test.go b/testworker_test.go index e61cf96ba..3fe408b93 100644 --- a/testworker_test.go +++ b/testworker_test.go @@ -77,6 +77,10 @@ func (t *testWorker) FinalizeSector(ctx context.Context, sector abi.SectorID) er panic("implement me") } +func (t *testWorker) MoveStorage(ctx context.Context, sector abi.SectorID) error { + panic("implement me") +} + func (t *testWorker) Fetch(ctx context.Context, id abi.SectorID, fileType stores.SectorFileType, b bool, am stores.AcquireMode) error { return nil } From 13176e8b537dacea5a1f7239ec8eddde265a3cbe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 4 Jun 2020 23:30:20 +0200 Subject: [PATCH 110/199] Cleanup PathType usages --- ffiwrapper/basicfs/fs.go | 2 +- ffiwrapper/types.go | 2 +- localworker.go | 8 ++++---- manager.go | 6 +++--- roprov.go | 4 ++-- testworker_test.go | 2 +- 6 files changed, 12 insertions(+), 12 deletions(-) diff --git a/ffiwrapper/basicfs/fs.go b/ffiwrapper/basicfs/fs.go index 41ec8d4b4..3f865f590 100644 --- a/ffiwrapper/basicfs/fs.go +++ b/ffiwrapper/basicfs/fs.go @@ -24,7 +24,7 @@ type Provider struct { waitSector map[sectorFile]chan struct{} } -func (b *Provider) AcquireSector(ctx context.Context, id abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, sealing bool) (stores.SectorPaths, func(), error) { +func (b *Provider) AcquireSector(ctx context.Context, id abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, ptype stores.PathType) (stores.SectorPaths, func(), error) { if err := os.Mkdir(filepath.Join(b.Root, stores.FTUnsealed.String()), 0755); err != nil && !os.IsExist(err) { return stores.SectorPaths{}, nil, err } diff --git a/ffiwrapper/types.go b/ffiwrapper/types.go index cf211056f..c640df2e7 100644 --- a/ffiwrapper/types.go +++ b/ffiwrapper/types.go @@ -43,7 +43,7 @@ type Verifier interface { type SectorProvider interface { // * returns storiface.ErrSectorNotFound if a requested existing sector doesn't exist // * returns an error when allocate is set, and existing isn't, and the sector exists - AcquireSector(ctx context.Context, id abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, sealing bool) (stores.SectorPaths, func(), error) + AcquireSector(ctx context.Context, id abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, ptype stores.PathType) (stores.SectorPaths, func(), error) } var _ SectorProvider = &basicfs.Provider{} diff --git a/localworker.go b/localworker.go index 7e2e9ca26..fad5d42ec 100644 --- a/localworker.go +++ b/localworker.go @@ -59,8 +59,8 @@ type localWorkerPathProvider struct { op stores.AcquireMode } -func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, sealing bool) (stores.SectorPaths, func(), error) { - paths, storageIDs, err := l.w.storage.AcquireSector(ctx, sector, l.w.scfg.SealProofType, existing, allocate, stores.PathType(sealing), l.op) +func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, sealing stores.PathType) (stores.SectorPaths, func(), error) { + paths, storageIDs, err := l.w.storage.AcquireSector(ctx, sector, l.w.scfg.SealProofType, existing, allocate, sealing, l.op) if err != nil { return stores.SectorPaths{}, nil, err } @@ -104,8 +104,8 @@ func (l *LocalWorker) AddPiece(ctx context.Context, sector abi.SectorID, epcs [] return sb.AddPiece(ctx, sector, epcs, sz, r) } -func (l *LocalWorker) Fetch(ctx context.Context, sector abi.SectorID, fileType stores.SectorFileType, sealing bool, am stores.AcquireMode) error { - _, done, err := (&localWorkerPathProvider{w: l, op: am}).AcquireSector(ctx, sector, fileType, stores.FTNone, sealing) +func (l *LocalWorker) Fetch(ctx context.Context, sector abi.SectorID, fileType stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) error { + _, done, err := (&localWorkerPathProvider{w: l, op: am}).AcquireSector(ctx, sector, fileType, stores.FTNone, ptype) if err != nil { return err } diff --git a/manager.go b/manager.go index d2815b2ee..bc6dd1d9d 100644 --- a/manager.go +++ b/manager.go @@ -31,7 +31,7 @@ type Worker interface { MoveStorage(ctx context.Context, sector abi.SectorID) error - Fetch(ctx context.Context, s abi.SectorID, ft stores.SectorFileType, sealing bool, am stores.AcquireMode) error + Fetch(ctx context.Context, s abi.SectorID, ft stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) error UnsealPiece(context.Context, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error ReadPiece(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) error @@ -186,9 +186,9 @@ func schedNop(context.Context, Worker) error { return nil } -func schedFetch(sector abi.SectorID, ft stores.SectorFileType, sealing stores.PathType, am stores.AcquireMode) func(context.Context, Worker) error { +func schedFetch(sector abi.SectorID, ft stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) func(context.Context, Worker) error { return func(ctx context.Context, worker Worker) error { - return worker.Fetch(ctx, sector, ft, bool(sealing), am) + return worker.Fetch(ctx, sector, ft, ptype, am) } } diff --git a/roprov.go b/roprov.go index aa94521a3..ad63526c9 100644 --- a/roprov.go +++ b/roprov.go @@ -16,7 +16,7 @@ type readonlyProvider struct { spt abi.RegisteredProof } -func (l *readonlyProvider) AcquireSector(ctx context.Context, id abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, sealing bool) (stores.SectorPaths, func(), error) { +func (l *readonlyProvider) AcquireSector(ctx context.Context, id abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, sealing stores.PathType) (stores.SectorPaths, func(), error) { if allocate != stores.FTNone { return stores.SectorPaths{}, nil, xerrors.New("read-only storage") } @@ -26,7 +26,7 @@ func (l *readonlyProvider) AcquireSector(ctx context.Context, id abi.SectorID, e return stores.SectorPaths{}, nil, xerrors.Errorf("acquiring sector lock: %w", err) } - p, _, err := l.stor.AcquireSector(ctx, id, l.spt, existing, allocate, stores.PathType(sealing), stores.AcquireMove) + p, _, err := l.stor.AcquireSector(ctx, id, l.spt, existing, allocate, sealing, stores.AcquireMove) return p, cancel, err } diff --git a/testworker_test.go b/testworker_test.go index 3fe408b93..46218d2f6 100644 --- a/testworker_test.go +++ b/testworker_test.go @@ -81,7 +81,7 @@ func (t *testWorker) MoveStorage(ctx context.Context, sector abi.SectorID) error panic("implement me") } -func (t *testWorker) Fetch(ctx context.Context, id abi.SectorID, fileType stores.SectorFileType, b bool, am stores.AcquireMode) error { +func (t *testWorker) Fetch(ctx context.Context, id abi.SectorID, fileType stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) error { return nil } From c06dc750da32175e7f639e8d4a1b96a25fcddb78 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 5 Jun 2020 10:21:21 +0200 Subject: [PATCH 111/199] Address review --- faults.go | 1 - stores/filetype.go | 4 +++- stores/index_locks.go | 3 ++- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/faults.go b/faults.go index 75143d55e..6bf036089 100644 --- a/faults.go +++ b/faults.go @@ -19,7 +19,6 @@ func (m *Manager) CheckProvable(ctx context.Context, spt abi.RegisteredProof, se var bad []abi.SectorID // TODO: More better checks - // TODO: This should live in sector-storage // TODO: Use proper locking for _, sector := range sectors { err := func() error { diff --git a/stores/filetype.go b/stores/filetype.go index cfb9db38a..fee8b256f 100644 --- a/stores/filetype.go +++ b/stores/filetype.go @@ -73,7 +73,9 @@ func (t SectorFileType) SealSpaceUse(spt abi.RegisteredProof) (uint64, error) { return need, nil } -func (t SectorFileType) All() (out [FileTypes]bool) { +func (t SectorFileType) All() [FileTypes]bool { + var out [FileTypes]bool + for i := range out { out[i] = t&(1< 0 } diff --git a/stores/index_locks.go b/stores/index_locks.go index 0bce92a22..4fd6237ab 100644 --- a/stores/index_locks.go +++ b/stores/index_locks.go @@ -26,7 +26,8 @@ func (l *sectorLock) canLock(read SectorFileType, write SectorFileType) bool { } } - return l.w&(read|write) == 0 + // check that there are no locks taken for either read or write file types we want + return l.w&read == 0 && l.w&write == 0 } func (l *sectorLock) tryLock(read SectorFileType, write SectorFileType) bool { From 958f74340b4cbb3883a63d7bcad3e498b11755af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 5 Jun 2020 18:43:47 +0200 Subject: [PATCH 112/199] mock: Fix concurrent map writes in AddPiece --- mock/mock.go | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/mock/mock.go b/mock/mock.go index 26aacce13..2e80bde36 100644 --- a/mock/mock.go +++ b/mock/mock.go @@ -70,17 +70,6 @@ func (mgr *SectorMgr) NewSector(ctx context.Context, sector abi.SectorID) error func (mgr *SectorMgr) AddPiece(ctx context.Context, sectorId abi.SectorID, existingPieces []abi.UnpaddedPieceSize, size abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) { log.Warn("Add piece: ", sectorId, size, mgr.proofType) - mgr.lk.Lock() - ss, ok := mgr.sectors[sectorId] - if !ok { - ss = §orState{ - state: statePacking, - } - mgr.sectors[sectorId] = ss - } - mgr.lk.Unlock() - ss.lk.Lock() - defer ss.lk.Unlock() var b bytes.Buffer tr := io.TeeReader(r, &b) @@ -92,9 +81,24 @@ func (mgr *SectorMgr) AddPiece(ctx context.Context, sectorId abi.SectorID, exist log.Warn("Generated Piece CID: ", c) + mgr.lk.Lock() mgr.pieces[c] = b.Bytes() + + ss, ok := mgr.sectors[sectorId] + if !ok { + ss = §orState{ + state: statePacking, + } + mgr.sectors[sectorId] = ss + } + mgr.lk.Unlock() + + ss.lk.Lock() ss.pieces = append(ss.pieces, c) + ss.lk.Unlock() + return abi.PieceInfo{ + Size: size.Padded(), PieceCID: c, }, nil From f92ef92193a4d89f1f526f00147735803314962e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 5 Jun 2020 20:04:59 +0200 Subject: [PATCH 113/199] Make ctxCond nicer --- stores/index_locks.go | 25 +++++++++++-------------- stores/index_locks_util.go | 8 ++++---- 2 files changed, 15 insertions(+), 18 deletions(-) diff --git a/stores/index_locks.go b/stores/index_locks.go index 4fd6237ab..8e1a07a02 100644 --- a/stores/index_locks.go +++ b/stores/index_locks.go @@ -10,8 +10,7 @@ import ( ) type sectorLock struct { - lk sync.Mutex - notif *ctxCond + cond *ctxCond r [FileTypes]uint w SectorFileType @@ -47,23 +46,21 @@ func (l *sectorLock) tryLock(read SectorFileType, write SectorFileType) bool { } func (l *sectorLock) lock(ctx context.Context, read SectorFileType, write SectorFileType) error { - l.lk.Lock() - defer l.lk.Unlock() + l.cond.L.Lock() + defer l.cond.L.Unlock() - for { - if l.tryLock(read, write) { - return nil - } - - if err := l.notif.Wait(ctx); err != nil { + for !l.tryLock(read, write) { + if err := l.cond.Wait(ctx); err != nil { return err } } + + return nil } func (l *sectorLock) unlock(read SectorFileType, write SectorFileType) { - l.lk.Lock() - defer l.lk.Unlock() + l.cond.L.Lock() + defer l.cond.L.Unlock() for i, set := range read.All() { if set { @@ -73,7 +70,7 @@ func (l *sectorLock) unlock(read SectorFileType, write SectorFileType) { l.w &= ^write - l.notif.Broadcast() + l.cond.Broadcast() } type indexLocks struct { @@ -95,7 +92,7 @@ func (i *indexLocks) StorageLock(ctx context.Context, sector abi.SectorID, read slk, ok := i.locks[sector] if !ok { slk = §orLock{} - slk.notif = newCtxCond(&slk.lk) + slk.cond = newCtxCond(&sync.Mutex{}) i.locks[sector] = slk } diff --git a/stores/index_locks_util.go b/stores/index_locks_util.go index 2dbb3a596..a8327fded 100644 --- a/stores/index_locks_util.go +++ b/stores/index_locks_util.go @@ -8,14 +8,14 @@ import ( // like sync.Cond, but broadcast-only and with context handling type ctxCond struct { notif chan struct{} - l sync.Locker + L sync.Locker lk sync.Mutex } func newCtxCond(l sync.Locker) *ctxCond { return &ctxCond{ - l: l, + L: l, } } @@ -37,8 +37,8 @@ func (c *ctxCond) Wait(ctx context.Context) error { wait := c.notif c.lk.Unlock() - c.l.Unlock() - defer c.l.Lock() + c.L.Unlock() + defer c.L.Lock() select { case <-wait: From 1a5af8cafd4d465d5cf670f6ffaaaecc08b4b6c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 8 Jun 2020 18:47:59 +0200 Subject: [PATCH 114/199] Improve fault checker --- faults.go | 33 +++++++++++++++++++++++++++++-- stores/index.go | 1 + stores/index_locks.go | 46 +++++++++++++++++++++++++++++++++++-------- 3 files changed, 70 insertions(+), 10 deletions(-) diff --git a/faults.go b/faults.go index 6bf036089..635ffa0c3 100644 --- a/faults.go +++ b/faults.go @@ -2,6 +2,8 @@ package sectorstorage import ( "context" + "os" + "path/filepath" "golang.org/x/xerrors" @@ -19,9 +21,22 @@ func (m *Manager) CheckProvable(ctx context.Context, spt abi.RegisteredProof, se var bad []abi.SectorID // TODO: More better checks - // TODO: Use proper locking for _, sector := range sectors { err := func() error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + locked, err := m.index.StorageTryLock(ctx, sector, stores.FTSealed|stores.FTCache, stores.FTNone) + if err != nil { + return xerrors.Errorf("acquiring sector lock: %w", err) + } + + if !locked { + log.Warnw("CheckProvable Sector FAULT: can't acquire read lock", "sector", sector, "sealed") + bad = append(bad, sector) + return nil + } + lp, _, err := m.localStore.AcquireSector(ctx, sector, spt, stores.FTSealed|stores.FTCache, stores.FTNone, false, stores.AcquireMove) if err != nil { return xerrors.Errorf("acquire sector in checkProvable: %w", err) @@ -33,7 +48,21 @@ func (m *Manager) CheckProvable(ctx context.Context, spt abi.RegisteredProof, se return nil } - // must be fine + toCheck := []string{ + lp.Sealed, + filepath.Join(lp.Cache, "t_aux"), + filepath.Join(lp.Cache, "p_aux"), + filepath.Join(lp.Cache, "sc-02-data-tree-r-last.dat"), + } + + for _, p := range toCheck { + _, err := os.Stat(p) + if err != nil { + log.Warnw("CheckProvable Sector FAULT: sector file stat error", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "file", p) + bad = append(bad, sector) + return nil + } + } return nil }() diff --git a/stores/index.go b/stores/index.go index 6ef346367..e113a005a 100644 --- a/stores/index.go +++ b/stores/index.go @@ -62,6 +62,7 @@ type SectorIndex interface { // part of storage-miner api // atomically acquire locks on all sector file types. close ctx to unlock StorageLock(ctx context.Context, sector abi.SectorID, read SectorFileType, write SectorFileType) error + StorageTryLock(ctx context.Context, sector abi.SectorID, read SectorFileType, write SectorFileType) (bool, error) } type Decl struct { diff --git a/stores/index_locks.go b/stores/index_locks.go index 8e1a07a02..8bf15b950 100644 --- a/stores/index_locks.go +++ b/stores/index_locks.go @@ -45,17 +45,26 @@ func (l *sectorLock) tryLock(read SectorFileType, write SectorFileType) bool { return true } -func (l *sectorLock) lock(ctx context.Context, read SectorFileType, write SectorFileType) error { +type lockFn func(l *sectorLock, ctx context.Context, read SectorFileType, write SectorFileType) (bool, error) + +func (l *sectorLock) tryLockSafe(ctx context.Context, read SectorFileType, write SectorFileType) (bool, error) { + l.cond.L.Lock() + defer l.cond.L.Unlock() + + return l.tryLock(read, write), nil +} + +func (l *sectorLock) lock(ctx context.Context, read SectorFileType, write SectorFileType) (bool, error) { l.cond.L.Lock() defer l.cond.L.Unlock() for !l.tryLock(read, write) { if err := l.cond.Wait(ctx); err != nil { - return err + return false, err } } - return nil + return true, nil } func (l *sectorLock) unlock(read SectorFileType, write SectorFileType) { @@ -79,13 +88,13 @@ type indexLocks struct { locks map[abi.SectorID]*sectorLock } -func (i *indexLocks) StorageLock(ctx context.Context, sector abi.SectorID, read SectorFileType, write SectorFileType) error { +func (i *indexLocks) lockWith(ctx context.Context, lockFn lockFn, sector abi.SectorID, read SectorFileType, write SectorFileType) (bool, error) { if read|write == 0 { - return nil + return false, nil } if read|write > (1< Date: Mon, 8 Jun 2020 18:57:56 +0200 Subject: [PATCH 115/199] roprov: Use TryLock --- roprov.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/roprov.go b/roprov.go index ad63526c9..7a288bb39 100644 --- a/roprov.go +++ b/roprov.go @@ -22,9 +22,15 @@ func (l *readonlyProvider) AcquireSector(ctx context.Context, id abi.SectorID, e } ctx, cancel := context.WithCancel(ctx) - if err := l.index.StorageLock(ctx, id, existing, stores.FTNone); err != nil { + + // use TryLock to avoid blocking + locked, err := l.index.StorageTryLock(ctx, id, existing, stores.FTNone) + if err != nil { return stores.SectorPaths{}, nil, xerrors.Errorf("acquiring sector lock: %w", err) } + if !locked { + return stores.SectorPaths{}, nil, xerrors.Errorf("failed to acquire sector lock") + } p, _, err := l.stor.AcquireSector(ctx, id, l.spt, existing, allocate, sealing, stores.AcquireMove) From 8c58fcb1cc8ffa2b9fc8bd29308ae7b4724b4e49 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 8 Jun 2020 19:22:11 +0200 Subject: [PATCH 116/199] verifier: update interfaces --- ffiwrapper/verifier_cgo.go | 26 +++++++++++++++----------- go.mod | 2 +- go.sum | 13 ++++--------- 3 files changed, 20 insertions(+), 21 deletions(-) diff --git a/ffiwrapper/verifier_cgo.go b/ffiwrapper/verifier_cgo.go index bf9a60c50..2a094968f 100644 --- a/ffiwrapper/verifier_cgo.go +++ b/ffiwrapper/verifier_cgo.go @@ -15,26 +15,30 @@ import ( ) func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, error) { - randomness[31] = 0 // TODO: Not correct, fixme - privsectors, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredProof.RegisteredWinningPoStProof) // TODO: FAULTS? + randomness[31] = 0 // TODO: Not correct, fixme + privsectors, skipped, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredProof.RegisteredWinningPoStProof) // TODO: FAULTS? if err != nil { return nil, err } + if len(skipped) > 0 { + return nil, xerrors.Errorf("pubSectorToPriv skipped sectors: %+v", skipped) + } return ffi.GenerateWinningPoSt(minerID, privsectors, randomness) } -func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, error) { - randomness[31] = 0 // TODO: Not correct, fixme - privsectors, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredProof.RegisteredWindowPoStProof) // TODO: FAULTS? +func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, []abi.SectorID, error) { + randomness[31] = 0 // TODO: Not correct, fixme + privsectors, skipped, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredProof.RegisteredWindowPoStProof) if err != nil { - return nil, err + return nil, nil, xerrors.Errorf("gathering sector info: %w", err) } - return ffi.GenerateWindowPoSt(minerID, privsectors, randomness) + proof, err := ffi.GenerateWindowPoSt(minerID, privsectors, randomness) + return proof, skipped, err } -func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorInfo []abi.SectorInfo, faults []abi.SectorNumber, rpt func(abi.RegisteredProof) (abi.RegisteredProof, error)) (ffi.SortedPrivateSectorInfo, error) { +func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorInfo []abi.SectorInfo, faults []abi.SectorNumber, rpt func(abi.RegisteredProof) (abi.RegisteredProof, error)) (ffi.SortedPrivateSectorInfo, []abi.SectorID, error) { fmap := map[abi.SectorNumber]struct{}{} for _, fault := range faults { fmap[fault] = struct{}{} @@ -48,13 +52,13 @@ func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorIn paths, done, err := sb.sectors.AcquireSector(ctx, abi.SectorID{Miner: mid, Number: s.SectorNumber}, stores.FTCache|stores.FTSealed, 0, false) if err != nil { - return ffi.SortedPrivateSectorInfo{}, xerrors.Errorf("acquire sector paths: %w", err) + return ffi.SortedPrivateSectorInfo{}, nil, xerrors.Errorf("acquire sector paths: %w", err) } done() // TODO: This is a tiny bit suboptimal postProofType, err := rpt(s.RegisteredProof) if err != nil { - return ffi.SortedPrivateSectorInfo{}, xerrors.Errorf("acquiring registered PoSt proof from sector info %+v: %w", s, err) + return ffi.SortedPrivateSectorInfo{}, nil, xerrors.Errorf("acquiring registered PoSt proof from sector info %+v: %w", s, err) } out = append(out, ffi.PrivateSectorInfo{ @@ -65,7 +69,7 @@ func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorIn }) } - return ffi.NewSortedPrivateSectorInfo(out...), nil + return ffi.NewSortedPrivateSectorInfo(out...), nil, nil } var _ Verifier = ProofVerifier diff --git a/go.mod b/go.mod index b9567100b..08c7e9185 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5 github.com/filecoin-project/go-paramfetch v0.0.1 github.com/filecoin-project/specs-actors v0.5.4-0.20200521014528-0df536f7e461 - github.com/filecoin-project/specs-storage v0.0.0-20200417134612-61b2d91a6102 + github.com/filecoin-project/specs-storage v0.1.0 github.com/google/uuid v1.1.1 github.com/gorilla/mux v1.7.4 github.com/hashicorp/go-multierror v1.0.0 diff --git a/go.sum b/go.sum index 81096287e..958c02ba1 100644 --- a/go.sum +++ b/go.sum @@ -31,8 +31,7 @@ github.com/filecoin-project/go-address v0.0.2-0.20200218010043-eb9bb40ed5be h1:T github.com/filecoin-project/go-address v0.0.2-0.20200218010043-eb9bb40ed5be/go.mod h1:SAOwJoakQ8EPjwNIsiakIQKsoKdkcbx8U3IapgCg9R0= github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200131012142-05d80eeccc5e/go.mod h1:boRtQhzmxNocrMxOXo1NYn4oUc1NGvR8tEa79wApNXg= github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200424220931-6263827e49f2/go.mod h1:boRtQhzmxNocrMxOXo1NYn4oUc1NGvR8tEa79wApNXg= -github.com/filecoin-project/go-bitfield v0.0.0-20200309034705-8c7ac40bd550 h1:aockulLU8Qjkdj4FQz53WQpNosAIYk8DxRediRLkE5c= -github.com/filecoin-project/go-bitfield v0.0.0-20200309034705-8c7ac40bd550/go.mod h1:iodsLxOFZnqKtjj2zkgqzoGNrv6vUqj69AT/J8DKXEw= +github.com/filecoin-project/go-bitfield v0.0.0-20200416002808-b3ee67ec9060/go.mod h1:iodsLxOFZnqKtjj2zkgqzoGNrv6vUqj69AT/J8DKXEw= github.com/filecoin-project/go-bitfield v0.0.1 h1:Xg/JnrqqE77aJVKdbEyR04n9FZQWhwrN+buDgQCVpZU= github.com/filecoin-project/go-bitfield v0.0.1/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY= github.com/filecoin-project/go-bitfield v0.0.2-0.20200518150651-562fdb554b6e h1:gkG/7G+iKy4He+IiQNeQn+nndFznb/vCoOR8iRQsm60= @@ -43,12 +42,11 @@ github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5 h1 github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5/go.mod h1:JbkIgFF/Z9BDlvrJO1FuKkaWsH673/UdFaiVS6uIHlA= github.com/filecoin-project/go-paramfetch v0.0.1 h1:gV7bs5YaqlgpGFMiLxInGK2L1FyCXUE0rimz4L7ghoE= github.com/filecoin-project/go-paramfetch v0.0.1/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= -github.com/filecoin-project/specs-actors v0.0.0-20200409043918-e569f4a2f504 h1:mwuAaqxKThl70+7FkGdFKVLdwaQZQ8XmscKdhSBBtnc= -github.com/filecoin-project/specs-actors v0.0.0-20200409043918-e569f4a2f504/go.mod h1:mdJraXq5vMy0+/FqVQIrnNlpQ/Em6zeu06G/ltQ0/lA= +github.com/filecoin-project/specs-actors v0.3.0/go.mod h1:nQYnFbQ7Y0bHZyq6HDEuVlCPR+U3z5Q3wMOQ+2aiV+Y= github.com/filecoin-project/specs-actors v0.5.4-0.20200521014528-0df536f7e461 h1:yyAoJ9dNboljDWj0uBdJEbdaTak/YhkjYUQt0GzlY0A= github.com/filecoin-project/specs-actors v0.5.4-0.20200521014528-0df536f7e461/go.mod h1:r5btrNzZD0oBkEz1pohv80gSCXQnqGrD0kYwOTiExyE= -github.com/filecoin-project/specs-storage v0.0.0-20200417134612-61b2d91a6102 h1:T3f/zkuvgtgqcXrb0NO3BicuveGOxxUAMPa/Yif2kuE= -github.com/filecoin-project/specs-storage v0.0.0-20200417134612-61b2d91a6102/go.mod h1:xJ1/xl9+8zZeSSSFmDC3Wr6uusCTxyYPI0VeNVSFmPE= +github.com/filecoin-project/specs-storage v0.1.0 h1:PkDgTOT5W5Ao7752onjDl4QSv+sgOVdJbvFjOnD5w94= +github.com/filecoin-project/specs-storage v0.1.0/go.mod h1:Pr5ntAaxsh+sLG/LYiL4tKzvA83Vk5vLODYhfNwOg7k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= @@ -82,7 +80,6 @@ github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJ github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= -github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M= github.com/ipfs/go-cid v0.0.5 h1:o0Ix8e/ql7Zb5UVUJEUfjsWCIY8t48++9lR8qi6oiJU= github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= github.com/ipfs/go-hamt-ipld v0.0.15-0.20200131012125-dd88a59d3f2e/go.mod h1:9aQJu/i/TaRDW6jqB5U217dLIDopn50wxLdHXM2CTfE= @@ -219,8 +216,6 @@ github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830 h1:8kxMKmKzXXL4Ru github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= -github.com/whyrusleeping/cbor-gen v0.0.0-20200206220010-03c9665e2a66 h1:LolR9FiEfQNn5U031bAhn/46po2JgWHKadYbcWFIJ+0= -github.com/whyrusleeping/cbor-gen v0.0.0-20200206220010-03c9665e2a66/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e h1:JY8o/ebUUrCYetWmjRCNghxC59cOEaili83rxPRQCLw= github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= From 8e489cdbdbb8761e7141b19ece5d1af50d519373 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 8 Jun 2020 19:36:41 +0200 Subject: [PATCH 117/199] verifier: Skip sectors which can't be acquired --- ffiwrapper/verifier_cgo.go | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/ffiwrapper/verifier_cgo.go b/ffiwrapper/verifier_cgo.go index 2a094968f..889bc6e39 100644 --- a/ffiwrapper/verifier_cgo.go +++ b/ffiwrapper/verifier_cgo.go @@ -44,15 +44,20 @@ func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorIn fmap[fault] = struct{}{} } + var skipped []abi.SectorID var out []ffi.PrivateSectorInfo for _, s := range sectorInfo { if _, faulty := fmap[s.SectorNumber]; faulty { continue } - paths, done, err := sb.sectors.AcquireSector(ctx, abi.SectorID{Miner: mid, Number: s.SectorNumber}, stores.FTCache|stores.FTSealed, 0, false) + sid := abi.SectorID{Miner: mid, Number: s.SectorNumber} + + paths, done, err := sb.sectors.AcquireSector(ctx, sid, stores.FTCache|stores.FTSealed, 0, false) if err != nil { - return ffi.SortedPrivateSectorInfo{}, nil, xerrors.Errorf("acquire sector paths: %w", err) + log.Warnw("failed to acquire sector, skipping", "sector", sid, "error", err) + skipped = append(skipped, sid) + continue } done() // TODO: This is a tiny bit suboptimal @@ -69,7 +74,7 @@ func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorIn }) } - return ffi.NewSortedPrivateSectorInfo(out...), nil, nil + return ffi.NewSortedPrivateSectorInfo(out...), skipped, nil } var _ Verifier = ProofVerifier From 375a6b4d3892b6335317cf17caa7ffc25716e3c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 8 Jun 2020 19:57:28 +0200 Subject: [PATCH 118/199] verifier: Handle sector unlocking correctly --- ffiwrapper/verifier_cgo.go | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/ffiwrapper/verifier_cgo.go b/ffiwrapper/verifier_cgo.go index 889bc6e39..502576286 100644 --- a/ffiwrapper/verifier_cgo.go +++ b/ffiwrapper/verifier_cgo.go @@ -15,11 +15,12 @@ import ( ) func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, error) { - randomness[31] = 0 // TODO: Not correct, fixme - privsectors, skipped, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredProof.RegisteredWinningPoStProof) // TODO: FAULTS? + randomness[31] = 0 // TODO: Not correct, fixme + privsectors, skipped, done, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredProof.RegisteredWinningPoStProof) // TODO: FAULTS? if err != nil { return nil, err } + defer done() if len(skipped) > 0 { return nil, xerrors.Errorf("pubSectorToPriv skipped sectors: %+v", skipped) } @@ -29,21 +30,29 @@ func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, []abi.SectorID, error) { randomness[31] = 0 // TODO: Not correct, fixme - privsectors, skipped, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredProof.RegisteredWindowPoStProof) + privsectors, skipped, done, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredProof.RegisteredWindowPoStProof) if err != nil { return nil, nil, xerrors.Errorf("gathering sector info: %w", err) } + defer done() proof, err := ffi.GenerateWindowPoSt(minerID, privsectors, randomness) return proof, skipped, err } -func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorInfo []abi.SectorInfo, faults []abi.SectorNumber, rpt func(abi.RegisteredProof) (abi.RegisteredProof, error)) (ffi.SortedPrivateSectorInfo, []abi.SectorID, error) { +func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorInfo []abi.SectorInfo, faults []abi.SectorNumber, rpt func(abi.RegisteredProof) (abi.RegisteredProof, error)) (ffi.SortedPrivateSectorInfo, []abi.SectorID, func(), error) { fmap := map[abi.SectorNumber]struct{}{} for _, fault := range faults { fmap[fault] = struct{}{} } + var doneFuncs []func() + done := func() { + for _, df := range doneFuncs { + df() + } + } + var skipped []abi.SectorID var out []ffi.PrivateSectorInfo for _, s := range sectorInfo { @@ -53,17 +62,18 @@ func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorIn sid := abi.SectorID{Miner: mid, Number: s.SectorNumber} - paths, done, err := sb.sectors.AcquireSector(ctx, sid, stores.FTCache|stores.FTSealed, 0, false) + paths, d, err := sb.sectors.AcquireSector(ctx, sid, stores.FTCache|stores.FTSealed, 0, false) if err != nil { log.Warnw("failed to acquire sector, skipping", "sector", sid, "error", err) skipped = append(skipped, sid) continue } - done() // TODO: This is a tiny bit suboptimal + doneFuncs = append(doneFuncs, d) postProofType, err := rpt(s.RegisteredProof) if err != nil { - return ffi.SortedPrivateSectorInfo{}, nil, xerrors.Errorf("acquiring registered PoSt proof from sector info %+v: %w", s, err) + done() + return ffi.SortedPrivateSectorInfo{}, nil, nil, xerrors.Errorf("acquiring registered PoSt proof from sector info %+v: %w", s, err) } out = append(out, ffi.PrivateSectorInfo{ @@ -74,7 +84,7 @@ func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorIn }) } - return ffi.NewSortedPrivateSectorInfo(out...), skipped, nil + return ffi.NewSortedPrivateSectorInfo(out...), skipped, done, nil } var _ Verifier = ProofVerifier From 1902049972ca6c29c618b27555596d08a8fdeac2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 8 Jun 2020 20:30:48 +0200 Subject: [PATCH 119/199] mock: Update interface --- mock/mock.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mock/mock.go b/mock/mock.go index 2e80bde36..ef472ae10 100644 --- a/mock/mock.go +++ b/mock/mock.go @@ -261,8 +261,8 @@ func (mgr *SectorMgr) GenerateWinningPoSt(ctx context.Context, minerID abi.Actor return generateFakePoSt(sectorInfo), nil } -func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, error) { - return generateFakePoSt(sectorInfo), nil +func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, []abi.SectorID, error) { + return generateFakePoSt(sectorInfo), nil, nil } func generateFakePoSt(sectorInfo []abi.SectorInfo) []abi.PoStProof { From 796af1351cdf8859442b0679212b1a55153c8e34 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 8 Jun 2020 23:53:31 +0200 Subject: [PATCH 120/199] v27 parameters --- extern/filecoin-ffi | 2 +- ffiwrapper/sealer_cgo.go | 45 ++++------ parameters.json | 180 +++++++++++++++++++-------------------- 3 files changed, 107 insertions(+), 120 deletions(-) diff --git a/extern/filecoin-ffi b/extern/filecoin-ffi index 6109b6ad2..61c02f6be 160000 --- a/extern/filecoin-ffi +++ b/extern/filecoin-ffi @@ -1 +1 @@ -Subproject commit 6109b6ad2fa9968941c206161dd01ac059011d4e +Subproject commit 61c02f6bea8d69bb79c70daa1d62f26c486643aa diff --git a/ffiwrapper/sealer_cgo.go b/ffiwrapper/sealer_cgo.go index ce06bc847..3b20f33bc 100644 --- a/ffiwrapper/sealer_cgo.go +++ b/ffiwrapper/sealer_cgo.go @@ -6,11 +6,9 @@ import ( "bufio" "context" "io" - "io/ioutil" "math/bits" "os" "runtime" - "syscall" "github.com/ipfs/go-cid" "golang.org/x/xerrors" @@ -200,6 +198,12 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s } defer srcDone() + sealed, err := os.OpenFile(srcPaths.Sealed, os.O_RDONLY, 0644) + if err != nil { + return xerrors.Errorf("opening sealed file: %w", err) + } + defer sealed.Close() + var at, nextat abi.PaddedPieceSize for { piece, err := toUnseal.NextRun() @@ -220,40 +224,20 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s } // - outpipe, err := ioutil.TempFile(os.TempDir(), "sector-storage-unseal-") + opr, opw, err := os.Pipe() if err != nil { - return xerrors.Errorf("creating temp pipe file: %w", err) + return xerrors.Errorf("creating out pipe: %w", err) } - var outpath string + var perr error outWait := make(chan struct{}) { - outpath = outpipe.Name() - if err := outpipe.Close(); err != nil { - return xerrors.Errorf("close pipe temp: %w", err) - } - if err := os.Remove(outpath); err != nil { - return xerrors.Errorf("rm pipe temp: %w", err) - } - - // TODO: Make UnsealRange write to an FD - if err := syscall.Mkfifo(outpath, 0600); err != nil { - return xerrors.Errorf("mk temp fifo: %w", err) - } - go func() { defer close(outWait) - defer os.Remove(outpath) + defer opr.Close() - outpipe, err = os.OpenFile(outpath, os.O_RDONLY, 0600) - if err != nil { - perr = xerrors.Errorf("open temp pipe: %w", err) - return - } - defer outpipe.Close() - - padreader, err := fr32.NewPadReader(outpipe, abi.PaddedPieceSize(piece.Len).Unpadded()) + padreader, err := fr32.NewPadReader(opr, abi.PaddedPieceSize(piece.Len).Unpadded()) if err != nil { perr = xerrors.Errorf("creating new padded reader: %w", err) return @@ -274,14 +258,17 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s // TODO: This may be possible to do in parallel err = ffi.UnsealRange(sb.sealProofType, srcPaths.Cache, - srcPaths.Sealed, - outpath, + sealed, + opw, sector.Number, sector.Miner, randomness, commd, uint64(at.Unpadded()), uint64(abi.PaddedPieceSize(piece.Len).Unpadded())) + + _ = opr.Close() + if err != nil { return xerrors.Errorf("unseal range: %w", err) } diff --git a/parameters.json b/parameters.json index 4ca3e6d2d..b632c17e8 100644 --- a/parameters.json +++ b/parameters.json @@ -1,152 +1,152 @@ { - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.params": { - "cid": "QmYkygifkXnrnsN4MJsjBFHTQJHx294CyikDgDK8nYxdGh", - "digest": "df3f30442a6d6b4192f5071fb17e820c", + "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.params": { + "cid": "QmeDRyxek34F1H6xJY6AkFdWvPsy5F6dKTrebV3ZtWT4ky", + "digest": "f5827f2d8801c62c831e0f972f6dc8bb", "sector_size": 2048 }, - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.vk": { - "cid": "QmdXyqbmy2bkJA9Kyhh6z25GrTCq48LwX6c1mxPsm54wi7", - "digest": "0bea3951abf9557a3569f68e52a30c6c", + "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.vk": { + "cid": "QmUw1ZmG4BBbX19MsbH3zAEGKUc42iFJc5ZAyomDHeJTsA", + "digest": "398fecdb4b2de445125852bc3c080b35", "sector_size": 2048 }, - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.params": { - "cid": "Qmf5XZZtP5VcYTf65MbKjLVabcS6cYMbr2rFShmfJzh5e5", - "digest": "655e6277638edc8c658094f6f0b33d54", + "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.params": { + "cid": "QmUeNKp9YZpiAFm81RV5KuxH1FDGJx2DuwcbU2XNSZLLSv", + "digest": "2b6d2972ac9e862e8134d98fb695b0c5", "sector_size": 536870912 }, - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.vk": { - "cid": "QmPuhdWnAXBks43emnkqi9FQzyU1gASKyz23zrD27BPGs8", - "digest": "57690e3a6a94c3f704802a674b34f36b", + "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.vk": { + "cid": "QmQaQmTXX995Akd66ggtJY5bNx6Gkxk8P34JTdMMq8393G", + "digest": "3688c9eb256b7b17f411dad78d5ef74a", "sector_size": 536870912 }, - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.params": { - "cid": "QmPNVgTN7N5vDtD5u7ERMTLcvUtrKRBfYVUDr6uW3pKhX7", - "digest": "3d390654f58e603b896ac70c653f5676", + "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.params": { + "cid": "QmfEYTMSkwGJTumQx26iKXGNKiYh3mmAC4SkdybZpJCj5p", + "digest": "09bff16aed893349d94485cfae366a9c", "sector_size": 2048 }, - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.vk": { - "cid": "Qmbj61Zez7v5xA7nSCnmWbyLYznWJDWeusz7Yg8EcgVdoN", - "digest": "8c170a164743c39576a7f47a1b51e6f3", + "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.vk": { + "cid": "QmP4ThPieSUJyRanjibWpT5R5cCMzMAU4j8Y7kBn7CSW1Q", + "digest": "142f2f7e8f1b1779290315cabfd2c803", "sector_size": 2048 }, - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.params": { - "cid": "QmRApb8RZoBK3cqicT7V3ydXg8yVvqPFMPrQNXP33aBihp", - "digest": "b1b58ff9a297b82885e8a7dfb035f83c", + "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.params": { + "cid": "QmcAixrHsz29DgvtZiMc2kQjvPRvWxYUp36QYmRDZbmREm", + "digest": "8f987f64d434365562180b96ec12e299", "sector_size": 8388608 }, - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.vk": { - "cid": "QmcytF1dTdqMFoyXi931j1RgmGtLfR9LLLaBznRt1tPQyD", - "digest": "1a09e00c641f192f55af3433a028f050", + "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.vk": { + "cid": "QmT4iFnbL6r4txS5PXsiV7NTzbhCxHy54PvdkJJGV2VFXb", + "digest": "94b6c24ac01924f4feeecedd16b5d77d", "sector_size": 8388608 }, - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.params": { - "cid": "QmPvr54tWaVeP4WnekivzUAJitTqsQfvikBvAHNEaDNQSw", - "digest": "9380e41368ed4083dbc922b290d3b786", + "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.params": { + "cid": "QmbjFst6SFCK1KsTQrfwPdxf3VTNa1raed574tEZZ9PoyQ", + "digest": "2c245fe8179839dd6c6cdea207c67ae8", "sector_size": 8388608 }, - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.vk": { - "cid": "QmXyVLVDRCcxA9SjT7PeK8HFtyxZ2ZH3SHa8KoGLw8VGJt", - "digest": "f0731a7e20f90704bd38fc5d27882f6d", + "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.vk": { + "cid": "QmQJKmvZN1a5cQ1Nw6CDyXs3nuRPzvyU5NvCFMUL2BfcZC", + "digest": "56ae47bfda53bb8d22981ed8d8d27d72", "sector_size": 8388608 }, - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.params": { - "cid": "Qmf5f6ko3dqj7qauzXpZqxM9B2x2sL977K6gE2ppNwuJPv", - "digest": "273ebb8c896326b7c292bee8b775fd38", + "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.params": { + "cid": "QmQCABxeTpdvXTyjDyk7nPBxkQzCh7MXfGztWnSXEPKMLW", + "digest": "7e6b2eb5ecbb11ac651ad66ebbb2075a", "sector_size": 536870912 }, - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.vk": { - "cid": "QmfP3MQe8koW63n5MkDENENVHxib78MJYYyZvbneCsuze8", - "digest": "3dd94da9da64e51b3445bc528d84e76d", + "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.vk": { + "cid": "QmPBweyugh5Sx4umk8ULhgEGbjY8xmWLfU6M7EMpc8Mad6", + "digest": "94a8d9e25a9ab9674d339833664eba25", "sector_size": 536870912 }, - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.params": { - "cid": "QmYEeeCE8uT2bsVkxcqqUYeMmMEbe6rfmo8wQCv7jFHqqm", - "digest": "c947f2021304ed43b7216f7a8436e294", + "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.params": { + "cid": "QmY5yax1E9KymBnCeHksE9Zi8NieZbmwcpoDGoabkeeb9h", + "digest": "c909ea9e3fe25ab9b391a64593afdbba", "sector_size": 34359738368 }, - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.vk": { - "cid": "QmXB63ExriFjB4ywWnXTnFwCcLFfCeEP3h15qtL5i7F4aX", - "digest": "ab20d7b253e7e9a0d2ccdf7599ec8ec3", + "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.vk": { + "cid": "QmXnPo4yH5mwMguwrvqgRfduSttbmPrXtbBfbwU21wQWHt", + "digest": "caf900461e988bbf86dbcaca087b7864", "sector_size": 34359738368 }, - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.params": { - "cid": "QmW5Yxg3L1NSzuQVcRMHMbG3uvVoi4dTLzVaDpnEUPQpnA", - "digest": "079ba19645828ae42b22b0e3f4866e8d", + "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.params": { + "cid": "QmZtzzPWwmZEgR7MSMvXRbt9KVK8k4XZ5RLWHybHJW9SdE", + "digest": "a2844f0703f186d143a06146a04577d8", "sector_size": 34359738368 }, - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.vk": { - "cid": "QmQzZ5dJ11tcSBees38WX41tZLXS9BqpEti253m5QcnTNs", - "digest": "c76125a50a7de315165de359b5174ae4", + "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.vk": { + "cid": "QmWxEA7EdQCUJTzjNpxg5XTF45D2uVyYnN1QRUb5TRYU8M", + "digest": "2306247a1e616dbe07f01b88196c2044", "sector_size": 34359738368 }, - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-2627e4006b67f99cef990c0a47d5426cb7ab0a0ad58fc1061547bf2d28b09def.params": { - "cid": "QmNk3wga1tS53FUu1QnkK8ehWA2cqpCnSEAPv3KLxdJxNa", - "digest": "421e4790c0b80e0107a7ff67acf14084", + "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-2627e4006b67f99cef990c0a47d5426cb7ab0a0ad58fc1061547bf2d28b09def.params": { + "cid": "QmP676KwuvyF9Y64uJnXvLtvD1xcuWQ6wD23RzYtQ6dd4f", + "digest": "215b1c667a4f46a1d0178338df568615", "sector_size": 68719476736 }, - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-2627e4006b67f99cef990c0a47d5426cb7ab0a0ad58fc1061547bf2d28b09def.vk": { - "cid": "QmVQCHGsrUtbn9RjHs1e6GXfeXDW5m9w4ge48PSX3Z2as2", - "digest": "8b60e9cc1470a6729c687d6cf0a1f79c", + "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-2627e4006b67f99cef990c0a47d5426cb7ab0a0ad58fc1061547bf2d28b09def.vk": { + "cid": "QmPvPwbJtcSGyqB1rQJhSF5yvFbX9ZBSsHVej5F8JUyHUJ", + "digest": "0c9c423b28b1455fcbc329a1045fd4dd", "sector_size": 68719476736 }, - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-b62098629d07946e9028127e70295ed996fe3ed25b0f9f88eb610a0ab4385a3c.params": { - "cid": "QmTL3VvydaMFWKvE5VzxjgKsJYgL9JMM4JVYNtQxdj9JK1", - "digest": "2685f31124b22ea6b2857e5a5e87ffa3", + "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-b62098629d07946e9028127e70295ed996fe3ed25b0f9f88eb610a0ab4385a3c.params": { + "cid": "QmUxPQfvckzm1t6MFRdDZ1fDK5UJzAjK7pTZ97cwyachdr", + "digest": "965132f51ae445b0e6d32692b7561995", "sector_size": 68719476736 }, - "v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-b62098629d07946e9028127e70295ed996fe3ed25b0f9f88eb610a0ab4385a3c.vk": { - "cid": "QmSVWbLqQYbUbbJyfsRMzEib2rfSqMtnPks1Nw22omcBQm", - "digest": "efe703cd2839597c7ca5c2a906b74296", + "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-b62098629d07946e9028127e70295ed996fe3ed25b0f9f88eb610a0ab4385a3c.vk": { + "cid": "QmTxq2EBnQWb5R8tS4MHdchj4vNfLYGoSXxwJFvs5xgW4K", + "digest": "fc8c3d26e0e56373ad96cb41520d55a6", "sector_size": 68719476736 }, - "v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-032d3138d22506ec0082ed72b2dcba18df18477904e35bafee82b3793b06832f.params": { - "cid": "QmU9dH31nZZUJnsogR4Ld4ySUcH6wm2RgmGiujwnqtbU6k", - "digest": "fcef8e87ae2afd7a28aae44347b804cf", + "v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-032d3138d22506ec0082ed72b2dcba18df18477904e35bafee82b3793b06832f.params": { + "cid": "QmRjgZHERgqGoRagR788Kh6ybi26csVYa8mqbqhmZm57Jx", + "digest": "cfc7b0897d1eee48c586f7beb89e67f7", "sector_size": 2048 }, - "v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-032d3138d22506ec0082ed72b2dcba18df18477904e35bafee82b3793b06832f.vk": { - "cid": "QmdJ15DMGPooye5NaPcRfXUdHUDibcN7hKjbmTGuu1K4AQ", - "digest": "2ee2b3518229680db15161d4f582af37", + "v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-032d3138d22506ec0082ed72b2dcba18df18477904e35bafee82b3793b06832f.vk": { + "cid": "QmNjvnvFP7KgovHUddULoB19fBHT81iz7NcUbzEHZUUPsm", + "digest": "fb59bd061c987eac7068008c44de346b", "sector_size": 2048 }, - "v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-6babf46ce344ae495d558e7770a585b2382d54f225af8ed0397b8be7c3fcd472.params": { - "cid": "QmZgtxcY3tMXXQxZTA7ZTUDXLVUnfxNcerXgeW4gG2NnfP", - "digest": "3273c7135cb75684248b475781b738ee", + "v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-6babf46ce344ae495d558e7770a585b2382d54f225af8ed0397b8be7c3fcd472.params": { + "cid": "QmTpRPBA4dt8fgGpcVzi4L1KA1U2eBHCE8WVmS2GUygMvT", + "digest": "36d465915b0afbf96bd08e7915e00952", "sector_size": 536870912 }, - "v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-6babf46ce344ae495d558e7770a585b2382d54f225af8ed0397b8be7c3fcd472.vk": { - "cid": "QmSS6ZkAV2aGZcgKgdPpEEgihXF1ryZX8PSAZDWSoeL1d4", - "digest": "1519b5f61d9044a59f2bdc57537c094b", + "v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-6babf46ce344ae495d558e7770a585b2382d54f225af8ed0397b8be7c3fcd472.vk": { + "cid": "QmRzDyVfQCLsxspoVsed5bcQRsG6KiktngJfcNBL3TJPZe", + "digest": "99d16df0eb6a7e227a4f4570c4f6b6f1", "sector_size": 536870912 }, - "v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-ecd683648512ab1765faa2a5f14bab48f676e633467f0aa8aad4b55dcb0652bb.params": { - "cid": "QmQBGXeiNn6hVwbR6qFarQqiNGDdKk4h9ucfyvcXyfYz2N", - "digest": "7d5f896f435c38e93bcda6dd168d860b", + "v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-ecd683648512ab1765faa2a5f14bab48f676e633467f0aa8aad4b55dcb0652bb.params": { + "cid": "QmV8ZjTSGzDUWmFvsq9NSyPBR7eDDUcvCPNgj2yE7HMAFu", + "digest": "34f3ddf1d1c9f41c0cd73b91e8b4bc27", "sector_size": 8388608 }, - "v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-ecd683648512ab1765faa2a5f14bab48f676e633467f0aa8aad4b55dcb0652bb.vk": { - "cid": "QmPrZgBVGMckEAeu5eSJnLmiAwcPQjKjZe5ir6VaQ5AxKs", - "digest": "fe6d2de44580a0db5a4934688899b92f", + "v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-ecd683648512ab1765faa2a5f14bab48f676e633467f0aa8aad4b55dcb0652bb.vk": { + "cid": "QmTa3VbjTiqJWU6r4WKayaQrUaaBsrpp5UDqYvPDd2C5hs", + "digest": "ec62d59651daa5631d3d1e9c782dd940", "sector_size": 8388608 }, - "v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-0-sha256_hasher-82a357d2f2ca81dc61bb45f4a762807aedee1b0a53fd6c4e77b46a01bfef7820.params": { - "cid": "QmZL2cq45XJn5BFzagAZwgFmLrcM1W6CXoiEF9C5j5tjEF", - "digest": "acdfed9f0512bc85a01a9fb871d475d5", + "v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-0-sha256_hasher-82a357d2f2ca81dc61bb45f4a762807aedee1b0a53fd6c4e77b46a01bfef7820.params": { + "cid": "Qmf8ngfArxrv9tFWDqBcNegdBMymvuakwyHKd1pbW3pbsb", + "digest": "a16d6f4c6424fb280236739f84b24f97", "sector_size": 34359738368 }, - "v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-0-sha256_hasher-82a357d2f2ca81dc61bb45f4a762807aedee1b0a53fd6c4e77b46a01bfef7820.vk": { - "cid": "QmQ4zB7nNa1tDYNifBkExRnZtwtxZw775iaqvVsZyRi6Q2", - "digest": "524a2f3e9d6826593caebc41bb545c40", + "v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-0-sha256_hasher-82a357d2f2ca81dc61bb45f4a762807aedee1b0a53fd6c4e77b46a01bfef7820.vk": { + "cid": "QmfQgVFerArJ6Jupwyc9tKjLD9n1J9ajLHBdpY465tRM7M", + "digest": "7a139d82b8a02e35279d657e197f5c1f", "sector_size": 34359738368 }, - "v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-2-sha256_hasher-96f1b4a04c5c51e4759bbf224bbc2ef5a42c7100f16ec0637123f16a845ddfb2.params": { - "cid": "QmY7DitNKXFeLQt9QoVQkfjM1EvRnprqUVxjmkTXkHDNka", - "digest": "f27271c0537ba65ade2ec045f8fbd069", + "v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-2-sha256_hasher-96f1b4a04c5c51e4759bbf224bbc2ef5a42c7100f16ec0637123f16a845ddfb2.params": { + "cid": "QmfDha8271nXJn14Aq3qQeghjMBWbs6HNSGa6VuzCVk4TW", + "digest": "5d3cd3f107a3bea8a96d1189efd2965c", "sector_size": 68719476736 }, - "v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-2-sha256_hasher-96f1b4a04c5c51e4759bbf224bbc2ef5a42c7100f16ec0637123f16a845ddfb2.vk": { - "cid": "QmUJsvoCuQ4LszPmeRVAkMYb5qY95ctz3UXKhu8xLzyFKo", - "digest": "576b292938c6c9d0a0e721bd867a543b", + "v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-2-sha256_hasher-96f1b4a04c5c51e4759bbf224bbc2ef5a42c7100f16ec0637123f16a845ddfb2.vk": { + "cid": "QmRVtTtiFzHJTHurYzaCvetGAchux9cktixT4aGHthN6Zt", + "digest": "62c366405404e60f171e661492740b1c", "sector_size": 68719476736 } } \ No newline at end of file From 3cd79b6fece3c6376a6aef84b06c5a4caa863274 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Tue, 9 Jun 2020 11:13:23 +0200 Subject: [PATCH 121/199] wip fixing addpiece --- ffiwrapper/sealer_cgo.go | 119 ++++++++++++++++++++++++++++++++------- fr32/fr32.go | 6 +- fr32/readers.go | 54 +----------------- 3 files changed, 104 insertions(+), 75 deletions(-) diff --git a/ffiwrapper/sealer_cgo.go b/ffiwrapper/sealer_cgo.go index 3b20f33bc..74ebeff40 100644 --- a/ffiwrapper/sealer_cgo.go +++ b/ffiwrapper/sealer_cgo.go @@ -4,16 +4,19 @@ package ffiwrapper import ( "bufio" + "bytes" "context" "io" "math/bits" "os" "runtime" + "sync" "github.com/ipfs/go-cid" "golang.org/x/xerrors" ffi "github.com/filecoin-project/filecoin-ffi" + commcid "github.com/filecoin-project/go-fil-commcid" "github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-storage/storage" @@ -105,20 +108,58 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie return abi.PieceInfo{}, xerrors.Errorf("getting partial file writer: %w", err) } - pw, err := fr32.NewPadWriter(w) - if err != nil { - return abi.PieceInfo{}, xerrors.Errorf("creating padded reader: %w", err) - } + pw := fr32.NewPadWriter(w) pr := io.TeeReader(io.LimitReader(file, int64(pieceSize)), pw) - prf, werr, err := ToReadableFile(pr, int64(pieceSize)) - if err != nil { - return abi.PieceInfo{}, xerrors.Errorf("getting tee reader pipe: %w", err) - } - pieceCID, err := ffi.GeneratePieceCIDFromFile(sb.sealProofType, prf, pieceSize) - if err != nil { - return abi.PieceInfo{}, xerrors.Errorf("generating piece commitment: %w", err) + thr := 1 << bits.Len32(uint32(runtime.NumCPU())) + chunk := abi.PaddedPieceSize(4 << 20) + var wg sync.WaitGroup + + buf := make([]byte, (chunk * abi.PaddedPieceSize(thr)).Unpadded()) + var pieceCids []abi.PieceInfo + + for { + n, err := pr.Read(buf[:]) + if err != nil && err != io.EOF { + return abi.PieceInfo{}, xerrors.Errorf("pr read error: %w", err) + } + if err == io.EOF { + break + } + + wg.Add(n/int(chunk)) + res := make([]interface{}, n/int(chunk)) + + for i := 0; i < n/int(chunk); i++ { + go func(i int) { + defer wg.Done() + + b := buf[i*int(chunk.Unpadded()):((i+1)*int(chunk.Unpadded()))] + + c, err := sb.pieceCid(b) + if err != nil { + res[i] = err + return + } + res[i] = abi.PieceInfo{ + Size: abi.UnpaddedPieceSize(len(b)).Padded(), + PieceCID: c, + } + }(i) + } + wg.Wait() + + for _, r := range res { + switch r := r.(type) { + case abi.PieceInfo: + pieceCids = append(pieceCids, r) + case error: + return abi.PieceInfo{}, xerrors.Errorf("pieceCid error: %w", r) + default: + return abi.PieceInfo{}, xerrors.Errorf("pieceCid mystery result: %v", r) + } + } } if err := pw.Close(); err != nil { @@ -134,16 +175,40 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie } stagedFile = nil + if len(pieceCids) == 1 { + return pieceCids[0], nil + } + + pieceCID, err := ffi.GenerateUnsealedCID(sb.sealProofType, pieceCids) + if err != nil { + return abi.PieceInfo{}, xerrors.Errorf("generate unsealed CID: %w", err) + } + + commp, err := commcid.CIDToDataCommitmentV1(pieceCID) + if err != nil { + return abi.PieceInfo{}, err + } + return abi.PieceInfo{ Size: pieceSize.Padded(), - PieceCID: pieceCID, - }, werr() + PieceCID: commcid.PieceCommitmentV1ToCID(commp), + }, nil } -type closerFunc func() error +func (sb *Sealer) pieceCid(in []byte) (cid.Cid, error) { + prf, werr, err := ToReadableFile(bytes.NewReader(in), int64(len(in))) + if err != nil { + return cid.Undef, xerrors.Errorf("getting tee reader pipe: %w", err) + } -func (cf closerFunc) Close() error { - return cf() + pieceCID, err := ffi.GeneratePieceCIDFromFile(sb.sealProofType, prf, abi.UnpaddedPieceSize(len(in))) + if err != nil { + return cid.Undef, xerrors.Errorf("generating piece commitment: %w", err) + } + + prf.Close() + + return pieceCID, werr() } func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error { @@ -237,9 +302,9 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s defer close(outWait) defer opr.Close() - padreader, err := fr32.NewPadReader(opr, abi.PaddedPieceSize(piece.Len).Unpadded()) + padwriter := fr32.NewPadWriter(out) if err != nil { - perr = xerrors.Errorf("creating new padded reader: %w", err) + perr = xerrors.Errorf("creating new padded writer: %w", err) return } @@ -248,9 +313,23 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s bsize = uint64(runtime.NumCPU()) * fr32.MTTresh } - padreader = bufio.NewReaderSize(padreader, int(bsize)) + bw := bufio.NewWriterSize(padwriter, int(bsize)) - _, perr = io.CopyN(out, padreader, int64(size.Padded())) + _, err = io.CopyN(bw, opr, int64(size.Padded())) + if err != nil { + perr = xerrors.Errorf("copying data: %w", err) + return + } + + if err := bw.Flush(); err != nil { + perr = xerrors.Errorf("flushing unpadded data: %w", err) + return + } + + if err := padwriter.Close(); err != nil { + perr = xerrors.Errorf("closing padwriter: %w", err) + return + } }() } // diff --git a/fr32/fr32.go b/fr32/fr32.go index 08ecb767c..fdf9d9223 100644 --- a/fr32/fr32.go +++ b/fr32/fr32.go @@ -13,13 +13,13 @@ var MTTresh = uint64(32 << 20) func mtChunkCount(usz abi.PaddedPieceSize) uint64 { threads := (uint64(usz)) / MTTresh if threads > uint64(runtime.NumCPU()) { - threads = 1 << (32 - bits.LeadingZeros32(uint32(runtime.NumCPU()))) + threads = 1 << (bits.Len32(uint32(runtime.NumCPU()))) } if threads == 0 { return 1 } - if threads > 64 { - return 64 // avoid too large buffers + if threads > 32 { + return 32 // avoid too large buffers } return threads } diff --git a/fr32/readers.go b/fr32/readers.go index f974f2cd1..8a1bbe087 100644 --- a/fr32/readers.go +++ b/fr32/readers.go @@ -9,56 +9,6 @@ import ( "github.com/filecoin-project/specs-actors/actors/abi" ) -type padReader struct { - src io.Reader - - left uint64 - work []byte -} - -func NewPadReader(src io.Reader, sz abi.UnpaddedPieceSize) (io.Reader, error) { - if err := sz.Validate(); err != nil { - return nil, xerrors.Errorf("bad piece size: %w", err) - } - - buf := make([]byte, MTTresh*mtChunkCount(sz.Padded())) - - return &padReader{ - src: src, - - left: uint64(sz.Padded()), - work: buf, - }, nil -} - -func (r *padReader) Read(out []byte) (int, error) { - if r.left == 0 { - return 0, io.EOF - } - - outTwoPow := 1 << (63 - bits.LeadingZeros64(uint64(len(out)))) - - if err := abi.PaddedPieceSize(outTwoPow).Validate(); err != nil { - return 0, xerrors.Errorf("output must be of valid padded piece size: %w", err) - } - - todo := abi.PaddedPieceSize(outTwoPow).Unpadded() - if r.left < uint64(todo.Padded()) { - todo = abi.PaddedPieceSize(1 << (63 - bits.LeadingZeros64(r.left))).Unpadded() - } - - r.left -= uint64(todo.Padded()) - - n, err := r.src.Read(r.work[:todo]) - if err != nil && err != io.EOF { - return n, err - } - - Pad(r.work[:todo], out[:todo.Padded()]) - - return int(todo.Padded()), err -} - type unpadReader struct { src io.Reader @@ -122,10 +72,10 @@ type padWriter struct { work []byte } -func NewPadWriter(dst io.Writer) (io.WriteCloser, error) { +func NewPadWriter(dst io.Writer) io.WriteCloser { return &padWriter{ dst: dst, - }, nil + } } func (w *padWriter) Write(p []byte) (int, error) { From 9dfe8851f89b7aea0ea1bba9f0c1ca2ecf84e931 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Tue, 9 Jun 2020 11:14:25 +0200 Subject: [PATCH 122/199] vet fixes --- fr32/readers_test.go | 21 --------------------- roprov.go | 2 ++ 2 files changed, 2 insertions(+), 21 deletions(-) diff --git a/fr32/readers_test.go b/fr32/readers_test.go index 83ef14353..f0f1e21bc 100644 --- a/fr32/readers_test.go +++ b/fr32/readers_test.go @@ -12,27 +12,6 @@ import ( "github.com/filecoin-project/sector-storage/fr32" ) -func TestPadReader(t *testing.T) { - ps := abi.PaddedPieceSize(64 << 20).Unpadded() - - raw := bytes.Repeat([]byte{0x55}, int(ps)) - - r, err := fr32.NewPadReader(bytes.NewReader(raw), ps) - if err != nil { - t.Fatal(err) - } - - readerPadded, err := ioutil.ReadAll(r) - if err != nil { - t.Fatal(err) - } - - padOut := make([]byte, ps.Padded()) - fr32.Pad(raw, padOut) - - require.Equal(t, padOut, readerPadded) -} - func TestUnpadReader(t *testing.T) { ps := abi.PaddedPieceSize(64 << 20).Unpadded() diff --git a/roprov.go b/roprov.go index 7a288bb39..c7baede82 100644 --- a/roprov.go +++ b/roprov.go @@ -26,9 +26,11 @@ func (l *readonlyProvider) AcquireSector(ctx context.Context, id abi.SectorID, e // use TryLock to avoid blocking locked, err := l.index.StorageTryLock(ctx, id, existing, stores.FTNone) if err != nil { + cancel() return stores.SectorPaths{}, nil, xerrors.Errorf("acquiring sector lock: %w", err) } if !locked { + cancel() return stores.SectorPaths{}, nil, xerrors.Errorf("failed to acquire sector lock") } From edb4a1ee9ea8953b715fdab0d426b2ae97015ed4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Tue, 9 Jun 2020 12:06:21 +0200 Subject: [PATCH 123/199] Fix addpiece and unsealRange --- ffiwrapper/sealer_cgo.go | 47 +++++++++------------------------------- 1 file changed, 10 insertions(+), 37 deletions(-) diff --git a/ffiwrapper/sealer_cgo.go b/ffiwrapper/sealer_cgo.go index 74ebeff40..4894c14ed 100644 --- a/ffiwrapper/sealer_cgo.go +++ b/ffiwrapper/sealer_cgo.go @@ -10,7 +10,6 @@ import ( "math/bits" "os" "runtime" - "sync" "github.com/ipfs/go-cid" "golang.org/x/xerrors" @@ -112,11 +111,9 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie pr := io.TeeReader(io.LimitReader(file, int64(pieceSize)), pw) - thr := 1 << bits.Len32(uint32(runtime.NumCPU())) chunk := abi.PaddedPieceSize(4 << 20) - var wg sync.WaitGroup - buf := make([]byte, (chunk * abi.PaddedPieceSize(thr)).Unpadded()) + buf := make([]byte, chunk.Unpadded()) var pieceCids []abi.PieceInfo for { @@ -128,38 +125,14 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie break } - wg.Add(n/int(chunk)) - res := make([]interface{}, n/int(chunk)) - - for i := 0; i < n/int(chunk); i++ { - go func(i int) { - defer wg.Done() - - b := buf[i*int(chunk.Unpadded()):((i+1)*int(chunk.Unpadded()))] - - c, err := sb.pieceCid(b) - if err != nil { - res[i] = err - return - } - res[i] = abi.PieceInfo{ - Size: abi.UnpaddedPieceSize(len(b)).Padded(), - PieceCID: c, - } - }(i) - } - wg.Wait() - - for _, r := range res { - switch r := r.(type) { - case abi.PieceInfo: - pieceCids = append(pieceCids, r) - case error: - return abi.PieceInfo{}, xerrors.Errorf("pieceCid error: %w", r) - default: - return abi.PieceInfo{}, xerrors.Errorf("pieceCid mystery result: %v", r) - } + c, err := sb.pieceCid(buf[:n]) + if err != nil { + return abi.PieceInfo{}, xerrors.Errorf("pieceCid error: %w", err) } + pieceCids = append(pieceCids, abi.PieceInfo{ + Size: abi.UnpaddedPieceSize(len(buf[:n])).Padded(), + PieceCID: c, + }) } if err := pw.Close(); err != nil { @@ -313,9 +286,9 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s bsize = uint64(runtime.NumCPU()) * fr32.MTTresh } - bw := bufio.NewWriterSize(padwriter, int(bsize)) + bw := bufio.NewWriterSize(padwriter, int(abi.PaddedPieceSize(bsize).Unpadded())) - _, err = io.CopyN(bw, opr, int64(size.Padded())) + _, err = io.CopyN(bw, opr, int64(size)) if err != nil { perr = xerrors.Errorf("copying data: %w", err) return From 57adda40f9b8cf8232ea7877c568bf1b7085d2df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Tue, 9 Jun 2020 12:24:03 +0200 Subject: [PATCH 124/199] ffiwrapper: Test FD leaks --- ffiwrapper/sealer_test.go | 51 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/ffiwrapper/sealer_test.go b/ffiwrapper/sealer_test.go index fdc7db5c3..c9b6e3af4 100644 --- a/ffiwrapper/sealer_test.go +++ b/ffiwrapper/sealer_test.go @@ -8,7 +8,9 @@ import ( "io/ioutil" "math/rand" "os" + "path/filepath" "runtime" + "strings" "sync" "testing" "time" @@ -229,10 +231,14 @@ func getGrothParamFileAndVerifyingKeys(s abi.SectorSize) { // go test -run=^TestDownloadParams // func TestDownloadParams(t *testing.T) { + defer requireFDsClosed(t, openFDs(t)) + getGrothParamFileAndVerifyingKeys(sectorSize) } func TestSealAndVerify(t *testing.T) { + defer requireFDsClosed(t, openFDs(t)) + if runtime.NumCPU() < 10 && os.Getenv("CI") == "" { // don't bother on slow hardware t.Skip("this is slow") } @@ -301,6 +307,8 @@ func TestSealAndVerify(t *testing.T) { } func TestSealPoStNoCommit(t *testing.T) { + defer requireFDsClosed(t, openFDs(t)) + if runtime.NumCPU() < 10 && os.Getenv("CI") == "" { // don't bother on slow hardware t.Skip("this is slow") } @@ -361,6 +369,8 @@ func TestSealPoStNoCommit(t *testing.T) { } func TestSealAndVerify2(t *testing.T) { + defer requireFDsClosed(t, openFDs(t)) + if runtime.NumCPU() < 10 && os.Getenv("CI") == "" { // don't bother on slow hardware t.Skip("this is slow") } @@ -430,3 +440,44 @@ func BenchmarkWriteWithAlignment(b *testing.B) { w() } } + +func openFDs(t *testing.T) int { + dent, err := ioutil.ReadDir("/proc/self/fd") + require.NoError(t, err) + + var skip int + for _, info := range dent { + l, err := os.Readlink(filepath.Join("/proc/self/fd", info.Name())) + if err != nil { + continue + } + + if strings.HasPrefix(l, "/dev/nvidia") { + skip++ + } + } + + return len(dent) - skip +} + +func requireFDsClosed(t *testing.T, start int) { + openNow := openFDs(t) + + if start != openNow { + dent, err := ioutil.ReadDir("/proc/self/fd") + require.NoError(t, err) + + for _, info := range dent { + l, err := os.Readlink(filepath.Join("/proc/self/fd", info.Name())) + if err != nil { + fmt.Printf("FD err %s\n", err) + continue + } + + fmt.Printf("FD %s -> %s\n", info.Name(), l) + } + } + + log.Infow("open FDs", "start", start, "now", openNow) + require.Equal(t, start, openNow, "FDs shouldn't leak") +} From 52782b6def7666a7d9299d82f47a193636094178 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Tue, 9 Jun 2020 21:54:52 +0200 Subject: [PATCH 125/199] ffiwrapper: Fix AddPiece with multireaders --- ffiwrapper/sealer_cgo.go | 22 ++++++++++++++++------ ffiwrapper/sealer_test.go | 5 ++++- 2 files changed, 20 insertions(+), 7 deletions(-) diff --git a/ffiwrapper/sealer_cgo.go b/ffiwrapper/sealer_cgo.go index 4894c14ed..3c01f65df 100644 --- a/ffiwrapper/sealer_cgo.go +++ b/ffiwrapper/sealer_cgo.go @@ -117,20 +117,30 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie var pieceCids []abi.PieceInfo for { - n, err := pr.Read(buf[:]) - if err != nil && err != io.EOF { - return abi.PieceInfo{}, xerrors.Errorf("pr read error: %w", err) + var read int + for rbuf := buf; len(rbuf) > 0; { + n, err := pr.Read(rbuf) + if err != nil && err != io.EOF { + return abi.PieceInfo{}, xerrors.Errorf("pr read error: %w", err) + } + + rbuf = rbuf[n:] + read += n + + if err == io.EOF { + break + } } - if err == io.EOF { + if read == 0 { break } - c, err := sb.pieceCid(buf[:n]) + c, err := sb.pieceCid(buf[:read]) if err != nil { return abi.PieceInfo{}, xerrors.Errorf("pieceCid error: %w", err) } pieceCids = append(pieceCids, abi.PieceInfo{ - Size: abi.UnpaddedPieceSize(len(buf[:n])).Padded(), + Size: abi.UnpaddedPieceSize(len(buf[:read])).Padded(), PieceCID: c, }) } diff --git a/ffiwrapper/sealer_test.go b/ffiwrapper/sealer_test.go index c9b6e3af4..76ef8fea9 100644 --- a/ffiwrapper/sealer_test.go +++ b/ffiwrapper/sealer_test.go @@ -45,7 +45,10 @@ type seal struct { } func data(sn abi.SectorNumber, dlen abi.UnpaddedPieceSize) io.Reader { - return io.LimitReader(rand.New(rand.NewSource(42+int64(sn))), int64(dlen)) + return io.MultiReader( + io.LimitReader(rand.New(rand.NewSource(42+int64(sn))), int64(123)), + io.LimitReader(rand.New(rand.NewSource(42+int64(sn))), int64(dlen-123)), + ) } func (s *seal) precommit(t *testing.T, sb *Sealer, id abi.SectorID, done func()) { From 40712b1026f649abbbb39dea31d020438cbbb96c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Wed, 10 Jun 2020 14:08:03 +0200 Subject: [PATCH 126/199] local store: Fix double RUnlock --- stores/local.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/stores/local.go b/stores/local.go index bf11a3418..55d34549b 100644 --- a/stores/local.go +++ b/stores/local.go @@ -245,7 +245,6 @@ func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, spt abi.Re sis, err := st.index.StorageBestAlloc(ctx, fileType, spt, pathType) if err != nil { - st.localLk.RUnlock() return SectorPaths{}, SectorPaths{}, xerrors.Errorf("finding best storage for allocating : %w", err) } @@ -277,7 +276,6 @@ func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, spt abi.Re } if best == "" { - st.localLk.RUnlock() return SectorPaths{}, SectorPaths{}, xerrors.Errorf("couldn't find a suitable path for a sector") } From 14c615dead44b78cbe486267f0f91ec04b5d9e6c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 15 Jun 2020 12:50:53 +0200 Subject: [PATCH 127/199] faults: Check sealed sector file size --- faults.go | 27 ++++++++++++++++++++------- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/faults.go b/faults.go index 635ffa0c3..e263f8304 100644 --- a/faults.go +++ b/faults.go @@ -20,6 +20,11 @@ type FaultTracker interface { func (m *Manager) CheckProvable(ctx context.Context, spt abi.RegisteredProof, sectors []abi.SectorID) ([]abi.SectorID, error) { var bad []abi.SectorID + ssize, err := spt.SectorSize() + if err != nil { + return nil, err + } + // TODO: More better checks for _, sector := range sectors { err := func() error { @@ -48,20 +53,28 @@ func (m *Manager) CheckProvable(ctx context.Context, spt abi.RegisteredProof, se return nil } - toCheck := []string{ - lp.Sealed, - filepath.Join(lp.Cache, "t_aux"), - filepath.Join(lp.Cache, "p_aux"), - filepath.Join(lp.Cache, "sc-02-data-tree-r-last.dat"), + toCheck := map[string]int64{ + lp.Sealed: 1, + filepath.Join(lp.Cache, "t_aux"): 0, + filepath.Join(lp.Cache, "p_aux"): 0, + filepath.Join(lp.Cache, "sc-02-data-tree-r-last.dat"): 0, } - for _, p := range toCheck { - _, err := os.Stat(p) + for p, sz := range toCheck { + st, err := os.Stat(p) if err != nil { log.Warnw("CheckProvable Sector FAULT: sector file stat error", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "file", p) bad = append(bad, sector) return nil } + + if sz != 0 { + if st.Size() != int64(ssize)*sz { + log.Warnw("CheckProvable Sector FAULT: sector file is wrong size", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "file", p, "size", st.Size(), "expectSize", int64(ssize)*sz) + bad = append(bad, sector) + return nil + } + } } return nil From f67bbbc4579949146985d3e48a64cbf14d204c1f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 15 Jun 2020 14:32:17 +0200 Subject: [PATCH 128/199] specs-actors v0.6 --- extern/filecoin-ffi | 2 +- faults.go | 4 +-- ffiwrapper/config.go | 18 +++++------ ffiwrapper/sealer.go | 4 +-- ffiwrapper/sealer_cgo.go | 4 +-- ffiwrapper/sealer_test.go | 6 ++-- ffiwrapper/types.go | 2 +- ffiwrapper/verifier_cgo.go | 10 +++--- fr32/fr32_ffi_cmp_test.go | 2 +- fr32/fr32_test.go | 2 +- go.mod | 2 +- go.sum | 2 ++ localworker.go | 2 +- manager_test.go | 4 +-- mock/mock.go | 21 ++++++++----- resources.go | 62 +++++++++++++++++++------------------- roprov.go | 2 +- sched.go | 14 ++++----- selector_alloc.go | 2 +- selector_existing.go | 2 +- selector_task.go | 2 +- stores/filetype.go | 2 +- stores/index.go | 4 +-- stores/interface.go | 4 +-- stores/local.go | 4 +-- stores/remote.go | 6 ++-- testworker_test.go | 2 +- zerocomm/zerocomm_test.go | 10 +++--- 28 files changed, 102 insertions(+), 99 deletions(-) diff --git a/extern/filecoin-ffi b/extern/filecoin-ffi index 61c02f6be..1bff7f456 160000 --- a/extern/filecoin-ffi +++ b/extern/filecoin-ffi @@ -1 +1 @@ -Subproject commit 61c02f6bea8d69bb79c70daa1d62f26c486643aa +Subproject commit 1bff7f4563370ada590a605b5459b91e1662ebaa diff --git a/faults.go b/faults.go index 635ffa0c3..77924271f 100644 --- a/faults.go +++ b/faults.go @@ -13,11 +13,11 @@ import ( // TODO: Track things more actively type FaultTracker interface { - CheckProvable(ctx context.Context, spt abi.RegisteredProof, sectors []abi.SectorID) ([]abi.SectorID, error) + CheckProvable(ctx context.Context, spt abi.RegisteredSealProof, sectors []abi.SectorID) ([]abi.SectorID, error) } // Returns unprovable sectors -func (m *Manager) CheckProvable(ctx context.Context, spt abi.RegisteredProof, sectors []abi.SectorID) ([]abi.SectorID, error) { +func (m *Manager) CheckProvable(ctx context.Context, spt abi.RegisteredSealProof, sectors []abi.SectorID) ([]abi.SectorID, error) { var bad []abi.SectorID // TODO: More better checks diff --git a/ffiwrapper/config.go b/ffiwrapper/config.go index 143283c0a..707fc6746 100644 --- a/ffiwrapper/config.go +++ b/ffiwrapper/config.go @@ -7,31 +7,27 @@ import ( ) type Config struct { - SealProofType abi.RegisteredProof + SealProofType abi.RegisteredSealProof _ struct{} // guard against nameless init } func sizeFromConfig(cfg Config) (abi.SectorSize, error) { - if cfg.SealProofType == abi.RegisteredProof(0) { - return abi.SectorSize(0), xerrors.New("must specify a seal proof type from abi.RegisteredProof") - } - return cfg.SealProofType.SectorSize() } -func SealProofTypeFromSectorSize(ssize abi.SectorSize) (abi.RegisteredProof, error) { +func SealProofTypeFromSectorSize(ssize abi.SectorSize) (abi.RegisteredSealProof, error) { switch ssize { case 2 << 10: - return abi.RegisteredProof_StackedDRG2KiBSeal, nil + return abi.RegisteredSealProof_StackedDrg2KiBV1, nil case 8 << 20: - return abi.RegisteredProof_StackedDRG8MiBSeal, nil + return abi.RegisteredSealProof_StackedDrg8MiBV1, nil case 512 << 20: - return abi.RegisteredProof_StackedDRG512MiBSeal, nil + return abi.RegisteredSealProof_StackedDrg512MiBV1, nil case 32 << 30: - return abi.RegisteredProof_StackedDRG32GiBSeal, nil + return abi.RegisteredSealProof_StackedDrg32GiBV1, nil case 64 << 30: - return abi.RegisteredProof_StackedDRG64GiBSeal, nil + return abi.RegisteredSealProof_StackedDrg64GiBV1, nil default: return 0, xerrors.Errorf("unsupported sector size for miner: %v", ssize) } diff --git a/ffiwrapper/sealer.go b/ffiwrapper/sealer.go index fc77c8388..c97557a37 100644 --- a/ffiwrapper/sealer.go +++ b/ffiwrapper/sealer.go @@ -8,7 +8,7 @@ import ( var log = logging.Logger("ffiwrapper") type Sealer struct { - sealProofType abi.RegisteredProof + sealProofType abi.RegisteredSealProof ssize abi.SectorSize // a function of sealProofType and postProofType sectors SectorProvider @@ -23,6 +23,6 @@ func (sb *Sealer) SectorSize() abi.SectorSize { return sb.ssize } -func (sb *Sealer) SealProofType() abi.RegisteredProof { +func (sb *Sealer) SealProofType() abi.RegisteredSealProof { return sb.sealProofType } diff --git a/ffiwrapper/sealer_cgo.go b/ffiwrapper/sealer_cgo.go index 3c01f65df..0beb9b86c 100644 --- a/ffiwrapper/sealer_cgo.go +++ b/ffiwrapper/sealer_cgo.go @@ -510,7 +510,7 @@ func (sb *Sealer) FinalizeSector(ctx context.Context, sector abi.SectorID) error return ffi.ClearCache(uint64(sb.ssize), paths.Cache) } -func GeneratePieceCIDFromFile(proofType abi.RegisteredProof, piece io.Reader, pieceSize abi.UnpaddedPieceSize) (cid.Cid, error) { +func GeneratePieceCIDFromFile(proofType abi.RegisteredSealProof, piece io.Reader, pieceSize abi.UnpaddedPieceSize) (cid.Cid, error) { f, werr, err := ToReadableFile(piece, int64(pieceSize)) if err != nil { return cid.Undef, err @@ -524,7 +524,7 @@ func GeneratePieceCIDFromFile(proofType abi.RegisteredProof, piece io.Reader, pi return pieceCID, werr() } -func GenerateUnsealedCID(proofType abi.RegisteredProof, pieces []abi.PieceInfo) (cid.Cid, error) { +func GenerateUnsealedCID(proofType abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) { var sum abi.PaddedPieceSize for _, p := range pieces { sum += p.Size diff --git a/ffiwrapper/sealer_test.go b/ffiwrapper/sealer_test.go index 76ef8fea9..1cb1ac9ce 100644 --- a/ffiwrapper/sealer_test.go +++ b/ffiwrapper/sealer_test.go @@ -32,7 +32,7 @@ func init() { logging.SetLogLevel("*", "DEBUG") //nolint: errcheck } -var sealProofType = abi.RegisteredProof_StackedDRG2KiBSeal +var sealProofType = abi.RegisteredSealProof_StackedDrg2KiBV1 var sectorSize, _ = sealProofType.SectorSize() var sealRand = abi.SealRandomness{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2} @@ -91,7 +91,7 @@ func (s *seal) commit(t *testing.T, sb *Sealer, done func()) { ok, err := ProofVerifier.VerifySeal(abi.SealVerifyInfo{ SectorID: s.id, SealedCID: s.cids.Sealed, - RegisteredProof: sealProofType, + SealProof: sealProofType, Proof: proof, Randomness: s.ticket, InteractiveRandomness: seed, @@ -439,7 +439,7 @@ func BenchmarkWriteWithAlignment(b *testing.B) { tf, _ := ioutil.TempFile("/tmp/", "scrb-") b.StartTimer() - ffi.WriteWithAlignment(abi.RegisteredProof_StackedDRG2KiBSeal, rf, bt, tf, nil) + ffi.WriteWithAlignment(abi.RegisteredSealProof_StackedDrg2KiBV1, rf, bt, tf, nil) w() } } diff --git a/ffiwrapper/types.go b/ffiwrapper/types.go index c640df2e7..13c0ee990 100644 --- a/ffiwrapper/types.go +++ b/ffiwrapper/types.go @@ -37,7 +37,7 @@ type Verifier interface { VerifyWinningPoSt(ctx context.Context, info abi.WinningPoStVerifyInfo) (bool, error) VerifyWindowPoSt(ctx context.Context, info abi.WindowPoStVerifyInfo) (bool, error) - GenerateWinningPoStSectorChallenge(context.Context, abi.RegisteredProof, abi.ActorID, abi.PoStRandomness, uint64) ([]uint64, error) + GenerateWinningPoStSectorChallenge(context.Context, abi.RegisteredPoStProof, abi.ActorID, abi.PoStRandomness, uint64) ([]uint64, error) } type SectorProvider interface { diff --git a/ffiwrapper/verifier_cgo.go b/ffiwrapper/verifier_cgo.go index 502576286..a699ee10c 100644 --- a/ffiwrapper/verifier_cgo.go +++ b/ffiwrapper/verifier_cgo.go @@ -16,7 +16,7 @@ import ( func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, error) { randomness[31] = 0 // TODO: Not correct, fixme - privsectors, skipped, done, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredProof.RegisteredWinningPoStProof) // TODO: FAULTS? + privsectors, skipped, done, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWinningPoStProof) // TODO: FAULTS? if err != nil { return nil, err } @@ -30,7 +30,7 @@ func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, []abi.SectorID, error) { randomness[31] = 0 // TODO: Not correct, fixme - privsectors, skipped, done, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredProof.RegisteredWindowPoStProof) + privsectors, skipped, done, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWindowPoStProof) if err != nil { return nil, nil, xerrors.Errorf("gathering sector info: %w", err) } @@ -40,7 +40,7 @@ func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, s return proof, skipped, err } -func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorInfo []abi.SectorInfo, faults []abi.SectorNumber, rpt func(abi.RegisteredProof) (abi.RegisteredProof, error)) (ffi.SortedPrivateSectorInfo, []abi.SectorID, func(), error) { +func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorInfo []abi.SectorInfo, faults []abi.SectorNumber, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error)) (ffi.SortedPrivateSectorInfo, []abi.SectorID, func(), error) { fmap := map[abi.SectorNumber]struct{}{} for _, fault := range faults { fmap[fault] = struct{}{} @@ -70,7 +70,7 @@ func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorIn } doneFuncs = append(doneFuncs, d) - postProofType, err := rpt(s.RegisteredProof) + postProofType, err := rpt(s.SealProof) if err != nil { done() return ffi.SortedPrivateSectorInfo{}, nil, nil, xerrors.Errorf("acquiring registered PoSt proof from sector info %+v: %w", s, err) @@ -113,7 +113,7 @@ func (proofVerifier) VerifyWindowPoSt(ctx context.Context, info abi.WindowPoStVe return ffi.VerifyWindowPoSt(info) } -func (proofVerifier) GenerateWinningPoStSectorChallenge(ctx context.Context, proofType abi.RegisteredProof, minerID abi.ActorID, randomness abi.PoStRandomness, eligibleSectorCount uint64) ([]uint64, error) { +func (proofVerifier) GenerateWinningPoStSectorChallenge(ctx context.Context, proofType abi.RegisteredPoStProof, minerID abi.ActorID, randomness abi.PoStRandomness, eligibleSectorCount uint64) ([]uint64, error) { randomness[31] = 0 // TODO: Not correct, fixme return ffi.GenerateWinningPoStSectorChallenge(proofType, minerID, randomness, eligibleSectorCount) } diff --git a/fr32/fr32_ffi_cmp_test.go b/fr32/fr32_ffi_cmp_test.go index c2ae04a96..d9c3ba283 100644 --- a/fr32/fr32_ffi_cmp_test.go +++ b/fr32/fr32_ffi_cmp_test.go @@ -30,7 +30,7 @@ func TestWriteTwoPcs(t *testing.T) { rf, w, _ := ffiwrapper.ToReadableFile(bytes.NewReader(buf), int64(len(buf))) - _, _, _, err := ffi.WriteWithAlignment(abi.RegisteredProof_StackedDRG32GiBSeal, rf, abi.UnpaddedPieceSize(len(buf)), tf, nil) + _, _, _, err := ffi.WriteWithAlignment(abi.RegisteredSealProof_StackedDrg32GiBV1, rf, abi.UnpaddedPieceSize(len(buf)), tf, nil) if err != nil { panic(err) } diff --git a/fr32/fr32_test.go b/fr32/fr32_test.go index a41c9f7ab..219f10f5c 100644 --- a/fr32/fr32_test.go +++ b/fr32/fr32_test.go @@ -20,7 +20,7 @@ func padFFI(buf []byte) []byte { rf, w, _ := ffiwrapper.ToReadableFile(bytes.NewReader(buf), int64(len(buf))) tf, _ := ioutil.TempFile("/tmp/", "scrb-") - _, _, _, err := ffi.WriteWithAlignment(abi.RegisteredProof_StackedDRG32GiBSeal, rf, abi.UnpaddedPieceSize(len(buf)), tf, nil) + _, _, _, err := ffi.WriteWithAlignment(abi.RegisteredSealProof_StackedDrg32GiBV1, rf, abi.UnpaddedPieceSize(len(buf)), tf, nil) if err != nil { panic(err) } diff --git a/go.mod b/go.mod index 08c7e9185..82eab906c 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/filecoin-project/go-bitfield v0.0.2-0.20200518150651-562fdb554b6e github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5 github.com/filecoin-project/go-paramfetch v0.0.1 - github.com/filecoin-project/specs-actors v0.5.4-0.20200521014528-0df536f7e461 + github.com/filecoin-project/specs-actors v0.6.0 github.com/filecoin-project/specs-storage v0.1.0 github.com/google/uuid v1.1.1 github.com/gorilla/mux v1.7.4 diff --git a/go.sum b/go.sum index 958c02ba1..df51caede 100644 --- a/go.sum +++ b/go.sum @@ -45,6 +45,8 @@ github.com/filecoin-project/go-paramfetch v0.0.1/go.mod h1:fZzmf4tftbwf9S37XRifo github.com/filecoin-project/specs-actors v0.3.0/go.mod h1:nQYnFbQ7Y0bHZyq6HDEuVlCPR+U3z5Q3wMOQ+2aiV+Y= github.com/filecoin-project/specs-actors v0.5.4-0.20200521014528-0df536f7e461 h1:yyAoJ9dNboljDWj0uBdJEbdaTak/YhkjYUQt0GzlY0A= github.com/filecoin-project/specs-actors v0.5.4-0.20200521014528-0df536f7e461/go.mod h1:r5btrNzZD0oBkEz1pohv80gSCXQnqGrD0kYwOTiExyE= +github.com/filecoin-project/specs-actors v0.6.0 h1:IepUsmDGY60QliENVTkBTAkwqGWw9kNbbHOcU/9oiC0= +github.com/filecoin-project/specs-actors v0.6.0/go.mod h1:dRdy3cURykh2R8O/DKqy8olScl70rmIS7GrB4hB1IDY= github.com/filecoin-project/specs-storage v0.1.0 h1:PkDgTOT5W5Ao7752onjDl4QSv+sgOVdJbvFjOnD5w94= github.com/filecoin-project/specs-storage v0.1.0/go.mod h1:Pr5ntAaxsh+sLG/LYiL4tKzvA83Vk5vLODYhfNwOg7k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= diff --git a/localworker.go b/localworker.go index fad5d42ec..6056fb214 100644 --- a/localworker.go +++ b/localworker.go @@ -23,7 +23,7 @@ import ( var pathTypes = []stores.SectorFileType{stores.FTUnsealed, stores.FTSealed, stores.FTCache} type WorkerConfig struct { - SealProof abi.RegisteredProof + SealProof abi.RegisteredSealProof TaskTypes []sealtasks.TaskType } diff --git a/manager_test.go b/manager_test.go index 165ecf280..ae318b487 100644 --- a/manager_test.go +++ b/manager_test.go @@ -77,7 +77,7 @@ func newTestMgr(ctx context.Context, t *testing.T) (*Manager, *stores.Local, *st si := stores.NewIndex() cfg := &ffiwrapper.Config{ - SealProofType: abi.RegisteredProof_StackedDRG2KiBSeal, + SealProofType: abi.RegisteredSealProof_StackedDrg2KiBV1, } lstor, err := stores.NewLocal(ctx, st, si, nil) @@ -118,7 +118,7 @@ func TestSimple(t *testing.T) { } err := m.AddWorker(ctx, newTestWorker(WorkerConfig{ - SealProof: abi.RegisteredProof_StackedDRG2KiBSeal, + SealProof: abi.RegisteredSealProof_StackedDrg2KiBV1, TaskTypes: localTasks, }, lstor)) require.NoError(t, err) diff --git a/mock/mock.go b/mock/mock.go index ef472ae10..0d61e69de 100644 --- a/mock/mock.go +++ b/mock/mock.go @@ -27,7 +27,7 @@ type SectorMgr struct { pieces map[cid.Cid][]byte sectorSize abi.SectorSize nextSectorID abi.SectorNumber - proofType abi.RegisteredProof + proofType abi.RegisteredSealProof lk sync.Mutex } @@ -258,21 +258,26 @@ func AddOpFinish(ctx context.Context) (context.Context, func()) { } func (mgr *SectorMgr) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, error) { - return generateFakePoSt(sectorInfo), nil + return generateFakePoSt(sectorInfo, abi.RegisteredSealProof.RegisteredWinningPoStProof), nil } func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, []abi.SectorID, error) { - return generateFakePoSt(sectorInfo), nil, nil + return generateFakePoSt(sectorInfo, abi.RegisteredSealProof.RegisteredWindowPoStProof), nil, nil } -func generateFakePoSt(sectorInfo []abi.SectorInfo) []abi.PoStProof { - se, err := sectorInfo[0].RegisteredProof.WindowPoStPartitionSectors() +func generateFakePoSt(sectorInfo []abi.SectorInfo, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error)) []abi.PoStProof { + se, err := sectorInfo[0].SealProof.WindowPoStPartitionSectors() if err != nil { panic(err) } + wp, err := rpt(sectorInfo[0].SealProof) + if err != nil { + panic(err) + } + return []abi.PoStProof{ { - RegisteredProof: sectorInfo[0].RegisteredProof, + PoStProof: wp, ProofBytes: make([]byte, 192*int(math.Ceil(float64(len(sectorInfo))/float64(se)))), }, } @@ -340,11 +345,11 @@ func (m mockVerif) VerifyWindowPoSt(ctx context.Context, info abi.WindowPoStVeri return true, nil } -func (m mockVerif) GenerateDataCommitment(pt abi.RegisteredProof, pieces []abi.PieceInfo) (cid.Cid, error) { +func (m mockVerif) GenerateDataCommitment(pt abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) { return ffiwrapper.GenerateUnsealedCID(pt, pieces) } -func (m mockVerif) GenerateWinningPoStSectorChallenge(ctx context.Context, proofType abi.RegisteredProof, minerID abi.ActorID, randomness abi.PoStRandomness, eligibleSectorCount uint64) ([]uint64, error) { +func (m mockVerif) GenerateWinningPoStSectorChallenge(ctx context.Context, proofType abi.RegisteredPoStProof, minerID abi.ActorID, randomness abi.PoStRandomness, eligibleSectorCount uint64) ([]uint64, error) { return []uint64{0}, nil } diff --git a/resources.go b/resources.go index d3c6d0612..06725a1b8 100644 --- a/resources.go +++ b/resources.go @@ -22,9 +22,9 @@ func (r Resources) MultiThread() bool { const MaxCachingOverhead = 32 << 30 -var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ +var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources{ sealtasks.TTAddPiece: { - abi.RegisteredProof_StackedDRG64GiBSeal: Resources{ // This is probably a bit conservative + abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{ // This is probably a bit conservative MaxMemory: 64 << 30, MinMemory: 64 << 30, @@ -32,7 +32,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 1 << 30, }, - abi.RegisteredProof_StackedDRG32GiBSeal: Resources{ // This is probably a bit conservative + abi.RegisteredSealProof_StackedDrg32GiBV1: Resources{ // This is probably a bit conservative MaxMemory: 32 << 30, MinMemory: 32 << 30, @@ -40,7 +40,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 1 << 30, }, - abi.RegisteredProof_StackedDRG512MiBSeal: Resources{ + abi.RegisteredSealProof_StackedDrg512MiBV1: Resources{ MaxMemory: 1 << 30, MinMemory: 1 << 30, @@ -48,7 +48,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 1 << 30, }, - abi.RegisteredProof_StackedDRG2KiBSeal: Resources{ + abi.RegisteredSealProof_StackedDrg2KiBV1: Resources{ MaxMemory: 2 << 10, MinMemory: 2 << 10, @@ -56,7 +56,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 2 << 10, }, - abi.RegisteredProof_StackedDRG8MiBSeal: Resources{ + abi.RegisteredSealProof_StackedDrg8MiBV1: Resources{ MaxMemory: 8 << 20, MinMemory: 8 << 20, @@ -66,7 +66,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ }, }, sealtasks.TTPreCommit1: { - abi.RegisteredProof_StackedDRG64GiBSeal: Resources{ + abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{ MaxMemory: 128 << 30, MinMemory: 96 << 30, @@ -74,7 +74,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 60 << 30, }, - abi.RegisteredProof_StackedDRG32GiBSeal: Resources{ + abi.RegisteredSealProof_StackedDrg32GiBV1: Resources{ MaxMemory: 64 << 30, MinMemory: 48 << 30, @@ -82,7 +82,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 30 << 30, }, - abi.RegisteredProof_StackedDRG512MiBSeal: Resources{ + abi.RegisteredSealProof_StackedDrg512MiBV1: Resources{ MaxMemory: 3 << 29, // 1.5G MinMemory: 1 << 30, @@ -90,7 +90,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 1 << 30, }, - abi.RegisteredProof_StackedDRG2KiBSeal: Resources{ + abi.RegisteredSealProof_StackedDrg2KiBV1: Resources{ MaxMemory: 2 << 10, MinMemory: 2 << 10, @@ -98,7 +98,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 2 << 10, }, - abi.RegisteredProof_StackedDRG8MiBSeal: Resources{ + abi.RegisteredSealProof_StackedDrg8MiBV1: Resources{ MaxMemory: 8 << 20, MinMemory: 8 << 20, @@ -108,7 +108,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ }, }, sealtasks.TTPreCommit2: { - abi.RegisteredProof_StackedDRG64GiBSeal: Resources{ + abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{ MaxMemory: 64 << 30, MinMemory: 64 << 30, @@ -117,7 +117,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 60 << 30, }, - abi.RegisteredProof_StackedDRG32GiBSeal: Resources{ + abi.RegisteredSealProof_StackedDrg32GiBV1: Resources{ MaxMemory: 32 << 30, MinMemory: 32 << 30, @@ -126,7 +126,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 30 << 30, }, - abi.RegisteredProof_StackedDRG512MiBSeal: Resources{ + abi.RegisteredSealProof_StackedDrg512MiBV1: Resources{ MaxMemory: 3 << 29, // 1.5G MinMemory: 1 << 30, @@ -134,7 +134,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 1 << 30, }, - abi.RegisteredProof_StackedDRG2KiBSeal: Resources{ + abi.RegisteredSealProof_StackedDrg2KiBV1: Resources{ MaxMemory: 2 << 10, MinMemory: 2 << 10, @@ -142,7 +142,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 2 << 10, }, - abi.RegisteredProof_StackedDRG8MiBSeal: Resources{ + abi.RegisteredSealProof_StackedDrg8MiBV1: Resources{ MaxMemory: 8 << 20, MinMemory: 8 << 20, @@ -152,7 +152,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ }, }, sealtasks.TTCommit1: { // Very short (~100ms), so params are very light - abi.RegisteredProof_StackedDRG64GiBSeal: Resources{ + abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{ MaxMemory: 1 << 30, MinMemory: 1 << 30, @@ -160,7 +160,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 1 << 30, }, - abi.RegisteredProof_StackedDRG32GiBSeal: Resources{ + abi.RegisteredSealProof_StackedDrg32GiBV1: Resources{ MaxMemory: 1 << 30, MinMemory: 1 << 30, @@ -168,7 +168,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 1 << 30, }, - abi.RegisteredProof_StackedDRG512MiBSeal: Resources{ + abi.RegisteredSealProof_StackedDrg512MiBV1: Resources{ MaxMemory: 1 << 30, MinMemory: 1 << 30, @@ -176,7 +176,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 1 << 30, }, - abi.RegisteredProof_StackedDRG2KiBSeal: Resources{ + abi.RegisteredSealProof_StackedDrg2KiBV1: Resources{ MaxMemory: 2 << 10, MinMemory: 2 << 10, @@ -184,7 +184,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 2 << 10, }, - abi.RegisteredProof_StackedDRG8MiBSeal: Resources{ + abi.RegisteredSealProof_StackedDrg8MiBV1: Resources{ MaxMemory: 8 << 20, MinMemory: 8 << 20, @@ -194,7 +194,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ }, }, sealtasks.TTCommit2: { - abi.RegisteredProof_StackedDRG64GiBSeal: Resources{ + abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{ MaxMemory: 260 << 30, // TODO: Confirm MinMemory: 60 << 30, @@ -203,7 +203,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 64 << 30, // params }, - abi.RegisteredProof_StackedDRG32GiBSeal: Resources{ + abi.RegisteredSealProof_StackedDrg32GiBV1: Resources{ MaxMemory: 150 << 30, // TODO: ~30G of this should really be BaseMaxMemory MinMemory: 30 << 30, @@ -212,7 +212,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 32 << 30, // params }, - abi.RegisteredProof_StackedDRG512MiBSeal: Resources{ + abi.RegisteredSealProof_StackedDrg512MiBV1: Resources{ MaxMemory: 3 << 29, // 1.5G MinMemory: 1 << 30, @@ -221,7 +221,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 10 << 30, }, - abi.RegisteredProof_StackedDRG2KiBSeal: Resources{ + abi.RegisteredSealProof_StackedDrg2KiBV1: Resources{ MaxMemory: 2 << 10, MinMemory: 2 << 10, @@ -230,7 +230,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 2 << 10, }, - abi.RegisteredProof_StackedDRG8MiBSeal: Resources{ + abi.RegisteredSealProof_StackedDrg8MiBV1: Resources{ MaxMemory: 8 << 20, MinMemory: 8 << 20, @@ -241,7 +241,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ }, }, sealtasks.TTFetch: { - abi.RegisteredProof_StackedDRG64GiBSeal: Resources{ + abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{ MaxMemory: 1 << 20, MinMemory: 1 << 20, @@ -250,7 +250,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 0, }, - abi.RegisteredProof_StackedDRG32GiBSeal: Resources{ + abi.RegisteredSealProof_StackedDrg32GiBV1: Resources{ MaxMemory: 1 << 20, MinMemory: 1 << 20, @@ -259,7 +259,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 0, }, - abi.RegisteredProof_StackedDRG512MiBSeal: Resources{ + abi.RegisteredSealProof_StackedDrg512MiBV1: Resources{ MaxMemory: 1 << 20, MinMemory: 1 << 20, @@ -268,7 +268,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 0, }, - abi.RegisteredProof_StackedDRG2KiBSeal: Resources{ + abi.RegisteredSealProof_StackedDrg2KiBV1: Resources{ MaxMemory: 1 << 20, MinMemory: 1 << 20, @@ -277,7 +277,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{ BaseMinMemory: 0, }, - abi.RegisteredProof_StackedDRG8MiBSeal: Resources{ + abi.RegisteredSealProof_StackedDrg8MiBV1: Resources{ MaxMemory: 1 << 20, MinMemory: 1 << 20, diff --git a/roprov.go b/roprov.go index c7baede82..fc10ebbec 100644 --- a/roprov.go +++ b/roprov.go @@ -13,7 +13,7 @@ import ( type readonlyProvider struct { index stores.SectorIndex stor *stores.Local - spt abi.RegisteredProof + spt abi.RegisteredSealProof } func (l *readonlyProvider) AcquireSector(ctx context.Context, id abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, sealing stores.PathType) (stores.SectorPaths, func(), error) { diff --git a/sched.go b/sched.go index ba41a013b..715a823e1 100644 --- a/sched.go +++ b/sched.go @@ -20,13 +20,13 @@ const mib = 1 << 20 type WorkerAction func(ctx context.Context, w Worker) error type WorkerSelector interface { - Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredProof, a *workerHandle) (bool, error) // true if worker is acceptable for performing a task + Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredSealProof, a *workerHandle) (bool, error) // true if worker is acceptable for performing a task Cmp(ctx context.Context, task sealtasks.TaskType, a, b *workerHandle) (bool, error) // true if a is preferred over b } type scheduler struct { - spt abi.RegisteredProof + spt abi.RegisteredSealProof workersLk sync.Mutex nextWorker WorkerID @@ -44,7 +44,7 @@ type scheduler struct { schedQueue *requestQueue } -func newScheduler(spt abi.RegisteredProof) *scheduler { +func newScheduler(spt abi.RegisteredSealProof) *scheduler { return &scheduler{ spt: spt, @@ -321,7 +321,7 @@ func (sh *scheduler) assignWorker(wid WorkerID, w *workerHandle, req *workerRequ return nil } -func (a *activeResources) withResources(spt abi.RegisteredProof, id WorkerID, wr storiface.WorkerResources, r Resources, locker sync.Locker, cb func() error) error { +func (a *activeResources) withResources(spt abi.RegisteredSealProof, id WorkerID, wr storiface.WorkerResources, r Resources, locker sync.Locker, cb func() error) error { for !canHandleRequest(r, spt, id, wr, a) { if a.cond == nil { a.cond = sync.NewCond(locker) @@ -367,7 +367,7 @@ func (a *activeResources) free(wr storiface.WorkerResources, r Resources) { a.memUsedMax -= r.MaxMemory } -func canHandleRequest(needRes Resources, spt abi.RegisteredProof, wid WorkerID, res storiface.WorkerResources, active *activeResources) bool { +func canHandleRequest(needRes Resources, spt abi.RegisteredSealProof, wid WorkerID, res storiface.WorkerResources, active *activeResources) bool { // TODO: dedupe needRes.BaseMinMemory per task type (don't add if that task is already running) minNeedMem := res.MemReserved + active.memUsedMin + needRes.MinMemory + needRes.BaseMinMemory @@ -377,10 +377,10 @@ func canHandleRequest(needRes Resources, spt abi.RegisteredProof, wid WorkerID, } maxNeedMem := res.MemReserved + active.memUsedMax + needRes.MaxMemory + needRes.BaseMinMemory - if spt == abi.RegisteredProof_StackedDRG32GiBSeal { + if spt == abi.RegisteredSealProof_StackedDrg32GiBV1 { maxNeedMem += MaxCachingOverhead } - if spt == abi.RegisteredProof_StackedDRG64GiBSeal { + if spt == abi.RegisteredSealProof_StackedDrg64GiBV1 { maxNeedMem += MaxCachingOverhead * 2 // ewwrhmwh } if maxNeedMem > res.MemSwap+res.MemPhysical { diff --git a/selector_alloc.go b/selector_alloc.go index 543020ef1..874bf7bb0 100644 --- a/selector_alloc.go +++ b/selector_alloc.go @@ -25,7 +25,7 @@ func newAllocSelector(ctx context.Context, index stores.SectorIndex, alloc store }, nil } -func (s *allocSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredProof, whnd *workerHandle) (bool, error) { +func (s *allocSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredSealProof, whnd *workerHandle) (bool, error) { tasks, err := whnd.w.TaskTypes(ctx) if err != nil { return false, xerrors.Errorf("getting supported worker task types: %w", err) diff --git a/selector_existing.go b/selector_existing.go index bba48b965..3f99010cb 100644 --- a/selector_existing.go +++ b/selector_existing.go @@ -26,7 +26,7 @@ func newExistingSelector(ctx context.Context, index stores.SectorIndex, sector a }, nil } -func (s *existingSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredProof, whnd *workerHandle) (bool, error) { +func (s *existingSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredSealProof, whnd *workerHandle) (bool, error) { tasks, err := whnd.w.TaskTypes(ctx) if err != nil { return false, xerrors.Errorf("getting supported worker task types: %w", err) diff --git a/selector_task.go b/selector_task.go index d2cf73476..bf0788ef9 100644 --- a/selector_task.go +++ b/selector_task.go @@ -19,7 +19,7 @@ func newTaskSelector() *taskSelector { return &taskSelector{} } -func (s *taskSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredProof, whnd *workerHandle) (bool, error) { +func (s *taskSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredSealProof, whnd *workerHandle) (bool, error) { tasks, err := whnd.w.TaskTypes(ctx) if err != nil { return false, xerrors.Errorf("getting supported worker task types: %w", err) diff --git a/stores/filetype.go b/stores/filetype.go index fee8b256f..c31dfefb2 100644 --- a/stores/filetype.go +++ b/stores/filetype.go @@ -50,7 +50,7 @@ func (t SectorFileType) Has(singleType SectorFileType) bool { return t&singleType == singleType } -func (t SectorFileType) SealSpaceUse(spt abi.RegisteredProof) (uint64, error) { +func (t SectorFileType) SealSpaceUse(spt abi.RegisteredSealProof) (uint64, error) { ssize, err := spt.SectorSize() if err != nil { return 0, xerrors.Errorf("getting sector size: %w", err) diff --git a/stores/index.go b/stores/index.go index e113a005a..049e2dc20 100644 --- a/stores/index.go +++ b/stores/index.go @@ -58,7 +58,7 @@ type SectorIndex interface { // part of storage-miner api StorageDropSector(ctx context.Context, storageId ID, s abi.SectorID, ft SectorFileType) error StorageFindSector(ctx context.Context, sector abi.SectorID, ft SectorFileType, allowFetch bool) ([]SectorStorageInfo, error) - StorageBestAlloc(ctx context.Context, allocate SectorFileType, spt abi.RegisteredProof, pathType PathType) ([]StorageInfo, error) + StorageBestAlloc(ctx context.Context, allocate SectorFileType, spt abi.RegisteredSealProof, pathType PathType) ([]StorageInfo, error) // atomically acquire locks on all sector file types. close ctx to unlock StorageLock(ctx context.Context, sector abi.SectorID, read SectorFileType, write SectorFileType) error @@ -342,7 +342,7 @@ func (i *Index) StorageInfo(ctx context.Context, id ID) (StorageInfo, error) { return *si.info, nil } -func (i *Index) StorageBestAlloc(ctx context.Context, allocate SectorFileType, spt abi.RegisteredProof, pathType PathType) ([]StorageInfo, error) { +func (i *Index) StorageBestAlloc(ctx context.Context, allocate SectorFileType, spt abi.RegisteredSealProof, pathType PathType) ([]StorageInfo, error) { i.lk.RLock() defer i.lk.RUnlock() diff --git a/stores/interface.go b/stores/interface.go index c400019aa..54aaec90c 100644 --- a/stores/interface.go +++ b/stores/interface.go @@ -24,7 +24,7 @@ const ( ) type Store interface { - AcquireSector(ctx context.Context, s abi.SectorID, spt abi.RegisteredProof, existing SectorFileType, allocate SectorFileType, sealing PathType, op AcquireMode) (paths SectorPaths, stores SectorPaths, err error) + AcquireSector(ctx context.Context, s abi.SectorID, spt abi.RegisteredSealProof, existing SectorFileType, allocate SectorFileType, sealing PathType, op AcquireMode) (paths SectorPaths, stores SectorPaths, err error) Remove(ctx context.Context, s abi.SectorID, types SectorFileType, force bool) error // like remove, but doesn't remove the primary sector copy, nor the last @@ -32,7 +32,7 @@ type Store interface { RemoveCopies(ctx context.Context, s abi.SectorID, types SectorFileType) error // move sectors into storage - MoveStorage(ctx context.Context, s abi.SectorID, spt abi.RegisteredProof, types SectorFileType) error + MoveStorage(ctx context.Context, s abi.SectorID, spt abi.RegisteredSealProof, types SectorFileType) error FsStat(ctx context.Context, id ID) (FsStat, error) } diff --git a/stores/local.go b/stores/local.go index 55d34549b..26b7ccb75 100644 --- a/stores/local.go +++ b/stores/local.go @@ -197,7 +197,7 @@ func (st *Local) reportHealth(ctx context.Context) { } } -func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, spt abi.RegisteredProof, existing SectorFileType, allocate SectorFileType, pathType PathType, op AcquireMode) (SectorPaths, SectorPaths, error) { +func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, spt abi.RegisteredSealProof, existing SectorFileType, allocate SectorFileType, pathType PathType, op AcquireMode) (SectorPaths, SectorPaths, error) { if existing|allocate != existing^allocate { return SectorPaths{}, SectorPaths{}, xerrors.New("can't both find and allocate a sector") } @@ -397,7 +397,7 @@ func (st *Local) removeSector(ctx context.Context, sid abi.SectorID, typ SectorF return nil } -func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, spt abi.RegisteredProof, types SectorFileType) error { +func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, spt abi.RegisteredSealProof, types SectorFileType) error { dest, destIds, err := st.AcquireSector(ctx, s, spt, FTNone, types, false, AcquireMove) if err != nil { return xerrors.Errorf("acquire dest storage: %w", err) diff --git a/stores/remote.go b/stores/remote.go index be1ebf1d2..30fe3abf9 100644 --- a/stores/remote.go +++ b/stores/remote.go @@ -50,7 +50,7 @@ func NewRemote(local *Local, index SectorIndex, auth http.Header) *Remote { } } -func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, spt abi.RegisteredProof, existing SectorFileType, allocate SectorFileType, pathType PathType, op AcquireMode) (SectorPaths, SectorPaths, error) { +func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, spt abi.RegisteredSealProof, existing SectorFileType, allocate SectorFileType, pathType PathType, op AcquireMode) (SectorPaths, SectorPaths, error) { if existing|allocate != existing^allocate { return SectorPaths{}, SectorPaths{}, xerrors.New("can't both find and allocate a sector") } @@ -119,7 +119,7 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, spt abi.Regi return paths, stores, nil } -func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, spt abi.RegisteredProof, fileType SectorFileType, pathType PathType, op AcquireMode) (string, ID, string, error) { +func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, spt abi.RegisteredSealProof, fileType SectorFileType, pathType PathType, op AcquireMode) (string, ID, string, error) { si, err := r.index.StorageFindSector(ctx, s, fileType, false) if err != nil { return "", "", "", err @@ -210,7 +210,7 @@ func (r *Remote) fetch(ctx context.Context, url, outname string) error { } } -func (r *Remote) MoveStorage(ctx context.Context, s abi.SectorID, spt abi.RegisteredProof, types SectorFileType) error { +func (r *Remote) MoveStorage(ctx context.Context, s abi.SectorID, spt abi.RegisteredSealProof, types SectorFileType) error { // Make sure we have the data local _, _, err := r.AcquireSector(ctx, s, spt, types, FTNone, PathStorage, AcquireMove) if err != nil { diff --git a/testworker_test.go b/testworker_test.go index 46218d2f6..d28761702 100644 --- a/testworker_test.go +++ b/testworker_test.go @@ -94,7 +94,7 @@ func (t *testWorker) Paths(ctx context.Context) ([]stores.StoragePath, error) { } func (t *testWorker) Info(ctx context.Context) (storiface.WorkerInfo, error) { - res := ResourceTable[sealtasks.TTPreCommit2][abi.RegisteredProof_StackedDRG2KiBSeal] + res := ResourceTable[sealtasks.TTPreCommit2][abi.RegisteredSealProof_StackedDrg2KiBV1] return storiface.WorkerInfo{ Hostname: "testworkerer", diff --git a/zerocomm/zerocomm_test.go b/zerocomm/zerocomm_test.go index 763ff675a..f3206740b 100644 --- a/zerocomm/zerocomm_test.go +++ b/zerocomm/zerocomm_test.go @@ -20,7 +20,7 @@ func TestComms(t *testing.T) { var expPieceComms [zerocomm.Levels - zerocomm.Skip]cid.Cid { - l2, err := ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredProof_StackedDRG2KiBPoSt, bytes.NewReader(make([]byte, 127)), 127) + l2, err := ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg2KiBV1, bytes.NewReader(make([]byte, 127)), 127) if err != nil { t.Fatal(err) } @@ -33,7 +33,7 @@ func TestComms(t *testing.T) { fmt.Println(i, sz) r := io.LimitReader(&NullReader{}, int64(sz)) - expPieceComms[i], err = ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredProof_StackedDRG2KiBPoSt, r, sz) + expPieceComms[i], err = ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg2KiBV1, r, sz) if err != nil { t.Fatal(err) } @@ -59,7 +59,7 @@ func TestCommsSmall(t *testing.T) { lvls := len(expPieceComms) + zerocomm.Skip { - l2, err := ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredProof_StackedDRG2KiBPoSt, bytes.NewReader(make([]byte, 127)), 127) + l2, err := ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg2KiBV1, bytes.NewReader(make([]byte, 127)), 127) if err != nil { t.Fatal(err) } @@ -72,7 +72,7 @@ func TestCommsSmall(t *testing.T) { fmt.Println(i, sz) r := io.LimitReader(&NullReader{}, int64(sz)) - expPieceComms[i], err = ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredProof_StackedDRG2KiBPoSt, r, sz) + expPieceComms[i], err = ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg2KiBV1, r, sz) if err != nil { t.Fatal(err) } @@ -94,7 +94,7 @@ func TestCommsSmall(t *testing.T) { } func TestForSise(t *testing.T) { - exp, err := ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredProof_StackedDRG2KiBPoSt, bytes.NewReader(make([]byte, 1016)), 1016) + exp, err := ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg2KiBV1, bytes.NewReader(make([]byte, 1016)), 1016) if err != nil { return } From 3d1225bf3449e4e32723d04fb541471d8007d844 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 15 Jun 2020 14:33:01 +0200 Subject: [PATCH 129/199] gofmt --- extern/filecoin-ffi | 2 +- ffiwrapper/sealer_test.go | 2 +- ffiwrapper/verifier_cgo.go | 2 +- mock/mock.go | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/extern/filecoin-ffi b/extern/filecoin-ffi index 1bff7f456..5bb4a309b 160000 --- a/extern/filecoin-ffi +++ b/extern/filecoin-ffi @@ -1 +1 @@ -Subproject commit 1bff7f4563370ada590a605b5459b91e1662ebaa +Subproject commit 5bb4a309bce9d446ac618f34a8b9e2883af2002f diff --git a/ffiwrapper/sealer_test.go b/ffiwrapper/sealer_test.go index 1cb1ac9ce..7c1ad0474 100644 --- a/ffiwrapper/sealer_test.go +++ b/ffiwrapper/sealer_test.go @@ -91,7 +91,7 @@ func (s *seal) commit(t *testing.T, sb *Sealer, done func()) { ok, err := ProofVerifier.VerifySeal(abi.SealVerifyInfo{ SectorID: s.id, SealedCID: s.cids.Sealed, - SealProof: sealProofType, + SealProof: sealProofType, Proof: proof, Randomness: s.ticket, InteractiveRandomness: seed, diff --git a/ffiwrapper/verifier_cgo.go b/ffiwrapper/verifier_cgo.go index a699ee10c..e3e8dd886 100644 --- a/ffiwrapper/verifier_cgo.go +++ b/ffiwrapper/verifier_cgo.go @@ -15,7 +15,7 @@ import ( ) func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, error) { - randomness[31] = 0 // TODO: Not correct, fixme + randomness[31] = 0 // TODO: Not correct, fixme privsectors, skipped, done, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWinningPoStProof) // TODO: FAULTS? if err != nil { return nil, err diff --git a/mock/mock.go b/mock/mock.go index 0d61e69de..eae17bd3c 100644 --- a/mock/mock.go +++ b/mock/mock.go @@ -278,7 +278,7 @@ func generateFakePoSt(sectorInfo []abi.SectorInfo, rpt func(abi.RegisteredSealPr return []abi.PoStProof{ { PoStProof: wp, - ProofBytes: make([]byte, 192*int(math.Ceil(float64(len(sectorInfo))/float64(se)))), + ProofBytes: make([]byte, 192*int(math.Ceil(float64(len(sectorInfo))/float64(se)))), }, } } @@ -319,7 +319,7 @@ func (mgr *SectorMgr) FinalizeSector(context.Context, abi.SectorID) error { return nil } -func (mgr *SectorMgr) CheckProvable(context.Context, abi.RegisteredProof, []abi.SectorID) ([]abi.SectorID, error) { +func (mgr *SectorMgr) CheckProvable(context.Context, abi.RegisteredSealProof, []abi.SectorID) ([]abi.SectorID, error) { return nil, nil } From f3026a9ee6236b2694aa9f5e438df25c39bd76c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 15 Jun 2020 17:45:37 +0200 Subject: [PATCH 130/199] mod tidy --- go.sum | 2 -- 1 file changed, 2 deletions(-) diff --git a/go.sum b/go.sum index df51caede..33fbe76a6 100644 --- a/go.sum +++ b/go.sum @@ -43,8 +43,6 @@ github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5/go github.com/filecoin-project/go-paramfetch v0.0.1 h1:gV7bs5YaqlgpGFMiLxInGK2L1FyCXUE0rimz4L7ghoE= github.com/filecoin-project/go-paramfetch v0.0.1/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= github.com/filecoin-project/specs-actors v0.3.0/go.mod h1:nQYnFbQ7Y0bHZyq6HDEuVlCPR+U3z5Q3wMOQ+2aiV+Y= -github.com/filecoin-project/specs-actors v0.5.4-0.20200521014528-0df536f7e461 h1:yyAoJ9dNboljDWj0uBdJEbdaTak/YhkjYUQt0GzlY0A= -github.com/filecoin-project/specs-actors v0.5.4-0.20200521014528-0df536f7e461/go.mod h1:r5btrNzZD0oBkEz1pohv80gSCXQnqGrD0kYwOTiExyE= github.com/filecoin-project/specs-actors v0.6.0 h1:IepUsmDGY60QliENVTkBTAkwqGWw9kNbbHOcU/9oiC0= github.com/filecoin-project/specs-actors v0.6.0/go.mod h1:dRdy3cURykh2R8O/DKqy8olScl70rmIS7GrB4hB1IDY= github.com/filecoin-project/specs-storage v0.1.0 h1:PkDgTOT5W5Ao7752onjDl4QSv+sgOVdJbvFjOnD5w94= From e530ff93c437d5a843687ad17c6e5353a7cd6421 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 15 Jun 2020 19:59:20 +0200 Subject: [PATCH 131/199] ffiwrapper: Close the correct end of the pipe in unseal --- ffiwrapper/sealer_cgo.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ffiwrapper/sealer_cgo.go b/ffiwrapper/sealer_cgo.go index 0beb9b86c..d17aed272 100644 --- a/ffiwrapper/sealer_cgo.go +++ b/ffiwrapper/sealer_cgo.go @@ -329,7 +329,7 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s uint64(at.Unpadded()), uint64(abi.PaddedPieceSize(piece.Len).Unpadded())) - _ = opr.Close() + _ = opw.Close() if err != nil { return xerrors.Errorf("unseal range: %w", err) From e82d2ee8eea359a5489aa4c4c2ce57fc3aa8a479 Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Mon, 15 Jun 2020 20:19:42 +0200 Subject: [PATCH 132/199] Log and ignore if fallocate is not supported Signed-off-by: Jakub Sztandera --- ffiwrapper/partialfile.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/ffiwrapper/partialfile.go b/ffiwrapper/partialfile.go index 094448e89..a2c1f1151 100644 --- a/ffiwrapper/partialfile.go +++ b/ffiwrapper/partialfile.go @@ -4,6 +4,7 @@ import ( "encoding/binary" "io" "os" + "syscall" "github.com/detailyang/go-fallocate" "golang.org/x/xerrors" @@ -63,6 +64,12 @@ func createPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*partialF err = func() error { err := fallocate.Fallocate(f, 0, int64(maxPieceSize)) + if errno, ok := err.(syscall.Errno); ok { + if errno == syscall.EOPNOTSUPP || errno == syscall.ENOSYS { + log.Warnf("could not allocated space, ignoring: %v", errno) + err = nil // log and ignore + } + } if err != nil { return xerrors.Errorf("fallocate '%s': %w", path, err) } From 1436e217617a80d428b7a9229ee7516aea45aa57 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 17 Jun 2020 19:22:03 -0700 Subject: [PATCH 133/199] actually log the stat error in question --- faults.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/faults.go b/faults.go index 4f5fc5be6..d810d8c5e 100644 --- a/faults.go +++ b/faults.go @@ -63,7 +63,7 @@ func (m *Manager) CheckProvable(ctx context.Context, spt abi.RegisteredSealProof for p, sz := range toCheck { st, err := os.Stat(p) if err != nil { - log.Warnw("CheckProvable Sector FAULT: sector file stat error", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "file", p) + log.Warnw("CheckProvable Sector FAULT: sector file stat error", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "file", p, "err", err) bad = append(bad, sector) return nil } From 5d7d633595d2525e0ccd8b58f2a7b4a68c5e29e2 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 17 Jun 2020 19:35:05 -0700 Subject: [PATCH 134/199] check appropriate paths per sector size --- faults.go | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/faults.go b/faults.go index d810d8c5e..11c1c3df2 100644 --- a/faults.go +++ b/faults.go @@ -2,6 +2,7 @@ package sectorstorage import ( "context" + "fmt" "os" "path/filepath" @@ -57,9 +58,10 @@ func (m *Manager) CheckProvable(ctx context.Context, spt abi.RegisteredSealProof lp.Sealed: 1, filepath.Join(lp.Cache, "t_aux"): 0, filepath.Join(lp.Cache, "p_aux"): 0, - filepath.Join(lp.Cache, "sc-02-data-tree-r-last.dat"): 0, } + addCachePathsForSectorSize(toCheck, lp.Cache, ssize) + for p, sz := range toCheck { st, err := os.Stat(p) if err != nil { @@ -87,4 +89,17 @@ func (m *Manager) CheckProvable(ctx context.Context, spt abi.RegisteredSealProof return bad, nil } +func addCachePathsForSectorSize(chk map[string]int64, cacheDir string, ssize abi.SectorSize) { + switch ssize { + case 512 << 20: + chk[filepath.Join(cacheDir, "sc-02-data-tree-r-last.dat")] = 0 + case 32 << 30: + for i := 0; i < 8; i++ { + chk[filepath.Join(cacheDir, fmt.Sprintf("sc-02-data-tree-r-last-%d.dat", i))] = 0 + } + default: + log.Warnf("not checking cache files of %s sectors for faults", ssize) + } +} + var _ FaultTracker = &Manager{} From 92a6ba8a57f602e40c91038f07af107a8e2a9dbe Mon Sep 17 00:00:00 2001 From: yaohcn Date: Fri, 19 Jun 2020 09:43:19 +0800 Subject: [PATCH 135/199] finalize not sched on unsealed worker --- manager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/manager.go b/manager.go index bc6dd1d9d..383320483 100644 --- a/manager.go +++ b/manager.go @@ -402,7 +402,7 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID) error } } - selector, err := newExistingSelector(ctx, m.index, sector, stores.FTCache|stores.FTSealed|unsealed, false) + selector, err := newExistingSelector(ctx, m.index, sector, stores.FTCache|stores.FTSealed, false) if err != nil { return xerrors.Errorf("creating path selector: %w", err) } From 07cf84cbc787fc965a887a073018f061b9fdba4c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 22 Jun 2020 17:02:59 +0200 Subject: [PATCH 136/199] Update specs-storage, sector removing support --- ffiwrapper/sealer_cgo.go | 20 +++++++++++++++- ffiwrapper/sealer_test.go | 4 ++-- go.mod | 2 +- go.sum | 4 ++-- localworker.go | 25 ++++++++++++++++++-- manager.go | 49 ++++++++++++++++++++++++++++++++++++--- mock/mock.go | 19 ++++++++++++++- testworker_test.go | 10 +++++++- 8 files changed, 120 insertions(+), 13 deletions(-) diff --git a/ffiwrapper/sealer_cgo.go b/ffiwrapper/sealer_cgo.go index d17aed272..6510f81cc 100644 --- a/ffiwrapper/sealer_cgo.go +++ b/ffiwrapper/sealer_cgo.go @@ -500,7 +500,11 @@ func (sb *Sealer) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Ou return ffi.SealCommitPhase2(phase1Out, sector.Number, sector.Miner) } -func (sb *Sealer) FinalizeSector(ctx context.Context, sector abi.SectorID) error { +func (sb *Sealer) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error { + if len(keepUnsealed) > 0 { + return xerrors.Errorf("keepUnsealed unsupported") // TODO: impl for fastretrieval copies + } + paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTCache, 0, false) if err != nil { return xerrors.Errorf("acquiring sector cache path: %w", err) @@ -510,6 +514,20 @@ func (sb *Sealer) FinalizeSector(ctx context.Context, sector abi.SectorID) error return ffi.ClearCache(uint64(sb.ssize), paths.Cache) } +func (sb *Sealer) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error { + // This call is meant to mark storage as 'freeable'. Given that unsealing is + // very expensive, we don't remove data as soon as we can - instead we only + // do that when we don't have free space for data that really needs it + + // This function should not be called at this layer, everything should be + // handled in localworker + return xerrors.Errorf("not supported at this layer") +} + +func (sb *Sealer) Remove(ctx context.Context, sector abi.SectorID) error { + return xerrors.Errorf("not supported at this layer") // happens in localworker +} + func GeneratePieceCIDFromFile(proofType abi.RegisteredSealProof, piece io.Reader, pieceSize abi.UnpaddedPieceSize) (cid.Cid, error) { f, werr, err := ToReadableFile(piece, int64(pieceSize)) if err != nil { diff --git a/ffiwrapper/sealer_test.go b/ffiwrapper/sealer_test.go index 7c1ad0474..5e6f02cd2 100644 --- a/ffiwrapper/sealer_test.go +++ b/ffiwrapper/sealer_test.go @@ -297,7 +297,7 @@ func TestSealAndVerify(t *testing.T) { post(t, sb, s) - if err := sb.FinalizeSector(context.TODO(), si); err != nil { + if err := sb.FinalizeSector(context.TODO(), si, nil); err != nil { t.Fatalf("%+v", err) } @@ -358,7 +358,7 @@ func TestSealPoStNoCommit(t *testing.T) { precommit := time.Now() - if err := sb.FinalizeSector(context.TODO(), si); err != nil { + if err := sb.FinalizeSector(context.TODO(), si, nil); err != nil { t.Fatal(err) } diff --git a/go.mod b/go.mod index 82eab906c..60f31942f 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5 github.com/filecoin-project/go-paramfetch v0.0.1 github.com/filecoin-project/specs-actors v0.6.0 - github.com/filecoin-project/specs-storage v0.1.0 + github.com/filecoin-project/specs-storage v0.1.1-0.20200622113353-88a9704877ea github.com/google/uuid v1.1.1 github.com/gorilla/mux v1.7.4 github.com/hashicorp/go-multierror v1.0.0 diff --git a/go.sum b/go.sum index 33fbe76a6..300226c5e 100644 --- a/go.sum +++ b/go.sum @@ -45,8 +45,8 @@ github.com/filecoin-project/go-paramfetch v0.0.1/go.mod h1:fZzmf4tftbwf9S37XRifo github.com/filecoin-project/specs-actors v0.3.0/go.mod h1:nQYnFbQ7Y0bHZyq6HDEuVlCPR+U3z5Q3wMOQ+2aiV+Y= github.com/filecoin-project/specs-actors v0.6.0 h1:IepUsmDGY60QliENVTkBTAkwqGWw9kNbbHOcU/9oiC0= github.com/filecoin-project/specs-actors v0.6.0/go.mod h1:dRdy3cURykh2R8O/DKqy8olScl70rmIS7GrB4hB1IDY= -github.com/filecoin-project/specs-storage v0.1.0 h1:PkDgTOT5W5Ao7752onjDl4QSv+sgOVdJbvFjOnD5w94= -github.com/filecoin-project/specs-storage v0.1.0/go.mod h1:Pr5ntAaxsh+sLG/LYiL4tKzvA83Vk5vLODYhfNwOg7k= +github.com/filecoin-project/specs-storage v0.1.1-0.20200622113353-88a9704877ea h1:iixjULRQFPn7Q9KlIqfwLJnlAXO10bbkI+xy5GKGdLY= +github.com/filecoin-project/specs-storage v0.1.1-0.20200622113353-88a9704877ea/go.mod h1:Pr5ntAaxsh+sLG/LYiL4tKzvA83Vk5vLODYhfNwOg7k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= diff --git a/localworker.go b/localworker.go index 6056fb214..a1d82209a 100644 --- a/localworker.go +++ b/localworker.go @@ -7,6 +7,7 @@ import ( "runtime" "github.com/elastic/go-sysinfo" + "github.com/hashicorp/go-multierror" "github.com/ipfs/go-cid" "golang.org/x/xerrors" @@ -160,13 +161,13 @@ func (l *LocalWorker) SealCommit2(ctx context.Context, sector abi.SectorID, phas return sb.SealCommit2(ctx, sector, phase1Out) } -func (l *LocalWorker) FinalizeSector(ctx context.Context, sector abi.SectorID) error { +func (l *LocalWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage2.Range) error { sb, err := l.sb() if err != nil { return err } - if err := sb.FinalizeSector(ctx, sector); err != nil { + if err := sb.FinalizeSector(ctx, sector, keepUnsealed); err != nil { return xerrors.Errorf("finalizing sector: %w", err) } @@ -177,6 +178,26 @@ func (l *LocalWorker) FinalizeSector(ctx context.Context, sector abi.SectorID) e return nil } +func (l *LocalWorker) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage2.Range) error { + return xerrors.Errorf("implement me") +} + +func (l *LocalWorker) Remove(ctx context.Context, sector abi.SectorID) error { + var err error + + if rerr := l.storage.Remove(ctx, sector, stores.FTSealed, true); rerr != nil { + err = multierror.Append(err, xerrors.Errorf("removing sector (sealed): %w", rerr)) + } + if rerr := l.storage.Remove(ctx, sector, stores.FTCache, true); rerr != nil { + err = multierror.Append(err, xerrors.Errorf("removing sector (cache): %w", rerr)) + } + if rerr := l.storage.Remove(ctx, sector, stores.FTUnsealed, true); rerr != nil { + err = multierror.Append(err, xerrors.Errorf("removing sector (unsealed): %w", rerr)) + } + + return err +} + func (l *LocalWorker) MoveStorage(ctx context.Context, sector abi.SectorID) error { if err := l.storage.MoveStorage(ctx, sector, l.scfg.SealProofType, stores.FTSealed|stores.FTCache); err != nil { return xerrors.Errorf("moving sealed data to storage: %w", err) diff --git a/manager.go b/manager.go index 383320483..ba84e579d 100644 --- a/manager.go +++ b/manager.go @@ -382,7 +382,7 @@ func (m *Manager) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Ou return out, err } -func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID) error { +func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error { ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -410,7 +410,7 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID) error err = m.sched.Schedule(ctx, sector, sealtasks.TTFinalize, selector, schedFetch(sector, stores.FTCache|stores.FTSealed|unsealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error { - return w.FinalizeSector(ctx, sector) + return w.FinalizeSector(ctx, sector, keepUnsealed) }) if err != nil { return err @@ -421,8 +421,15 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID) error return xerrors.Errorf("creating fetchSel: %w", err) } + moveUnsealed := unsealed + { + if len(keepUnsealed) == 0 { + unsealed = stores.FTNone + } + } + err = m.sched.Schedule(ctx, sector, sealtasks.TTFetch, fetchSel, - schedFetch(sector, stores.FTCache|stores.FTSealed, stores.PathStorage, stores.AcquireMove), + schedFetch(sector, stores.FTCache|stores.FTSealed|moveUnsealed, stores.PathStorage, stores.AcquireMove), func(ctx context.Context, w Worker) error { return w.MoveStorage(ctx, sector) }) @@ -433,6 +440,42 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID) error return nil } +func (m *Manager) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error { + return xerrors.Errorf("implement me") +} + +func (m *Manager) Remove(ctx context.Context, sector abi.SectorID) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + if err := m.index.StorageLock(ctx, sector, stores.FTNone, stores.FTSealed|stores.FTUnsealed|stores.FTCache); err != nil { + return xerrors.Errorf("acquiring sector lock: %w", err) + } + + unsealed := stores.FTUnsealed + { + unsealedStores, err := m.index.StorageFindSector(ctx, sector, stores.FTUnsealed, false) + if err != nil { + return xerrors.Errorf("finding unsealed sector: %w", err) + } + + if len(unsealedStores) == 0 { // can be already removed + unsealed = stores.FTNone + } + } + + selector, err := newExistingSelector(ctx, m.index, sector, stores.FTCache|stores.FTSealed, false) + if err != nil { + return xerrors.Errorf("creating selector: %w", err) + } + + return m.sched.Schedule(ctx, sector, sealtasks.TTFinalize, selector, + schedFetch(sector, stores.FTCache|stores.FTSealed|unsealed, stores.PathStorage, stores.AcquireMove), + func(ctx context.Context, w Worker) error { + return w.Remove(ctx, sector) + }) +} + func (m *Manager) StorageLocal(ctx context.Context) (map[stores.ID]string, error) { l, err := m.localStore.Local(ctx) if err != nil { diff --git a/mock/mock.go b/mock/mock.go index eae17bd3c..cbc3a1f99 100644 --- a/mock/mock.go +++ b/mock/mock.go @@ -315,7 +315,23 @@ func (mgr *SectorMgr) StageFakeData(mid abi.ActorID) (abi.SectorID, []abi.PieceI return id, []abi.PieceInfo{pi}, nil } -func (mgr *SectorMgr) FinalizeSector(context.Context, abi.SectorID) error { +func (mgr *SectorMgr) FinalizeSector(context.Context, abi.SectorID, []storage.Range) error { + return nil +} + +func (mgr *SectorMgr) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error { + panic("implement me") +} + +func (mgr *SectorMgr) Remove(ctx context.Context, sector abi.SectorID) error { + mgr.lk.Lock() + defer mgr.lk.Unlock() + + if _, has := mgr.sectors[sector]; !has { + return xerrors.Errorf("sector not found") + } + + delete(mgr.sectors, sector) return nil } @@ -355,4 +371,5 @@ func (m mockVerif) GenerateWinningPoStSectorChallenge(ctx context.Context, proof var MockVerifier = mockVerif{} +var _ storage.Sealer = &SectorMgr{} var _ ffiwrapper.Verifier = MockVerifier diff --git a/testworker_test.go b/testworker_test.go index d28761702..5ca51b771 100644 --- a/testworker_test.go +++ b/testworker_test.go @@ -73,7 +73,15 @@ func (t *testWorker) SealCommit2(ctx context.Context, sector abi.SectorID, c1o s panic("implement me") } -func (t *testWorker) FinalizeSector(ctx context.Context, sector abi.SectorID) error { +func (t *testWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error { + panic("implement me") +} + +func (t *testWorker) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error { + panic("implement me") +} + +func (t *testWorker) Remove(ctx context.Context, sector abi.SectorID) error { panic("implement me") } From be4112ee68263b2523bfb1670ee7c4c34ca0a336 Mon Sep 17 00:00:00 2001 From: Howard Yeh Date: Tue, 23 Jun 2020 17:42:47 +0800 Subject: [PATCH 137/199] add RPC timeout to maybeSchedRequest --- sched.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/sched.go b/sched.go index 715a823e1..5b99f48ff 100644 --- a/sched.go +++ b/sched.go @@ -5,6 +5,7 @@ import ( "context" "sort" "sync" + "time" "github.com/hashicorp/go-multierror" "golang.org/x/xerrors" @@ -214,7 +215,10 @@ func (sh *scheduler) maybeSchedRequest(req *workerRequest) (bool, error) { needRes := ResourceTable[req.taskType][sh.spt] for wid, worker := range sh.workers { - ok, err := req.sel.Ok(req.ctx, req.taskType, sh.spt, worker) + rpcCtx, cancel := context.WithTimeout(req.ctx, 5*time.Second) + ok, err := req.sel.Ok(rpcCtx, req.taskType, sh.spt, worker) + cancel() + if err != nil { return false, err } From 57bda914f9097d6a06df6cde9808add9eccc809d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Wed, 24 Jun 2020 00:04:02 +0200 Subject: [PATCH 138/199] fix: Don't try to move removed unsealed sector files --- manager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/manager.go b/manager.go index ba84e579d..caea09cd0 100644 --- a/manager.go +++ b/manager.go @@ -424,7 +424,7 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID, keepU moveUnsealed := unsealed { if len(keepUnsealed) == 0 { - unsealed = stores.FTNone + moveUnsealed = stores.FTNone } } From cea46d8c8cf724663903494c80eadeffb39e3263 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Wed, 24 Jun 2020 00:35:34 +0200 Subject: [PATCH 139/199] sched: Also handle timeout on selector Cmp --- sched.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/sched.go b/sched.go index 5b99f48ff..3b3e28d65 100644 --- a/sched.go +++ b/sched.go @@ -205,6 +205,8 @@ func (sh *scheduler) onWorkerFreed(wid WorkerID) { } } +var selectorTimeout = 5 * time.Second + func (sh *scheduler) maybeSchedRequest(req *workerRequest) (bool, error) { sh.workersLk.Lock() defer sh.workersLk.Unlock() @@ -215,7 +217,7 @@ func (sh *scheduler) maybeSchedRequest(req *workerRequest) (bool, error) { needRes := ResourceTable[req.taskType][sh.spt] for wid, worker := range sh.workers { - rpcCtx, cancel := context.WithTimeout(req.ctx, 5*time.Second) + rpcCtx, cancel := context.WithTimeout(req.ctx, selectorTimeout) ok, err := req.sel.Ok(rpcCtx, req.taskType, sh.spt, worker) cancel() @@ -240,7 +242,10 @@ func (sh *scheduler) maybeSchedRequest(req *workerRequest) (bool, error) { var serr error sort.SliceStable(acceptable, func(i, j int) bool { - r, err := req.sel.Cmp(req.ctx, req.taskType, sh.workers[acceptable[i]], sh.workers[acceptable[j]]) + rpcCtx, cancel := context.WithTimeout(req.ctx, selectorTimeout) + defer cancel() + r, err := req.sel.Cmp(rpcCtx, req.taskType, sh.workers[acceptable[i]], sh.workers[acceptable[j]]) + if err != nil { serr = multierror.Append(serr, err) } From ddd1f21e5d516db63a07999ac81eeb6450504fd5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Wed, 24 Jun 2020 23:06:56 +0200 Subject: [PATCH 140/199] sched: Support external priority --- request_queue.go | 4 ++++ sched.go | 20 ++++++++++++++++++++ sched_test.go | 18 ++++++++++++++++++ 3 files changed, 42 insertions(+) create mode 100644 sched_test.go diff --git a/request_queue.go b/request_queue.go index e5b3fd234..0d35e9f1d 100644 --- a/request_queue.go +++ b/request_queue.go @@ -7,6 +7,10 @@ type requestQueue []*workerRequest func (q requestQueue) Len() int { return len(q) } func (q requestQueue) Less(i, j int) bool { + if q[i].priority != q[j].priority { + return q[i].priority > q[j].priority + } + if q[i].taskType != q[j].taskType { return q[i].taskType.Less(q[j].taskType) } diff --git a/sched.go b/sched.go index 3b3e28d65..8920a1a21 100644 --- a/sched.go +++ b/sched.go @@ -16,6 +16,24 @@ import ( "github.com/filecoin-project/sector-storage/storiface" ) +type schedPrioCtxKey int + +var SchedPriorityKey schedPrioCtxKey +var DefaultSchedPriority = 0 + +func getPriority(ctx context.Context) int { + sp := ctx.Value(SchedPriorityKey) + if p, ok := sp.(int); ok { + return p + } + + return DefaultSchedPriority +} + +func WithPriority(ctx context.Context, priority int) context.Context { + return context.WithValue(ctx, SchedPriorityKey, priority) +} + const mib = 1 << 20 type WorkerAction func(ctx context.Context, w Worker) error @@ -72,6 +90,7 @@ func (sh *scheduler) Schedule(ctx context.Context, sector abi.SectorID, taskType case sh.schedule <- &workerRequest{ sector: sector, taskType: taskType, + priority: getPriority(ctx), sel: sel, prepare: prepare, @@ -99,6 +118,7 @@ func (sh *scheduler) Schedule(ctx context.Context, sector abi.SectorID, taskType type workerRequest struct { sector abi.SectorID taskType sealtasks.TaskType + priority int // larger values more important sel WorkerSelector prepare WorkerAction diff --git a/sched_test.go b/sched_test.go new file mode 100644 index 000000000..d0d0e7ca9 --- /dev/null +++ b/sched_test.go @@ -0,0 +1,18 @@ +package sectorstorage + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestWithPriority(t *testing.T) { + ctx := context.Background() + + require.Equal(t, DefaultSchedPriority, getPriority(ctx)) + + ctx = WithPriority(ctx, 2222) + + require.Equal(t, 2222, getPriority(ctx)) +} From a8997dc35c60db2ac6c9b9e74f14e20ab6fa8933 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 25 Jun 2020 21:53:51 +0200 Subject: [PATCH 141/199] ffiwrapper: Insert alignment between pieces --- ffiwrapper/sealer_cgo.go | 46 ++++++++++------- ffiwrapper/sealer_test.go | 103 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 132 insertions(+), 17 deletions(-) diff --git a/ffiwrapper/sealer_cgo.go b/ffiwrapper/sealer_cgo.go index 6510f81cc..177ddeae0 100644 --- a/ffiwrapper/sealer_cgo.go +++ b/ffiwrapper/sealer_cgo.go @@ -543,8 +543,35 @@ func GeneratePieceCIDFromFile(proofType abi.RegisteredSealProof, piece io.Reader } func GenerateUnsealedCID(proofType abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) { + allPieces := make([]abi.PieceInfo, 0, len(pieces)) var sum abi.PaddedPieceSize + + padTo := func(s abi.PaddedPieceSize, trailing bool) { + // pad remaining space with 0 CommPs + toFill := uint64(-sum % s) + if trailing && sum == 0 { + toFill = uint64(s) + } + + n := bits.OnesCount64(toFill) + for i := 0; i < n; i++ { + next := bits.TrailingZeros64(toFill) + psize := uint64(1) << uint(next) + toFill ^= psize + + padded := abi.PaddedPieceSize(psize) + allPieces = append(allPieces, abi.PieceInfo{ + Size: padded, + PieceCID: zerocomm.ZeroPieceCommitment(padded.Unpadded()), + }) + sum += padded + } + } + for _, p := range pieces { + padTo(p.Size, false) + + allPieces = append(allPieces, p) sum += p.Size } @@ -553,22 +580,7 @@ func GenerateUnsealedCID(proofType abi.RegisteredSealProof, pieces []abi.PieceIn return cid.Undef, err } - { - // pad remaining space with 0 CommPs - toFill := uint64(abi.PaddedPieceSize(ssize) - sum) - n := bits.OnesCount64(toFill) - for i := 0; i < n; i++ { - next := bits.TrailingZeros64(toFill) - psize := uint64(1) << uint(next) - toFill ^= psize + padTo(abi.PaddedPieceSize(ssize), true) - unpadded := abi.PaddedPieceSize(psize).Unpadded() - pieces = append(pieces, abi.PieceInfo{ - Size: unpadded.Padded(), - PieceCID: zerocomm.ZeroPieceCommitment(unpadded), - }) - } - } - - return ffi.GenerateUnsealedCID(proofType, pieces) + return ffi.GenerateUnsealedCID(proofType, allPieces) } diff --git a/ffiwrapper/sealer_test.go b/ffiwrapper/sealer_test.go index 5e6f02cd2..e9628c2dd 100644 --- a/ffiwrapper/sealer_test.go +++ b/ffiwrapper/sealer_test.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "fmt" + "github.com/ipfs/go-cid" "io" "io/ioutil" "math/rand" @@ -484,3 +485,105 @@ func requireFDsClosed(t *testing.T, start int) { log.Infow("open FDs", "start", start, "now", openNow) require.Equal(t, start, openNow, "FDs shouldn't leak") } + +func TestGenerateUnsealedCID(t *testing.T) { + pt := abi.RegisteredSealProof_StackedDrg2KiBV1 + ups := int(abi.PaddedPieceSize(2048).Unpadded()) + + commP := func(b []byte) cid.Cid { + pf, werr, err := ToReadableFile(bytes.NewReader(b), int64(len(b))) + require.NoError(t, err) + + c, err := ffi.GeneratePieceCIDFromFile(pt, pf, abi.UnpaddedPieceSize(len(b))) + require.NoError(t, err) + + require.NoError(t, werr()) + + return c + } + + testCommEq := func(name string, in [][]byte, expect [][]byte) { + t.Run(name, func(t *testing.T) { + upi := make([]abi.PieceInfo, len(in)) + for i, b := range in { + upi[i] = abi.PieceInfo{ + Size: abi.UnpaddedPieceSize(len(b)).Padded(), + PieceCID: commP(b), + } + } + + sectorPi := []abi.PieceInfo{ + { + Size: 2048, + PieceCID: commP(bytes.Join(expect, nil)), + }, + } + + expectCid, err := GenerateUnsealedCID(pt, sectorPi) + require.NoError(t, err) + + actualCid, err := GenerateUnsealedCID(pt, upi) + require.NoError(t, err) + + require.Equal(t, expectCid, actualCid) + }) + } + + barr := func(b byte, den int) []byte { + return bytes.Repeat([]byte{b}, ups/den) + } + + // 0000 + testCommEq("zero", + nil, + [][]byte{barr(0, 1)}, + ) + + // 1111 + testCommEq("one", + [][]byte{barr(1, 1)}, + [][]byte{barr(1, 1)}, + ) + + // 11 00 + testCommEq("one|2", + [][]byte{barr(1, 2)}, + [][]byte{barr(1, 2), barr(0, 2)}, + ) + + // 1 0 00 + testCommEq("one|4", + [][]byte{barr(1, 4)}, + [][]byte{barr(1, 4), barr(0, 4), barr(0, 2)}, + ) + + // 11 2 0 + testCommEq("one|2-two|4", + [][]byte{barr(1, 2), barr(2, 4)}, + [][]byte{barr(1, 2), barr(2, 4), barr(0, 4)}, + ) + + // 1 0 22 + testCommEq("one|4-two|2", + [][]byte{barr(1, 4), barr(2, 2)}, + [][]byte{barr(1, 4), barr(0, 4), barr(2, 2)}, + ) + + // 1 0 22 0000 + testCommEq("one|8-two|4", + [][]byte{barr(1, 8), barr(2, 4)}, + [][]byte{barr(1, 8), barr(0, 8), barr(2, 4), barr(0, 2)}, + ) + + // 11 2 0 0000 + testCommEq("one|4-two|8", + [][]byte{barr(1, 4), barr(2, 8)}, + [][]byte{barr(1, 4), barr(2, 8), barr(0, 8), barr(0, 2)}, + ) + + // 1 0 22 3 0 00 4444 5 0 00 + testCommEq("one|16-two|8-three|16-four|4-five|16", + [][]byte{barr(1, 16), barr(2, 8), barr(3, 16), barr(4, 4), barr(5, 16)}, + [][]byte{barr(1, 16), barr(0, 16), barr(2, 8), barr(3, 16), barr(0, 16), barr(0, 8), barr(4, 4), barr(5, 16), barr(0, 16), barr(0, 8)}, + ) +} From c1ac986f20d862d9a9cb8da2ccc342c996d12fcc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Tue, 30 Jun 2020 19:05:47 +0200 Subject: [PATCH 142/199] Update FFI --- extern/filecoin-ffi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/extern/filecoin-ffi b/extern/filecoin-ffi index 5bb4a309b..5342c7c97 160000 --- a/extern/filecoin-ffi +++ b/extern/filecoin-ffi @@ -1 +1 @@ -Subproject commit 5bb4a309bce9d446ac618f34a8b9e2883af2002f +Subproject commit 5342c7c97d1a1df4650629d14f2823d52889edd9 From cff68a14d8cbc39345cb8d6559bd1f2ae8e4ada9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Tue, 30 Jun 2020 19:26:56 +0200 Subject: [PATCH 143/199] Adjust resource table for filecoin-ffi 0.30.1 --- resources.go | 18 ++++++++---------- sched.go | 17 ++++++----------- 2 files changed, 14 insertions(+), 21 deletions(-) diff --git a/resources.go b/resources.go index 06725a1b8..2f67dc84e 100644 --- a/resources.go +++ b/resources.go @@ -20,8 +20,6 @@ func (r Resources) MultiThread() bool { return r.Threads == -1 } -const MaxCachingOverhead = 32 << 30 - var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources{ sealtasks.TTAddPiece: { abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{ // This is probably a bit conservative @@ -68,27 +66,27 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources sealtasks.TTPreCommit1: { abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{ MaxMemory: 128 << 30, - MinMemory: 96 << 30, + MinMemory: 112 << 30, Threads: 1, - BaseMinMemory: 60 << 30, + BaseMinMemory: 10 << 20, }, abi.RegisteredSealProof_StackedDrg32GiBV1: Resources{ MaxMemory: 64 << 30, - MinMemory: 48 << 30, + MinMemory: 56 << 30, Threads: 1, - BaseMinMemory: 30 << 30, + BaseMinMemory: 10 << 20, }, abi.RegisteredSealProof_StackedDrg512MiBV1: Resources{ - MaxMemory: 3 << 29, // 1.5G - MinMemory: 1 << 30, + MaxMemory: 1 << 30, + MinMemory: 768 << 20, Threads: 1, - BaseMinMemory: 1 << 30, + BaseMinMemory: 1 << 20, }, abi.RegisteredSealProof_StackedDrg2KiBV1: Resources{ MaxMemory: 2 << 10, @@ -195,7 +193,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources }, sealtasks.TTCommit2: { abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{ - MaxMemory: 260 << 30, // TODO: Confirm + MaxMemory: 190 << 30, // TODO: Confirm MinMemory: 60 << 30, Threads: -1, diff --git a/sched.go b/sched.go index 8920a1a21..9fddd7bd9 100644 --- a/sched.go +++ b/sched.go @@ -250,7 +250,7 @@ func (sh *scheduler) maybeSchedRequest(req *workerRequest) (bool, error) { } tried++ - if !canHandleRequest(needRes, sh.spt, wid, worker.info.Resources, worker.preparing) { + if !canHandleRequest(needRes, wid, worker.info.Resources, worker.preparing) { continue } @@ -316,7 +316,7 @@ func (sh *scheduler) assignWorker(wid WorkerID, w *workerHandle, req *workerRequ return } - err = w.active.withResources(sh.spt, wid, w.info.Resources, needRes, &sh.workersLk, func() error { + err = w.active.withResources(wid, w.info.Resources, needRes, &sh.workersLk, func() error { w.preparing.free(w.info.Resources, needRes) sh.workersLk.Unlock() defer sh.workersLk.Lock() // we MUST return locked from this function @@ -350,8 +350,8 @@ func (sh *scheduler) assignWorker(wid WorkerID, w *workerHandle, req *workerRequ return nil } -func (a *activeResources) withResources(spt abi.RegisteredSealProof, id WorkerID, wr storiface.WorkerResources, r Resources, locker sync.Locker, cb func() error) error { - for !canHandleRequest(r, spt, id, wr, a) { +func (a *activeResources) withResources(id WorkerID, wr storiface.WorkerResources, r Resources, locker sync.Locker, cb func() error) error { + for !canHandleRequest(r, id, wr, a) { if a.cond == nil { a.cond = sync.NewCond(locker) } @@ -396,7 +396,7 @@ func (a *activeResources) free(wr storiface.WorkerResources, r Resources) { a.memUsedMax -= r.MaxMemory } -func canHandleRequest(needRes Resources, spt abi.RegisteredSealProof, wid WorkerID, res storiface.WorkerResources, active *activeResources) bool { +func canHandleRequest(needRes Resources, wid WorkerID, res storiface.WorkerResources, active *activeResources) bool { // TODO: dedupe needRes.BaseMinMemory per task type (don't add if that task is already running) minNeedMem := res.MemReserved + active.memUsedMin + needRes.MinMemory + needRes.BaseMinMemory @@ -406,12 +406,7 @@ func canHandleRequest(needRes Resources, spt abi.RegisteredSealProof, wid Worker } maxNeedMem := res.MemReserved + active.memUsedMax + needRes.MaxMemory + needRes.BaseMinMemory - if spt == abi.RegisteredSealProof_StackedDrg32GiBV1 { - maxNeedMem += MaxCachingOverhead - } - if spt == abi.RegisteredSealProof_StackedDrg64GiBV1 { - maxNeedMem += MaxCachingOverhead * 2 // ewwrhmwh - } + if maxNeedMem > res.MemSwap+res.MemPhysical { log.Debugf("sched: not scheduling on worker %d; not enough virtual memory - need: %dM, have %dM", wid, maxNeedMem/mib, (res.MemSwap+res.MemPhysical)/mib) return false From 6d02ac0290fc2775a072106863f9221098f5321e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Tue, 30 Jun 2020 19:30:06 +0200 Subject: [PATCH 144/199] mod tidy --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 60f31942f..9e51c0445 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/filecoin-project/go-bitfield v0.0.2-0.20200518150651-562fdb554b6e github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5 github.com/filecoin-project/go-paramfetch v0.0.1 - github.com/filecoin-project/specs-actors v0.6.0 + github.com/filecoin-project/specs-actors v0.6.1 github.com/filecoin-project/specs-storage v0.1.1-0.20200622113353-88a9704877ea github.com/google/uuid v1.1.1 github.com/gorilla/mux v1.7.4 diff --git a/go.sum b/go.sum index 300226c5e..2f97216e3 100644 --- a/go.sum +++ b/go.sum @@ -43,8 +43,8 @@ github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5/go github.com/filecoin-project/go-paramfetch v0.0.1 h1:gV7bs5YaqlgpGFMiLxInGK2L1FyCXUE0rimz4L7ghoE= github.com/filecoin-project/go-paramfetch v0.0.1/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= github.com/filecoin-project/specs-actors v0.3.0/go.mod h1:nQYnFbQ7Y0bHZyq6HDEuVlCPR+U3z5Q3wMOQ+2aiV+Y= -github.com/filecoin-project/specs-actors v0.6.0 h1:IepUsmDGY60QliENVTkBTAkwqGWw9kNbbHOcU/9oiC0= -github.com/filecoin-project/specs-actors v0.6.0/go.mod h1:dRdy3cURykh2R8O/DKqy8olScl70rmIS7GrB4hB1IDY= +github.com/filecoin-project/specs-actors v0.6.1 h1:rhHlEzqcuuQU6oKc4csuq+/kQBDZ4EXtSomoN2XApCA= +github.com/filecoin-project/specs-actors v0.6.1/go.mod h1:dRdy3cURykh2R8O/DKqy8olScl70rmIS7GrB4hB1IDY= github.com/filecoin-project/specs-storage v0.1.1-0.20200622113353-88a9704877ea h1:iixjULRQFPn7Q9KlIqfwLJnlAXO10bbkI+xy5GKGdLY= github.com/filecoin-project/specs-storage v0.1.1-0.20200622113353-88a9704877ea/go.mod h1:Pr5ntAaxsh+sLG/LYiL4tKzvA83Vk5vLODYhfNwOg7k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= From a53fff7608d8c8ac1072ddb12b3f5cb602cbeb87 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Tue, 30 Jun 2020 19:38:41 +0200 Subject: [PATCH 145/199] Fix tests --- ffiwrapper/sealer_test.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ffiwrapper/sealer_test.go b/ffiwrapper/sealer_test.go index 5e6f02cd2..5b3305460 100644 --- a/ffiwrapper/sealer_test.go +++ b/ffiwrapper/sealer_test.go @@ -458,6 +458,10 @@ func openFDs(t *testing.T) int { if strings.HasPrefix(l, "/dev/nvidia") { skip++ } + + if strings.HasPrefix(l, "/var/tmp/filecoin-proof-parameters/") { + skip++ + } } return len(dent) - skip From 31d9abfc8cac192758b62c27945e7480fc5fe328 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 3 Jul 2020 21:52:31 +0200 Subject: [PATCH 146/199] Implement FinalizeSector keepUnsealed --- ffiwrapper/partialfile.go | 23 ++++++++++++++++ ffiwrapper/sealer_cgo.go | 56 ++++++++++++++++++++++++++++++++++++++- fsutil/dealloc_linux.go | 28 ++++++++++++++++++++ fsutil/dealloc_other.go | 18 +++++++++++++ go.mod | 2 +- go.sum | 2 ++ localworker.go | 6 +++-- 7 files changed, 131 insertions(+), 4 deletions(-) create mode 100644 fsutil/dealloc_linux.go create mode 100644 fsutil/dealloc_other.go diff --git a/ffiwrapper/partialfile.go b/ffiwrapper/partialfile.go index a2c1f1151..8c4fdcc72 100644 --- a/ffiwrapper/partialfile.go +++ b/ffiwrapper/partialfile.go @@ -12,6 +12,7 @@ import ( rlepluslazy "github.com/filecoin-project/go-bitfield/rle" "github.com/filecoin-project/specs-actors/actors/abi" + "github.com/filecoin-project/sector-storage/fsutil" "github.com/filecoin-project/sector-storage/storiface" ) @@ -218,6 +219,28 @@ func (pf *partialFile) MarkAllocated(offset storiface.PaddedByteIndex, size abi. return nil } +func (pf *partialFile) Free(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) error { + have, err := pf.allocated.RunIterator() + if err != nil { + return err + } + + if err := fsutil.Deallocate(pf.file, int64(offset), int64(size)); err != nil { + return xerrors.Errorf("deallocating: %w", err) + } + + s, err := rlepluslazy.Subtract(have, pieceRun(offset, size)) + if err != nil { + return err + } + + if err := writeTrailer(int64(pf.maxPiece), pf.file, s); err != nil { + return xerrors.Errorf("writing trailer: %w", err) + } + + return nil +} + func (pf *partialFile) Reader(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) (*os.File, error) { if _, err := pf.file.Seek(int64(offset), io.SeekStart); err != nil { return nil, xerrors.Errorf("seek piece start: %w", err) diff --git a/ffiwrapper/sealer_cgo.go b/ffiwrapper/sealer_cgo.go index 177ddeae0..58d9d8c5b 100644 --- a/ffiwrapper/sealer_cgo.go +++ b/ffiwrapper/sealer_cgo.go @@ -15,6 +15,7 @@ import ( "golang.org/x/xerrors" ffi "github.com/filecoin-project/filecoin-ffi" + rlepluslazy "github.com/filecoin-project/go-bitfield/rle" commcid "github.com/filecoin-project/go-fil-commcid" "github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-storage/storage" @@ -502,7 +503,60 @@ func (sb *Sealer) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Ou func (sb *Sealer) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error { if len(keepUnsealed) > 0 { - return xerrors.Errorf("keepUnsealed unsupported") // TODO: impl for fastretrieval copies + maxPieceSize := abi.PaddedPieceSize(sb.ssize) + + sr := pieceRun(0, maxPieceSize) + + for _, s := range keepUnsealed { + si := &rlepluslazy.RunSliceIterator{} + if s.Offset != 0 { + si.Runs = append(si.Runs, rlepluslazy.Run{Val: false, Len: uint64(s.Offset)}) + } + si.Runs = append(si.Runs, rlepluslazy.Run{Val: true, Len: uint64(s.Size)}) + + var err error + sr, err = rlepluslazy.Subtract(sr, si) + if err != nil { + return err + } + } + + + paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, 0, false) + if err != nil { + return xerrors.Errorf("acquiring sector cache path: %w", err) + } + defer done() + + pf, err := openPartialFile(maxPieceSize, paths.Unsealed) + if xerrors.Is(err, os.ErrNotExist) { + return xerrors.Errorf("opening partial file: %w", err) + } + + var at uint64 + for sr.HasNext() { + r, err := sr.NextRun() + if err != nil { + _ = pf.Close() + return err + } + + offset := at + at += r.Len + if !r.Val { + continue + } + + err = pf.Free(storiface.PaddedByteIndex(abi.UnpaddedPieceSize(offset).Padded()), abi.UnpaddedPieceSize(r.Len).Padded()) + if err != nil { + _ = pf.Close() + return xerrors.Errorf("free partial file range: %w", err) + } + } + + if err := pf.Close(); err != nil { + return err + } } paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTCache, 0, false) diff --git a/fsutil/dealloc_linux.go b/fsutil/dealloc_linux.go new file mode 100644 index 000000000..0b20c568d --- /dev/null +++ b/fsutil/dealloc_linux.go @@ -0,0 +1,28 @@ +package fsutil + +import ( + "os" + "syscall" + + logging "github.com/ipfs/go-log/v2" +) + +var log = logging.Logger("fsutil") + +const FallocFlPunchHole = 0x02 // linux/falloc.h + +func Deallocate(file *os.File, offset int64, length int64) error { + if length == 0 { + return nil + } + + err := syscall.Fallocate(int(file.Fd()), FallocFlPunchHole, offset, length) + if errno, ok := err.(syscall.Errno); ok { + if errno == syscall.EOPNOTSUPP || errno == syscall.ENOSYS { + log.Warnf("could not deallocate space, ignoring: %v", errno) + err = nil // log and ignore + } + } + + return err +} diff --git a/fsutil/dealloc_other.go b/fsutil/dealloc_other.go new file mode 100644 index 000000000..721116af1 --- /dev/null +++ b/fsutil/dealloc_other.go @@ -0,0 +1,18 @@ +// +build !linux + +package fsutil + +import ( + "os" + + logging "github.com/ipfs/go-log/v2" +) + +var log = logging.Logger("fsutil") + + +func Deallocate(file *os.File, offset int64, length int64) error { + log.Warnf("deallocating space not supported") + + return err +} diff --git a/go.mod b/go.mod index 9e51c0445..83424841f 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e github.com/elastic/go-sysinfo v1.3.0 github.com/filecoin-project/filecoin-ffi v0.0.0-20200326153646-e899cc1dd072 - github.com/filecoin-project/go-bitfield v0.0.2-0.20200518150651-562fdb554b6e + github.com/filecoin-project/go-bitfield v0.0.4-0.20200703174658-f4a5758051a1 github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5 github.com/filecoin-project/go-paramfetch v0.0.1 github.com/filecoin-project/specs-actors v0.6.1 diff --git a/go.sum b/go.sum index 2f97216e3..330b97579 100644 --- a/go.sum +++ b/go.sum @@ -36,6 +36,8 @@ github.com/filecoin-project/go-bitfield v0.0.1 h1:Xg/JnrqqE77aJVKdbEyR04n9FZQWhw github.com/filecoin-project/go-bitfield v0.0.1/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY= github.com/filecoin-project/go-bitfield v0.0.2-0.20200518150651-562fdb554b6e h1:gkG/7G+iKy4He+IiQNeQn+nndFznb/vCoOR8iRQsm60= github.com/filecoin-project/go-bitfield v0.0.2-0.20200518150651-562fdb554b6e/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY= +github.com/filecoin-project/go-bitfield v0.0.4-0.20200703174658-f4a5758051a1 h1:xuHlrdznafh7ul5t4xEncnA4qgpQvJZEw+mr98eqHXw= +github.com/filecoin-project/go-bitfield v0.0.4-0.20200703174658-f4a5758051a1/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5 h1:yvQJCW9mmi9zy+51xA01Ea2X7/dL7r8eKDPuGUjRmbo= diff --git a/localworker.go b/localworker.go index a1d82209a..a6042826a 100644 --- a/localworker.go +++ b/localworker.go @@ -171,8 +171,10 @@ func (l *LocalWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, k return xerrors.Errorf("finalizing sector: %w", err) } - if err := l.storage.Remove(ctx, sector, stores.FTUnsealed, true); err != nil { - return xerrors.Errorf("removing unsealed data: %w", err) + if len(keepUnsealed) == 0 { + if err := l.storage.Remove(ctx, sector, stores.FTUnsealed, true); err != nil { + return xerrors.Errorf("removing unsealed data: %w", err) + } } return nil From 636bf90f842d7532adda8bf6ea45ffcd1d350ff4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 3 Jul 2020 22:23:36 +0200 Subject: [PATCH 147/199] Don't error in ReleaseUnsealed --- manager.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/manager.go b/manager.go index caea09cd0..767e87cf9 100644 --- a/manager.go +++ b/manager.go @@ -441,7 +441,8 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID, keepU } func (m *Manager) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error { - return xerrors.Errorf("implement me") + log.Warnw("ReleaseUnsealed todo") + return nil } func (m *Manager) Remove(ctx context.Context, sector abi.SectorID) error { From 0fd142153a2b844e17676e26d29c084fd1c5708f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 3 Jul 2020 22:24:47 +0200 Subject: [PATCH 148/199] mod tidy --- go.sum | 2 -- 1 file changed, 2 deletions(-) diff --git a/go.sum b/go.sum index 330b97579..508d985d7 100644 --- a/go.sum +++ b/go.sum @@ -34,8 +34,6 @@ github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200424220931-6263827e49f2/ github.com/filecoin-project/go-bitfield v0.0.0-20200416002808-b3ee67ec9060/go.mod h1:iodsLxOFZnqKtjj2zkgqzoGNrv6vUqj69AT/J8DKXEw= github.com/filecoin-project/go-bitfield v0.0.1 h1:Xg/JnrqqE77aJVKdbEyR04n9FZQWhwrN+buDgQCVpZU= github.com/filecoin-project/go-bitfield v0.0.1/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY= -github.com/filecoin-project/go-bitfield v0.0.2-0.20200518150651-562fdb554b6e h1:gkG/7G+iKy4He+IiQNeQn+nndFznb/vCoOR8iRQsm60= -github.com/filecoin-project/go-bitfield v0.0.2-0.20200518150651-562fdb554b6e/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY= github.com/filecoin-project/go-bitfield v0.0.4-0.20200703174658-f4a5758051a1 h1:xuHlrdznafh7ul5t4xEncnA4qgpQvJZEw+mr98eqHXw= github.com/filecoin-project/go-bitfield v0.0.4-0.20200703174658-f4a5758051a1/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus= From 837aba777f14cca3304d569235a8e6869ffc85dc Mon Sep 17 00:00:00 2001 From: chunqizhi <1558763837@qq.com> Date: Mon, 6 Jul 2020 15:08:13 +0800 Subject: [PATCH 149/199] Modity addCachePathsForSectorSize func to support 2KiB sector size devnet locally also --- faults.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/faults.go b/faults.go index 11c1c3df2..05347f26c 100644 --- a/faults.go +++ b/faults.go @@ -91,9 +91,15 @@ func (m *Manager) CheckProvable(ctx context.Context, spt abi.RegisteredSealProof func addCachePathsForSectorSize(chk map[string]int64, cacheDir string, ssize abi.SectorSize) { switch ssize { + case 2 << 10: + fallthrough + case 8 << 20: + fallthrough case 512 << 20: chk[filepath.Join(cacheDir, "sc-02-data-tree-r-last.dat")] = 0 case 32 << 30: + fallthrough + case 64 << 30: for i := 0; i < 8; i++ { chk[filepath.Join(cacheDir, fmt.Sprintf("sc-02-data-tree-r-last-%d.dat", i))] = 0 } From 7b9ab5e2928f562483c2817e2f497f15d4e33e99 Mon Sep 17 00:00:00 2001 From: chunqizhi <1558763837@qq.com> Date: Mon, 6 Jul 2020 17:49:05 +0800 Subject: [PATCH 150/199] Fix an error in addCachePathsForSectorSize func --- faults.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/faults.go b/faults.go index 05347f26c..8580c0d93 100644 --- a/faults.go +++ b/faults.go @@ -98,11 +98,13 @@ func addCachePathsForSectorSize(chk map[string]int64, cacheDir string, ssize abi case 512 << 20: chk[filepath.Join(cacheDir, "sc-02-data-tree-r-last.dat")] = 0 case 32 << 30: - fallthrough - case 64 << 30: for i := 0; i < 8; i++ { chk[filepath.Join(cacheDir, fmt.Sprintf("sc-02-data-tree-r-last-%d.dat", i))] = 0 } + case 64 << 30: + for i := 0; i < 16; i++ { + chk[filepath.Join(cacheDir, fmt.Sprintf("sc-02-data-tree-r-last-%d.dat", i))] = 0 + } default: log.Warnf("not checking cache files of %s sectors for faults", ssize) } From c5a96fdd08d3fd32ca8b19425adaa903cd9b344e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 6 Jul 2020 16:13:42 +0200 Subject: [PATCH 151/199] Change PathType to string --- faults.go | 6 ++++-- ffiwrapper/sealer_cgo.go | 20 ++++++++++---------- ffiwrapper/sealer_test.go | 2 +- ffiwrapper/verifier_cgo.go | 2 +- manager.go | 4 ++-- stores/filetype.go | 2 +- stores/http_handler.go | 2 +- stores/interface.go | 6 +++--- stores/local.go | 4 ++-- 9 files changed, 25 insertions(+), 23 deletions(-) diff --git a/faults.go b/faults.go index 11c1c3df2..0eebc42f0 100644 --- a/faults.go +++ b/faults.go @@ -43,9 +43,11 @@ func (m *Manager) CheckProvable(ctx context.Context, spt abi.RegisteredSealProof return nil } - lp, _, err := m.localStore.AcquireSector(ctx, sector, spt, stores.FTSealed|stores.FTCache, stores.FTNone, false, stores.AcquireMove) + lp, _, err := m.localStore.AcquireSector(ctx, sector, spt, stores.FTSealed|stores.FTCache, stores.FTNone, stores.PathStorage, stores.AcquireMove) if err != nil { - return xerrors.Errorf("acquire sector in checkProvable: %w", err) + log.Warnw("CheckProvable Sector FAULT: acquire sector in checkProvable", "sector", sector, "error", err) + bad = append(bad, sector) + return nil } if lp.Sealed == "" || lp.Cache == "" { diff --git a/ffiwrapper/sealer_cgo.go b/ffiwrapper/sealer_cgo.go index 177ddeae0..0ff8c8f2f 100644 --- a/ffiwrapper/sealer_cgo.go +++ b/ffiwrapper/sealer_cgo.go @@ -81,7 +81,7 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie var stagedPath stores.SectorPaths if len(existingPieceSizes) == 0 { - stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, 0, stores.FTUnsealed, true) + stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, 0, stores.FTUnsealed, stores.PathSealing) if err != nil { return abi.PieceInfo{}, xerrors.Errorf("acquire unsealed sector: %w", err) } @@ -91,7 +91,7 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie return abi.PieceInfo{}, xerrors.Errorf("creating unsealed sector file: %w", err) } } else { - stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, 0, true) + stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, 0, stores.PathSealing) if err != nil { return abi.PieceInfo{}, xerrors.Errorf("acquire unsealed sector: %w", err) } @@ -198,12 +198,12 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s maxPieceSize := abi.PaddedPieceSize(sb.ssize) // try finding existing - unsealedPath, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTNone, false) + unsealedPath, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTNone, stores.PathStorage) var pf *partialFile switch { case xerrors.Is(err, storiface.ErrSectorNotFound): - unsealedPath, done, err = sb.sectors.AcquireSector(ctx, sector, stores.FTNone, stores.FTUnsealed, false) + unsealedPath, done, err = sb.sectors.AcquireSector(ctx, sector, stores.FTNone, stores.FTUnsealed, stores.PathStorage) if err != nil { return xerrors.Errorf("acquire unsealed sector path (allocate): %w", err) } @@ -240,7 +240,7 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s return nil } - srcPaths, srcDone, err := sb.sectors.AcquireSector(ctx, sector, stores.FTCache|stores.FTSealed, stores.FTNone, false) + srcPaths, srcDone, err := sb.sectors.AcquireSector(ctx, sector, stores.FTCache|stores.FTSealed, stores.FTNone, stores.PathStorage) if err != nil { return xerrors.Errorf("acquire sealed sector paths: %w", err) } @@ -358,7 +358,7 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s } func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error { - path, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTNone, false) + path, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTNone, stores.PathStorage) if err != nil { return xerrors.Errorf("acquire unsealed sector path: %w", err) } @@ -395,7 +395,7 @@ func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector abi.Se } func (sb *Sealer) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) { - paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTSealed|stores.FTCache, true) + paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTSealed|stores.FTCache, stores.PathSealing) if err != nil { return nil, xerrors.Errorf("acquiring sector paths: %w", err) } @@ -452,7 +452,7 @@ func (sb *Sealer) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke } func (sb *Sealer) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.PreCommit1Out) (storage.SectorCids, error) { - paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTSealed|stores.FTCache, 0, true) + paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTSealed|stores.FTCache, 0, stores.PathSealing) if err != nil { return storage.SectorCids{}, xerrors.Errorf("acquiring sector paths: %w", err) } @@ -470,7 +470,7 @@ func (sb *Sealer) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase } func (sb *Sealer) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) { - paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTSealed|stores.FTCache, 0, true) + paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTSealed|stores.FTCache, 0, stores.PathSealing) if err != nil { return nil, xerrors.Errorf("acquire sector paths: %w", err) } @@ -505,7 +505,7 @@ func (sb *Sealer) FinalizeSector(ctx context.Context, sector abi.SectorID, keepU return xerrors.Errorf("keepUnsealed unsupported") // TODO: impl for fastretrieval copies } - paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTCache, 0, false) + paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTCache, 0, stores.PathStorage) if err != nil { return xerrors.Errorf("acquiring sector cache path: %w", err) } diff --git a/ffiwrapper/sealer_test.go b/ffiwrapper/sealer_test.go index 5b9c3d1ac..0b5018d84 100644 --- a/ffiwrapper/sealer_test.go +++ b/ffiwrapper/sealer_test.go @@ -121,7 +121,7 @@ func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si abi.Sec t.Fatal("read wrong bytes") } - p, sd, err := sp.AcquireSector(context.TODO(), si, stores.FTUnsealed, stores.FTNone, false) + p, sd, err := sp.AcquireSector(context.TODO(), si, stores.FTUnsealed, stores.FTNone, stores.PathStorage) if err != nil { t.Fatal(err) } diff --git a/ffiwrapper/verifier_cgo.go b/ffiwrapper/verifier_cgo.go index e3e8dd886..60d56dddc 100644 --- a/ffiwrapper/verifier_cgo.go +++ b/ffiwrapper/verifier_cgo.go @@ -62,7 +62,7 @@ func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorIn sid := abi.SectorID{Miner: mid, Number: s.SectorNumber} - paths, d, err := sb.sectors.AcquireSector(ctx, sid, stores.FTCache|stores.FTSealed, 0, false) + paths, d, err := sb.sectors.AcquireSector(ctx, sid, stores.FTCache|stores.FTSealed, 0, stores.PathStorage) if err != nil { log.Warnw("failed to acquire sector, skipping", "sector", sid, "error", err) skipped = append(skipped, sid) diff --git a/manager.go b/manager.go index caea09cd0..6c1b93ced 100644 --- a/manager.go +++ b/manager.go @@ -218,12 +218,12 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect // TODO: Optimization: don't send unseal to a worker if the requested range is already unsealed unsealFetch := func(ctx context.Context, worker Worker) error { - if err := worker.Fetch(ctx, sector, stores.FTSealed|stores.FTCache, true, stores.AcquireCopy); err != nil { + if err := worker.Fetch(ctx, sector, stores.FTSealed|stores.FTCache, stores.PathSealing, stores.AcquireCopy); err != nil { return xerrors.Errorf("copy sealed/cache sector data: %w", err) } if len(best) > 0 { - if err := worker.Fetch(ctx, sector, stores.FTUnsealed, true, stores.AcquireMove); err != nil { + if err := worker.Fetch(ctx, sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove); err != nil { return xerrors.Errorf("copy unsealed sector data: %w", err) } } diff --git a/stores/filetype.go b/stores/filetype.go index c31dfefb2..60c47d1f7 100644 --- a/stores/filetype.go +++ b/stores/filetype.go @@ -22,7 +22,7 @@ const ( var FSOverheadSeal = map[SectorFileType]int{ // 10x overheads FTUnsealed: 10, FTSealed: 10, - FTCache: 70, // TODO: confirm for 32G + FTCache: 141, // 11 layers + D(2x ssize) + C + R } var FsOverheadFinalized = map[SectorFileType]int{ diff --git a/stores/http_handler.go b/stores/http_handler.go index 60f8a41c5..93fb94637 100644 --- a/stores/http_handler.go +++ b/stores/http_handler.go @@ -72,7 +72,7 @@ func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Requ // The caller has a lock on this sector already, no need to get one here // passing 0 spt because we don't allocate anything - paths, _, err := handler.Local.AcquireSector(r.Context(), id, 0, ft, FTNone, false, AcquireMove) + paths, _, err := handler.Local.AcquireSector(r.Context(), id, 0, ft, FTNone, PathStorage, AcquireMove) if err != nil { log.Error("%+v", err) w.WriteHeader(500) diff --git a/stores/interface.go b/stores/interface.go index 54aaec90c..b61980125 100644 --- a/stores/interface.go +++ b/stores/interface.go @@ -9,11 +9,11 @@ import ( "github.com/filecoin-project/specs-actors/actors/abi" ) -type PathType bool +type PathType string const ( - PathStorage = false - PathSealing = true + PathStorage = "storage" + PathSealing = "sealing" ) type AcquireMode string diff --git a/stores/local.go b/stores/local.go index 26b7ccb75..ac63ae0dd 100644 --- a/stores/local.go +++ b/stores/local.go @@ -398,12 +398,12 @@ func (st *Local) removeSector(ctx context.Context, sid abi.SectorID, typ SectorF } func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, spt abi.RegisteredSealProof, types SectorFileType) error { - dest, destIds, err := st.AcquireSector(ctx, s, spt, FTNone, types, false, AcquireMove) + dest, destIds, err := st.AcquireSector(ctx, s, spt, FTNone, types, PathStorage, AcquireMove) if err != nil { return xerrors.Errorf("acquire dest storage: %w", err) } - src, srcIds, err := st.AcquireSector(ctx, s, spt, types, FTNone, false, AcquireMove) + src, srcIds, err := st.AcquireSector(ctx, s, spt, types, FTNone, PathStorage, AcquireMove) if err != nil { return xerrors.Errorf("acquire src storage: %w", err) } From 8099621cd0b2a73f96751431ddd861e27498fae1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 6 Jul 2020 18:36:44 +0200 Subject: [PATCH 152/199] stores: Allow reserving local storage --- localworker.go | 8 ++++ stores/filetype.go | 12 +++--- stores/http_handler.go | 2 + stores/index.go | 2 +- stores/interface.go | 11 +++--- stores/local.go | 83 ++++++++++++++++++++++++++++++++++++++++-- 6 files changed, 104 insertions(+), 14 deletions(-) diff --git a/localworker.go b/localworker.go index a1d82209a..d03ace359 100644 --- a/localworker.go +++ b/localworker.go @@ -61,14 +61,22 @@ type localWorkerPathProvider struct { } func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, sealing stores.PathType) (stores.SectorPaths, func(), error) { + paths, storageIDs, err := l.w.storage.AcquireSector(ctx, sector, l.w.scfg.SealProofType, existing, allocate, sealing, l.op) if err != nil { return stores.SectorPaths{}, nil, err } + releaseStorage, err := l.w.localStore.Reserve(ctx, sector, l.w.scfg.SealProofType, allocate, storageIDs, stores.FSOverheadSeal) + if err != nil { + return stores.SectorPaths{}, nil, xerrors.Errorf("reserving storage space: %w", err) + } + log.Debugf("acquired sector %d (e:%d; a:%d): %v", sector, existing, allocate, paths) return paths, func() { + releaseStorage() + for _, fileType := range pathTypes { if fileType&allocate == 0 { continue diff --git a/stores/filetype.go b/stores/filetype.go index 60c47d1f7..650b92f71 100644 --- a/stores/filetype.go +++ b/stores/filetype.go @@ -19,15 +19,17 @@ const ( FTNone SectorFileType = 0 ) +const FSOverheadDen = 10 + var FSOverheadSeal = map[SectorFileType]int{ // 10x overheads - FTUnsealed: 10, - FTSealed: 10, + FTUnsealed: FSOverheadDen, + FTSealed: FSOverheadDen, FTCache: 141, // 11 layers + D(2x ssize) + C + R } var FsOverheadFinalized = map[SectorFileType]int{ - FTUnsealed: 10, - FTSealed: 10, + FTUnsealed: FSOverheadDen, + FTSealed: FSOverheadDen, FTCache: 2, } @@ -67,7 +69,7 @@ func (t SectorFileType) SealSpaceUse(spt abi.RegisteredSealProof) (uint64, error return 0, xerrors.Errorf("no seal overhead info for %s", pathType) } - need += uint64(oh) * uint64(ssize) / 10 + need += uint64(oh) * uint64(ssize) / FSOverheadDen } return need, nil diff --git a/stores/http_handler.go b/stores/http_handler.go index 93fb94637..4f0556138 100644 --- a/stores/http_handler.go +++ b/stores/http_handler.go @@ -79,6 +79,8 @@ func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Requ return } + // TODO: reserve local storage here + path := PathByType(paths, ft) if path == "" { log.Error("acquired path was empty") diff --git a/stores/index.go b/stores/index.go index 049e2dc20..e48ae02bb 100644 --- a/stores/index.go +++ b/stores/index.go @@ -361,7 +361,7 @@ func (i *Index) StorageBestAlloc(ctx context.Context, allocate SectorFileType, s continue } - if spaceReq > p.fsi.Available { + if spaceReq > uint64(p.fsi.Available) { log.Debugf("not allocating on %s, out of space (available: %d, need: %d)", p.info.ID, p.fsi.Available, spaceReq) continue } diff --git a/stores/interface.go b/stores/interface.go index b61980125..6fd4a7ad7 100644 --- a/stores/interface.go +++ b/stores/interface.go @@ -44,13 +44,14 @@ func Stat(path string) (FsStat, error) { } return FsStat{ - Capacity: stat.Blocks * uint64(stat.Bsize), - Available: stat.Bavail * uint64(stat.Bsize), + Capacity: int64(stat.Blocks) * stat.Bsize, + Available: int64(stat.Bavail) * stat.Bsize, }, nil } type FsStat struct { - Capacity uint64 - Available uint64 // Available to use for sector storage - Used uint64 + Capacity int64 + Available int64 // Available to use for sector storage + Used int64 + Reserved int64 } diff --git a/stores/local.go b/stores/local.go index ac63ae0dd..a21909d69 100644 --- a/stores/local.go +++ b/stores/local.go @@ -67,6 +67,25 @@ type Local struct { type path struct { local string // absolute local path + + reserved int64 + reservations map[abi.SectorID]SectorFileType +} + +type statFn func(path string) (FsStat, error) +func (p *path) stat(st statFn) (FsStat, error) { + stat, err := st(p.local) + if err != nil { + return FsStat{}, err + } + + stat.Reserved = p.reserved + stat.Available -= p.reserved + if stat.Available < 0 { + stat.Available = 0 + } + + return stat, err } func NewLocal(ctx context.Context, ls LocalStorage, index SectorIndex, urls []string) (*Local, error) { @@ -98,9 +117,12 @@ func (st *Local) OpenPath(ctx context.Context, p string) error { out := &path{ local: p, + + reserved: 0, + reservations: map[abi.SectorID]SectorFileType{}, } - fst, err := st.localStorage.Stat(p) + fst, err := out.stat(st.localStorage.Stat) if err != nil { return err } @@ -179,7 +201,7 @@ func (st *Local) reportHealth(ctx context.Context) { toReport := map[ID]HealthReport{} for id, p := range st.paths { - stat, err := st.localStorage.Stat(p.local) + stat, err := p.stat(st.localStorage.Stat) toReport[id] = HealthReport{ Stat: stat, @@ -197,6 +219,61 @@ func (st *Local) reportHealth(ctx context.Context) { } } +func (st *Local) Reserve(ctx context.Context, sid abi.SectorID, spt abi.RegisteredSealProof, ft SectorFileType, storageIDs SectorPaths, overheadTab map[SectorFileType]int) (func(), error) { + ssize, err := spt.SectorSize() + if err != nil { + return nil, xerrors.Errorf("getting sector size: %w", err) + } + + st.localLk.Lock() + + done := func(){} + deferredDone := func() { done() } + defer func() { + st.localLk.Unlock() + deferredDone() + }() + + for _, fileType := range PathTypes { + if fileType&ft == 0 { + continue + } + + id := ID(PathByType(storageIDs, fileType)) + + p, ok := st.paths[id] + if !ok { + return nil, errPathNotFound + } + + stat, err := p.stat(st.localStorage.Stat) + if err != nil { + return nil, err + } + + overhead := int64(overheadTab[fileType]) * int64(ssize) / FSOverheadDen + + if stat.Available < overhead { + return nil, xerrors.Errorf("can't reserve %d bytes in '%s' (id:%s), only %d available", overhead, p.local, id, stat.Available) + } + + p.reserved += overhead + + prevDone := done + done = func() { + prevDone() + + st.localLk.Lock() + defer st.localLk.Unlock() + + p.reserved -= overhead + } + } + + deferredDone = func() {} + return done, nil +} + func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, spt abi.RegisteredSealProof, existing SectorFileType, allocate SectorFileType, pathType PathType, op AcquireMode) (SectorPaths, SectorPaths, error) { if existing|allocate != existing^allocate { return SectorPaths{}, SectorPaths{}, xerrors.New("can't both find and allocate a sector") @@ -463,7 +540,7 @@ func (st *Local) FsStat(ctx context.Context, id ID) (FsStat, error) { return FsStat{}, errPathNotFound } - return st.localStorage.Stat(p.local) + return p.stat(st.localStorage.Stat) } var _ Store = &Local{} From 7279a80dfafdce363883434002431d98e8cdfeee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 6 Jul 2020 18:56:46 +0200 Subject: [PATCH 153/199] localstorage: don't double count reserved storage --- stores/local.go | 48 +++++++++++++++++++++++++++++++++++++----------- 1 file changed, 37 insertions(+), 11 deletions(-) diff --git a/stores/local.go b/stores/local.go index a21909d69..4e91748f8 100644 --- a/stores/local.go +++ b/stores/local.go @@ -49,6 +49,7 @@ type LocalStorage interface { SetStorage(func(*StorageConfig)) error Stat(path string) (FsStat, error) + DiskUsage(path string) (int64, error) // returns real disk usage for a file/directory } const MetaFile = "sectorstore.json" @@ -72,15 +73,36 @@ type path struct { reservations map[abi.SectorID]SectorFileType } -type statFn func(path string) (FsStat, error) -func (p *path) stat(st statFn) (FsStat, error) { - stat, err := st(p.local) +func (p *path) stat(ls LocalStorage) (FsStat, error) { + stat, err := ls.Stat(p.local) if err != nil { return FsStat{}, err } stat.Reserved = p.reserved - stat.Available -= p.reserved + + for id, ft := range p.reservations { + for _, fileType := range PathTypes { + if fileType&ft == 0 { + continue + } + + used, err := ls.DiskUsage(p.sectorPath(id, fileType)) + if err != nil { + log.Errorf("getting disk usage of '%s': %+v", p.sectorPath(id, fileType), err) + continue + } + + stat.Reserved -= used + } + } + + if stat.Reserved < 0 { + log.Warnf("negative reserved storage: p.reserved=%d, reserved: %d", p.reserved, stat.Reserved) + stat.Reserved = 0 + } + + stat.Available -= stat.Reserved if stat.Available < 0 { stat.Available = 0 } @@ -88,6 +110,10 @@ func (p *path) stat(st statFn) (FsStat, error) { return stat, err } +func (p *path) sectorPath(sid abi.SectorID, fileType SectorFileType) string { + return filepath.Join(p.local, fileType.String(), SectorName(sid)) +} + func NewLocal(ctx context.Context, ls LocalStorage, index SectorIndex, urls []string) (*Local, error) { l := &Local{ localStorage: ls, @@ -122,7 +148,7 @@ func (st *Local) OpenPath(ctx context.Context, p string) error { reservations: map[abi.SectorID]SectorFileType{}, } - fst, err := out.stat(st.localStorage.Stat) + fst, err := out.stat(st.localStorage) if err != nil { return err } @@ -201,7 +227,7 @@ func (st *Local) reportHealth(ctx context.Context) { toReport := map[ID]HealthReport{} for id, p := range st.paths { - stat, err := p.stat(st.localStorage.Stat) + stat, err := p.stat(st.localStorage) toReport[id] = HealthReport{ Stat: stat, @@ -246,7 +272,7 @@ func (st *Local) Reserve(ctx context.Context, sid abi.SectorID, spt abi.Register return nil, errPathNotFound } - stat, err := p.stat(st.localStorage.Stat) + stat, err := p.stat(st.localStorage) if err != nil { return nil, err } @@ -306,7 +332,7 @@ func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, spt abi.Re continue } - spath := filepath.Join(p.local, fileType.String(), SectorName(sid)) + spath := p.sectorPath(sid, fileType) SetPathByType(&out, fileType, spath) SetPathByType(&storageIDs, fileType, string(info.ID)) @@ -348,7 +374,7 @@ func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, spt abi.Re // TODO: Check free space - best = filepath.Join(p.local, fileType.String(), SectorName(sid)) + best = p.sectorPath(sid, fileType) bestID = si.ID } @@ -464,7 +490,7 @@ func (st *Local) removeSector(ctx context.Context, sid abi.SectorID, typ SectorF return xerrors.Errorf("dropping sector from index: %w", err) } - spath := filepath.Join(p.local, typ.String(), SectorName(sid)) + spath := p.sectorPath(sid, typ) log.Infof("remove %s", spath) if err := os.RemoveAll(spath); err != nil { @@ -540,7 +566,7 @@ func (st *Local) FsStat(ctx context.Context, id ID) (FsStat, error) { return FsStat{}, errPathNotFound } - return p.stat(st.localStorage.Stat) + return p.stat(st.localStorage) } var _ Store = &Local{} From 63c62c49cecb5aa8dec5c4f51368e1676382d86c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 6 Jul 2020 19:19:13 +0200 Subject: [PATCH 154/199] Fix tests --- manager_test.go | 4 ++++ stores/local_test.go | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/manager_test.go b/manager_test.go index ae318b487..b9198a2b3 100644 --- a/manager_test.go +++ b/manager_test.go @@ -24,6 +24,10 @@ import ( type testStorage stores.StorageConfig +func (t testStorage) DiskUsage(path string) (int64, error) { + return 1, nil // close enough +} + func newTestStorage(t *testing.T) *testStorage { tp, err := ioutil.TempDir(os.TempDir(), "sector-storage-test-") require.NoError(t, err) diff --git a/stores/local_test.go b/stores/local_test.go index 8e654d725..e748d061b 100644 --- a/stores/local_test.go +++ b/stores/local_test.go @@ -19,6 +19,10 @@ type TestingLocalStorage struct { c StorageConfig } +func (t *TestingLocalStorage) DiskUsage(path string) (int64, error) { + return 1, nil +} + func (t *TestingLocalStorage) GetStorage() (StorageConfig, error) { return t.c, nil } From 63ba9bd01836bcf2ed10242f3562bd17c38e2438 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 6 Jul 2020 19:19:24 +0200 Subject: [PATCH 155/199] gofmt --- stores/local.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stores/local.go b/stores/local.go index 4e91748f8..cf52540ad 100644 --- a/stores/local.go +++ b/stores/local.go @@ -69,7 +69,7 @@ type Local struct { type path struct { local string // absolute local path - reserved int64 + reserved int64 reservations map[abi.SectorID]SectorFileType } @@ -144,7 +144,7 @@ func (st *Local) OpenPath(ctx context.Context, p string) error { out := &path{ local: p, - reserved: 0, + reserved: 0, reservations: map[abi.SectorID]SectorFileType{}, } @@ -253,7 +253,7 @@ func (st *Local) Reserve(ctx context.Context, sid abi.SectorID, spt abi.Register st.localLk.Lock() - done := func(){} + done := func() {} deferredDone := func() { done() } defer func() { st.localLk.Unlock() From 0bc41d562dd3ba0a824e825dd9ea79939742ca11 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Wed, 8 Jul 2020 16:58:09 +0200 Subject: [PATCH 156/199] Move statfs to fsutil --- ffiwrapper/sealer_cgo.go | 3 +-- fsutil/dealloc_other.go | 1 - fsutil/statfs.go | 7 +++++++ fsutil/statfs_unix.go | 19 +++++++++++++++++++ fsutil/statfs_windows.go | 28 ++++++++++++++++++++++++++++ manager.go | 3 ++- manager_test.go | 5 +++-- stores/index.go | 9 +++++---- stores/interface.go | 26 ++------------------------ stores/local.go | 11 ++++++----- stores/local_test.go | 6 +++--- stores/remote.go | 25 +++++++++++++------------ 12 files changed, 89 insertions(+), 54 deletions(-) create mode 100644 fsutil/statfs.go create mode 100644 fsutil/statfs_unix.go create mode 100644 fsutil/statfs_windows.go diff --git a/ffiwrapper/sealer_cgo.go b/ffiwrapper/sealer_cgo.go index d3abe1063..c766f5555 100644 --- a/ffiwrapper/sealer_cgo.go +++ b/ffiwrapper/sealer_cgo.go @@ -521,8 +521,7 @@ func (sb *Sealer) FinalizeSector(ctx context.Context, sector abi.SectorID, keepU } } - - paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, 0, false) + paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, 0, stores.PathStorage) if err != nil { return xerrors.Errorf("acquiring sector cache path: %w", err) } diff --git a/fsutil/dealloc_other.go b/fsutil/dealloc_other.go index 721116af1..3ae8973ff 100644 --- a/fsutil/dealloc_other.go +++ b/fsutil/dealloc_other.go @@ -10,7 +10,6 @@ import ( var log = logging.Logger("fsutil") - func Deallocate(file *os.File, offset int64, length int64) error { log.Warnf("deallocating space not supported") diff --git a/fsutil/statfs.go b/fsutil/statfs.go new file mode 100644 index 000000000..2a00ccb9a --- /dev/null +++ b/fsutil/statfs.go @@ -0,0 +1,7 @@ +package fsutil + +type FsStat struct { + Capacity int64 + Available int64 // Available to use for sector storage + Reserved int64 +} diff --git a/fsutil/statfs_unix.go b/fsutil/statfs_unix.go new file mode 100644 index 000000000..3e69d5a8e --- /dev/null +++ b/fsutil/statfs_unix.go @@ -0,0 +1,19 @@ +package fsutil + +import ( + "syscall" + + "golang.org/x/xerrors" +) + +func Statfs(path string) (FsStat, error) { + var stat syscall.Statfs_t + if err := syscall.Statfs(path, &stat); err != nil { + return FsStat{}, xerrors.Errorf("statfs: %w", err) + } + + return FsStat{ + Capacity: int64(stat.Blocks) * stat.Bsize, + Available: int64(stat.Bavail) * stat.Bsize, + }, nil +} diff --git a/fsutil/statfs_windows.go b/fsutil/statfs_windows.go new file mode 100644 index 000000000..d78565182 --- /dev/null +++ b/fsutil/statfs_windows.go @@ -0,0 +1,28 @@ +package fsutil + +import ( + "syscall" + "unsafe" +) + +func Statfs(volumePath string) (FsStat, error) { + // From https://github.com/ricochet2200/go-disk-usage/blob/master/du/diskusage_windows.go + + h := syscall.MustLoadDLL("kernel32.dll") + c := h.MustFindProc("GetDiskFreeSpaceExW") + + var freeBytes int64 + var totalBytes int64 + var availBytes int64 + + c.Call( + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(volumePath))), + uintptr(unsafe.Pointer(&freeBytes)), + uintptr(unsafe.Pointer(&totalBytes)), + uintptr(unsafe.Pointer(&availBytes))) + + return FsStat{ + Capacity: totalBytes, + Available: availBytes, + }, nil +} diff --git a/manager.go b/manager.go index a7053c102..0c18645ac 100644 --- a/manager.go +++ b/manager.go @@ -3,6 +3,7 @@ package sectorstorage import ( "context" "errors" + "github.com/filecoin-project/sector-storage/fsutil" "io" "net/http" @@ -491,7 +492,7 @@ func (m *Manager) StorageLocal(ctx context.Context) (map[stores.ID]string, error return out, nil } -func (m *Manager) FsStat(ctx context.Context, id stores.ID) (stores.FsStat, error) { +func (m *Manager) FsStat(ctx context.Context, id stores.ID) (fsutil.FsStat, error) { return m.storage.FsStat(ctx, id) } diff --git a/manager_test.go b/manager_test.go index b9198a2b3..8539f8918 100644 --- a/manager_test.go +++ b/manager_test.go @@ -5,6 +5,7 @@ import ( "context" "encoding/json" "fmt" + "github.com/filecoin-project/sector-storage/fsutil" "github.com/filecoin-project/sector-storage/sealtasks" logging "github.com/ipfs/go-log" "io/ioutil" @@ -69,8 +70,8 @@ func (t *testStorage) SetStorage(f func(*stores.StorageConfig)) error { return nil } -func (t *testStorage) Stat(path string) (stores.FsStat, error) { - return stores.Stat(path) +func (t *testStorage) Stat(path string) (fsutil.FsStat, error) { + return fsutil.Statfs(path) } var _ stores.LocalStorage = &testStorage{} diff --git a/stores/index.go b/stores/index.go index e48ae02bb..c85dc125e 100644 --- a/stores/index.go +++ b/stores/index.go @@ -2,6 +2,7 @@ package stores import ( "context" + "github.com/filecoin-project/sector-storage/fsutil" "net/url" gopath "path" "sort" @@ -34,7 +35,7 @@ type StorageInfo struct { } type HealthReport struct { - Stat FsStat + Stat fsutil.FsStat Err error } @@ -50,7 +51,7 @@ type SectorStorageInfo struct { } type SectorIndex interface { // part of storage-miner api - StorageAttach(context.Context, StorageInfo, FsStat) error + StorageAttach(context.Context, StorageInfo, fsutil.FsStat) error StorageInfo(context.Context, ID) (StorageInfo, error) StorageReportHealth(context.Context, ID, HealthReport) error @@ -77,7 +78,7 @@ type declMeta struct { type storageEntry struct { info *StorageInfo - fsi FsStat + fsi fsutil.FsStat lastHeartbeat time.Time heartbeatErr error @@ -130,7 +131,7 @@ func (i *Index) StorageList(ctx context.Context) (map[ID][]Decl, error) { return out, nil } -func (i *Index) StorageAttach(ctx context.Context, si StorageInfo, st FsStat) error { +func (i *Index) StorageAttach(ctx context.Context, si StorageInfo, st fsutil.FsStat) error { i.lk.Lock() defer i.lk.Unlock() diff --git a/stores/interface.go b/stores/interface.go index 6fd4a7ad7..836705f40 100644 --- a/stores/interface.go +++ b/stores/interface.go @@ -2,10 +2,7 @@ package stores import ( "context" - "syscall" - - "golang.org/x/xerrors" - + "github.com/filecoin-project/sector-storage/fsutil" "github.com/filecoin-project/specs-actors/actors/abi" ) @@ -34,24 +31,5 @@ type Store interface { // move sectors into storage MoveStorage(ctx context.Context, s abi.SectorID, spt abi.RegisteredSealProof, types SectorFileType) error - FsStat(ctx context.Context, id ID) (FsStat, error) -} - -func Stat(path string) (FsStat, error) { - var stat syscall.Statfs_t - if err := syscall.Statfs(path, &stat); err != nil { - return FsStat{}, xerrors.Errorf("statfs: %w", err) - } - - return FsStat{ - Capacity: int64(stat.Blocks) * stat.Bsize, - Available: int64(stat.Bavail) * stat.Bsize, - }, nil -} - -type FsStat struct { - Capacity int64 - Available int64 // Available to use for sector storage - Used int64 - Reserved int64 + FsStat(ctx context.Context, id ID) (fsutil.FsStat, error) } diff --git a/stores/local.go b/stores/local.go index cf52540ad..cbc9dbae1 100644 --- a/stores/local.go +++ b/stores/local.go @@ -3,6 +3,7 @@ package stores import ( "context" "encoding/json" + "github.com/filecoin-project/sector-storage/fsutil" "io/ioutil" "math/bits" "math/rand" @@ -48,7 +49,7 @@ type LocalStorage interface { GetStorage() (StorageConfig, error) SetStorage(func(*StorageConfig)) error - Stat(path string) (FsStat, error) + Stat(path string) (fsutil.FsStat, error) DiskUsage(path string) (int64, error) // returns real disk usage for a file/directory } @@ -73,10 +74,10 @@ type path struct { reservations map[abi.SectorID]SectorFileType } -func (p *path) stat(ls LocalStorage) (FsStat, error) { +func (p *path) stat(ls LocalStorage) (fsutil.FsStat, error) { stat, err := ls.Stat(p.local) if err != nil { - return FsStat{}, err + return fsutil.FsStat{}, err } stat.Reserved = p.reserved @@ -557,13 +558,13 @@ func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, spt abi.Regist var errPathNotFound = xerrors.Errorf("fsstat: path not found") -func (st *Local) FsStat(ctx context.Context, id ID) (FsStat, error) { +func (st *Local) FsStat(ctx context.Context, id ID) (fsutil.FsStat, error) { st.localLk.RLock() defer st.localLk.RUnlock() p, ok := st.paths[id] if !ok { - return FsStat{}, errPathNotFound + return fsutil.FsStat{}, errPathNotFound } return p.stat(st.localStorage) diff --git a/stores/local_test.go b/stores/local_test.go index e748d061b..56ac7c020 100644 --- a/stores/local_test.go +++ b/stores/local_test.go @@ -3,6 +3,7 @@ package stores import ( "context" "encoding/json" + "github.com/filecoin-project/sector-storage/fsutil" "github.com/google/uuid" "io/ioutil" "os" @@ -32,11 +33,10 @@ func (t *TestingLocalStorage) SetStorage(f func(*StorageConfig)) error { return nil } -func (t *TestingLocalStorage) Stat(path string) (FsStat, error) { - return FsStat{ +func (t *TestingLocalStorage) Stat(path string) (fsutil.FsStat, error) { + return fsutil.FsStat{ Capacity: pathSize, Available: pathSize, - Used: 0, }, nil } diff --git a/stores/remote.go b/stores/remote.go index 30fe3abf9..c78f026f4 100644 --- a/stores/remote.go +++ b/stores/remote.go @@ -3,6 +3,7 @@ package stores import ( "context" "encoding/json" + "github.com/filecoin-project/sector-storage/fsutil" "io/ioutil" "math/bits" "mime" @@ -270,7 +271,7 @@ func (r *Remote) deleteFromRemote(ctx context.Context, url string) error { return nil } -func (r *Remote) FsStat(ctx context.Context, id ID) (FsStat, error) { +func (r *Remote) FsStat(ctx context.Context, id ID) (fsutil.FsStat, error) { st, err := r.local.FsStat(ctx, id) switch err { case nil: @@ -278,53 +279,53 @@ func (r *Remote) FsStat(ctx context.Context, id ID) (FsStat, error) { case errPathNotFound: break default: - return FsStat{}, xerrors.Errorf("local stat: %w", err) + return fsutil.FsStat{}, xerrors.Errorf("local stat: %w", err) } si, err := r.index.StorageInfo(ctx, id) if err != nil { - return FsStat{}, xerrors.Errorf("getting remote storage info: %w", err) + return fsutil.FsStat{}, xerrors.Errorf("getting remote storage info: %w", err) } if len(si.URLs) == 0 { - return FsStat{}, xerrors.Errorf("no known URLs for remote storage %s", id) + return fsutil.FsStat{}, xerrors.Errorf("no known URLs for remote storage %s", id) } rl, err := url.Parse(si.URLs[0]) if err != nil { - return FsStat{}, xerrors.Errorf("failed to parse url: %w", err) + return fsutil.FsStat{}, xerrors.Errorf("failed to parse url: %w", err) } rl.Path = gopath.Join(rl.Path, "stat", string(id)) req, err := http.NewRequest("GET", rl.String(), nil) if err != nil { - return FsStat{}, xerrors.Errorf("request: %w", err) + return fsutil.FsStat{}, xerrors.Errorf("request: %w", err) } req.Header = r.auth req = req.WithContext(ctx) resp, err := http.DefaultClient.Do(req) if err != nil { - return FsStat{}, xerrors.Errorf("do request: %w", err) + return fsutil.FsStat{}, xerrors.Errorf("do request: %w", err) } switch resp.StatusCode { case 200: break case 404: - return FsStat{}, errPathNotFound + return fsutil.FsStat{}, errPathNotFound case 500: b, err := ioutil.ReadAll(resp.Body) if err != nil { - return FsStat{}, xerrors.Errorf("fsstat: got http 500, then failed to read the error: %w", err) + return fsutil.FsStat{}, xerrors.Errorf("fsstat: got http 500, then failed to read the error: %w", err) } - return FsStat{}, xerrors.Errorf("fsstat: got http 500: %s", string(b)) + return fsutil.FsStat{}, xerrors.Errorf("fsstat: got http 500: %s", string(b)) } - var out FsStat + var out fsutil.FsStat if err := json.NewDecoder(resp.Body).Decode(&out); err != nil { - return FsStat{}, xerrors.Errorf("decoding fsstat: %w", err) + return fsutil.FsStat{}, xerrors.Errorf("decoding fsstat: %w", err) } defer resp.Body.Close() From c0a242a1eb664dbbc32e6454c26a4890eb95d5dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Wed, 8 Jul 2020 17:09:35 +0200 Subject: [PATCH 157/199] fsutil: FileSize util --- fsutil/filesize_unix.go | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 fsutil/filesize_unix.go diff --git a/fsutil/filesize_unix.go b/fsutil/filesize_unix.go new file mode 100644 index 000000000..e45ccca17 --- /dev/null +++ b/fsutil/filesize_unix.go @@ -0,0 +1,25 @@ +package fsutil + +import ( + "syscall" + + "golang.org/x/xerrors" +) + +type SizeInfo struct { + OnDisk int64 +} + +// FileSize returns bytes used by a file on disk +func FileSize(path string) (SizeInfo, error) { + var stat syscall.Stat_t + if err := syscall.Stat(path, &stat); err != nil { + return SizeInfo{}, xerrors.Errorf("stat: %w", err) + } + + // NOTE: stat.Blocks is in 512B blocks, NOT in stat.Blksize + // See https://www.gnu.org/software/libc/manual/html_node/Attribute-Meanings.html + return SizeInfo{ + stat.Blocks * 512, + }, nil +} \ No newline at end of file From 56570a22005f77a5eb744109a3bd845c3a5def0f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Wed, 8 Jul 2020 17:39:58 +0200 Subject: [PATCH 158/199] mock: Implemet ReleaseUnsealed correctly --- mock/mock.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mock/mock.go b/mock/mock.go index cbc3a1f99..7c9ed57f0 100644 --- a/mock/mock.go +++ b/mock/mock.go @@ -320,7 +320,7 @@ func (mgr *SectorMgr) FinalizeSector(context.Context, abi.SectorID, []storage.Ra } func (mgr *SectorMgr) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error { - panic("implement me") + return nil } func (mgr *SectorMgr) Remove(ctx context.Context, sector abi.SectorID) error { From 9af64c9b217e6b118ec29a669be7fb455bfe54e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Wed, 8 Jul 2020 19:51:26 +0200 Subject: [PATCH 159/199] ffiwrapper: Fix UnsealPiece --- ffiwrapper/sealer_cgo.go | 5 ++++- fsutil/filesize_unix.go | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/ffiwrapper/sealer_cgo.go b/ffiwrapper/sealer_cgo.go index c766f5555..88218921c 100644 --- a/ffiwrapper/sealer_cgo.go +++ b/ffiwrapper/sealer_cgo.go @@ -254,7 +254,10 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s defer sealed.Close() var at, nextat abi.PaddedPieceSize - for { + first := true + for first || toUnseal.HasNext() { + first = false + piece, err := toUnseal.NextRun() if err != nil { return xerrors.Errorf("getting next range to unseal: %w", err) diff --git a/fsutil/filesize_unix.go b/fsutil/filesize_unix.go index e45ccca17..d596e4be7 100644 --- a/fsutil/filesize_unix.go +++ b/fsutil/filesize_unix.go @@ -22,4 +22,4 @@ func FileSize(path string) (SizeInfo, error) { return SizeInfo{ stat.Blocks * 512, }, nil -} \ No newline at end of file +} From ac7dc28cfb2c1439e40cae38e4ed5757e696b39a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 9 Jul 2020 12:58:52 +0200 Subject: [PATCH 160/199] sched: WIP Windows --- sched.go | 436 ++++++++++++++++++++++++++++--------------------------- 1 file changed, 222 insertions(+), 214 deletions(-) diff --git a/sched.go b/sched.go index 9fddd7bd9..af6981b08 100644 --- a/sched.go +++ b/sched.go @@ -3,11 +3,11 @@ package sectorstorage import ( "container/heap" "context" + "math/rand" "sort" "sync" "time" - "github.com/hashicorp/go-multierror" "golang.org/x/xerrors" "github.com/filecoin-project/specs-actors/actors/abi" @@ -20,6 +20,11 @@ type schedPrioCtxKey int var SchedPriorityKey schedPrioCtxKey var DefaultSchedPriority = 0 +var SelectorTimeout = 5 * time.Second + +var ( + SchedWindows = 2 +) func getPriority(ctx context.Context) int { sp := ctx.Value(SchedPriorityKey) @@ -56,11 +61,63 @@ type scheduler struct { watchClosing chan WorkerID workerClosing chan WorkerID - schedule chan *workerRequest - workerFree chan WorkerID - closing chan struct{} + schedule chan *workerRequest + windowRequests chan *schedWindowRequest - schedQueue *requestQueue + // owned by the sh.runSched goroutine + schedQueue *requestQueue + openWindows []*schedWindowRequest + + closing chan struct{} +} + +type workerHandle struct { + w Worker + + info storiface.WorkerInfo + + preparing *activeResources + active *activeResources +} + +type schedWindowRequest struct { + worker WorkerID + + done chan *schedWindow +} + +type schedWindow struct { + worker WorkerID + allocated *activeResources + todo []*workerRequest +} + +type activeResources struct { + memUsedMin uint64 + memUsedMax uint64 + gpuUsed bool + cpuUse uint64 + + cond *sync.Cond +} + +type workerRequest struct { + sector abi.SectorID + taskType sealtasks.TaskType + priority int // larger values more important + sel WorkerSelector + + prepare WorkerAction + work WorkerAction + + index int // The index of the item in the heap. + + ret chan<- workerResponse + ctx context.Context +} + +type workerResponse struct { + err error } func newScheduler(spt abi.RegisteredSealProof) *scheduler { @@ -75,9 +132,8 @@ func newScheduler(spt abi.RegisteredSealProof) *scheduler { watchClosing: make(chan WorkerID), workerClosing: make(chan WorkerID), - schedule: make(chan *workerRequest), - workerFree: make(chan WorkerID), - closing: make(chan struct{}), + schedule: make(chan *workerRequest), + closing: make(chan struct{}), schedQueue: &requestQueue{}, } @@ -115,25 +171,6 @@ func (sh *scheduler) Schedule(ctx context.Context, sector abi.SectorID, taskType } } -type workerRequest struct { - sector abi.SectorID - taskType sealtasks.TaskType - priority int // larger values more important - sel WorkerSelector - - prepare WorkerAction - work WorkerAction - - index int // The index of the item in the heap. - - ret chan<- workerResponse - ctx context.Context -} - -type workerResponse struct { - err error -} - func (r *workerRequest) respond(err error) { select { case r.ret <- workerResponse{err: err}: @@ -142,46 +179,25 @@ func (r *workerRequest) respond(err error) { } } -type activeResources struct { - memUsedMin uint64 - memUsedMax uint64 - gpuUsed bool - cpuUse uint64 - - cond *sync.Cond -} - -type workerHandle struct { - w Worker - - info storiface.WorkerInfo - - preparing *activeResources - active *activeResources -} - func (sh *scheduler) runSched() { go sh.runWorkerWatcher() for { select { case w := <-sh.newWorkers: - sh.schedNewWorker(w) - case wid := <-sh.workerClosing: - sh.schedDropWorker(wid) - case req := <-sh.schedule: - scheduled, err := sh.maybeSchedRequest(req) - if err != nil { - req.respond(err) - continue - } - if scheduled { - continue - } + sh.newWorker(w) + case wid := <-sh.workerClosing: + sh.dropWorker(wid) + + case req := <-sh.schedule: heap.Push(sh.schedQueue, req) - case wid := <-sh.workerFree: - sh.onWorkerFreed(wid) + sh.trySched() + + case req := <-sh.windowRequests: + sh.openWindows = append(sh.openWindows, req) + sh.trySched() + case <-sh.closing: sh.schedClose() return @@ -189,169 +205,161 @@ func (sh *scheduler) runSched() { } } -func (sh *scheduler) onWorkerFreed(wid WorkerID) { - sh.workersLk.Lock() - w, ok := sh.workers[wid] - sh.workersLk.Unlock() - if !ok { - log.Warnf("onWorkerFreed on invalid worker %d", wid) +func (sh *scheduler) trySched() { + /* + This assigns tasks to workers based on: + - Task priority (achieved by handling sh.schedQueue in order, since it's already sorted by priority) + - Worker resource availability + - Task-specified worker preference (acceptableWindows array below sorted by this preference) + - Window request age + + 1. For each task in the schedQueue find windows which can handle them + 1.1. Create list of windows capable of handling a task + 1.2. Sort windows according to task selector preferences + 2. Going through schedQueue again, assign task to first acceptable window + with resources available + 3. Submit windows with scheduled tasks to workers + + */ + + windows := make([]schedWindow, len(sh.openWindows)) + acceptableWindows := make([][]int, sh.schedQueue.Len()) + + // Step 1 + for sqi := 0; sqi < sh.schedQueue.Len(); sqi++ { + task := (*sh.schedQueue)[sqi] + needRes := ResourceTable[task.taskType][sh.spt] + + for wnd, windowRequest := range sh.openWindows { + worker := sh.workers[windowRequest.worker] + + // TODO: allow bigger windows + if !windows[wnd].allocated.canHandleRequest(needRes, windowRequest.worker, worker.info.Resources) { + continue + } + + ok, err := task.sel.Ok(task.ctx, task.taskType, sh.spt, worker) + if err != nil { + log.Errorf("trySched(1) req.sel.Ok error: %+v", err) + continue + } + + if !ok { + continue + } + + acceptableWindows[sqi] = append(acceptableWindows[sqi], wnd) + } + + if len(acceptableWindows[sqi]) == 0 { + continue + } + + // Pick best worker (shuffle in case some workers are equally as good) + rand.Shuffle(len(acceptableWindows[sqi]), func(i, j int) { + acceptableWindows[sqi][i], acceptableWindows[sqi][j] = acceptableWindows[sqi][j], acceptableWindows[sqi][i] + }) + sort.SliceStable(acceptableWindows, func(i, j int) bool { + wii := sh.openWindows[acceptableWindows[sqi][i]].worker + wji := sh.openWindows[acceptableWindows[sqi][j]].worker + + if wii == wji { + // for the same worker prefer older windows + return acceptableWindows[sqi][i] < acceptableWindows[sqi][j] + } + + wi := sh.workers[wii] + wj := sh.workers[wji] + + rpcCtx, cancel := context.WithTimeout(task.ctx, SelectorTimeout) + defer cancel() + + r, err := task.sel.Cmp(rpcCtx, task.taskType, wi, wj) + if err != nil { + log.Error("selecting best worker: %s", err) + } + return r + }) + } + + // Step 2 + scheduled := 0 + + for sqi := 0; sqi < sh.schedQueue.Len(); sqi++ { + task := (*sh.schedQueue)[sqi] + needRes := ResourceTable[task.taskType][sh.spt] + + selectedWindow := -1 + for _, wnd := range acceptableWindows[sqi+scheduled] { + wid := sh.openWindows[wnd].worker + wr := sh.workers[wid].info.Resources + + // TODO: allow bigger windows + if windows[wnd].allocated.canHandleRequest(needRes, wid, wr) { + continue + } + + windows[wnd].allocated.add(wr, needRes) + + selectedWindow = wnd + break + } + + windows[selectedWindow].todo = append(windows[selectedWindow].todo, task) + + heap.Remove(sh.schedQueue, sqi) + sqi-- + scheduled++ + } + + // Step 3 + + if scheduled == 0 { return } - for i := 0; i < sh.schedQueue.Len(); i++ { - req := (*sh.schedQueue)[i] - - ok, err := req.sel.Ok(req.ctx, req.taskType, sh.spt, w) - if err != nil { - log.Errorf("onWorkerFreed req.sel.Ok error: %+v", err) + scheduledWindows := map[int]struct{}{} + for wnd, window := range windows { + if len(window.todo) == 0 { + // Nothing scheduled here, keep the window open continue } - if !ok { - continue - } + scheduledWindows[wnd] = struct{}{} - scheduled, err := sh.maybeSchedRequest(req) - if err != nil { - req.respond(err) - continue - } - - if scheduled { - heap.Remove(sh.schedQueue, i) - i-- - continue + select { + case sh.openWindows[wnd].done <- &window: + default: + log.Error("expected sh.openWindows[wnd].done to be buffered") } } + + // Rewrite sh.openWindows array, removing scheduled windows + newOpenWindows := make([]*schedWindowRequest, 0, len(sh.openWindows)-len(scheduledWindows)) + for wnd, window := range sh.openWindows { + if _, scheduled := scheduledWindows[wnd]; !scheduled { + // keep unscheduled windows open + continue + } + + newOpenWindows = append(newOpenWindows, window) + } + + sh.openWindows = newOpenWindows } -var selectorTimeout = 5 * time.Second - -func (sh *scheduler) maybeSchedRequest(req *workerRequest) (bool, error) { - sh.workersLk.Lock() - defer sh.workersLk.Unlock() - - tried := 0 - var acceptable []WorkerID - - needRes := ResourceTable[req.taskType][sh.spt] - - for wid, worker := range sh.workers { - rpcCtx, cancel := context.WithTimeout(req.ctx, selectorTimeout) - ok, err := req.sel.Ok(rpcCtx, req.taskType, sh.spt, worker) - cancel() - - if err != nil { - return false, err - } - - if !ok { - continue - } - tried++ - - if !canHandleRequest(needRes, wid, worker.info.Resources, worker.preparing) { - continue - } - - acceptable = append(acceptable, wid) - } - - if len(acceptable) > 0 { - { - var serr error - - sort.SliceStable(acceptable, func(i, j int) bool { - rpcCtx, cancel := context.WithTimeout(req.ctx, selectorTimeout) - defer cancel() - r, err := req.sel.Cmp(rpcCtx, req.taskType, sh.workers[acceptable[i]], sh.workers[acceptable[j]]) - - if err != nil { - serr = multierror.Append(serr, err) - } - return r - }) - - if serr != nil { - return false, xerrors.Errorf("error(s) selecting best worker: %w", serr) - } - } - - return true, sh.assignWorker(acceptable[0], sh.workers[acceptable[0]], req) - } - - if tried == 0 { - return false, xerrors.New("maybeSchedRequest didn't find any good workers") - } - - return false, nil // put in waiting queue -} - -func (sh *scheduler) assignWorker(wid WorkerID, w *workerHandle, req *workerRequest) error { - needRes := ResourceTable[req.taskType][sh.spt] - - w.preparing.add(w.info.Resources, needRes) +func (sh *scheduler) runWorker(wid WorkerID) { + w := sh.workers[wid] go func() { - err := req.prepare(req.ctx, w.w) - sh.workersLk.Lock() + for { - if err != nil { - w.preparing.free(w.info.Resources, needRes) - sh.workersLk.Unlock() - - select { - case sh.workerFree <- wid: - case <-sh.closing: - log.Warnf("scheduler closed while sending response (prepare error: %+v)", err) - } - - select { - case req.ret <- workerResponse{err: err}: - case <-req.ctx.Done(): - log.Warnf("request got cancelled before we could respond (prepare error: %+v)", err) - case <-sh.closing: - log.Warnf("scheduler closed while sending response (prepare error: %+v)", err) - } - return - } - - err = w.active.withResources(wid, w.info.Resources, needRes, &sh.workersLk, func() error { - w.preparing.free(w.info.Resources, needRes) - sh.workersLk.Unlock() - defer sh.workersLk.Lock() // we MUST return locked from this function - - select { - case sh.workerFree <- wid: - case <-sh.closing: - } - - err = req.work(req.ctx, w.w) - - select { - case req.ret <- workerResponse{err: err}: - case <-req.ctx.Done(): - log.Warnf("request got cancelled before we could respond") - case <-sh.closing: - log.Warnf("scheduler closed while sending response") - } - - return nil - }) - - sh.workersLk.Unlock() - - // This error should always be nil, since nothing is setting it, but just to be safe: - if err != nil { - log.Errorf("error executing worker (withResources): %+v", err) } }() - - return nil } func (a *activeResources) withResources(id WorkerID, wr storiface.WorkerResources, r Resources, locker sync.Locker, cb func() error) error { - for !canHandleRequest(r, id, wr, a) { + for !a.canHandleRequest(r, id, wr) { if a.cond == nil { a.cond = sync.NewCond(locker) } @@ -396,16 +404,16 @@ func (a *activeResources) free(wr storiface.WorkerResources, r Resources) { a.memUsedMax -= r.MaxMemory } -func canHandleRequest(needRes Resources, wid WorkerID, res storiface.WorkerResources, active *activeResources) bool { +func (a *activeResources) canHandleRequest(needRes Resources, wid WorkerID, res storiface.WorkerResources) bool { // TODO: dedupe needRes.BaseMinMemory per task type (don't add if that task is already running) - minNeedMem := res.MemReserved + active.memUsedMin + needRes.MinMemory + needRes.BaseMinMemory + minNeedMem := res.MemReserved + a.memUsedMin + needRes.MinMemory + needRes.BaseMinMemory if minNeedMem > res.MemPhysical { log.Debugf("sched: not scheduling on worker %d; not enough physical memory - need: %dM, have %dM", wid, minNeedMem/mib, res.MemPhysical/mib) return false } - maxNeedMem := res.MemReserved + active.memUsedMax + needRes.MaxMemory + needRes.BaseMinMemory + maxNeedMem := res.MemReserved + a.memUsedMax + needRes.MaxMemory + needRes.BaseMinMemory if maxNeedMem > res.MemSwap+res.MemPhysical { log.Debugf("sched: not scheduling on worker %d; not enough virtual memory - need: %dM, have %dM", wid, maxNeedMem/mib, (res.MemSwap+res.MemPhysical)/mib) @@ -413,19 +421,19 @@ func canHandleRequest(needRes Resources, wid WorkerID, res storiface.WorkerResou } if needRes.MultiThread() { - if active.cpuUse > 0 { - log.Debugf("sched: not scheduling on worker %d; multicore process needs %d threads, %d in use, target %d", wid, res.CPUs, active.cpuUse, res.CPUs) + if a.cpuUse > 0 { + log.Debugf("sched: not scheduling on worker %d; multicore process needs %d threads, %d in use, target %d", wid, res.CPUs, a.cpuUse, res.CPUs) return false } } else { - if active.cpuUse+uint64(needRes.Threads) > res.CPUs { - log.Debugf("sched: not scheduling on worker %d; not enough threads, need %d, %d in use, target %d", wid, needRes.Threads, active.cpuUse, res.CPUs) + if a.cpuUse+uint64(needRes.Threads) > res.CPUs { + log.Debugf("sched: not scheduling on worker %d; not enough threads, need %d, %d in use, target %d", wid, needRes.Threads, a.cpuUse, res.CPUs) return false } } if len(res.GPUs) > 0 && needRes.CanGPU { - if active.gpuUsed { + if a.gpuUsed { log.Debugf("sched: not scheduling on worker %d; GPU in use", wid) return false } @@ -453,7 +461,7 @@ func (a *activeResources) utilization(wr storiface.WorkerResources) float64 { return max } -func (sh *scheduler) schedNewWorker(w *workerHandle) { +func (sh *scheduler) newWorker(w *workerHandle) { sh.workersLk.Lock() id := sh.nextWorker @@ -468,10 +476,10 @@ func (sh *scheduler) schedNewWorker(w *workerHandle) { return } - sh.onWorkerFreed(id) + sh.runWorker(id) } -func (sh *scheduler) schedDropWorker(wid WorkerID) { +func (sh *scheduler) dropWorker(wid WorkerID) { sh.workersLk.Lock() defer sh.workersLk.Unlock() From da96f06202c7dd6d4482396fcf9f2e0e22287a16 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 9 Jul 2020 13:49:01 +0200 Subject: [PATCH 161/199] sched: implement runWorker --- sched.go | 222 ++++++++++++++++++++++++++------------------- sched_resources.go | 110 ++++++++++++++++++++++ 2 files changed, 238 insertions(+), 94 deletions(-) create mode 100644 sched_resources.go diff --git a/sched.go b/sched.go index af6981b08..966bf2c46 100644 --- a/sched.go +++ b/sched.go @@ -349,116 +349,150 @@ func (sh *scheduler) trySched() { } func (sh *scheduler) runWorker(wid WorkerID) { - w := sh.workers[wid] - go func() { - for { + worker := sh.workers[wid] + scheduledWindows := make(chan *schedWindow, SchedWindows) + taskDone := make(chan struct{}, 1) + windowsRequested := 0 + var activeWindows []*schedWindow + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + workerClosing, err := worker.w.Closing(ctx) + if err != nil { + return + } + + defer func() { + log.Warnw("Worker closing", "workerid", wid) + + // TODO: close / return all queued tasks + }() + + for { + // ask for more windows if we need them + for ; windowsRequested < SchedWindows; windowsRequested++ { + select { + case sh.windowRequests <- &schedWindowRequest{ + worker: wid, + done: scheduledWindows, + }: + case <-sh.closing: + return + case <-workerClosing: + return + } + } + + select { + case w := <-scheduledWindows: + activeWindows = append(activeWindows, w) + case <-taskDone: + case <-sh.closing: + return + case <-workerClosing: + return + } + + assignLoop: + // process windows in order + for len(activeWindows) > 0 { + // process tasks within a window in order + for len(activeWindows[0].todo) > 0 { + todo := activeWindows[0].todo[0] + needRes := ResourceTable[todo.taskType][sh.spt] + + sh.workersLk.Lock() + ok := worker.preparing.canHandleRequest(needRes, wid, worker.info.Resources) + if !ok { + sh.workersLk.Unlock() + break assignLoop + } + + err := sh.assignWorker(taskDone, wid, worker, todo) + sh.workersLk.Unlock() + + if err != nil { + log.Error("assignWorker error: %+v", err) + go todo.respond(xerrors.Errorf("assignWorker error: %w", err)) + } + + activeWindows[0].todo = activeWindows[0].todo[1:] + } + + copy(activeWindows, activeWindows[1:]) + activeWindows[len(activeWindows)-1] = nil + activeWindows = activeWindows[:len(activeWindows)-1] + + windowsRequested-- + } } }() } -func (a *activeResources) withResources(id WorkerID, wr storiface.WorkerResources, r Resources, locker sync.Locker, cb func() error) error { - for !a.canHandleRequest(r, id, wr) { - if a.cond == nil { - a.cond = sync.NewCond(locker) +func (sh *scheduler) assignWorker(taskDone chan struct{}, wid WorkerID, w *workerHandle, req *workerRequest) error { + needRes := ResourceTable[req.taskType][sh.spt] + + w.preparing.add(w.info.Resources, needRes) + + go func() { + err := req.prepare(req.ctx, w.w) + sh.workersLk.Lock() + + if err != nil { + w.preparing.free(w.info.Resources, needRes) + sh.workersLk.Unlock() + + select { + case taskDone <- struct{}{}: + case <-sh.closing: + log.Warnf("scheduler closed while sending response (prepare error: %+v)", err) + } + + select { + case req.ret <- workerResponse{err: err}: + case <-req.ctx.Done(): + log.Warnf("request got cancelled before we could respond (prepare error: %+v)", err) + case <-sh.closing: + log.Warnf("scheduler closed while sending response (prepare error: %+v)", err) + } + return } - a.cond.Wait() - } - a.add(wr, r) + err = w.active.withResources(wid, w.info.Resources, needRes, &sh.workersLk, func() error { + w.preparing.free(w.info.Resources, needRes) + sh.workersLk.Unlock() + defer sh.workersLk.Lock() // we MUST return locked from this function - err := cb() + select { + case taskDone <- struct{}{}: + case <-sh.closing: + } - a.free(wr, r) - if a.cond != nil { - a.cond.Broadcast() - } + err = req.work(req.ctx, w.w) - return err -} + select { + case req.ret <- workerResponse{err: err}: + case <-req.ctx.Done(): + log.Warnf("request got cancelled before we could respond") + case <-sh.closing: + log.Warnf("scheduler closed while sending response") + } -func (a *activeResources) add(wr storiface.WorkerResources, r Resources) { - a.gpuUsed = r.CanGPU - if r.MultiThread() { - a.cpuUse += wr.CPUs - } else { - a.cpuUse += uint64(r.Threads) - } + return nil + }) - a.memUsedMin += r.MinMemory - a.memUsedMax += r.MaxMemory -} + sh.workersLk.Unlock() -func (a *activeResources) free(wr storiface.WorkerResources, r Resources) { - if r.CanGPU { - a.gpuUsed = false - } - if r.MultiThread() { - a.cpuUse -= wr.CPUs - } else { - a.cpuUse -= uint64(r.Threads) - } - - a.memUsedMin -= r.MinMemory - a.memUsedMax -= r.MaxMemory -} - -func (a *activeResources) canHandleRequest(needRes Resources, wid WorkerID, res storiface.WorkerResources) bool { - - // TODO: dedupe needRes.BaseMinMemory per task type (don't add if that task is already running) - minNeedMem := res.MemReserved + a.memUsedMin + needRes.MinMemory + needRes.BaseMinMemory - if minNeedMem > res.MemPhysical { - log.Debugf("sched: not scheduling on worker %d; not enough physical memory - need: %dM, have %dM", wid, minNeedMem/mib, res.MemPhysical/mib) - return false - } - - maxNeedMem := res.MemReserved + a.memUsedMax + needRes.MaxMemory + needRes.BaseMinMemory - - if maxNeedMem > res.MemSwap+res.MemPhysical { - log.Debugf("sched: not scheduling on worker %d; not enough virtual memory - need: %dM, have %dM", wid, maxNeedMem/mib, (res.MemSwap+res.MemPhysical)/mib) - return false - } - - if needRes.MultiThread() { - if a.cpuUse > 0 { - log.Debugf("sched: not scheduling on worker %d; multicore process needs %d threads, %d in use, target %d", wid, res.CPUs, a.cpuUse, res.CPUs) - return false + // This error should always be nil, since nothing is setting it, but just to be safe: + if err != nil { + log.Errorf("error executing worker (withResources): %+v", err) } - } else { - if a.cpuUse+uint64(needRes.Threads) > res.CPUs { - log.Debugf("sched: not scheduling on worker %d; not enough threads, need %d, %d in use, target %d", wid, needRes.Threads, a.cpuUse, res.CPUs) - return false - } - } + }() - if len(res.GPUs) > 0 && needRes.CanGPU { - if a.gpuUsed { - log.Debugf("sched: not scheduling on worker %d; GPU in use", wid) - return false - } - } - - return true -} - -func (a *activeResources) utilization(wr storiface.WorkerResources) float64 { - var max float64 - - cpu := float64(a.cpuUse) / float64(wr.CPUs) - max = cpu - - memMin := float64(a.memUsedMin+wr.MemReserved) / float64(wr.MemPhysical) - if memMin > max { - max = memMin - } - - memMax := float64(a.memUsedMax+wr.MemReserved) / float64(wr.MemPhysical+wr.MemSwap) - if memMax > max { - max = memMax - } - - return max + return nil } func (sh *scheduler) newWorker(w *workerHandle) { diff --git a/sched_resources.go b/sched_resources.go new file mode 100644 index 000000000..0ba9d1f66 --- /dev/null +++ b/sched_resources.go @@ -0,0 +1,110 @@ +package sectorstorage + +import ( + "sync" + + "github.com/filecoin-project/sector-storage/storiface" +) + +func (a *activeResources) withResources(id WorkerID, wr storiface.WorkerResources, r Resources, locker sync.Locker, cb func() error) error { + for !a.canHandleRequest(r, id, wr) { + if a.cond == nil { + a.cond = sync.NewCond(locker) + } + a.cond.Wait() + } + + a.add(wr, r) + + err := cb() + + a.free(wr, r) + if a.cond != nil { + a.cond.Broadcast() + } + + return err +} + +func (a *activeResources) add(wr storiface.WorkerResources, r Resources) { + a.gpuUsed = r.CanGPU + if r.MultiThread() { + a.cpuUse += wr.CPUs + } else { + a.cpuUse += uint64(r.Threads) + } + + a.memUsedMin += r.MinMemory + a.memUsedMax += r.MaxMemory +} + +func (a *activeResources) free(wr storiface.WorkerResources, r Resources) { + if r.CanGPU { + a.gpuUsed = false + } + if r.MultiThread() { + a.cpuUse -= wr.CPUs + } else { + a.cpuUse -= uint64(r.Threads) + } + + a.memUsedMin -= r.MinMemory + a.memUsedMax -= r.MaxMemory +} + +func (a *activeResources) canHandleRequest(needRes Resources, wid WorkerID, res storiface.WorkerResources) bool { + + // TODO: dedupe needRes.BaseMinMemory per task type (don't add if that task is already running) + minNeedMem := res.MemReserved + a.memUsedMin + needRes.MinMemory + needRes.BaseMinMemory + if minNeedMem > res.MemPhysical { + log.Debugf("sched: not scheduling on worker %d; not enough physical memory - need: %dM, have %dM", wid, minNeedMem/mib, res.MemPhysical/mib) + return false + } + + maxNeedMem := res.MemReserved + a.memUsedMax + needRes.MaxMemory + needRes.BaseMinMemory + + if maxNeedMem > res.MemSwap+res.MemPhysical { + log.Debugf("sched: not scheduling on worker %d; not enough virtual memory - need: %dM, have %dM", wid, maxNeedMem/mib, (res.MemSwap+res.MemPhysical)/mib) + return false + } + + if needRes.MultiThread() { + if a.cpuUse > 0 { + log.Debugf("sched: not scheduling on worker %d; multicore process needs %d threads, %d in use, target %d", wid, res.CPUs, a.cpuUse, res.CPUs) + return false + } + } else { + if a.cpuUse+uint64(needRes.Threads) > res.CPUs { + log.Debugf("sched: not scheduling on worker %d; not enough threads, need %d, %d in use, target %d", wid, needRes.Threads, a.cpuUse, res.CPUs) + return false + } + } + + if len(res.GPUs) > 0 && needRes.CanGPU { + if a.gpuUsed { + log.Debugf("sched: not scheduling on worker %d; GPU in use", wid) + return false + } + } + + return true +} + +func (a *activeResources) utilization(wr storiface.WorkerResources) float64 { + var max float64 + + cpu := float64(a.cpuUse) / float64(wr.CPUs) + max = cpu + + memMin := float64(a.memUsedMin+wr.MemReserved) / float64(wr.MemPhysical) + if memMin > max { + max = memMin + } + + memMax := float64(a.memUsedMax+wr.MemReserved) / float64(wr.MemPhysical+wr.MemSwap) + if memMax > max { + max = memMax + } + + return max +} From 903731adaf924a89d3b0ae41fd64a0fcf030fee4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 9 Jul 2020 14:40:53 +0200 Subject: [PATCH 162/199] sched: Fix tests --- manager_test.go | 4 ++++ sched.go | 17 +++++++++++++---- stores/index.go | 4 ++-- stores/local.go | 1 + 4 files changed, 20 insertions(+), 6 deletions(-) diff --git a/manager_test.go b/manager_test.go index ae318b487..19d9e3895 100644 --- a/manager_test.go +++ b/manager_test.go @@ -22,6 +22,10 @@ import ( "github.com/filecoin-project/sector-storage/stores" ) +func init() { + logging.SetAllLoggers(logging.LevelDebug) +} + type testStorage stores.StorageConfig func newTestStorage(t *testing.T) *testStorage { diff --git a/sched.go b/sched.go index 966bf2c46..d1ec33884 100644 --- a/sched.go +++ b/sched.go @@ -88,7 +88,7 @@ type schedWindowRequest struct { type schedWindow struct { worker WorkerID - allocated *activeResources + allocated activeResources todo []*workerRequest } @@ -132,10 +132,12 @@ func newScheduler(spt abi.RegisteredSealProof) *scheduler { watchClosing: make(chan WorkerID), workerClosing: make(chan WorkerID), - schedule: make(chan *workerRequest), - closing: make(chan struct{}), + schedule: make(chan *workerRequest), + windowRequests: make(chan *schedWindowRequest), schedQueue: &requestQueue{}, + + closing: make(chan struct{}), } } @@ -295,7 +297,7 @@ func (sh *scheduler) trySched() { wr := sh.workers[wid].info.Resources // TODO: allow bigger windows - if windows[wnd].allocated.canHandleRequest(needRes, wid, wr) { + if !windows[wnd].allocated.canHandleRequest(needRes, wid, wr) { continue } @@ -305,6 +307,11 @@ func (sh *scheduler) trySched() { break } + if selectedWindow < 0 { + // all windows full + continue + } + windows[selectedWindow].todo = append(windows[selectedWindow].todo, task) heap.Remove(sh.schedQueue, sqi) @@ -327,6 +334,7 @@ func (sh *scheduler) trySched() { scheduledWindows[wnd] = struct{}{} + window := window // copy select { case sh.openWindows[wnd].done <- &window: default: @@ -390,6 +398,7 @@ func (sh *scheduler) runWorker(wid WorkerID) { case w := <-scheduledWindows: activeWindows = append(activeWindows, w) case <-taskDone: + log.Debugw("task done", "workerid", wid) case <-sh.closing: return case <-workerClosing: diff --git a/stores/index.go b/stores/index.go index 049e2dc20..fda973124 100644 --- a/stores/index.go +++ b/stores/index.go @@ -384,8 +384,8 @@ func (i *Index) StorageBestAlloc(ctx context.Context, allocate SectorFileType, s } sort.Slice(candidates, func(i, j int) bool { - iw := big.Mul(big.NewInt(int64(candidates[i].fsi.Available)), big.NewInt(int64(candidates[i].info.Weight))) - jw := big.Mul(big.NewInt(int64(candidates[j].fsi.Available)), big.NewInt(int64(candidates[j].info.Weight))) + iw := big.Mul(big.NewInt(candidates[i].fsi.Available), big.NewInt(int64(candidates[i].info.Weight))) + jw := big.Mul(big.NewInt(candidates[j].fsi.Available), big.NewInt(int64(candidates[j].info.Weight))) return iw.GreaterThan(jw) }) diff --git a/stores/local.go b/stores/local.go index 26b7ccb75..92b777307 100644 --- a/stores/local.go +++ b/stores/local.go @@ -13,6 +13,7 @@ import ( "golang.org/x/xerrors" + "github.com/filecoin-project/sector-storage/fsutil" "github.com/filecoin-project/specs-actors/actors/abi" ) From 45c1b268f1294088d936e0a639511fc2b9cdc120 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 9 Jul 2020 15:09:38 +0200 Subject: [PATCH 163/199] sched: Remove unused worker field --- sched.go | 1 - 1 file changed, 1 deletion(-) diff --git a/sched.go b/sched.go index d1ec33884..b038eff1d 100644 --- a/sched.go +++ b/sched.go @@ -87,7 +87,6 @@ type schedWindowRequest struct { } type schedWindow struct { - worker WorkerID allocated activeResources todo []*workerRequest } From 5c5fe09990830f4619fefc414f14fe219b068f3e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 9 Jul 2020 15:18:45 +0200 Subject: [PATCH 164/199] post-rebase fixes --- stores/index.go | 4 ++-- stores/local.go | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/stores/index.go b/stores/index.go index fda973124..049e2dc20 100644 --- a/stores/index.go +++ b/stores/index.go @@ -384,8 +384,8 @@ func (i *Index) StorageBestAlloc(ctx context.Context, allocate SectorFileType, s } sort.Slice(candidates, func(i, j int) bool { - iw := big.Mul(big.NewInt(candidates[i].fsi.Available), big.NewInt(int64(candidates[i].info.Weight))) - jw := big.Mul(big.NewInt(candidates[j].fsi.Available), big.NewInt(int64(candidates[j].info.Weight))) + iw := big.Mul(big.NewInt(int64(candidates[i].fsi.Available)), big.NewInt(int64(candidates[i].info.Weight))) + jw := big.Mul(big.NewInt(int64(candidates[j].fsi.Available)), big.NewInt(int64(candidates[j].info.Weight))) return iw.GreaterThan(jw) }) diff --git a/stores/local.go b/stores/local.go index 92b777307..26b7ccb75 100644 --- a/stores/local.go +++ b/stores/local.go @@ -13,7 +13,6 @@ import ( "golang.org/x/xerrors" - "github.com/filecoin-project/sector-storage/fsutil" "github.com/filecoin-project/specs-actors/actors/abi" ) From 7f115954fd7b977f59564cdbff7e2a61107d6de4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 9 Jul 2020 19:17:15 +0200 Subject: [PATCH 165/199] sched: More fixes --- sched.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sched.go b/sched.go index b038eff1d..d926e753a 100644 --- a/sched.go +++ b/sched.go @@ -260,7 +260,7 @@ func (sh *scheduler) trySched() { rand.Shuffle(len(acceptableWindows[sqi]), func(i, j int) { acceptableWindows[sqi][i], acceptableWindows[sqi][j] = acceptableWindows[sqi][j], acceptableWindows[sqi][i] }) - sort.SliceStable(acceptableWindows, func(i, j int) bool { + sort.SliceStable(acceptableWindows[sqi], func(i, j int) bool { wii := sh.openWindows[acceptableWindows[sqi][i]].worker wji := sh.openWindows[acceptableWindows[sqi][j]].worker @@ -344,7 +344,7 @@ func (sh *scheduler) trySched() { // Rewrite sh.openWindows array, removing scheduled windows newOpenWindows := make([]*schedWindowRequest, 0, len(sh.openWindows)-len(scheduledWindows)) for wnd, window := range sh.openWindows { - if _, scheduled := scheduledWindows[wnd]; !scheduled { + if _, scheduled := scheduledWindows[wnd]; scheduled { // keep unscheduled windows open continue } From d1a18c15e6655b63d2ca171d6ae6e3e71d76278a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 9 Jul 2020 19:29:05 +0200 Subject: [PATCH 166/199] Fix build on osx --- fsutil/dealloc_other.go | 2 +- fsutil/filesize_unix.go | 2 +- fsutil/statfs_unix.go | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/fsutil/dealloc_other.go b/fsutil/dealloc_other.go index 3ae8973ff..4f8347951 100644 --- a/fsutil/dealloc_other.go +++ b/fsutil/dealloc_other.go @@ -13,5 +13,5 @@ var log = logging.Logger("fsutil") func Deallocate(file *os.File, offset int64, length int64) error { log.Warnf("deallocating space not supported") - return err + return nil } diff --git a/fsutil/filesize_unix.go b/fsutil/filesize_unix.go index d596e4be7..41b62daf6 100644 --- a/fsutil/filesize_unix.go +++ b/fsutil/filesize_unix.go @@ -20,6 +20,6 @@ func FileSize(path string) (SizeInfo, error) { // NOTE: stat.Blocks is in 512B blocks, NOT in stat.Blksize // See https://www.gnu.org/software/libc/manual/html_node/Attribute-Meanings.html return SizeInfo{ - stat.Blocks * 512, + int64(stat.Blocks) * 512, }, nil } diff --git a/fsutil/statfs_unix.go b/fsutil/statfs_unix.go index 3e69d5a8e..7fcb8af37 100644 --- a/fsutil/statfs_unix.go +++ b/fsutil/statfs_unix.go @@ -13,7 +13,7 @@ func Statfs(path string) (FsStat, error) { } return FsStat{ - Capacity: int64(stat.Blocks) * stat.Bsize, - Available: int64(stat.Bavail) * stat.Bsize, + Capacity: int64(stat.Blocks) * int64(stat.Bsize), + Available: int64(stat.Bavail) * int64(stat.Bsize), }, nil } From 045e5977875f4a7ffb571b42401543b9d78bac80 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Sat, 11 Jul 2020 01:21:48 +0200 Subject: [PATCH 167/199] remove open windows when dropping workers --- sched.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/sched.go b/sched.go index d926e753a..241440beb 100644 --- a/sched.go +++ b/sched.go @@ -528,6 +528,16 @@ func (sh *scheduler) dropWorker(wid WorkerID) { w := sh.workers[wid] delete(sh.workers, wid) + newWindows := make([]*schedWindowRequest, 0, len(sh.openWindows)) + for _, window := range sh.openWindows { + if window.worker != wid { + newWindows = append(newWindows, window) + } + } + sh.openWindows = newWindows + + // TODO: sync close worker goroutine + go func() { if err := w.w.Close(); err != nil { log.Warnf("closing worker %d: %+v", err) From 1d67dcfa3c156dc04ca09c8a4f8efe70522f72ef Mon Sep 17 00:00:00 2001 From: Aayush Rajasekaran Date: Sat, 11 Jul 2020 21:30:16 -0400 Subject: [PATCH 168/199] extract GetRequiredPadding --- ffiwrapper/sealer_cgo.go | 78 +++++++++++++++++++++++++--------------- 1 file changed, 49 insertions(+), 29 deletions(-) diff --git a/ffiwrapper/sealer_cgo.go b/ffiwrapper/sealer_cgo.go index 88218921c..5c6e40ef9 100644 --- a/ffiwrapper/sealer_cgo.go +++ b/ffiwrapper/sealer_cgo.go @@ -598,45 +598,65 @@ func GeneratePieceCIDFromFile(proofType abi.RegisteredSealProof, piece io.Reader return pieceCID, werr() } -func GenerateUnsealedCID(proofType abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) { - allPieces := make([]abi.PieceInfo, 0, len(pieces)) +func GetRequiredPadding(oldLength abi.PaddedPieceSize, newPieceLength abi.PaddedPieceSize) ([]abi.PaddedPieceSize, abi.PaddedPieceSize) { + + padPieces := make([]abi.PaddedPieceSize, 0) + + toFill := uint64(-oldLength % newPieceLength) + + n := bits.OnesCount64(toFill) var sum abi.PaddedPieceSize + for i := 0; i < n; i++ { + next := bits.TrailingZeros64(toFill) + psize := uint64(1) << uint(next) + toFill ^= psize - padTo := func(s abi.PaddedPieceSize, trailing bool) { - // pad remaining space with 0 CommPs - toFill := uint64(-sum % s) - if trailing && sum == 0 { - toFill = uint64(s) - } - - n := bits.OnesCount64(toFill) - for i := 0; i < n; i++ { - next := bits.TrailingZeros64(toFill) - psize := uint64(1) << uint(next) - toFill ^= psize - - padded := abi.PaddedPieceSize(psize) - allPieces = append(allPieces, abi.PieceInfo{ - Size: padded, - PieceCID: zerocomm.ZeroPieceCommitment(padded.Unpadded()), - }) - sum += padded - } + padded := abi.PaddedPieceSize(psize) + padPieces = append(padPieces, padded) + sum += padded } - for _, p := range pieces { - padTo(p.Size, false) - - allPieces = append(allPieces, p) - sum += p.Size - } + return padPieces, sum +} +func GenerateUnsealedCID(proofType abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) { ssize, err := proofType.SectorSize() if err != nil { return cid.Undef, err } - padTo(abi.PaddedPieceSize(ssize), true) + pssize := abi.PaddedPieceSize(ssize) + allPieces := make([]abi.PieceInfo, 0, len(pieces)) + if len(pieces) == 0 { + allPieces = append(allPieces, abi.PieceInfo{ + Size: pssize, + PieceCID: zerocomm.ZeroPieceCommitment(pssize.Unpadded()), + }) + } else { + var sum abi.PaddedPieceSize + + padTo := func(pads []abi.PaddedPieceSize) { + for _, p := range pads { + allPieces = append(allPieces, abi.PieceInfo{ + Size: p, + PieceCID: zerocomm.ZeroPieceCommitment(p.Unpadded()), + }) + + sum += p + } + } + + for _, p := range pieces { + ps, _ := GetRequiredPadding(sum, p.Size) + padTo(ps) + + allPieces = append(allPieces, p) + sum += p.Size + } + + ps, _ := GetRequiredPadding(sum, pssize) + padTo(ps) + } return ffi.GenerateUnsealedCID(proofType, allPieces) } From 4f8015b58a9da612f1a49479d9fa299459da787d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 16 Jul 2020 18:18:22 +0200 Subject: [PATCH 169/199] Correctly turn randomness into fr32 values --- ffiwrapper/verifier_cgo.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/ffiwrapper/verifier_cgo.go b/ffiwrapper/verifier_cgo.go index 60d56dddc..1fecf9598 100644 --- a/ffiwrapper/verifier_cgo.go +++ b/ffiwrapper/verifier_cgo.go @@ -15,7 +15,7 @@ import ( ) func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, error) { - randomness[31] = 0 // TODO: Not correct, fixme + randomness[31] &= 0x3f privsectors, skipped, done, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWinningPoStProof) // TODO: FAULTS? if err != nil { return nil, err @@ -29,7 +29,7 @@ func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, } func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, []abi.SectorID, error) { - randomness[31] = 0 // TODO: Not correct, fixme + randomness[31] &= 0x3f privsectors, skipped, done, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWindowPoStProof) if err != nil { return nil, nil, xerrors.Errorf("gathering sector info: %w", err) @@ -98,7 +98,7 @@ func (proofVerifier) VerifySeal(info abi.SealVerifyInfo) (bool, error) { } func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info abi.WinningPoStVerifyInfo) (bool, error) { - info.Randomness[31] = 0 // TODO: Not correct, fixme + info.Randomness[31] &= 0x3f _, span := trace.StartSpan(ctx, "VerifyWinningPoSt") defer span.End() @@ -106,7 +106,7 @@ func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info abi.WinningPoSt } func (proofVerifier) VerifyWindowPoSt(ctx context.Context, info abi.WindowPoStVerifyInfo) (bool, error) { - info.Randomness[31] = 0 // TODO: Not correct, fixme + info.Randomness[31] &= 0x3f _, span := trace.StartSpan(ctx, "VerifyWindowPoSt") defer span.End() @@ -114,6 +114,6 @@ func (proofVerifier) VerifyWindowPoSt(ctx context.Context, info abi.WindowPoStVe } func (proofVerifier) GenerateWinningPoStSectorChallenge(ctx context.Context, proofType abi.RegisteredPoStProof, minerID abi.ActorID, randomness abi.PoStRandomness, eligibleSectorCount uint64) ([]uint64, error) { - randomness[31] = 0 // TODO: Not correct, fixme + randomness[31] &= 0x3f return ffi.GenerateWinningPoStSectorChallenge(proofType, minerID, randomness, eligibleSectorCount) } From d244749f1a64ba1e764538e0d719c831928f1f93 Mon Sep 17 00:00:00 2001 From: Peter Rabbitson Date: Thu, 16 Jul 2020 01:53:13 +0200 Subject: [PATCH 170/199] Bump fil-commcid and filecoin-ffi deps Propagates correct on-chain commX CIDs --- extern/filecoin-ffi | 2 +- ffiwrapper/sealer_cgo.go | 6 +++--- go.mod | 6 +++--- go.sum | 12 ++++++++++-- mock/mock.go | 6 +++--- zerocomm/zerocomm.go | 3 ++- 6 files changed, 22 insertions(+), 13 deletions(-) diff --git a/extern/filecoin-ffi b/extern/filecoin-ffi index 5342c7c97..cddc56607 160000 --- a/extern/filecoin-ffi +++ b/extern/filecoin-ffi @@ -1 +1 @@ -Subproject commit 5342c7c97d1a1df4650629d14f2823d52889edd9 +Subproject commit cddc56607e1d851ea6d09d49404bd7db70cb3c2e diff --git a/ffiwrapper/sealer_cgo.go b/ffiwrapper/sealer_cgo.go index 5c6e40ef9..416bfa70b 100644 --- a/ffiwrapper/sealer_cgo.go +++ b/ffiwrapper/sealer_cgo.go @@ -168,14 +168,14 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie return abi.PieceInfo{}, xerrors.Errorf("generate unsealed CID: %w", err) } - commp, err := commcid.CIDToDataCommitmentV1(pieceCID) - if err != nil { + // validate that the pieceCID was properly formed + if _, err := commcid.CIDToPieceCommitmentV1(pieceCID); err != nil { return abi.PieceInfo{}, err } return abi.PieceInfo{ Size: pieceSize.Padded(), - PieceCID: commcid.PieceCommitmentV1ToCID(commp), + PieceCID: pieceCID, }, nil } diff --git a/go.mod b/go.mod index 83424841f..994a99a3f 100644 --- a/go.mod +++ b/go.mod @@ -5,16 +5,16 @@ go 1.13 require ( github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e github.com/elastic/go-sysinfo v1.3.0 - github.com/filecoin-project/filecoin-ffi v0.0.0-20200326153646-e899cc1dd072 + github.com/filecoin-project/filecoin-ffi v0.30.4-0.20200716204036-cddc56607e1d github.com/filecoin-project/go-bitfield v0.0.4-0.20200703174658-f4a5758051a1 - github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5 + github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f github.com/filecoin-project/go-paramfetch v0.0.1 github.com/filecoin-project/specs-actors v0.6.1 github.com/filecoin-project/specs-storage v0.1.1-0.20200622113353-88a9704877ea github.com/google/uuid v1.1.1 github.com/gorilla/mux v1.7.4 github.com/hashicorp/go-multierror v1.0.0 - github.com/ipfs/go-cid v0.0.5 + github.com/ipfs/go-cid v0.0.6 github.com/ipfs/go-ipfs-files v0.0.7 github.com/ipfs/go-log v1.0.3 github.com/ipfs/go-log/v2 v2.0.3 diff --git a/go.sum b/go.sum index 508d985d7..67fa4d38d 100644 --- a/go.sum +++ b/go.sum @@ -38,8 +38,8 @@ github.com/filecoin-project/go-bitfield v0.0.4-0.20200703174658-f4a5758051a1 h1: github.com/filecoin-project/go-bitfield v0.0.4-0.20200703174658-f4a5758051a1/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= -github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5 h1:yvQJCW9mmi9zy+51xA01Ea2X7/dL7r8eKDPuGUjRmbo= -github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5/go.mod h1:JbkIgFF/Z9BDlvrJO1FuKkaWsH673/UdFaiVS6uIHlA= +github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f h1:GxJzR3oRIMTPtpZ0b7QF8FKPK6/iPAc7trhlL5k/g+s= +github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-paramfetch v0.0.1 h1:gV7bs5YaqlgpGFMiLxInGK2L1FyCXUE0rimz4L7ghoE= github.com/filecoin-project/go-paramfetch v0.0.1/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= github.com/filecoin-project/specs-actors v0.3.0/go.mod h1:nQYnFbQ7Y0bHZyq6HDEuVlCPR+U3z5Q3wMOQ+2aiV+Y= @@ -82,6 +82,8 @@ github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUP github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.5 h1:o0Ix8e/ql7Zb5UVUJEUfjsWCIY8t48++9lR8qi6oiJU= github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= +github.com/ipfs/go-cid v0.0.6 h1:go0y+GcDOGeJIV01FeBsta4FHngoA4Wz7KMeLkXAhMs= +github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= github.com/ipfs/go-hamt-ipld v0.0.15-0.20200131012125-dd88a59d3f2e/go.mod h1:9aQJu/i/TaRDW6jqB5U217dLIDopn50wxLdHXM2CTfE= github.com/ipfs/go-ipfs-files v0.0.7 h1:s5BRD12ndahqYifeH1S8Z73zqZhR+3IdKYAG9PiETs0= github.com/ipfs/go-ipfs-files v0.0.7/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs= @@ -157,15 +159,21 @@ github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc= github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= +github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= github.com/multiformats/go-multiaddr v0.2.0 h1:lR52sFwcTCuQb6bTfnXF6zA2XfyYvyd+5a9qECv/J90= github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA= github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk= +github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.13 h1:06x+mk/zj1FoMsgNejLpy6QTvJqlSt/BhLEy87zidlc= github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-multihash v0.0.14 h1:QoBceQYQQtNUuf6s7wHxnE2c8bhbMqhfGzNI032se/I= +github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.5 h1:XVZwSo04Cs3j/jS0uAEPpT3JY6DzMcVLLoWOSnCxOjg= diff --git a/mock/mock.go b/mock/mock.go index 7c9ed57f0..55c38967c 100644 --- a/mock/mock.go +++ b/mock/mock.go @@ -161,7 +161,7 @@ func (mgr *SectorMgr) SealPreCommit1(ctx context.Context, sid abi.SectorID, tick return nil, err } - cc, _, err := commcid.CIDToCommitment(commd) + _, _, cc, err := commcid.CIDToCommitment(commd) if err != nil { panic(err) } @@ -175,14 +175,14 @@ func (mgr *SectorMgr) SealPreCommit2(ctx context.Context, sid abi.SectorID, phas db := []byte(string(phase1Out)) db[0] ^= 'd' - d := commcid.DataCommitmentV1ToCID(db) + d, _ := commcid.DataCommitmentV1ToCID(db) commr := make([]byte, 32) for i := range db { commr[32-(i+1)] = db[i] } - commR := commcid.ReplicaCommitmentV1ToCID(commr) + commR, _ := commcid.ReplicaCommitmentV1ToCID(commr) return storage.SectorCids{ Unsealed: d, diff --git a/zerocomm/zerocomm.go b/zerocomm/zerocomm.go index 7d6308549..9b59723a0 100644 --- a/zerocomm/zerocomm.go +++ b/zerocomm/zerocomm.go @@ -51,5 +51,6 @@ var PieceComms = [Levels - Skip][32]byte{ func ZeroPieceCommitment(sz abi.UnpaddedPieceSize) cid.Cid { level := bits.TrailingZeros64(uint64(sz.Padded())) - Skip - 5 // 2^5 = 32 - return commcid.PieceCommitmentV1ToCID(PieceComms[level][:]) + commP, _ := commcid.PieceCommitmentV1ToCID(PieceComms[level][:]) + return commP } From 0a6c939a7390e632f486e7e05738611dcd9a0dff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 16 Jul 2020 23:40:54 +0200 Subject: [PATCH 171/199] Drop unused SectorInfo fields --- stores/index.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/stores/index.go b/stores/index.go index c85dc125e..acad2abaa 100644 --- a/stores/index.go +++ b/stores/index.go @@ -29,9 +29,6 @@ type StorageInfo struct { CanSeal bool CanStore bool - - LastHeartbeat time.Time - HeartbeatErr error } type HealthReport struct { From be6b88f4064ac8df22e3efcfce6424008fa9dc10 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 16 Jul 2020 23:41:04 +0200 Subject: [PATCH 172/199] Some sched tests --- manager.go | 15 +- sched_test.go | 408 ++++++++++++++++++++++++++++++++++++++++++++++ selector_alloc.go | 4 +- 3 files changed, 414 insertions(+), 13 deletions(-) diff --git a/manager.go b/manager.go index 0c18645ac..0cd081d92 100644 --- a/manager.go +++ b/manager.go @@ -208,7 +208,7 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect var selector WorkerSelector if len(best) == 0 { // new - selector, err = newAllocSelector(ctx, m.index, stores.FTUnsealed, stores.PathSealing) + selector = newAllocSelector(ctx, m.index, stores.FTUnsealed, stores.PathSealing) } else { // append to existing selector, err = newExistingSelector(ctx, m.index, sector, stores.FTUnsealed, false) } @@ -269,7 +269,7 @@ func (m *Manager) AddPiece(ctx context.Context, sector abi.SectorID, existingPie var selector WorkerSelector var err error if len(existingPieces) == 0 { // new - selector, err = newAllocSelector(ctx, m.index, stores.FTUnsealed, stores.PathSealing) + selector = newAllocSelector(ctx, m.index, stores.FTUnsealed, stores.PathSealing) } else { // use existing selector, err = newExistingSelector(ctx, m.index, sector, stores.FTUnsealed, false) } @@ -300,10 +300,7 @@ func (m *Manager) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke // TODO: also consider where the unsealed data sits - selector, err := newAllocSelector(ctx, m.index, stores.FTCache|stores.FTSealed, stores.PathSealing) - if err != nil { - return nil, xerrors.Errorf("creating path selector: %w", err) - } + selector := newAllocSelector(ctx, m.index, stores.FTCache|stores.FTSealed, stores.PathSealing) err = m.sched.Schedule(ctx, sector, sealtasks.TTPreCommit1, selector, schedFetch(sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error { p, err := w.SealPreCommit1(ctx, sector, ticket, pieces) @@ -417,11 +414,7 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID, keepU return err } - fetchSel, err := newAllocSelector(ctx, m.index, stores.FTCache|stores.FTSealed, stores.PathStorage) - if err != nil { - return xerrors.Errorf("creating fetchSel: %w", err) - } - + fetchSel := newAllocSelector(ctx, m.index, stores.FTCache|stores.FTSealed, stores.PathStorage) moveUnsealed := unsealed { if len(keepUnsealed) == 0 { diff --git a/sched_test.go b/sched_test.go index d0d0e7ca9..e810b6a0d 100644 --- a/sched_test.go +++ b/sched_test.go @@ -2,9 +2,21 @@ package sectorstorage import ( "context" + "io" + "sync" "testing" + "time" + "github.com/ipfs/go-cid" "github.com/stretchr/testify/require" + + "github.com/filecoin-project/specs-actors/actors/abi" + + "github.com/filecoin-project/sector-storage/fsutil" + "github.com/filecoin-project/sector-storage/sealtasks" + "github.com/filecoin-project/sector-storage/stores" + "github.com/filecoin-project/sector-storage/storiface" + "github.com/filecoin-project/specs-storage/storage" ) func TestWithPriority(t *testing.T) { @@ -16,3 +28,399 @@ func TestWithPriority(t *testing.T) { require.Equal(t, 2222, getPriority(ctx)) } + +type schedTestWorker struct { + name string + taskTypes map[sealtasks.TaskType]struct{} + paths []stores.StoragePath + + closed bool + closing chan struct{} +} + +func (s *schedTestWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) { + panic("implement me") +} + +func (s *schedTestWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storage.SectorCids, error) { + panic("implement me") +} + +func (s *schedTestWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) { + panic("implement me") +} + +func (s *schedTestWorker) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storage.Proof, error) { + panic("implement me") +} + +func (s *schedTestWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error { + panic("implement me") +} + +func (s *schedTestWorker) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error { + panic("implement me") +} + +func (s *schedTestWorker) Remove(ctx context.Context, sector abi.SectorID) error { + panic("implement me") +} + +func (s *schedTestWorker) NewSector(ctx context.Context, sector abi.SectorID) error { + panic("implement me") +} + +func (s *schedTestWorker) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) { + panic("implement me") +} + +func (s *schedTestWorker) MoveStorage(ctx context.Context, sector abi.SectorID) error { + panic("implement me") +} + +func (s *schedTestWorker) Fetch(ctx context.Context, id abi.SectorID, ft stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) error { + panic("implement me") +} + +func (s *schedTestWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) error { + panic("implement me") +} + +func (s *schedTestWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error { + panic("implement me") +} + +func (s *schedTestWorker) TaskTypes(ctx context.Context) (map[sealtasks.TaskType]struct{}, error) { + return s.taskTypes, nil +} + +func (s *schedTestWorker) Paths(ctx context.Context) ([]stores.StoragePath, error) { + return s.paths, nil +} + +func (s *schedTestWorker) Info(ctx context.Context) (storiface.WorkerInfo, error) { + return storiface.WorkerInfo{ + Hostname: s.name, + Resources: storiface.WorkerResources{ + MemPhysical: 128 << 30, + MemSwap: 200 << 30, + MemReserved: 2 << 30, + CPUs: 32, + GPUs: []string{"a GPU"}, + }, + }, nil +} + +func (s *schedTestWorker) Closing(ctx context.Context) (<-chan struct{}, error) { + return s.closing, nil +} + +func (s *schedTestWorker) Close() error { + if !s.closed { + s.closed = true + close(s.closing) + } + return nil +} + +var _ Worker = &schedTestWorker{} + +func addTestWorker(t *testing.T, sched *scheduler, index *stores.Index, name string, taskTypes map[sealtasks.TaskType]struct{}) { + w := &schedTestWorker{ + name: name, + taskTypes: taskTypes, + paths: []stores.StoragePath{{ID: "bb-8", Weight: 2, LocalPath: "food", CanSeal: true, CanStore: true}}, + + closing: make(chan struct{}), + } + + for _, path := range w.paths { + err := index.StorageAttach(context.TODO(), stores.StorageInfo{ + ID: path.ID, + URLs: nil, + Weight: path.Weight, + CanSeal: path.CanSeal, + CanStore: path.CanStore, + }, fsutil.FsStat{ + Capacity: 1 << 40, + Available: 1 << 40, + Reserved: 3, + }) + require.NoError(t, err) + } + + info, err := w.Info(context.TODO()) + require.NoError(t, err) + + sched.newWorkers <- &workerHandle{ + w: w, + info: info, + preparing: &activeResources{}, + active: &activeResources{}, + } +} + +func TestSchedStartStop(t *testing.T) { + spt := abi.RegisteredSealProof_StackedDrg32GiBV1 + sched := newScheduler(spt) + go sched.runSched() + + addTestWorker(t, sched, stores.NewIndex(), "fred", nil) + + sched.schedClose() +} + +func TestSched(t *testing.T) { + ctx := context.Background() + spt := abi.RegisteredSealProof_StackedDrg32GiBV1 + + sectorAte := abi.SectorID{ + Miner: 8, + Number: 8, + } + + type workerSpec struct { + name string + taskTypes map[sealtasks.TaskType]struct{} + } + + noopPrepare := func(ctx context.Context, w Worker) error { + return nil + } + + type runMeta struct { + done map[string]chan struct{} + + wg sync.WaitGroup + } + + type task func(*testing.T, *scheduler, *stores.Index, *runMeta) + + sched := func(taskName, expectWorker string, taskType sealtasks.TaskType) task { + return func(t *testing.T, sched *scheduler, index *stores.Index, rm *runMeta) { + done := make(chan struct{}) + rm.done[taskName] = done + + sel := newAllocSelector(ctx, index, stores.FTCache, stores.PathSealing) + + rm.wg.Add(1) + go func() { + defer rm.wg.Done() + + err := sched.Schedule(ctx, sectorAte, taskType, sel, noopPrepare, func(ctx context.Context, w Worker) error { + wi, err := w.Info(ctx) + require.NoError(t, err) + + require.Equal(t, expectWorker, wi.Hostname) + + log.Info("IN ", taskName) + + for { + _, ok := <-done + if !ok { + break + } + } + + log.Info("OUT ", taskName) + + return nil + }) + require.NoError(t, err) + }() + } + } + + taskStarted := func(name string) task { + return func(t *testing.T, sched *scheduler, index *stores.Index, rm *runMeta) { + rm.done[name] <- struct{}{} + } + } + + taskDone := func(name string) task { + return func(t *testing.T, sched *scheduler, index *stores.Index, rm *runMeta) { + rm.done[name] <- struct{}{} + close(rm.done[name]) + } + } + + taskNotScheduled := func(name string) task { + return func(t *testing.T, sched *scheduler, index *stores.Index, rm *runMeta) { + select { + case rm.done[name] <- struct{}{}: + t.Fatal("not expected") + case <-time.After(10 * time.Millisecond): // TODO: better synchronization thingy + } + } + } + + testFunc := func(workers []workerSpec, tasks []task) func(t *testing.T) { + return func(t *testing.T) { + index := stores.NewIndex() + + sched := newScheduler(spt) + go sched.runSched() + + for _, worker := range workers { + addTestWorker(t, sched, index, worker.name, worker.taskTypes) + } + + rm := runMeta{ + done: map[string]chan struct{}{}, + } + + for _, task := range tasks { + task(t, sched, index, &rm) + } + + log.Info("wait for async stuff") + rm.wg.Wait() + + sched.schedClose() + } + } + + multTask := func(tasks ...task) task { + return func(t *testing.T, s *scheduler, index *stores.Index, meta *runMeta) { + for _, tsk := range tasks { + tsk(t, s, index, meta) + } + } + } + + t.Run("one-pc1", testFunc([]workerSpec{ + {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1:{}}}, + }, []task{ + sched("pc1-1", "fred", sealtasks.TTPreCommit1), + taskDone("pc1-1"), + })) + + t.Run("pc1-2workers-1", testFunc([]workerSpec{ + {name: "fred2", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit2:{}}}, + {name: "fred1", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1:{}}}, + }, []task{ + sched("pc1-1", "fred1", sealtasks.TTPreCommit1), + taskDone("pc1-1"), + })) + + t.Run("pc1-2workers-2", testFunc([]workerSpec{ + {name: "fred1", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1:{}}}, + {name: "fred2", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit2:{}}}, + }, []task{ + sched("pc1-1", "fred1", sealtasks.TTPreCommit1), + taskDone("pc1-1"), + })) + + t.Run("pc1-block-pc2", testFunc([]workerSpec{ + {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1:{}, sealtasks.TTPreCommit2:{}}}, + }, []task{ + sched("pc1", "fred", sealtasks.TTPreCommit1), + taskStarted("pc1"), + + sched("pc2", "fred", sealtasks.TTPreCommit2), + taskNotScheduled("pc2"), + + taskDone("pc1"), + taskDone("pc2"), + })) + + t.Run("pc2-block-pc1", testFunc([]workerSpec{ + {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1:{}, sealtasks.TTPreCommit2:{}}}, + }, []task{ + sched("pc2", "fred", sealtasks.TTPreCommit2), + taskStarted("pc2"), + + sched("pc1", "fred", sealtasks.TTPreCommit1), + taskNotScheduled("pc1"), + + taskDone("pc2"), + taskDone("pc1"), + })) + + t.Run("pc1-batching", testFunc([]workerSpec{ + {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1:{}}}, + }, []task{ + sched("t1", "fred", sealtasks.TTPreCommit1), + taskStarted("t1"), + + sched("t2", "fred", sealtasks.TTPreCommit1), + taskStarted("t2"), + + // with worker settings, we can only run 2 parallel PC1s + + // start 2 more to fill fetch buffer + + sched("t3", "fred", sealtasks.TTPreCommit1), + taskNotScheduled("t3"), + + sched("t4", "fred", sealtasks.TTPreCommit1), + taskNotScheduled("t4"), + + taskDone("t1"), + taskDone("t2"), + + taskStarted("t3"), + taskStarted("t4"), + + taskDone("t3"), + taskDone("t4"), + })) + + twoPC1 := func(prefix string, schedAssert func(name string) task) task { + return multTask( + sched(prefix + "-a", "fred", sealtasks.TTPreCommit1), + schedAssert(prefix + "-a"), + + sched(prefix + "-b", "fred", sealtasks.TTPreCommit1), + schedAssert(prefix + "-b"), + ) + } + + twoPC1Done := func(prefix string) task { + return multTask( + taskDone(prefix + "-1"), + taskDone(prefix + "-b"), + ) + } + + t.Run("pc1-pc2-prio", testFunc([]workerSpec{ + {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1:{}, sealtasks.TTPreCommit2: {}}}, + }, []task{ + // fill exec/fetch buffers + twoPC1("w0", taskStarted), + twoPC1("w1", taskNotScheduled), + + // fill worker windows + twoPC1("w2", taskNotScheduled), + twoPC1("w3", taskNotScheduled), + + // windowed + + sched("t1", "fred", sealtasks.TTPreCommit1), + taskNotScheduled("t1"), + + sched("t2", "fred", sealtasks.TTPreCommit1), + taskNotScheduled("t2"), + + sched("t3", "fred", sealtasks.TTPreCommit2), + taskNotScheduled("t3"), + + twoPC1Done("w0"), + twoPC1Done("w1"), + twoPC1Done("w2"), + twoPC1Done("w3"), + + taskStarted("t1"), + taskNotScheduled("t2"), + taskNotScheduled("t3"), + + taskDone("t1"), + + taskStarted("t2"), + taskStarted("t3"), + + taskDone("t2"), + taskDone("t3"), + })) +} + diff --git a/selector_alloc.go b/selector_alloc.go index 874bf7bb0..53e121737 100644 --- a/selector_alloc.go +++ b/selector_alloc.go @@ -17,12 +17,12 @@ type allocSelector struct { ptype stores.PathType } -func newAllocSelector(ctx context.Context, index stores.SectorIndex, alloc stores.SectorFileType, ptype stores.PathType) (*allocSelector, error) { +func newAllocSelector(ctx context.Context, index stores.SectorIndex, alloc stores.SectorFileType, ptype stores.PathType) (*allocSelector) { return &allocSelector{ index: index, alloc: alloc, ptype: ptype, - }, nil + } } func (s *allocSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredSealProof, whnd *workerHandle) (bool, error) { From 2e557573f4864fdb431f205b59b4cb7574dc19db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 16 Jul 2020 23:41:15 +0200 Subject: [PATCH 173/199] gofmt --- sched_test.go | 59 +++++++++++++++++++++++------------------------ selector_alloc.go | 2 +- 2 files changed, 30 insertions(+), 31 deletions(-) diff --git a/sched_test.go b/sched_test.go index e810b6a0d..1c7b88905 100644 --- a/sched_test.go +++ b/sched_test.go @@ -30,11 +30,11 @@ func TestWithPriority(t *testing.T) { } type schedTestWorker struct { - name string + name string taskTypes map[sealtasks.TaskType]struct{} - paths []stores.StoragePath + paths []stores.StoragePath - closed bool + closed bool closing chan struct{} } @@ -100,7 +100,7 @@ func (s *schedTestWorker) Paths(ctx context.Context) ([]stores.StoragePath, erro func (s *schedTestWorker) Info(ctx context.Context) (storiface.WorkerInfo, error) { return storiface.WorkerInfo{ - Hostname: s.name, + Hostname: s.name, Resources: storiface.WorkerResources{ MemPhysical: 128 << 30, MemSwap: 200 << 30, @@ -127,20 +127,20 @@ var _ Worker = &schedTestWorker{} func addTestWorker(t *testing.T, sched *scheduler, index *stores.Index, name string, taskTypes map[sealtasks.TaskType]struct{}) { w := &schedTestWorker{ - name: name, + name: name, taskTypes: taskTypes, - paths: []stores.StoragePath{{ID: "bb-8", Weight: 2, LocalPath: "food", CanSeal: true, CanStore: true}}, + paths: []stores.StoragePath{{ID: "bb-8", Weight: 2, LocalPath: "food", CanSeal: true, CanStore: true}}, closing: make(chan struct{}), } for _, path := range w.paths { err := index.StorageAttach(context.TODO(), stores.StorageInfo{ - ID: path.ID, - URLs: nil, - Weight: path.Weight, - CanSeal: path.CanSeal, - CanStore: path.CanStore, + ID: path.ID, + URLs: nil, + Weight: path.Weight, + CanSeal: path.CanSeal, + CanStore: path.CanStore, }, fsutil.FsStat{ Capacity: 1 << 40, Available: 1 << 40, @@ -180,7 +180,7 @@ func TestSched(t *testing.T) { } type workerSpec struct { - name string + name string taskTypes map[sealtasks.TaskType]struct{} } @@ -289,30 +289,30 @@ func TestSched(t *testing.T) { } t.Run("one-pc1", testFunc([]workerSpec{ - {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1:{}}}, + {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}}, }, []task{ sched("pc1-1", "fred", sealtasks.TTPreCommit1), taskDone("pc1-1"), })) t.Run("pc1-2workers-1", testFunc([]workerSpec{ - {name: "fred2", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit2:{}}}, - {name: "fred1", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1:{}}}, + {name: "fred2", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit2: {}}}, + {name: "fred1", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}}, }, []task{ sched("pc1-1", "fred1", sealtasks.TTPreCommit1), taskDone("pc1-1"), })) t.Run("pc1-2workers-2", testFunc([]workerSpec{ - {name: "fred1", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1:{}}}, - {name: "fred2", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit2:{}}}, + {name: "fred1", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}}, + {name: "fred2", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit2: {}}}, }, []task{ sched("pc1-1", "fred1", sealtasks.TTPreCommit1), taskDone("pc1-1"), })) t.Run("pc1-block-pc2", testFunc([]workerSpec{ - {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1:{}, sealtasks.TTPreCommit2:{}}}, + {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}}, }, []task{ sched("pc1", "fred", sealtasks.TTPreCommit1), taskStarted("pc1"), @@ -325,7 +325,7 @@ func TestSched(t *testing.T) { })) t.Run("pc2-block-pc1", testFunc([]workerSpec{ - {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1:{}, sealtasks.TTPreCommit2:{}}}, + {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}}, }, []task{ sched("pc2", "fred", sealtasks.TTPreCommit2), taskStarted("pc2"), @@ -338,7 +338,7 @@ func TestSched(t *testing.T) { })) t.Run("pc1-batching", testFunc([]workerSpec{ - {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1:{}}}, + {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}}, }, []task{ sched("t1", "fred", sealtasks.TTPreCommit1), taskStarted("t1"), @@ -368,23 +368,23 @@ func TestSched(t *testing.T) { twoPC1 := func(prefix string, schedAssert func(name string) task) task { return multTask( - sched(prefix + "-a", "fred", sealtasks.TTPreCommit1), - schedAssert(prefix + "-a"), + sched(prefix+"-a", "fred", sealtasks.TTPreCommit1), + schedAssert(prefix+"-a"), - sched(prefix + "-b", "fred", sealtasks.TTPreCommit1), - schedAssert(prefix + "-b"), - ) + sched(prefix+"-b", "fred", sealtasks.TTPreCommit1), + schedAssert(prefix+"-b"), + ) } twoPC1Done := func(prefix string) task { return multTask( - taskDone(prefix + "-1"), - taskDone(prefix + "-b"), - ) + taskDone(prefix+"-1"), + taskDone(prefix+"-b"), + ) } t.Run("pc1-pc2-prio", testFunc([]workerSpec{ - {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1:{}, sealtasks.TTPreCommit2: {}}}, + {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}}, }, []task{ // fill exec/fetch buffers twoPC1("w0", taskStarted), @@ -423,4 +423,3 @@ func TestSched(t *testing.T) { taskDone("t3"), })) } - diff --git a/selector_alloc.go b/selector_alloc.go index 53e121737..35221921f 100644 --- a/selector_alloc.go +++ b/selector_alloc.go @@ -17,7 +17,7 @@ type allocSelector struct { ptype stores.PathType } -func newAllocSelector(ctx context.Context, index stores.SectorIndex, alloc stores.SectorFileType, ptype stores.PathType) (*allocSelector) { +func newAllocSelector(ctx context.Context, index stores.SectorIndex, alloc stores.SectorFileType, ptype stores.PathType) *allocSelector { return &allocSelector{ index: index, alloc: alloc, From cab0c74e08b35b3687c60f24a7a8e2724e5f4379 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 17 Jul 2020 01:26:55 +0200 Subject: [PATCH 174/199] more sched test debugging --- sched.go | 11 ++++ sched_test.go | 152 +++++++++++++++++++++++++++++++------------------- 2 files changed, 105 insertions(+), 58 deletions(-) diff --git a/sched.go b/sched.go index 241440beb..d89dad3f5 100644 --- a/sched.go +++ b/sched.go @@ -69,6 +69,7 @@ type scheduler struct { openWindows []*schedWindowRequest closing chan struct{} + testSync chan struct{} // used for testing } type workerHandle struct { @@ -195,6 +196,9 @@ func (sh *scheduler) runSched() { heap.Push(sh.schedQueue, req) sh.trySched() + if sh.testSync != nil { + sh.testSync <- struct{}{} + } case req := <-sh.windowRequests: sh.openWindows = append(sh.openWindows, req) sh.trySched() @@ -226,6 +230,8 @@ func (sh *scheduler) trySched() { windows := make([]schedWindow, len(sh.openWindows)) acceptableWindows := make([][]int, sh.schedQueue.Len()) + log.Debugf("trySched %d queued; %d open windows", sh.schedQueue.Len(), len(windows)) + // Step 1 for sqi := 0; sqi < sh.schedQueue.Len(); sqi++ { task := (*sh.schedQueue)[sqi] @@ -295,11 +301,15 @@ func (sh *scheduler) trySched() { wid := sh.openWindows[wnd].worker wr := sh.workers[wid].info.Resources + log.Debugf("trySched try assign sqi:%d sector %d to window %d", sqi, task.sector.Number, wnd) + // TODO: allow bigger windows if !windows[wnd].allocated.canHandleRequest(needRes, wid, wr) { continue } + log.Debugf("trySched ASSIGNED sqi:%d sector %d to window %d", sqi, task.sector.Number, wnd) + windows[wnd].allocated.add(wr, needRes) selectedWindow = wnd @@ -419,6 +429,7 @@ func (sh *scheduler) runWorker(wid WorkerID) { break assignLoop } + log.Debugf("assign worker sector %d", todo.sector.Number) err := sh.assignWorker(taskDone, wid, worker, todo) sh.workersLk.Unlock() diff --git a/sched_test.go b/sched_test.go index 1c7b88905..26961a4f6 100644 --- a/sched_test.go +++ b/sched_test.go @@ -2,7 +2,9 @@ package sectorstorage import ( "context" + "fmt" "io" + "runtime" "sync" "testing" "time" @@ -171,13 +173,10 @@ func TestSchedStartStop(t *testing.T) { } func TestSched(t *testing.T) { - ctx := context.Background() - spt := abi.RegisteredSealProof_StackedDrg32GiBV1 + ctx, done := context.WithTimeout(context.Background(), 20 * time.Second) + defer done() - sectorAte := abi.SectorID{ - Miner: 8, - Number: 8, - } + spt := abi.RegisteredSealProof_StackedDrg32GiBV1 type workerSpec struct { name string @@ -196,7 +195,10 @@ func TestSched(t *testing.T) { type task func(*testing.T, *scheduler, *stores.Index, *runMeta) - sched := func(taskName, expectWorker string, taskType sealtasks.TaskType) task { + sched := func(taskName, expectWorker string, sid abi.SectorNumber, taskType sealtasks.TaskType) task { + _, _, l, _ := runtime.Caller(1) + _, _, l2, _ := runtime.Caller(2) + return func(t *testing.T, sched *scheduler, index *stores.Index, rm *runMeta) { done := make(chan struct{}) rm.done[taskName] = done @@ -207,7 +209,12 @@ func TestSched(t *testing.T) { go func() { defer rm.wg.Done() - err := sched.Schedule(ctx, sectorAte, taskType, sel, noopPrepare, func(ctx context.Context, w Worker) error { + sectorNum := abi.SectorID{ + Miner: 8, + Number: sid, + } + + err := sched.Schedule(ctx, sectorNum, taskType, sel, noopPrepare, func(ctx context.Context, w Worker) error { wi, err := w.Info(ctx) require.NoError(t, err) @@ -226,29 +233,45 @@ func TestSched(t *testing.T) { return nil }) - require.NoError(t, err) + require.NoError(t, err, fmt.Sprint(l, l2)) }() + + <-sched.testSync } } taskStarted := func(name string) task { + _, _, l, _ := runtime.Caller(1) + _, _, l2, _ := runtime.Caller(2) return func(t *testing.T, sched *scheduler, index *stores.Index, rm *runMeta) { - rm.done[name] <- struct{}{} + select { + case rm.done[name] <- struct{}{}: + case <-ctx.Done(): + t.Fatal("ctx error", ctx.Err(), l, l2) + } } } taskDone := func(name string) task { + _, _, l, _ := runtime.Caller(1) + _, _, l2, _ := runtime.Caller(2) return func(t *testing.T, sched *scheduler, index *stores.Index, rm *runMeta) { - rm.done[name] <- struct{}{} + select { + case rm.done[name] <- struct{}{}: + case <-ctx.Done(): + t.Fatal("ctx error", ctx.Err(), l, l2) + } close(rm.done[name]) } } taskNotScheduled := func(name string) task { + _, _, l, _ := runtime.Caller(1) + _, _, l2, _ := runtime.Caller(2) return func(t *testing.T, sched *scheduler, index *stores.Index, rm *runMeta) { select { case rm.done[name] <- struct{}{}: - t.Fatal("not expected") + t.Fatal("not expected", l, l2) case <-time.After(10 * time.Millisecond): // TODO: better synchronization thingy } } @@ -259,6 +282,8 @@ func TestSched(t *testing.T) { index := stores.NewIndex() sched := newScheduler(spt) + sched.testSync = make(chan struct{}) + go sched.runSched() for _, worker := range workers { @@ -291,7 +316,7 @@ func TestSched(t *testing.T) { t.Run("one-pc1", testFunc([]workerSpec{ {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}}, }, []task{ - sched("pc1-1", "fred", sealtasks.TTPreCommit1), + sched("pc1-1", "fred", 8, sealtasks.TTPreCommit1), taskDone("pc1-1"), })) @@ -299,7 +324,7 @@ func TestSched(t *testing.T) { {name: "fred2", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit2: {}}}, {name: "fred1", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}}, }, []task{ - sched("pc1-1", "fred1", sealtasks.TTPreCommit1), + sched("pc1-1", "fred1", 8, sealtasks.TTPreCommit1), taskDone("pc1-1"), })) @@ -307,17 +332,17 @@ func TestSched(t *testing.T) { {name: "fred1", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}}, {name: "fred2", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit2: {}}}, }, []task{ - sched("pc1-1", "fred1", sealtasks.TTPreCommit1), + sched("pc1-1", "fred1", 8, sealtasks.TTPreCommit1), taskDone("pc1-1"), })) t.Run("pc1-block-pc2", testFunc([]workerSpec{ {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}}, }, []task{ - sched("pc1", "fred", sealtasks.TTPreCommit1), + sched("pc1", "fred", 8, sealtasks.TTPreCommit1), taskStarted("pc1"), - sched("pc2", "fred", sealtasks.TTPreCommit2), + sched("pc2", "fred", 8, sealtasks.TTPreCommit2), taskNotScheduled("pc2"), taskDone("pc1"), @@ -327,10 +352,10 @@ func TestSched(t *testing.T) { t.Run("pc2-block-pc1", testFunc([]workerSpec{ {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}}, }, []task{ - sched("pc2", "fred", sealtasks.TTPreCommit2), + sched("pc2", "fred", 8, sealtasks.TTPreCommit2), taskStarted("pc2"), - sched("pc1", "fred", sealtasks.TTPreCommit1), + sched("pc1", "fred", 8, sealtasks.TTPreCommit1), taskNotScheduled("pc1"), taskDone("pc2"), @@ -340,20 +365,20 @@ func TestSched(t *testing.T) { t.Run("pc1-batching", testFunc([]workerSpec{ {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}}, }, []task{ - sched("t1", "fred", sealtasks.TTPreCommit1), + sched("t1", "fred", 8, sealtasks.TTPreCommit1), taskStarted("t1"), - sched("t2", "fred", sealtasks.TTPreCommit1), + sched("t2", "fred", 8, sealtasks.TTPreCommit1), taskStarted("t2"), // with worker settings, we can only run 2 parallel PC1s // start 2 more to fill fetch buffer - sched("t3", "fred", sealtasks.TTPreCommit1), + sched("t3", "fred", 8, sealtasks.TTPreCommit1), taskNotScheduled("t3"), - sched("t4", "fred", sealtasks.TTPreCommit1), + sched("t4", "fred", 8, sealtasks.TTPreCommit1), taskNotScheduled("t4"), taskDone("t1"), @@ -366,60 +391,71 @@ func TestSched(t *testing.T) { taskDone("t4"), })) - twoPC1 := func(prefix string, schedAssert func(name string) task) task { + twoPC1 := func(prefix string, sid abi.SectorNumber, schedAssert func(name string) task) task { return multTask( - sched(prefix+"-a", "fred", sealtasks.TTPreCommit1), + sched(prefix+"-a", "fred", sid, sealtasks.TTPreCommit1), schedAssert(prefix+"-a"), - sched(prefix+"-b", "fred", sealtasks.TTPreCommit1), + sched(prefix+"-b", "fred", sid + 1, sealtasks.TTPreCommit1), schedAssert(prefix+"-b"), ) } - twoPC1Done := func(prefix string) task { + twoPC1Act := func(prefix string, schedAssert func(name string) task) task { return multTask( - taskDone(prefix+"-1"), - taskDone(prefix+"-b"), + schedAssert(prefix+"-a"), + schedAssert(prefix+"-b"), ) } - t.Run("pc1-pc2-prio", testFunc([]workerSpec{ - {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}}, - }, []task{ - // fill exec/fetch buffers - twoPC1("w0", taskStarted), - twoPC1("w1", taskNotScheduled), + for i := 0; i < 100; i++ { + t.Run("pc1-pc2-prio", testFunc([]workerSpec{ + {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}}, + }, []task{ + // fill exec/fetch buffers + twoPC1("w0", 0, taskStarted), + twoPC1("w1", 2, taskNotScheduled), - // fill worker windows - twoPC1("w2", taskNotScheduled), - twoPC1("w3", taskNotScheduled), + // fill worker windows + twoPC1("w2", 4, taskNotScheduled), + //twoPC1("w3", taskNotScheduled), - // windowed + // windowed - sched("t1", "fred", sealtasks.TTPreCommit1), - taskNotScheduled("t1"), + sched("t1", "fred", 6, sealtasks.TTPreCommit1), + taskNotScheduled("t1"), - sched("t2", "fred", sealtasks.TTPreCommit1), - taskNotScheduled("t2"), + sched("t2", "fred", 7, sealtasks.TTPreCommit1), + taskNotScheduled("t2"), - sched("t3", "fred", sealtasks.TTPreCommit2), - taskNotScheduled("t3"), + sched("t3", "fred", 8, sealtasks.TTPreCommit2), + taskNotScheduled("t3"), - twoPC1Done("w0"), - twoPC1Done("w1"), - twoPC1Done("w2"), - twoPC1Done("w3"), + twoPC1Act("w0", taskDone), + twoPC1Act("w1", taskStarted), + twoPC1Act("w2", taskNotScheduled), + //twoPC1Act("w3", taskNotScheduled), - taskStarted("t1"), - taskNotScheduled("t2"), - taskNotScheduled("t3"), + twoPC1Act("w1", taskDone), + twoPC1Act("w2", taskStarted), + //twoPC1Act("w3", taskNotScheduled), - taskDone("t1"), + twoPC1Act("w2", taskDone), + //twoPC1Act("w3", taskStarted), - taskStarted("t2"), - taskStarted("t3"), + //twoPC1Act("w3", taskDone), - taskDone("t2"), - taskDone("t3"), - })) + taskStarted("t3"), + taskNotScheduled("t1"), + taskNotScheduled("t2"), + + taskDone("t3"), + + taskStarted("t1"), + taskStarted("t2"), + + taskDone("t1"), + taskDone("t2"), + })) + } } From bf315e63d77f7eaa5d87058d32ca507bef904ca4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 17 Jul 2020 01:32:49 +0200 Subject: [PATCH 175/199] sched: working tests --- sched.go | 2 +- sched_test.go | 34 +++++++++++----------------------- 2 files changed, 12 insertions(+), 24 deletions(-) diff --git a/sched.go b/sched.go index d89dad3f5..44e62f6c2 100644 --- a/sched.go +++ b/sched.go @@ -68,7 +68,7 @@ type scheduler struct { schedQueue *requestQueue openWindows []*schedWindowRequest - closing chan struct{} + closing chan struct{} testSync chan struct{} // used for testing } diff --git a/sched_test.go b/sched_test.go index 26961a4f6..e6bd8d220 100644 --- a/sched_test.go +++ b/sched_test.go @@ -173,7 +173,7 @@ func TestSchedStartStop(t *testing.T) { } func TestSched(t *testing.T) { - ctx, done := context.WithTimeout(context.Background(), 20 * time.Second) + ctx, done := context.WithTimeout(context.Background(), 20*time.Second) defer done() spt := abi.RegisteredSealProof_StackedDrg32GiBV1 @@ -183,7 +183,7 @@ func TestSched(t *testing.T) { taskTypes map[sealtasks.TaskType]struct{} } - noopPrepare := func(ctx context.Context, w Worker) error { + noopAction := func(ctx context.Context, w Worker) error { return nil } @@ -214,7 +214,7 @@ func TestSched(t *testing.T) { Number: sid, } - err := sched.Schedule(ctx, sectorNum, taskType, sel, noopPrepare, func(ctx context.Context, w Worker) error { + err := sched.Schedule(ctx, sectorNum, taskType, sel, func(ctx context.Context, w Worker) error { wi, err := w.Info(ctx) require.NoError(t, err) @@ -232,7 +232,7 @@ func TestSched(t *testing.T) { log.Info("OUT ", taskName) return nil - }) + }, noopAction) require.NoError(t, err, fmt.Sprint(l, l2)) }() @@ -396,7 +396,7 @@ func TestSched(t *testing.T) { sched(prefix+"-a", "fred", sid, sealtasks.TTPreCommit1), schedAssert(prefix+"-a"), - sched(prefix+"-b", "fred", sid + 1, sealtasks.TTPreCommit1), + sched(prefix+"-b", "fred", sid+1, sealtasks.TTPreCommit1), schedAssert(prefix+"-b"), ) } @@ -408,42 +408,30 @@ func TestSched(t *testing.T) { ) } - for i := 0; i < 100; i++ { + // run this one a bunch of times, it had a very annoying tendency to fail randomly + for i := 0; i < 40; i++ { t.Run("pc1-pc2-prio", testFunc([]workerSpec{ {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}}, }, []task{ - // fill exec/fetch buffers + // fill queues twoPC1("w0", 0, taskStarted), twoPC1("w1", 2, taskNotScheduled), - // fill worker windows - twoPC1("w2", 4, taskNotScheduled), - //twoPC1("w3", taskNotScheduled), - // windowed - sched("t1", "fred", 6, sealtasks.TTPreCommit1), + sched("t1", "fred", 8, sealtasks.TTPreCommit1), taskNotScheduled("t1"), - sched("t2", "fred", 7, sealtasks.TTPreCommit1), + sched("t2", "fred", 9, sealtasks.TTPreCommit1), taskNotScheduled("t2"), - sched("t3", "fred", 8, sealtasks.TTPreCommit2), + sched("t3", "fred", 10, sealtasks.TTPreCommit2), taskNotScheduled("t3"), twoPC1Act("w0", taskDone), twoPC1Act("w1", taskStarted), - twoPC1Act("w2", taskNotScheduled), - //twoPC1Act("w3", taskNotScheduled), twoPC1Act("w1", taskDone), - twoPC1Act("w2", taskStarted), - //twoPC1Act("w3", taskNotScheduled), - - twoPC1Act("w2", taskDone), - //twoPC1Act("w3", taskStarted), - - //twoPC1Act("w3", taskDone), taskStarted("t3"), taskNotScheduled("t1"), From 908d47305bc8aa7fb63725c3991dafe57e1da23d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 17 Jul 2020 01:46:59 +0200 Subject: [PATCH 176/199] fix race in runWorker --- sched.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/sched.go b/sched.go index 44e62f6c2..caf67c678 100644 --- a/sched.go +++ b/sched.go @@ -367,7 +367,10 @@ func (sh *scheduler) trySched() { func (sh *scheduler) runWorker(wid WorkerID) { go func() { + sh.workersLk.Lock() worker := sh.workers[wid] + sh.workersLk.Unlock() + scheduledWindows := make(chan *schedWindow, SchedWindows) taskDone := make(chan struct{}, 1) windowsRequested := 0 From f1b38371866bec524ea8c5603b9e38d1a3391161 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 17 Jul 2020 12:59:12 +0200 Subject: [PATCH 177/199] fix worker setup/cleanup raciness --- manager.go | 4 +-- sched.go | 87 +++++++++++++++++++++++++++++++++++++++----------- sched_test.go | 7 ++-- sched_watch.go | 6 +++- 4 files changed, 80 insertions(+), 24 deletions(-) diff --git a/manager.go b/manager.go index 0cd081d92..fc3be18c1 100644 --- a/manager.go +++ b/manager.go @@ -489,8 +489,8 @@ func (m *Manager) FsStat(ctx context.Context, id stores.ID) (fsutil.FsStat, erro return m.storage.FsStat(ctx, id) } -func (m *Manager) Close() error { - return m.sched.Close() +func (m *Manager) Close(ctx context.Context) error { + return m.sched.Close(ctx) } var _ SectorManager = &Manager{} diff --git a/sched.go b/sched.go index caf67c678..bec5ee0c5 100644 --- a/sched.go +++ b/sched.go @@ -3,6 +3,7 @@ package sectorstorage import ( "container/heap" "context" + "fmt" "math/rand" "sort" "sync" @@ -69,6 +70,7 @@ type scheduler struct { openWindows []*schedWindowRequest closing chan struct{} + closed chan struct{} testSync chan struct{} // used for testing } @@ -79,6 +81,11 @@ type workerHandle struct { preparing *activeResources active *activeResources + + // for sync manager goroutine closing + cleanupStarted bool + closedMgr chan struct{} + closingMgr chan struct{} } type schedWindowRequest struct { @@ -138,6 +145,7 @@ func newScheduler(spt abi.RegisteredSealProof) *scheduler { schedQueue: &requestQueue{}, closing: make(chan struct{}), + closed: make(chan struct{}), } } @@ -182,6 +190,8 @@ func (r *workerRequest) respond(err error) { } func (sh *scheduler) runSched() { + defer close(sh.closed) + go sh.runWorkerWatcher() for { @@ -366,11 +376,23 @@ func (sh *scheduler) trySched() { } func (sh *scheduler) runWorker(wid WorkerID) { + var ready sync.WaitGroup + ready.Add(1) + defer ready.Wait() + go func() { sh.workersLk.Lock() - worker := sh.workers[wid] + worker, found := sh.workers[wid] sh.workersLk.Unlock() + ready.Done() + + if !found { + panic(fmt.Sprintf("worker %d not found", wid)) + } + + defer close(worker.closedMgr) + scheduledWindows := make(chan *schedWindow, SchedWindows) taskDone := make(chan struct{}, 1) windowsRequested := 0 @@ -403,6 +425,8 @@ func (sh *scheduler) runWorker(wid WorkerID) { return case <-workerClosing: return + case <-worker.closingMgr: + return } } @@ -415,6 +439,8 @@ func (sh *scheduler) runWorker(wid WorkerID) { return case <-workerClosing: return + case <-worker.closingMgr: + return } assignLoop: @@ -518,6 +544,9 @@ func (sh *scheduler) assignWorker(taskDone chan struct{}, wid WorkerID, w *worke } func (sh *scheduler) newWorker(w *workerHandle) { + w.closedMgr = make(chan struct{}) + w.closingMgr = make(chan struct{}) + sh.workersLk.Lock() id := sh.nextWorker @@ -526,13 +555,13 @@ func (sh *scheduler) newWorker(w *workerHandle) { sh.workersLk.Unlock() + sh.runWorker(id) + select { case sh.watchClosing <- id: case <-sh.closing: return } - - sh.runWorker(id) } func (sh *scheduler) dropWorker(wid WorkerID) { @@ -540,37 +569,59 @@ func (sh *scheduler) dropWorker(wid WorkerID) { defer sh.workersLk.Unlock() w := sh.workers[wid] + + sh.workerCleanup(wid, w) + delete(sh.workers, wid) +} - newWindows := make([]*schedWindowRequest, 0, len(sh.openWindows)) - for _, window := range sh.openWindows { - if window.worker != wid { - newWindows = append(newWindows, window) - } +func (sh *scheduler) workerCleanup(wid WorkerID, w *workerHandle) { + if !w.cleanupStarted { + close(w.closingMgr) + } + select { + case <-w.closedMgr: + case <-time.After(time.Second): + log.Errorf("timeout closing worker manager goroutine %d", wid) } - sh.openWindows = newWindows - // TODO: sync close worker goroutine + if !w.cleanupStarted { + w.cleanupStarted = true - go func() { - if err := w.w.Close(); err != nil { - log.Warnf("closing worker %d: %+v", err) + newWindows := make([]*schedWindowRequest, 0, len(sh.openWindows)) + for _, window := range sh.openWindows { + if window.worker != wid { + newWindows = append(newWindows, window) + } } - }() + sh.openWindows = newWindows + + log.Debugf("dropWorker %d", wid) + + go func() { + if err := w.w.Close(); err != nil { + log.Warnf("closing worker %d: %+v", err) + } + }() + } } func (sh *scheduler) schedClose() { sh.workersLk.Lock() defer sh.workersLk.Unlock() + log.Debugf("closing scheduler") for i, w := range sh.workers { - if err := w.w.Close(); err != nil { - log.Errorf("closing worker %d: %+v", i, err) - } + sh.workerCleanup(i, w) } } -func (sh *scheduler) Close() error { +func (sh *scheduler) Close(ctx context.Context) error { close(sh.closing) + select { + case <-sh.closed: + case <-ctx.Done(): + return ctx.Err() + } return nil } diff --git a/sched_test.go b/sched_test.go index e6bd8d220..67a5eeed3 100644 --- a/sched_test.go +++ b/sched_test.go @@ -119,6 +119,7 @@ func (s *schedTestWorker) Closing(ctx context.Context) (<-chan struct{}, error) func (s *schedTestWorker) Close() error { if !s.closed { + log.Info("close schedTestWorker") s.closed = true close(s.closing) } @@ -169,11 +170,11 @@ func TestSchedStartStop(t *testing.T) { addTestWorker(t, sched, stores.NewIndex(), "fred", nil) - sched.schedClose() + require.NoError(t, sched.Close(context.TODO())) } func TestSched(t *testing.T) { - ctx, done := context.WithTimeout(context.Background(), 20*time.Second) + ctx, done := context.WithTimeout(context.Background(), 30*time.Second) defer done() spt := abi.RegisteredSealProof_StackedDrg32GiBV1 @@ -301,7 +302,7 @@ func TestSched(t *testing.T) { log.Info("wait for async stuff") rm.wg.Wait() - sched.schedClose() + require.NoError(t, sched.Close(context.TODO())) } } diff --git a/sched_watch.go b/sched_watch.go index 214489083..d93cf1af3 100644 --- a/sched_watch.go +++ b/sched_watch.go @@ -74,7 +74,11 @@ func (sh *scheduler) runWorkerWatcher() { caseToWorker[toSet] = wid default: - wid := caseToWorker[n] + wid, found := caseToWorker[n] + if !found { + log.Errorf("worker ID not found for case %d", n) + continue + } delete(caseToWorker, n) cases[n] = reflect.SelectCase{ From c7da20e53cfaa712a1aad22f4426b747f381c13c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Tue, 21 Jul 2020 20:01:25 +0200 Subject: [PATCH 178/199] Add api to get active tasks --- manager.go | 5 +- sched.go | 7 ++- sched_test.go | 5 +- sealtasks/task.go | 24 +++++++++ stats.go | 13 +++++ storiface/worker.go | 15 ++++++ work_tracker.go | 129 ++++++++++++++++++++++++++++++++++++++++++++ 7 files changed, 194 insertions(+), 4 deletions(-) create mode 100644 work_tracker.go diff --git a/manager.go b/manager.go index fc3be18c1..063456fa9 100644 --- a/manager.go +++ b/manager.go @@ -166,7 +166,10 @@ func (m *Manager) AddWorker(ctx context.Context, w Worker) error { } m.sched.newWorkers <- &workerHandle{ - w: w, + w: w, + wt: &workTracker{ + running: map[uint64]storiface.WorkerJob{}, + }, info: info, preparing: &activeResources{}, active: &activeResources{}, diff --git a/sched.go b/sched.go index bec5ee0c5..ed48d097b 100644 --- a/sched.go +++ b/sched.go @@ -82,6 +82,9 @@ type workerHandle struct { preparing *activeResources active *activeResources + // stats / tracking + wt *workTracker + // for sync manager goroutine closing cleanupStarted bool closedMgr chan struct{} @@ -486,7 +489,7 @@ func (sh *scheduler) assignWorker(taskDone chan struct{}, wid WorkerID, w *worke w.preparing.add(w.info.Resources, needRes) go func() { - err := req.prepare(req.ctx, w.w) + err := req.prepare(req.ctx, w.wt.worker(w.w)) sh.workersLk.Lock() if err != nil { @@ -519,7 +522,7 @@ func (sh *scheduler) assignWorker(taskDone chan struct{}, wid WorkerID, w *worke case <-sh.closing: } - err = req.work(req.ctx, w.w) + err = req.work(req.ctx, w.wt.worker(w.w)) select { case req.ret <- workerResponse{err: err}: diff --git a/sched_test.go b/sched_test.go index 67a5eeed3..caf7f0b4b 100644 --- a/sched_test.go +++ b/sched_test.go @@ -156,7 +156,10 @@ func addTestWorker(t *testing.T, sched *scheduler, index *stores.Index, name str require.NoError(t, err) sched.newWorkers <- &workerHandle{ - w: w, + w: w, + wt: &workTracker{ + running: map[uint64]storiface.WorkerJob{}, + }, info: info, preparing: &activeResources{}, active: &activeResources{}, diff --git a/sealtasks/task.go b/sealtasks/task.go index 978107c85..ad5ce01bb 100644 --- a/sealtasks/task.go +++ b/sealtasks/task.go @@ -28,6 +28,30 @@ var order = map[TaskType]int{ TTReadUnsealed: 0, } +var shortNames = map[TaskType]string{ + TTAddPiece: "AP ", + + TTPreCommit1: "PC1", + TTPreCommit2: "PC2", + TTCommit1: "C1 ", + TTCommit2: "C2 ", + + TTFinalize: "FIN", + + TTFetch: "GET", + TTUnseal: "UNS", + TTReadUnsealed: "RD ", +} + func (a TaskType) Less(b TaskType) bool { return order[a] < order[b] } + +func (a TaskType) Short() string { + n, ok := shortNames[a] + if !ok { + return "UNK" + } + + return n +} diff --git a/stats.go b/stats.go index dbbee07f3..ee88898a4 100644 --- a/stats.go +++ b/stats.go @@ -20,3 +20,16 @@ func (m *Manager) WorkerStats() map[uint64]storiface.WorkerStats { return out } + +func (m *Manager) WorkerJobs() map[uint64][]storiface.WorkerJob { + m.sched.workersLk.Lock() + defer m.sched.workersLk.Unlock() + + out := map[uint64][]storiface.WorkerJob{} + + for id, handle := range m.sched.workers { + out[uint64(id)] = handle.wt.Running() + } + + return out +} diff --git a/storiface/worker.go b/storiface/worker.go index 0f49e8971..01ef59d36 100644 --- a/storiface/worker.go +++ b/storiface/worker.go @@ -1,5 +1,12 @@ package storiface +import ( + "time" + + "github.com/filecoin-project/sector-storage/sealtasks" + "github.com/filecoin-project/specs-actors/actors/abi" +) + type WorkerInfo struct { Hostname string @@ -24,3 +31,11 @@ type WorkerStats struct { GpuUsed bool CpuUse uint64 } + +type WorkerJob struct { + ID uint64 + Sector abi.SectorID + Task sealtasks.TaskType + + Start time.Time +} diff --git a/work_tracker.go b/work_tracker.go new file mode 100644 index 000000000..f1e243ed2 --- /dev/null +++ b/work_tracker.go @@ -0,0 +1,129 @@ +package sectorstorage + +import ( + "context" + "io" + "sync" + "time" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/specs-actors/actors/abi" + "github.com/filecoin-project/specs-storage/storage" + + "github.com/filecoin-project/sector-storage/sealtasks" + "github.com/filecoin-project/sector-storage/stores" + "github.com/filecoin-project/sector-storage/storiface" +) + +type workTracker struct { + lk sync.Mutex + + ctr uint64 + running map[uint64]storiface.WorkerJob + + // TODO: done, aggregate stats, queue stats, scheduler feedback +} + +func (wt *workTracker) track(sid abi.SectorID, task sealtasks.TaskType) func() { + wt.lk.Lock() + defer wt.lk.Unlock() + + id := wt.ctr + wt.ctr++ + + wt.running[id] = storiface.WorkerJob{ + ID: id, + Sector: sid, + Task: task, + Start: time.Now(), + } + + return func() { + wt.lk.Lock() + defer wt.lk.Unlock() + + delete(wt.running, id) + } +} + +func (wt *workTracker) worker(w Worker) Worker { + return &trackedWorker{ + Worker: w, + tracker: wt, + } +} + +func (wt *workTracker) Running() []storiface.WorkerJob { + wt.lk.Lock() + defer wt.lk.Unlock() + + out := make([]storiface.WorkerJob, 0, len(wt.running)) + for _, job := range wt.running { + out = append(out, job) + } + + return out +} + +type trackedWorker struct { + Worker + + tracker *workTracker +} + +func (t *trackedWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) { + defer t.tracker.track(sector, sealtasks.TTPreCommit1)() + + return t.Worker.SealPreCommit1(ctx, sector, ticket, pieces) +} + +func (t *trackedWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storage.SectorCids, error) { + defer t.tracker.track(sector, sealtasks.TTPreCommit2)() + + return t.Worker.SealPreCommit2(ctx, sector, pc1o) +} + +func (t *trackedWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) { + defer t.tracker.track(sector, sealtasks.TTCommit1)() + + return t.Worker.SealCommit1(ctx, sector, ticket, seed, pieces, cids) +} + +func (t *trackedWorker) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storage.Proof, error) { + defer t.tracker.track(sector, sealtasks.TTCommit2)() + + return t.Worker.SealCommit2(ctx, sector, c1o) +} + +func (t *trackedWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error { + defer t.tracker.track(sector, sealtasks.TTFinalize)() + + return t.Worker.FinalizeSector(ctx, sector, keepUnsealed) +} + +func (t *trackedWorker) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) { + defer t.tracker.track(sector, sealtasks.TTAddPiece)() + + return t.Worker.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData) +} + +func (t *trackedWorker) Fetch(ctx context.Context, s abi.SectorID, ft stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) error { + defer t.tracker.track(s, sealtasks.TTFetch)() + + return t.Worker.Fetch(ctx, s, ft, ptype, am) +} + +func (t *trackedWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) error { + defer t.tracker.track(id, sealtasks.TTUnseal)() + + return t.Worker.UnsealPiece(ctx, id, index, size, randomness, cid) +} + +func (t *trackedWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error { + defer t.tracker.track(id, sealtasks.TTReadUnsealed)() + + return t.Worker.ReadPiece(ctx, writer, id, index, size) +} + +var _ Worker = &trackedWorker{} From fafecdfd17395af434e6053396b08afa71568706 Mon Sep 17 00:00:00 2001 From: yaohcn Date: Thu, 23 Jul 2020 20:34:27 +0800 Subject: [PATCH 179/199] add RPC timeout --- sched.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/sched.go b/sched.go index ed48d097b..af28b1902 100644 --- a/sched.go +++ b/sched.go @@ -258,7 +258,9 @@ func (sh *scheduler) trySched() { continue } - ok, err := task.sel.Ok(task.ctx, task.taskType, sh.spt, worker) + rpcCtx, cancel := context.WithTimeout(task.ctx, SelectorTimeout) + ok, err := task.sel.Ok(rpcCtx, task.taskType, sh.spt, worker) + cancel() if err != nil { log.Errorf("trySched(1) req.sel.Ok error: %+v", err) continue From b2fe2c0e3b38b13040539cfa87df6d3522520fd0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 23 Jul 2020 19:00:20 +0200 Subject: [PATCH 180/199] mock: More accurate WindowPost --- mock/mock.go | 105 +++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 94 insertions(+), 11 deletions(-) diff --git a/mock/mock.go b/mock/mock.go index 55c38967c..55b103ab8 100644 --- a/mock/mock.go +++ b/mock/mock.go @@ -5,7 +5,6 @@ import ( "context" "fmt" "io" - "math" "math/rand" "sync" @@ -34,14 +33,22 @@ type SectorMgr struct { type mockVerif struct{} -func NewMockSectorMgr(ssize abi.SectorSize) *SectorMgr { +func NewMockSectorMgr(ssize abi.SectorSize, genesisSectors []abi.SectorID) *SectorMgr { rt, err := ffiwrapper.SealProofTypeFromSectorSize(ssize) if err != nil { panic(err) } + sectors := make(map[abi.SectorID]*sectorState) + for _, sid := range genesisSectors { + sectors[sid] = §orState{ + failed: false, + state: stateCommit, + } + } + return &SectorMgr{ - sectors: make(map[abi.SectorID]*sectorState), + sectors: sectors, pieces: map[cid.Cid][]byte{}, sectorSize: ssize, nextSectorID: 5, @@ -258,27 +265,57 @@ func AddOpFinish(ctx context.Context) (context.Context, func()) { } func (mgr *SectorMgr) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, error) { - return generateFakePoSt(sectorInfo, abi.RegisteredSealProof.RegisteredWinningPoStProof), nil + return generateFakePoSt(sectorInfo, abi.RegisteredSealProof.RegisteredWinningPoStProof, randomness), nil } func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, []abi.SectorID, error) { - return generateFakePoSt(sectorInfo, abi.RegisteredSealProof.RegisteredWindowPoStProof), nil, nil + si := make([]abi.SectorInfo, 0, len(sectorInfo)) + var skipped []abi.SectorID + + for _, info := range sectorInfo { + sid := abi.SectorID{ + Miner: minerID, + Number: info.SectorNumber, + } + + _, found := mgr.sectors[sid] + + if found && !mgr.sectors[sid].failed { + si = append(si, info) + } else { + skipped = append(skipped, sid) + } + } + + return generateFakePoSt(si, abi.RegisteredSealProof.RegisteredWindowPoStProof, randomness), skipped, nil } -func generateFakePoSt(sectorInfo []abi.SectorInfo, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error)) []abi.PoStProof { - se, err := sectorInfo[0].SealProof.WindowPoStPartitionSectors() +func generateFakePoSt(sectorInfo []abi.SectorInfo, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error), randomness abi.PoStRandomness) []abi.PoStProof { + sectors := abi.NewBitField() + for _, info := range sectorInfo { + sectors.Set(uint64(info.SectorNumber)) + } + + wp, err := rpt(sectorInfo[0].SealProof) if err != nil { panic(err) } - wp, err := rpt(sectorInfo[0].SealProof) + + var proofBuf bytes.Buffer + + _, err = proofBuf.Write(randomness) if err != nil { panic(err) } + if err := sectors.MarshalCBOR(&proofBuf); err != nil { + panic(err) + } + return []abi.PoStProof{ { PoStProof: wp, - ProofBytes: make([]byte, 192*int(math.Ceil(float64(len(sectorInfo))/float64(se)))), + ProofBytes: proofBuf.Bytes(), }, } } @@ -335,8 +372,18 @@ func (mgr *SectorMgr) Remove(ctx context.Context, sector abi.SectorID) error { return nil } -func (mgr *SectorMgr) CheckProvable(context.Context, abi.RegisteredSealProof, []abi.SectorID) ([]abi.SectorID, error) { - return nil, nil +func (mgr *SectorMgr) CheckProvable(ctx context.Context, spt abi.RegisteredSealProof, ids []abi.SectorID) ([]abi.SectorID, error) { + var bad []abi.SectorID + + for _, sid := range ids { + _, found := mgr.sectors[sid] + + if !found || mgr.sectors[sid].failed { + bad = append(bad, sid) + } + } + + return bad, nil } func (m mockVerif) VerifySeal(svi abi.SealVerifyInfo) (bool, error) { @@ -358,6 +405,42 @@ func (m mockVerif) VerifyWinningPoSt(ctx context.Context, info abi.WinningPoStVe } func (m mockVerif) VerifyWindowPoSt(ctx context.Context, info abi.WindowPoStVerifyInfo) (bool, error) { + if len(info.Proofs) != 1 { + return false, xerrors.Errorf("expected 1 proof entry") + } + + proof := info.Proofs[0] + + if !bytes.Equal(proof.ProofBytes[:len(info.Randomness)], info.Randomness) { + return false, xerrors.Errorf("bad randomness") + } + + sectors := abi.NewBitField() + if err := sectors.UnmarshalCBOR(bytes.NewReader(proof.ProofBytes[len(info.Randomness):])); err != nil { + return false, xerrors.Errorf("unmarshaling sectors bitfield from \"proof\": %w", err) + } + + challenged := abi.NewBitField() + for _, sector := range info.ChallengedSectors { + challenged.Set(uint64(sector.SectorNumber)) + } + + { + b1, err := sectors.MarshalJSON() + if err != nil { + return false, err + } + + b2, err := challenged.MarshalJSON() + if err != nil { + return false, err + } + + if !bytes.Equal(b1, b2) { + return false, xerrors.Errorf("proven and challenged sector sets didn't match: %s != !s", string(b1), string(b2)) + } + } + return true, nil } From 3791008b011c4aaa2572460f52eb5aa1771e7704 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 23 Jul 2020 19:46:51 +0200 Subject: [PATCH 181/199] mock: Fix tests --- mock/mock_test.go | 2 +- testworker_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/mock/mock_test.go b/mock/mock_test.go index 5f4b9c428..c7d43e8b9 100644 --- a/mock/mock_test.go +++ b/mock/mock_test.go @@ -9,7 +9,7 @@ import ( ) func TestOpFinish(t *testing.T) { - sb := NewMockSectorMgr(2048) + sb := NewMockSectorMgr(2048, nil) sid, pieces, err := sb.StageFakeData(123) if err != nil { diff --git a/testworker_test.go b/testworker_test.go index 5ca51b771..bdfff1915 100644 --- a/testworker_test.go +++ b/testworker_test.go @@ -37,7 +37,7 @@ func newTestWorker(wcfg WorkerConfig, lstor *stores.Local) *testWorker { acceptTasks: acceptTasks, lstor: lstor, - mockSeal: mock.NewMockSectorMgr(ssize), + mockSeal: mock.NewMockSectorMgr(ssize, nil), } } From ff9ffddd5769696b6769e9a89aa7869985ad3b03 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 24 Jul 2020 16:43:41 +0200 Subject: [PATCH 182/199] remote: Limit parallel fetches --- manager.go | 4 +++- manager_test.go | 2 +- stores/remote.go | 21 ++++++++++++++++++++- 3 files changed, 24 insertions(+), 3 deletions(-) diff --git a/manager.go b/manager.go index 063456fa9..64dd2dcbc 100644 --- a/manager.go +++ b/manager.go @@ -76,6 +76,8 @@ type Manager struct { } type SealerConfig struct { + ParallelFetchLimit int + // Local worker config AllowPreCommit1 bool AllowPreCommit2 bool @@ -96,7 +98,7 @@ func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg return nil, xerrors.Errorf("creating prover instance: %w", err) } - stor := stores.NewRemote(lstor, si, http.Header(sa)) + stor := stores.NewRemote(lstor, si, http.Header(sa), sc.ParallelFetchLimit) m := &Manager{ scfg: cfg, diff --git a/manager_test.go b/manager_test.go index 9cee303c5..10e6a5020 100644 --- a/manager_test.go +++ b/manager_test.go @@ -95,7 +95,7 @@ func newTestMgr(ctx context.Context, t *testing.T) (*Manager, *stores.Local, *st prover, err := ffiwrapper.New(&readonlyProvider{stor: lstor}, cfg) require.NoError(t, err) - stor := stores.NewRemote(lstor, si, nil) + stor := stores.NewRemote(lstor, si, nil, 6000) m := &Manager{ scfg: cfg, diff --git a/stores/remote.go b/stores/remote.go index c78f026f4..ee68b5ef6 100644 --- a/stores/remote.go +++ b/stores/remote.go @@ -29,6 +29,8 @@ type Remote struct { index SectorIndex auth http.Header + limit chan struct{} + fetchLk sync.Mutex fetching map[abi.SectorID]chan struct{} } @@ -41,12 +43,14 @@ func (r *Remote) RemoveCopies(ctx context.Context, s abi.SectorID, types SectorF return r.local.RemoveCopies(ctx, s, types) } -func NewRemote(local *Local, index SectorIndex, auth http.Header) *Remote { +func NewRemote(local *Local, index SectorIndex, auth http.Header, fetchLimit int) *Remote { return &Remote{ local: local, index: index, auth: auth, + limit: make(chan struct{}, fetchLimit), + fetching: map[abi.SectorID]chan struct{}{}, } } @@ -165,6 +169,21 @@ func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, spt abi. func (r *Remote) fetch(ctx context.Context, url, outname string) error { log.Infof("Fetch %s -> %s", url, outname) + if len(r.limit) >= cap(r.limit) { + log.Infof("Throttling fetch, %d already running", len(r.limit)) + } + + // TODO: Smarter throttling + // * Priority (just going sequentially is still pretty good) + // * Per interface + // * Aware of remote load + select { + case r.limit <- struct{}{}: + defer func() { <-r.limit }() + case <-ctx.Done(): + return xerrors.Errorf("context error while waiting for fetch limiter: %w", ctx.Err()) + } + req, err := http.NewRequest("GET", url, nil) if err != nil { return xerrors.Errorf("request: %w", err) From 94e20ffee5d2278efe14418539b3e8dd4fb39755 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 24 Jul 2020 16:54:00 +0200 Subject: [PATCH 183/199] remote: Fetch into temp files --- stores/remote.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/stores/remote.go b/stores/remote.go index ee68b5ef6..2b6b19384 100644 --- a/stores/remote.go +++ b/stores/remote.go @@ -150,12 +150,18 @@ func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, spt abi. // TODO: see what we have local, prefer that for _, url := range info.URLs { - err := r.fetch(ctx, url, dest) + tempDest := dest + ".fetch" + + err := r.fetch(ctx, url, tempDest) if err != nil { - merr = multierror.Append(merr, xerrors.Errorf("fetch error %s (storage %s) -> %s: %w", url, info.ID, dest, err)) + merr = multierror.Append(merr, xerrors.Errorf("fetch error %s (storage %s) -> %s: %w", url, info.ID, tempDest, err)) continue } + if err := move(tempDest, dest); err != nil { + return "", "", "", xerrors.Errorf("fetch move error (storage %s) %s -> %s: %w", info.ID, tempDest, dest, err) + } + if merr != nil { log.Warnw("acquireFromRemote encountered errors when fetching sector from remote", "errors", merr) } From 295300ff4671290c9751fdbbccef9b17fc182cae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 24 Jul 2020 19:39:25 +0200 Subject: [PATCH 184/199] stores: Fix remote fetch move --- go.mod | 15 ++++++++------- go.sum | 47 +++++++++++++++++++++++++++++++---------------- stores/local.go | 6 +++++- stores/remote.go | 24 ++++++++++++++++++++++-- 4 files changed, 66 insertions(+), 26 deletions(-) diff --git a/go.mod b/go.mod index 994a99a3f..4912e5d1d 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/filecoin-project/filecoin-ffi v0.30.4-0.20200716204036-cddc56607e1d github.com/filecoin-project/go-bitfield v0.0.4-0.20200703174658-f4a5758051a1 github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f - github.com/filecoin-project/go-paramfetch v0.0.1 + github.com/filecoin-project/go-paramfetch v0.0.2-0.20200218225740-47c639bab663 github.com/filecoin-project/specs-actors v0.6.1 github.com/filecoin-project/specs-storage v0.1.1-0.20200622113353-88a9704877ea github.com/google/uuid v1.1.1 @@ -16,19 +16,20 @@ require ( github.com/hashicorp/go-multierror v1.0.0 github.com/ipfs/go-cid v0.0.6 github.com/ipfs/go-ipfs-files v0.0.7 + github.com/ipfs/go-ipld-cbor v0.0.5-0.20200204214505-252690b78669 // indirect github.com/ipfs/go-log v1.0.3 github.com/ipfs/go-log/v2 v2.0.3 github.com/mattn/go-isatty v0.0.9 // indirect github.com/mitchellh/go-homedir v1.1.0 github.com/stretchr/testify v1.4.0 go.opencensus.io v0.22.3 - go.uber.org/atomic v1.5.1 // indirect - go.uber.org/zap v1.13.0 // indirect - golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 // indirect - golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f // indirect - golang.org/x/sys v0.0.0-20200107162124-548cf772de50 // indirect - golang.org/x/tools v0.0.0-20200108195415-316d2f248479 // indirect + go.uber.org/zap v1.14.1 // indirect + golang.org/x/crypto v0.0.0-20200317142112-1b76d66859c6 // indirect + golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect + golang.org/x/sys v0.0.0-20200317113312-5766fd39f98d // indirect + golang.org/x/tools v0.0.0-20200318150045-ba25ddc85566 // indirect golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 + honnef.co/go/tools v0.0.1-2020.1.3 // indirect ) replace github.com/filecoin-project/filecoin-ffi => ./extern/filecoin-ffi diff --git a/go.sum b/go.sum index 67fa4d38d..a1d803274 100644 --- a/go.sum +++ b/go.sum @@ -30,6 +30,7 @@ github.com/fatih/color v1.8.0/go.mod h1:3l45GVGkyrnYNl9HoIjnp2NnNWvh6hLAqD8yTfGj github.com/filecoin-project/go-address v0.0.2-0.20200218010043-eb9bb40ed5be h1:TooKBwR/g8jG0hZ3lqe9S5sy2vTUcLOZLlz3M5wGn2E= github.com/filecoin-project/go-address v0.0.2-0.20200218010043-eb9bb40ed5be/go.mod h1:SAOwJoakQ8EPjwNIsiakIQKsoKdkcbx8U3IapgCg9R0= github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200131012142-05d80eeccc5e/go.mod h1:boRtQhzmxNocrMxOXo1NYn4oUc1NGvR8tEa79wApNXg= +github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200424220931-6263827e49f2 h1:jamfsxfK0Q9yCMHt8MPWx7Aa/O9k2Lve8eSc6FILYGQ= github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200424220931-6263827e49f2/go.mod h1:boRtQhzmxNocrMxOXo1NYn4oUc1NGvR8tEa79wApNXg= github.com/filecoin-project/go-bitfield v0.0.0-20200416002808-b3ee67ec9060/go.mod h1:iodsLxOFZnqKtjj2zkgqzoGNrv6vUqj69AT/J8DKXEw= github.com/filecoin-project/go-bitfield v0.0.1 h1:Xg/JnrqqE77aJVKdbEyR04n9FZQWhwrN+buDgQCVpZU= @@ -40,8 +41,8 @@ github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMX github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f h1:GxJzR3oRIMTPtpZ0b7QF8FKPK6/iPAc7trhlL5k/g+s= github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= -github.com/filecoin-project/go-paramfetch v0.0.1 h1:gV7bs5YaqlgpGFMiLxInGK2L1FyCXUE0rimz4L7ghoE= -github.com/filecoin-project/go-paramfetch v0.0.1/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= +github.com/filecoin-project/go-paramfetch v0.0.2-0.20200218225740-47c639bab663 h1:eYxi6vI5CyeXD15X1bB3bledDXbqKxqf0wQzTLgwYwA= +github.com/filecoin-project/go-paramfetch v0.0.2-0.20200218225740-47c639bab663/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= github.com/filecoin-project/specs-actors v0.3.0/go.mod h1:nQYnFbQ7Y0bHZyq6HDEuVlCPR+U3z5Q3wMOQ+2aiV+Y= github.com/filecoin-project/specs-actors v0.6.1 h1:rhHlEzqcuuQU6oKc4csuq+/kQBDZ4EXtSomoN2XApCA= github.com/filecoin-project/specs-actors v0.6.1/go.mod h1:dRdy3cURykh2R8O/DKqy8olScl70rmIS7GrB4hB1IDY= @@ -58,6 +59,7 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= @@ -92,6 +94,8 @@ github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyB github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= github.com/ipfs/go-ipld-cbor v0.0.4 h1:Aw3KPOKXjvrm6VjwJvFf1F1ekR/BH3jdof3Bk7OTiSA= github.com/ipfs/go-ipld-cbor v0.0.4/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= +github.com/ipfs/go-ipld-cbor v0.0.5-0.20200204214505-252690b78669 h1:jIVle1vGSzxyUhseYNEqd7qcDVRrIbJ7UxGwao70cF0= +github.com/ipfs/go-ipld-cbor v0.0.5-0.20200204214505-252690b78669/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= github.com/ipfs/go-ipld-format v0.0.1 h1:HCu4eB/Gh+KD/Q0M8u888RFkorTWNIL3da4oc5dwc80= github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= github.com/ipfs/go-ipld-format v0.0.2 h1:OVAGlyYT6JPZ0pEfGntFPS40lfrDmaDbQwNHEY2G9Zs= @@ -105,6 +109,7 @@ github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBW github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 h1:QG4CGBqCeuBo6aZlGAamSkxWdgWfZGeE49eUOWJPA4c= github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52/go.mod h1:fdg+/X9Gg4AsAIzWpEHwnqd+QY3b7lajxyjE1m4hkq4= github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= +github.com/jbenet/goprocess v0.1.3 h1:YKyIEECS/XvcfHtBzxtjBBbWK+MbvA6dG8ASiqwvr10= github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -228,25 +233,27 @@ github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e h1:JY8o/ebU github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.5.1 h1:rsqfU5vBkVknbhUGbAUwQKR2H4ItV8tjJ+6kJX4cxHM= -go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.4.0 h1:f3WCSC2KzAcBXGATIxAB1E2XuCpNU255wNKZ505qi3E= go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0 h1:nR6NoDBgAf67s68NhaXbsojM+2gxp3S1hWkHDl27pVU= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo= +go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -255,17 +262,19 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200317142112-1b76d66859c6 h1:TjszyFsQsyZNHwdVdZ5m7bjmreu0znc2kRYsEml9/Ww= +golang.org/x/crypto v0.0.0-20200317142112-1b76d66859c6/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -274,12 +283,14 @@ golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180202135801-37707fdb30a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -295,8 +306,8 @@ golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae h1:QoJmnb9uyPCrH8GIg9uRLn4Ta45yhcQtpymCd0AavO8= golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200107162124-548cf772de50 h1:YvQ10rzcqWXLlJZ3XCUoO25savxmscf4+SC+ZqiCHhA= -golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200317113312-5766fd39f98d h1:62ap6LNOjDU6uGmKXHJbSfciMoV+FeI1sRXx/pLDL44= +golang.org/x/sys v0.0.0-20200317113312-5766fd39f98d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -310,9 +321,11 @@ golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200108195415-316d2f248479 h1:csuS+MHeEA2eWhyjQCMaPMq4z1+/PohkBSjJZHSIbOE= -golang.org/x/tools v0.0.0-20200108195415-316d2f248479/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200318150045-ba25ddc85566 h1:OXjomkWHhzUx4+HldlJ2TsMxJdWgEo5CTtspD1wdhdk= +golang.org/x/tools v0.0.0-20200318150045-ba25ddc85566/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= @@ -341,5 +354,7 @@ gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M= howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= diff --git a/stores/local.go b/stores/local.go index cbc9dbae1..f1affb397 100644 --- a/stores/local.go +++ b/stores/local.go @@ -3,7 +3,6 @@ package stores import ( "context" "encoding/json" - "github.com/filecoin-project/sector-storage/fsutil" "io/ioutil" "math/bits" "math/rand" @@ -14,6 +13,7 @@ import ( "golang.org/x/xerrors" + "github.com/filecoin-project/sector-storage/fsutil" "github.com/filecoin-project/specs-actors/actors/abi" ) @@ -179,6 +179,10 @@ func (st *Local) OpenPath(ctx context.Context, p string) error { } for _, ent := range ents { + if ent.Name() == FetchTempSubdir { + continue + } + sid, err := ParseSectorID(ent.Name()) if err != nil { return xerrors.Errorf("parse sector id %s: %w", ent.Name(), err) diff --git a/stores/remote.go b/stores/remote.go index 2b6b19384..42b730b40 100644 --- a/stores/remote.go +++ b/stores/remote.go @@ -11,6 +11,7 @@ import ( "net/url" "os" gopath "path" + "path/filepath" "sort" "sync" @@ -24,6 +25,8 @@ import ( "github.com/filecoin-project/sector-storage/tarutil" ) +var FetchTempSubdir = "fetching" + type Remote struct { local *Local index SectorIndex @@ -124,6 +127,16 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, spt abi.Regi return paths, stores, nil } +func tempDest(spath string) (string, error) { + st, b := filepath.Split(spath) + tempdir := filepath.Join(st, FetchTempSubdir) + if err := os.MkdirAll(tempdir, 755); err != nil { + return "", xerrors.Errorf("creating temp fetch dir: %w", err) + } + + return filepath.Join(tempdir, b), nil +} + func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, spt abi.RegisteredSealProof, fileType SectorFileType, pathType PathType, op AcquireMode) (string, ID, string, error) { si, err := r.index.StorageFindSector(ctx, s, fileType, false) if err != nil { @@ -150,9 +163,16 @@ func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, spt abi. // TODO: see what we have local, prefer that for _, url := range info.URLs { - tempDest := dest + ".fetch" + tempDest, err := tempDest(dest) + if err != nil { + return "", "", "", err + } - err := r.fetch(ctx, url, tempDest) + if err := os.RemoveAll(dest); err != nil { + return "", "", "", xerrors.Errorf("removing dest: %w", err) + } + + err = r.fetch(ctx, url, tempDest) if err != nil { merr = multierror.Append(merr, xerrors.Errorf("fetch error %s (storage %s) -> %s: %w", url, info.ID, tempDest, err)) continue From 7aa1ccf726afa9a4ab68908d9f89c7be190d81a2 Mon Sep 17 00:00:00 2001 From: ocean <3408598@qq.com> Date: Mon, 27 Jul 2020 14:21:29 +0800 Subject: [PATCH 185/199] modify workerRequest --- sched.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/sched.go b/sched.go index af28b1902..a7a6d3e86 100644 --- a/sched.go +++ b/sched.go @@ -122,6 +122,7 @@ type workerRequest struct { index int // The index of the item in the heap. + indexHeap int ret chan<- workerResponse ctx context.Context } @@ -250,6 +251,7 @@ func (sh *scheduler) trySched() { task := (*sh.schedQueue)[sqi] needRes := ResourceTable[task.taskType][sh.spt] + task.indexHeap = sqi for wnd, windowRequest := range sh.openWindows { worker := sh.workers[windowRequest.worker] @@ -312,7 +314,7 @@ func (sh *scheduler) trySched() { needRes := ResourceTable[task.taskType][sh.spt] selectedWindow := -1 - for _, wnd := range acceptableWindows[sqi+scheduled] { + for _, wnd := range acceptableWindows[task.indexHeap] { wid := sh.openWindows[wnd].worker wr := sh.workers[wid].info.Resources From 9fd91bb70aeb0fc7048f6fa1e0e10380573a8948 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 27 Jul 2020 12:17:09 +0200 Subject: [PATCH 186/199] sched: function to dump scheduler state --- manager.go | 4 ++++ sched.go | 62 +++++++++++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 63 insertions(+), 3 deletions(-) diff --git a/manager.go b/manager.go index 64dd2dcbc..4791eb5e6 100644 --- a/manager.go +++ b/manager.go @@ -494,6 +494,10 @@ func (m *Manager) FsStat(ctx context.Context, id stores.ID) (fsutil.FsStat, erro return m.storage.FsStat(ctx, id) } +func (m *Manager) SchedDiag(ctx context.Context) (interface{}, error) { + return m.sched.Info(ctx) +} + func (m *Manager) Close(ctx context.Context) error { return m.sched.Close(ctx) } diff --git a/sched.go b/sched.go index a7a6d3e86..239b52063 100644 --- a/sched.go +++ b/sched.go @@ -69,6 +69,8 @@ type scheduler struct { schedQueue *requestQueue openWindows []*schedWindowRequest + info chan func(interface{}) + closing chan struct{} closed chan struct{} testSync chan struct{} // used for testing @@ -148,6 +150,8 @@ func newScheduler(spt abi.RegisteredSealProof) *scheduler { schedQueue: &requestQueue{}, + info: make(chan func(interface{})), + closing: make(chan struct{}), closed: make(chan struct{}), } @@ -193,6 +197,17 @@ func (r *workerRequest) respond(err error) { } } +type SchedDiagRequestInfo struct { + Sector abi.SectorID + TaskType sealtasks.TaskType + Priority int +} + +type SchedDiagInfo struct { + Requests []SchedDiagRequestInfo + OpenWindows []WorkerID +} + func (sh *scheduler) runSched() { defer close(sh.closed) @@ -217,6 +232,9 @@ func (sh *scheduler) runSched() { sh.openWindows = append(sh.openWindows, req) sh.trySched() + case ireq := <-sh.info: + ireq(sh.diag()) + case <-sh.closing: sh.schedClose() return @@ -224,6 +242,26 @@ func (sh *scheduler) runSched() { } } +func (sh *scheduler) diag() SchedDiagInfo { + var out SchedDiagInfo + + for sqi := 0; sqi < sh.schedQueue.Len(); sqi++ { + task := (*sh.schedQueue)[sqi] + + out.Requests = append(out.Requests, SchedDiagRequestInfo{ + Sector: task.sector, + TaskType: task.taskType, + Priority: task.priority, + }) + } + + for _, window := range sh.openWindows { + out.OpenWindows = append(out.OpenWindows, window.worker) + } + + return out +} + func (sh *scheduler) trySched() { /* This assigns tasks to workers based on: @@ -244,7 +282,7 @@ func (sh *scheduler) trySched() { windows := make([]schedWindow, len(sh.openWindows)) acceptableWindows := make([][]int, sh.schedQueue.Len()) - log.Debugf("trySched %d queued; %d open windows", sh.schedQueue.Len(), len(windows)) + log.Debugf("SCHED %d queued; %d open windows", sh.schedQueue.Len(), len(windows)) // Step 1 for sqi := 0; sqi < sh.schedQueue.Len(); sqi++ { @@ -306,6 +344,9 @@ func (sh *scheduler) trySched() { }) } + log.Debugf("SCHED windows: %+v", windows) + log.Debugf("SCHED Acceptable win: %+v", acceptableWindows) + // Step 2 scheduled := 0 @@ -318,14 +359,14 @@ func (sh *scheduler) trySched() { wid := sh.openWindows[wnd].worker wr := sh.workers[wid].info.Resources - log.Debugf("trySched try assign sqi:%d sector %d to window %d", sqi, task.sector.Number, wnd) + log.Debugf("SCHED try assign sqi:%d sector %d to window %d", sqi, task.sector.Number, wnd) // TODO: allow bigger windows if !windows[wnd].allocated.canHandleRequest(needRes, wid, wr) { continue } - log.Debugf("trySched ASSIGNED sqi:%d sector %d to window %d", sqi, task.sector.Number, wnd) + log.Debugf("SCHED ASSIGNED sqi:%d sector %d to window %d", sqi, task.sector.Number, wnd) windows[wnd].allocated.add(wr, needRes) @@ -623,6 +664,21 @@ func (sh *scheduler) schedClose() { } } +func (sh *scheduler) Info(ctx context.Context) (interface{}, error) { + ch := make(chan interface{}, 1) + + sh.info <- func(res interface{}) { + ch <- res + } + + select { + case res := <- ch: + return res, nil + case <-ctx.Done(): + return nil, ctx.Err() + } +} + func (sh *scheduler) Close(ctx context.Context) error { close(sh.closing) select { From 510897a4979877b4d51fecbfaac9f9436f201ee6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 27 Jul 2020 13:20:18 +0200 Subject: [PATCH 187/199] container/heap doesn't provide sorted array --- request_queue.go | 19 +++++++++-------- request_queue_test.go | 48 +++++++++++++++++++++++++++++++++++-------- sched.go | 5 ++--- 3 files changed, 52 insertions(+), 20 deletions(-) diff --git a/request_queue.go b/request_queue.go index 0d35e9f1d..b40375dc8 100644 --- a/request_queue.go +++ b/request_queue.go @@ -1,6 +1,6 @@ package sectorstorage -import "container/heap" +import "sort" type requestQueue []*workerRequest @@ -24,21 +24,22 @@ func (q requestQueue) Swap(i, j int) { q[j].index = j } -func (q *requestQueue) Push(x interface{}) { +func (q *requestQueue) Push(x *workerRequest) { n := len(*q) - item := x.(*workerRequest) + item := x item.index = n *q = append(*q, item) + sort.Sort(q) } -func (q *requestQueue) Pop() interface{} { +func (q *requestQueue) Remove(i int) *workerRequest { old := *q n := len(old) - item := old[n-1] - old[n-1] = nil // avoid memory leak - item.index = -1 // for safety + item := old[i] + old[i] = old[n - 1] + old[n - 1] = nil + item.index = -1 *q = old[0 : n-1] + sort.Sort(q) return item } - -var _ heap.Interface = &requestQueue{} diff --git a/request_queue_test.go b/request_queue_test.go index 9bf231e39..cb4a5d5dd 100644 --- a/request_queue_test.go +++ b/request_queue_test.go @@ -1,7 +1,7 @@ package sectorstorage import ( - "container/heap" + "fmt" "testing" "github.com/filecoin-project/sector-storage/sealtasks" @@ -10,19 +10,51 @@ import ( func TestRequestQueue(t *testing.T) { rq := &requestQueue{} - heap.Push(rq, &workerRequest{taskType: sealtasks.TTAddPiece}) - heap.Push(rq, &workerRequest{taskType: sealtasks.TTPreCommit1}) - heap.Push(rq, &workerRequest{taskType: sealtasks.TTPreCommit2}) - heap.Push(rq, &workerRequest{taskType: sealtasks.TTPreCommit1}) - heap.Push(rq, &workerRequest{taskType: sealtasks.TTAddPiece}) + rq.Push(&workerRequest{taskType: sealtasks.TTAddPiece}) + rq.Push(&workerRequest{taskType: sealtasks.TTPreCommit1}) + rq.Push(&workerRequest{taskType: sealtasks.TTPreCommit2}) + rq.Push(&workerRequest{taskType: sealtasks.TTPreCommit1}) + rq.Push(&workerRequest{taskType: sealtasks.TTAddPiece}) - pt := heap.Pop(rq).(*workerRequest) + dump := func(s string) { + fmt.Println("---") + fmt.Println(s) + + for sqi := 0; sqi < rq.Len(); sqi++ { + task := (*rq)[sqi] + fmt.Println(sqi, task.taskType) + } + } + + dump("start") + + pt := rq.Remove(0) + + dump("pop 1") if pt.taskType != sealtasks.TTPreCommit2 { t.Error("expected precommit2, got", pt.taskType) } - pt = heap.Pop(rq).(*workerRequest) + pt = rq.Remove(0) + + dump("pop 2") + + if pt.taskType != sealtasks.TTPreCommit1 { + t.Error("expected precommit1, got", pt.taskType) + } + + pt = rq.Remove(1) + + dump("pop 3") + + if pt.taskType != sealtasks.TTAddPiece { + t.Error("expected addpiece, got", pt.taskType) + } + + pt = rq.Remove(0) + + dump("pop 4") if pt.taskType != sealtasks.TTPreCommit1 { t.Error("expected precommit1, got", pt.taskType) diff --git a/sched.go b/sched.go index 239b52063..b549eb7c9 100644 --- a/sched.go +++ b/sched.go @@ -1,7 +1,6 @@ package sectorstorage import ( - "container/heap" "context" "fmt" "math/rand" @@ -222,7 +221,7 @@ func (sh *scheduler) runSched() { sh.dropWorker(wid) case req := <-sh.schedule: - heap.Push(sh.schedQueue, req) + sh.schedQueue.Push(req) sh.trySched() if sh.testSync != nil { @@ -381,7 +380,7 @@ func (sh *scheduler) trySched() { windows[selectedWindow].todo = append(windows[selectedWindow].todo, task) - heap.Remove(sh.schedQueue, sqi) + sh.schedQueue.Remove(sqi) sqi-- scheduled++ } From 9377cb376d25f79dfd6b8bbf4141da8d17ffdd32 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 27 Jul 2020 13:21:36 +0200 Subject: [PATCH 188/199] gofmt --- request_queue.go | 4 ++-- sched.go | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/request_queue.go b/request_queue.go index b40375dc8..85d3abf46 100644 --- a/request_queue.go +++ b/request_queue.go @@ -36,8 +36,8 @@ func (q *requestQueue) Remove(i int) *workerRequest { old := *q n := len(old) item := old[i] - old[i] = old[n - 1] - old[n - 1] = nil + old[i] = old[n-1] + old[n-1] = nil item.index = -1 *q = old[0 : n-1] sort.Sort(q) diff --git a/sched.go b/sched.go index b549eb7c9..ed648bf19 100644 --- a/sched.go +++ b/sched.go @@ -124,8 +124,8 @@ type workerRequest struct { index int // The index of the item in the heap. indexHeap int - ret chan<- workerResponse - ctx context.Context + ret chan<- workerResponse + ctx context.Context } type workerResponse struct { @@ -203,7 +203,7 @@ type SchedDiagRequestInfo struct { } type SchedDiagInfo struct { - Requests []SchedDiagRequestInfo + Requests []SchedDiagRequestInfo OpenWindows []WorkerID } @@ -671,7 +671,7 @@ func (sh *scheduler) Info(ctx context.Context) (interface{}, error) { } select { - case res := <- ch: + case res := <-ch: return res, nil case <-ctx.Done(): return nil, ctx.Err() From 366de97ab5530f60d681b0afc3373c36a6eabca4 Mon Sep 17 00:00:00 2001 From: jackoelv Date: Tue, 28 Jul 2020 18:20:58 +0800 Subject: [PATCH 189/199] Update local.go StorageBestAlloc returns a sorted slice of storage paths. if no break, It would alloc the worst-last path but not the first-best path. --- stores/local.go | 1 + 1 file changed, 1 insertion(+) diff --git a/stores/local.go b/stores/local.go index 26b7ccb75..21819cd7a 100644 --- a/stores/local.go +++ b/stores/local.go @@ -273,6 +273,7 @@ func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, spt abi.Re best = filepath.Join(p.local, fileType.String(), SectorName(sid)) bestID = si.ID + break } if best == "" { From 3ee28c3b6d9ad17d9a9783801f7a39d49fc95c11 Mon Sep 17 00:00:00 2001 From: Aayush Rajasekaran Date: Thu, 30 Jul 2020 00:54:28 -0400 Subject: [PATCH 190/199] Update go-bitfield and specs-actors --- ffiwrapper/partialfile.go | 7 ++++++- go.mod | 11 +++++------ go.sum | 34 +++++++++++++++++++--------------- 3 files changed, 30 insertions(+), 22 deletions(-) diff --git a/ffiwrapper/partialfile.go b/ffiwrapper/partialfile.go index 8c4fdcc72..b1ab8c53c 100644 --- a/ffiwrapper/partialfile.go +++ b/ffiwrapper/partialfile.go @@ -145,10 +145,15 @@ func openPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*partialFil return xerrors.Errorf("getting trailer run iterator: %w", err) } - lastSet, err := rlepluslazy.LastIndex(it, true) + f, err := rlepluslazy.Fill(it) + if err != nil { + return xerrors.Errorf("filling bitfield: %w", err) + } + lastSet, err := rlepluslazy.Count(f) if err != nil { return xerrors.Errorf("finding last set byte index: %w", err) } + if lastSet > uint64(maxPieceSize) { return xerrors.Errorf("last set byte at index higher than sector size: %d > %d", lastSet, maxPieceSize) } diff --git a/go.mod b/go.mod index 4912e5d1d..8398c98a3 100644 --- a/go.mod +++ b/go.mod @@ -6,10 +6,10 @@ require ( github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e github.com/elastic/go-sysinfo v1.3.0 github.com/filecoin-project/filecoin-ffi v0.30.4-0.20200716204036-cddc56607e1d - github.com/filecoin-project/go-bitfield v0.0.4-0.20200703174658-f4a5758051a1 + github.com/filecoin-project/go-bitfield v0.1.2 github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f github.com/filecoin-project/go-paramfetch v0.0.2-0.20200218225740-47c639bab663 - github.com/filecoin-project/specs-actors v0.6.1 + github.com/filecoin-project/specs-actors v0.8.2 github.com/filecoin-project/specs-storage v0.1.1-0.20200622113353-88a9704877ea github.com/google/uuid v1.1.1 github.com/gorilla/mux v1.7.4 @@ -17,13 +17,12 @@ require ( github.com/ipfs/go-cid v0.0.6 github.com/ipfs/go-ipfs-files v0.0.7 github.com/ipfs/go-ipld-cbor v0.0.5-0.20200204214505-252690b78669 // indirect - github.com/ipfs/go-log v1.0.3 - github.com/ipfs/go-log/v2 v2.0.3 + github.com/ipfs/go-log v1.0.4 + github.com/ipfs/go-log/v2 v2.0.5 github.com/mattn/go-isatty v0.0.9 // indirect github.com/mitchellh/go-homedir v1.1.0 - github.com/stretchr/testify v1.4.0 + github.com/stretchr/testify v1.6.1 go.opencensus.io v0.22.3 - go.uber.org/zap v1.14.1 // indirect golang.org/x/crypto v0.0.0-20200317142112-1b76d66859c6 // indirect golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect golang.org/x/sys v0.0.0-20200317113312-5766fd39f98d // indirect diff --git a/go.sum b/go.sum index a1d803274..942ff725c 100644 --- a/go.sum +++ b/go.sum @@ -2,7 +2,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= -github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= @@ -30,13 +29,12 @@ github.com/fatih/color v1.8.0/go.mod h1:3l45GVGkyrnYNl9HoIjnp2NnNWvh6hLAqD8yTfGj github.com/filecoin-project/go-address v0.0.2-0.20200218010043-eb9bb40ed5be h1:TooKBwR/g8jG0hZ3lqe9S5sy2vTUcLOZLlz3M5wGn2E= github.com/filecoin-project/go-address v0.0.2-0.20200218010043-eb9bb40ed5be/go.mod h1:SAOwJoakQ8EPjwNIsiakIQKsoKdkcbx8U3IapgCg9R0= github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200131012142-05d80eeccc5e/go.mod h1:boRtQhzmxNocrMxOXo1NYn4oUc1NGvR8tEa79wApNXg= -github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200424220931-6263827e49f2 h1:jamfsxfK0Q9yCMHt8MPWx7Aa/O9k2Lve8eSc6FILYGQ= github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200424220931-6263827e49f2/go.mod h1:boRtQhzmxNocrMxOXo1NYn4oUc1NGvR8tEa79wApNXg= +github.com/filecoin-project/go-amt-ipld/v2 v2.1.0/go.mod h1:nfFPoGyX0CU9SkXX8EoCcSuHN1XcbN0c6KBh7yvP5fs= github.com/filecoin-project/go-bitfield v0.0.0-20200416002808-b3ee67ec9060/go.mod h1:iodsLxOFZnqKtjj2zkgqzoGNrv6vUqj69AT/J8DKXEw= -github.com/filecoin-project/go-bitfield v0.0.1 h1:Xg/JnrqqE77aJVKdbEyR04n9FZQWhwrN+buDgQCVpZU= github.com/filecoin-project/go-bitfield v0.0.1/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY= -github.com/filecoin-project/go-bitfield v0.0.4-0.20200703174658-f4a5758051a1 h1:xuHlrdznafh7ul5t4xEncnA4qgpQvJZEw+mr98eqHXw= -github.com/filecoin-project/go-bitfield v0.0.4-0.20200703174658-f4a5758051a1/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY= +github.com/filecoin-project/go-bitfield v0.1.2 h1:TjLregCoyP1/5lm7WCM0axyV1myIHwbjGa21skuu5tk= +github.com/filecoin-project/go-bitfield v0.1.2/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f h1:GxJzR3oRIMTPtpZ0b7QF8FKPK6/iPAc7trhlL5k/g+s= @@ -44,8 +42,9 @@ github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go github.com/filecoin-project/go-paramfetch v0.0.2-0.20200218225740-47c639bab663 h1:eYxi6vI5CyeXD15X1bB3bledDXbqKxqf0wQzTLgwYwA= github.com/filecoin-project/go-paramfetch v0.0.2-0.20200218225740-47c639bab663/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= github.com/filecoin-project/specs-actors v0.3.0/go.mod h1:nQYnFbQ7Y0bHZyq6HDEuVlCPR+U3z5Q3wMOQ+2aiV+Y= -github.com/filecoin-project/specs-actors v0.6.1 h1:rhHlEzqcuuQU6oKc4csuq+/kQBDZ4EXtSomoN2XApCA= github.com/filecoin-project/specs-actors v0.6.1/go.mod h1:dRdy3cURykh2R8O/DKqy8olScl70rmIS7GrB4hB1IDY= +github.com/filecoin-project/specs-actors v0.8.2 h1:fpAPOPqWqmzJCWHpm6P1XDRSpQrxyY5Pzh5H3doYs7Q= +github.com/filecoin-project/specs-actors v0.8.2/go.mod h1:Q3ACV5kBLvqPaYbthc/J1lGMJ5OwogmD9pzdtPRMdCw= github.com/filecoin-project/specs-storage v0.1.1-0.20200622113353-88a9704877ea h1:iixjULRQFPn7Q9KlIqfwLJnlAXO10bbkI+xy5GKGdLY= github.com/filecoin-project/specs-storage v0.1.1-0.20200622113353-88a9704877ea/go.mod h1:Pr5ntAaxsh+sLG/LYiL4tKzvA83Vk5vLODYhfNwOg7k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -84,9 +83,11 @@ github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUP github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.5 h1:o0Ix8e/ql7Zb5UVUJEUfjsWCIY8t48++9lR8qi6oiJU= github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= +github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= github.com/ipfs/go-cid v0.0.6 h1:go0y+GcDOGeJIV01FeBsta4FHngoA4Wz7KMeLkXAhMs= github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= github.com/ipfs/go-hamt-ipld v0.0.15-0.20200131012125-dd88a59d3f2e/go.mod h1:9aQJu/i/TaRDW6jqB5U217dLIDopn50wxLdHXM2CTfE= +github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk= github.com/ipfs/go-ipfs-files v0.0.7 h1:s5BRD12ndahqYifeH1S8Z73zqZhR+3IdKYAG9PiETs0= github.com/ipfs/go-ipfs-files v0.0.7/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs= github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50= @@ -102,14 +103,13 @@ github.com/ipfs/go-ipld-format v0.0.2 h1:OVAGlyYT6JPZ0pEfGntFPS40lfrDmaDbQwNHEY2 github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= github.com/ipfs/go-log v1.0.0/go.mod h1:JO7RzlMK6rA+CIxFMLOuB6Wf5b81GDiKElL7UPSIKjA= -github.com/ipfs/go-log v1.0.3 h1:Gg7SUYSZ7BrqaKMwM+hRgcAkKv4QLfzP4XPQt5Sx/OI= -github.com/ipfs/go-log v1.0.3/go.mod h1:OsLySYkwIbiSUR/yBTdv1qPtcE4FW3WPWk/ewz9Ru+A= -github.com/ipfs/go-log/v2 v2.0.3 h1:Q2gXcBoCALyLN/pUQlz1qgu0x3uFV6FzP9oXhpfyJpc= -github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= +github.com/ipfs/go-log v1.0.4 h1:6nLQdX4W8P9yZZFH7mO+X/PzjN8Laozm/lMJ6esdgzY= +github.com/ipfs/go-log v1.0.4/go.mod h1:oDCg2FkjogeFOhqqb+N39l2RpTNPL6F/StPkB3kPgcs= +github.com/ipfs/go-log/v2 v2.0.5 h1:fL4YI+1g5V/b1Yxr1qAiXTMg1H8z9vx/VmJxBuQMHvU= +github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 h1:QG4CGBqCeuBo6aZlGAamSkxWdgWfZGeE49eUOWJPA4c= github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52/go.mod h1:fdg+/X9Gg4AsAIzWpEHwnqd+QY3b7lajxyjE1m4hkq4= github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= -github.com/jbenet/goprocess v0.1.3 h1:YKyIEECS/XvcfHtBzxtjBBbWK+MbvA6dG8ASiqwvr10= github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -132,9 +132,7 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= -github.com/libp2p/go-libp2p-core v0.3.0 h1:F7PqduvrztDtFsAa/bcheQ3azmNo+Nq7m8hQY5GiUW8= github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw= -github.com/libp2p/go-openssl v0.0.4 h1:d27YZvLoTyMhIN4njrkr8zMDOM4lfpHIp6A+TK9fovg= github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= @@ -166,7 +164,6 @@ github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= -github.com/multiformats/go-multiaddr v0.2.0 h1:lR52sFwcTCuQb6bTfnXF6zA2XfyYvyd+5a9qECv/J90= github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA= github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= @@ -212,7 +209,6 @@ github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2 github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 h1:WN9BUFbdyOsSH/XohnWpXOlq9NBD5sGAB2FciQMUEe8= github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= -github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU= github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= @@ -223,6 +219,8 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436 h1:qOpVTI+BrstcjTZLm2Yz/3sOnqkzj3FQoh0g+E5s3Gc= github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830 h1:8kxMKmKzXXL4Ru1nyhvdms/JjWt+3YLpvRb/bAjO/y0= @@ -231,8 +229,12 @@ github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0/go.mod h1:x github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e h1:JY8o/ebUUrCYetWmjRCNghxC59cOEaili83rxPRQCLw= github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= +github.com/whyrusleeping/cbor-gen v0.0.0-20200504204219-64967432584d/go.mod h1:W5MvapuoHRP8rz4vxjwCK1pDqF1aQcWsV5PZ+AHbqdg= +github.com/whyrusleeping/cbor-gen v0.0.0-20200715143311-227fab5a2377 h1:LHFlP/ktDvOnCap7PsT87cs7Gwd0p+qv6Qm5g2ZPR+I= +github.com/whyrusleeping/cbor-gen v0.0.0-20200715143311-227fab5a2377/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= +github.com/xorcare/golden v0.6.0/go.mod h1:7T39/ZMvaSEZlBPoYfVFmsBLmUl3uz9IuzWj/U6FtvQ= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= @@ -351,6 +353,8 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= From 224a60fb23ca3b7be29bbdf1c0294b25cb0a4a37 Mon Sep 17 00:00:00 2001 From: David Dias Date: Thu, 30 Jul 2020 08:33:52 +0200 Subject: [PATCH 191/199] Update README.md --- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index b6f73ab23..ea1cf459a 100644 --- a/README.md +++ b/README.md @@ -8,6 +8,10 @@ The sector-storage project provides a implementation-nonspecific reference implementation of the [specs-storage](https://github.com/filecoin-project/specs-storage) interface. +## Disclaimer + +Please report your issues with regards to storage-fsm at the [lotus issue tracker](https://github.com/filecoin-project/lotus/issues) + ## Architecture ![high-level architecture](docs/sector-storage.svg) From 15ca665c2167bd58d551e024cb36bfcaa27e0d59 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 30 Jul 2020 12:16:04 +0200 Subject: [PATCH 192/199] Typo --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index ea1cf459a..a4661f9d8 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ The sector-storage project provides a implementation-nonspecific reference imple ## Disclaimer -Please report your issues with regards to storage-fsm at the [lotus issue tracker](https://github.com/filecoin-project/lotus/issues) +Please report your issues with regards to sector-storage at the [lotus issue tracker](https://github.com/filecoin-project/lotus/issues) ## Architecture From 3d2084ab931932da4b6dddd32b8b35df0fe3bc19 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 30 Jul 2020 22:03:43 +0200 Subject: [PATCH 193/199] Try to not unseal in ReadPiece when don't need to --- ffiwrapper/partialfile.go | 19 +++++++++++++++++++ ffiwrapper/sealer_cgo.go | 27 +++++++++++++++++++-------- ffiwrapper/types.go | 2 +- localworker.go | 4 ++-- manager.go | 32 +++++++++++++++++++++++++++++--- work_tracker.go | 2 +- 6 files changed, 71 insertions(+), 15 deletions(-) diff --git a/ffiwrapper/partialfile.go b/ffiwrapper/partialfile.go index b1ab8c53c..f6c03f1a3 100644 --- a/ffiwrapper/partialfile.go +++ b/ffiwrapper/partialfile.go @@ -279,6 +279,25 @@ func (pf *partialFile) Allocated() (rlepluslazy.RunIterator, error) { return pf.allocated.RunIterator() } +func (pf *partialFile) HasAllocated(offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { + have, err := pf.Allocated() + if err != nil { + return false, err + } + + u, err := rlepluslazy.Union(have, pieceRun(offset.Padded(), size.Padded())) + if err != nil { + return false, err + } + + uc, err := rlepluslazy.Count(u) + if err != nil { + return false, err + } + + return abi.PaddedPieceSize(uc) == size.Padded(), nil +} + func pieceRun(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) rlepluslazy.RunIterator { var runs []rlepluslazy.Run if offset > 0 { diff --git a/ffiwrapper/sealer_cgo.go b/ffiwrapper/sealer_cgo.go index 416bfa70b..8a4f18bc7 100644 --- a/ffiwrapper/sealer_cgo.go +++ b/ffiwrapper/sealer_cgo.go @@ -361,10 +361,10 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s return nil } -func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error { +func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { path, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTNone, stores.PathStorage) if err != nil { - return xerrors.Errorf("acquire unsealed sector path: %w", err) + return false, xerrors.Errorf("acquire unsealed sector path: %w", err) } defer done() @@ -372,30 +372,41 @@ func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector abi.Se pf, err := openPartialFile(maxPieceSize, path.Unsealed) if xerrors.Is(err, os.ErrNotExist) { - return xerrors.Errorf("opening partial file: %w", err) + return false, xerrors.Errorf("opening partial file: %w", err) + } + + ok, err := pf.HasAllocated(offset, size) + if err != nil { + pf.Close() + return false, err + } + + if !ok { + pf.Close() + return false, nil } f, err := pf.Reader(offset.Padded(), size.Padded()) if err != nil { pf.Close() - return xerrors.Errorf("getting partial file reader: %w", err) + return false, xerrors.Errorf("getting partial file reader: %w", err) } upr, err := fr32.NewUnpadReader(f, size.Padded()) if err != nil { - return xerrors.Errorf("creating unpadded reader: %w", err) + return false, xerrors.Errorf("creating unpadded reader: %w", err) } if _, err := io.CopyN(writer, upr, int64(size)); err != nil { pf.Close() - return xerrors.Errorf("reading unsealed file: %w", err) + return false, xerrors.Errorf("reading unsealed file: %w", err) } if err := pf.Close(); err != nil { - return xerrors.Errorf("closing partial file: %w", err) + return false, xerrors.Errorf("closing partial file: %w", err) } - return nil + return false, nil } func (sb *Sealer) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) { diff --git a/ffiwrapper/types.go b/ffiwrapper/types.go index 13c0ee990..bc3c44f54 100644 --- a/ffiwrapper/types.go +++ b/ffiwrapper/types.go @@ -29,7 +29,7 @@ type Storage interface { StorageSealer UnsealPiece(ctx context.Context, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error - ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error + ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) } type Verifier interface { diff --git a/localworker.go b/localworker.go index 7b9bbdee1..14ed1cd0b 100644 --- a/localworker.go +++ b/localworker.go @@ -237,10 +237,10 @@ func (l *LocalWorker) UnsealPiece(ctx context.Context, sector abi.SectorID, inde return nil } -func (l *LocalWorker) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error { +func (l *LocalWorker) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { sb, err := l.sb() if err != nil { - return err + return false, err } return sb.ReadPiece(ctx, writer, sector, index, size) diff --git a/manager.go b/manager.go index 4791eb5e6..5f2b8e334 100644 --- a/manager.go +++ b/manager.go @@ -34,7 +34,7 @@ type Worker interface { Fetch(ctx context.Context, s abi.SectorID, ft stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) error UnsealPiece(context.Context, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error - ReadPiece(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) error + ReadPiece(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) (bool, error) TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) @@ -221,7 +221,28 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect return xerrors.Errorf("creating unsealPiece selector: %w", err) } - // TODO: Optimization: don't send unseal to a worker if the requested range is already unsealed + var readOk bool + + if len(best) > 0 { + // There is unsealed sector, see if we can read from it + + selector, err = newExistingSelector(ctx, m.index, sector, stores.FTUnsealed, false) + if err != nil { + return xerrors.Errorf("creating readPiece selector: %w", err) + } + + err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, schedFetch(sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error { + readOk, err = w.ReadPiece(ctx, sink, sector, offset, size) + return err + }) + if err != nil { + return xerrors.Errorf("reading piece from sealed sector: %w", err) + } + + if readOk { + return nil + } + } unsealFetch := func(ctx context.Context, worker Worker) error { if err := worker.Fetch(ctx, sector, stores.FTSealed|stores.FTCache, stores.PathSealing, stores.AcquireCopy); err != nil { @@ -249,12 +270,17 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect } err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, schedFetch(sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error { - return w.ReadPiece(ctx, sink, sector, offset, size) + readOk, err = w.ReadPiece(ctx, sink, sector, offset, size) + return err }) if err != nil { return xerrors.Errorf("reading piece from sealed sector: %w", err) } + if readOk { + return xerrors.Errorf("failed to read unsealed piece") + } + return nil } diff --git a/work_tracker.go b/work_tracker.go index f1e243ed2..7453752c9 100644 --- a/work_tracker.go +++ b/work_tracker.go @@ -120,7 +120,7 @@ func (t *trackedWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index return t.Worker.UnsealPiece(ctx, id, index, size, randomness, cid) } -func (t *trackedWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error { +func (t *trackedWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { defer t.tracker.track(id, sealtasks.TTReadUnsealed)() return t.Worker.ReadPiece(ctx, writer, id, index, size) From 7153e1dd05b5e7aca17098eb47a65f9855bfdec5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 30 Jul 2020 22:38:05 +0200 Subject: [PATCH 194/199] Fix tests --- ffiwrapper/partialfile.go | 2 +- ffiwrapper/sealer_test.go | 17 ++++++++++------- sched_test.go | 2 +- testworker_test.go | 2 +- 4 files changed, 13 insertions(+), 10 deletions(-) diff --git a/ffiwrapper/partialfile.go b/ffiwrapper/partialfile.go index f6c03f1a3..3e8b32288 100644 --- a/ffiwrapper/partialfile.go +++ b/ffiwrapper/partialfile.go @@ -285,7 +285,7 @@ func (pf *partialFile) HasAllocated(offset storiface.UnpaddedByteIndex, size abi return false, err } - u, err := rlepluslazy.Union(have, pieceRun(offset.Padded(), size.Padded())) + u, err := rlepluslazy.And(have, pieceRun(offset.Padded(), size.Padded())) if err != nil { return false, err } diff --git a/ffiwrapper/sealer_test.go b/ffiwrapper/sealer_test.go index 0b5018d84..f795be159 100644 --- a/ffiwrapper/sealer_test.go +++ b/ffiwrapper/sealer_test.go @@ -111,7 +111,7 @@ func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si abi.Sec defer done() var b bytes.Buffer - err := sb.ReadPiece(context.TODO(), &b, si, 0, 1016) + _, err := sb.ReadPiece(context.TODO(), &b, si, 0, 1016) if err != nil { t.Fatal(err) } @@ -130,7 +130,7 @@ func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si abi.Sec } sd() - err = sb.ReadPiece(context.TODO(), &b, si, 0, 1016) + _, err = sb.ReadPiece(context.TODO(), &b, si, 0, 1016) if err == nil { t.Fatal("HOW?!") } @@ -141,7 +141,7 @@ func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si abi.Sec } b.Reset() - err = sb.ReadPiece(context.TODO(), &b, si, 0, 1016) + _, err = sb.ReadPiece(context.TODO(), &b, si, 0, 1016) if err != nil { t.Fatal(err) } @@ -150,14 +150,17 @@ func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si abi.Sec require.Equal(t, expect, b.Bytes()) b.Reset() - err = sb.ReadPiece(context.TODO(), &b, si, 0, 2032) + have, err := sb.ReadPiece(context.TODO(), &b, si, 0, 2032) if err != nil { t.Fatal(err) } - expect = append(expect, bytes.Repeat([]byte{0}, 1016)...) - if !bytes.Equal(b.Bytes(), expect) { - t.Fatal("read wrong bytes") + if have { + t.Errorf("didn't expect to read things") + } + + if b.Len() != 0 { + t.Fatal("read bytes") } } diff --git a/sched_test.go b/sched_test.go index caf7f0b4b..c96f7838c 100644 --- a/sched_test.go +++ b/sched_test.go @@ -88,7 +88,7 @@ func (s *schedTestWorker) UnsealPiece(ctx context.Context, id abi.SectorID, inde panic("implement me") } -func (s *schedTestWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error { +func (s *schedTestWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { panic("implement me") } diff --git a/testworker_test.go b/testworker_test.go index bdfff1915..40151a84d 100644 --- a/testworker_test.go +++ b/testworker_test.go @@ -53,7 +53,7 @@ func (t *testWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index sto panic("implement me") } -func (t *testWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error { +func (t *testWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { panic("implement me") } From ed251d9f82123e86cab87adf57a60ea8d3762e26 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 3 Aug 2020 14:18:11 +0200 Subject: [PATCH 195/199] Fix some locking issues --- sched.go | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/sched.go b/sched.go index ed648bf19..e8eda4834 100644 --- a/sched.go +++ b/sched.go @@ -52,7 +52,7 @@ type WorkerSelector interface { type scheduler struct { spt abi.RegisteredSealProof - workersLk sync.Mutex + workersLk sync.RWMutex nextWorker WorkerID workers map[WorkerID]*workerHandle @@ -83,6 +83,8 @@ type workerHandle struct { preparing *activeResources active *activeResources + lk sync.Mutex + // stats / tracking wt *workTracker @@ -283,6 +285,9 @@ func (sh *scheduler) trySched() { log.Debugf("SCHED %d queued; %d open windows", sh.schedQueue.Len(), len(windows)) + sh.workersLk.RLock() + defer sh.workersLk.RUnlock() + // Step 1 for sqi := 0; sqi < sh.schedQueue.Len(); sqi++ { task := (*sh.schedQueue)[sqi] @@ -428,9 +433,9 @@ func (sh *scheduler) runWorker(wid WorkerID) { defer ready.Wait() go func() { - sh.workersLk.Lock() + sh.workersLk.RLock() worker, found := sh.workers[wid] - sh.workersLk.Unlock() + sh.workersLk.RUnlock() ready.Done() @@ -498,16 +503,19 @@ func (sh *scheduler) runWorker(wid WorkerID) { todo := activeWindows[0].todo[0] needRes := ResourceTable[todo.taskType][sh.spt] - sh.workersLk.Lock() + sh.workersLk.RLock() + worker.lk.Lock() ok := worker.preparing.canHandleRequest(needRes, wid, worker.info.Resources) + worker.lk.Unlock() + if !ok { - sh.workersLk.Unlock() + sh.workersLk.RUnlock() break assignLoop } log.Debugf("assign worker sector %d", todo.sector.Number) err := sh.assignWorker(taskDone, wid, worker, todo) - sh.workersLk.Unlock() + sh.workersLk.RUnlock() if err != nil { log.Error("assignWorker error: %+v", err) @@ -530,14 +538,18 @@ func (sh *scheduler) runWorker(wid WorkerID) { func (sh *scheduler) assignWorker(taskDone chan struct{}, wid WorkerID, w *workerHandle, req *workerRequest) error { needRes := ResourceTable[req.taskType][sh.spt] + w.lk.Lock() w.preparing.add(w.info.Resources, needRes) + w.lk.Unlock() go func() { err := req.prepare(req.ctx, w.wt.worker(w.w)) sh.workersLk.Lock() if err != nil { + w.lk.Lock() w.preparing.free(w.info.Resources, needRes) + w.lk.Unlock() sh.workersLk.Unlock() select { @@ -557,7 +569,9 @@ func (sh *scheduler) assignWorker(taskDone chan struct{}, wid WorkerID, w *worke } err = w.active.withResources(wid, w.info.Resources, needRes, &sh.workersLk, func() error { + w.lk.Lock() w.preparing.free(w.info.Resources, needRes) + w.lk.Unlock() sh.workersLk.Unlock() defer sh.workersLk.Lock() // we MUST return locked from this function From 3cab915fd225717efd7fc9b099b854e11af2d056 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 3 Aug 2020 20:49:04 +0200 Subject: [PATCH 196/199] mock: Make it possible to unfail sectors --- mock/mock.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mock/mock.go b/mock/mock.go index 55b103ab8..6eb71cd6b 100644 --- a/mock/mock.go +++ b/mock/mock.go @@ -236,7 +236,7 @@ func (mgr *SectorMgr) SealCommit2(ctx context.Context, sid abi.SectorID, phase1O // Test Instrumentation Methods -func (mgr *SectorMgr) FailSector(sid abi.SectorID) error { +func (mgr *SectorMgr) MarkFailed(sid abi.SectorID, failed bool) error { mgr.lk.Lock() defer mgr.lk.Unlock() ss, ok := mgr.sectors[sid] @@ -244,7 +244,7 @@ func (mgr *SectorMgr) FailSector(sid abi.SectorID) error { return fmt.Errorf("no such sector in storage") } - ss.failed = true + ss.failed = failed return nil } From b58eba0d999d33c6596ea84ed390aa5fc34f814d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Tue, 4 Aug 2020 16:20:59 +0200 Subject: [PATCH 197/199] remote: Fetch storage reservation --- fsutil/filesize_unix.go | 6 +++- stores/interface.go | 8 ++--- stores/local.go | 21 +++++++++--- stores/remote.go | 71 ++++++++++++++++++++++++++++------------- 4 files changed, 74 insertions(+), 32 deletions(-) diff --git a/fsutil/filesize_unix.go b/fsutil/filesize_unix.go index 41b62daf6..dacdcd515 100644 --- a/fsutil/filesize_unix.go +++ b/fsutil/filesize_unix.go @@ -1,6 +1,7 @@ package fsutil import ( + "os" "syscall" "golang.org/x/xerrors" @@ -14,12 +15,15 @@ type SizeInfo struct { func FileSize(path string) (SizeInfo, error) { var stat syscall.Stat_t if err := syscall.Stat(path, &stat); err != nil { + if err == syscall.ENOENT { + return SizeInfo{}, os.ErrNotExist + } return SizeInfo{}, xerrors.Errorf("stat: %w", err) } // NOTE: stat.Blocks is in 512B blocks, NOT in stat.Blksize // See https://www.gnu.org/software/libc/manual/html_node/Attribute-Meanings.html return SizeInfo{ - int64(stat.Blocks) * 512, + int64(stat.Blocks) * 512, // NOTE: int64 cast is needed on osx }, nil } diff --git a/stores/interface.go b/stores/interface.go index 836705f40..142769b1b 100644 --- a/stores/interface.go +++ b/stores/interface.go @@ -9,15 +9,15 @@ import ( type PathType string const ( - PathStorage = "storage" - PathSealing = "sealing" + PathStorage PathType = "storage" + PathSealing PathType = "sealing" ) type AcquireMode string const ( - AcquireMove = "move" - AcquireCopy = "copy" + AcquireMove AcquireMode = "move" + AcquireCopy AcquireMode = "copy" ) type Store interface { diff --git a/stores/local.go b/stores/local.go index 9a7ec6108..9efab6480 100644 --- a/stores/local.go +++ b/stores/local.go @@ -50,7 +50,10 @@ type LocalStorage interface { SetStorage(func(*StorageConfig)) error Stat(path string) (fsutil.FsStat, error) - DiskUsage(path string) (int64, error) // returns real disk usage for a file/directory + + // returns real disk usage for a file/directory + // os.ErrNotExit when file doesn't exist + DiskUsage(path string) (int64, error) } const MetaFile = "sectorstore.json" @@ -77,7 +80,7 @@ type path struct { func (p *path) stat(ls LocalStorage) (fsutil.FsStat, error) { stat, err := ls.Stat(p.local) if err != nil { - return fsutil.FsStat{}, err + return fsutil.FsStat{}, xerrors.Errorf("stat %s: %w", p.local, err) } stat.Reserved = p.reserved @@ -88,7 +91,17 @@ func (p *path) stat(ls LocalStorage) (fsutil.FsStat, error) { continue } - used, err := ls.DiskUsage(p.sectorPath(id, fileType)) + sp := p.sectorPath(id, fileType) + + used, err := ls.DiskUsage(sp) + if err == os.ErrNotExist { + p, ferr := tempFetchDest(sp, false) + if ferr != nil { + return fsutil.FsStat{}, ferr + } + + used, err = ls.DiskUsage(p) + } if err != nil { log.Errorf("getting disk usage of '%s': %+v", p.sectorPath(id, fileType), err) continue @@ -279,7 +292,7 @@ func (st *Local) Reserve(ctx context.Context, sid abi.SectorID, spt abi.Register stat, err := p.stat(st.localStorage) if err != nil { - return nil, err + return nil, xerrors.Errorf("getting local storage stat: %w", err) } overhead := int64(overheadTab[fileType]) * int64(ssize) / FSOverheadDen diff --git a/stores/remote.go b/stores/remote.go index 42b730b40..93dc2ca58 100644 --- a/stores/remote.go +++ b/stores/remote.go @@ -95,6 +95,33 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, spt abi.Regi return SectorPaths{}, SectorPaths{}, xerrors.Errorf("local acquire error: %w", err) } + var toFetch SectorFileType + for _, fileType := range PathTypes { + if fileType&existing == 0 { + continue + } + + if PathByType(paths, fileType) == "" { + toFetch |= fileType + } + } + + apaths, ids, err := r.local.AcquireSector(ctx, s, spt, FTNone, toFetch, pathType, op) + if err != nil { + return SectorPaths{}, SectorPaths{}, xerrors.Errorf("allocate local sector for fetching: %w", err) + } + + odt := FSOverheadSeal + if pathType == PathStorage { + odt = FsOverheadFinalized + } + + releaseStorage, err := r.local.Reserve(ctx, s, spt, toFetch, ids, odt) + if err != nil { + return SectorPaths{}, SectorPaths{}, xerrors.Errorf("reserving storage space: %w", err) + } + defer releaseStorage() + for _, fileType := range PathTypes { if fileType&existing == 0 { continue @@ -104,15 +131,18 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, spt abi.Regi continue } - ap, storageID, url, err := r.acquireFromRemote(ctx, s, spt, fileType, pathType, op) + dest := PathByType(apaths, fileType) + storageID := PathByType(ids, fileType) + + url, err := r.acquireFromRemote(ctx, s, fileType, dest) if err != nil { return SectorPaths{}, SectorPaths{}, err } - SetPathByType(&paths, fileType, ap) - SetPathByType(&stores, fileType, string(storageID)) + SetPathByType(&paths, fileType, dest) + SetPathByType(&stores, fileType, storageID) - if err := r.index.StorageDeclareSector(ctx, storageID, s, fileType, op == AcquireMove); err != nil { + if err := r.index.StorageDeclareSector(ctx, ID(storageID), s, fileType, op == AcquireMove); err != nil { log.Warnf("declaring sector %v in %s failed: %+v", s, storageID, err) continue } @@ -127,49 +157,44 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, spt abi.Regi return paths, stores, nil } -func tempDest(spath string) (string, error) { +func tempFetchDest(spath string, create bool) (string, error) { st, b := filepath.Split(spath) tempdir := filepath.Join(st, FetchTempSubdir) - if err := os.MkdirAll(tempdir, 755); err != nil { - return "", xerrors.Errorf("creating temp fetch dir: %w", err) + if create { + if err := os.MkdirAll(tempdir, 0755); err != nil { + return "", xerrors.Errorf("creating temp fetch dir: %w", err) + } } return filepath.Join(tempdir, b), nil } -func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, spt abi.RegisteredSealProof, fileType SectorFileType, pathType PathType, op AcquireMode) (string, ID, string, error) { +func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, fileType SectorFileType, dest string) (string, error) { si, err := r.index.StorageFindSector(ctx, s, fileType, false) if err != nil { - return "", "", "", err + return "", err } if len(si) == 0 { - return "", "", "", xerrors.Errorf("failed to acquire sector %v from remote(%d): %w", s, fileType, storiface.ErrSectorNotFound) + return "", xerrors.Errorf("failed to acquire sector %v from remote(%d): %w", s, fileType, storiface.ErrSectorNotFound) } sort.Slice(si, func(i, j int) bool { return si[i].Weight < si[j].Weight }) - apaths, ids, err := r.local.AcquireSector(ctx, s, spt, FTNone, fileType, pathType, op) - if err != nil { - return "", "", "", xerrors.Errorf("allocate local sector for fetching: %w", err) - } - dest := PathByType(apaths, fileType) - storageID := PathByType(ids, fileType) - var merr error for _, info := range si { // TODO: see what we have local, prefer that for _, url := range info.URLs { - tempDest, err := tempDest(dest) + tempDest, err := tempFetchDest(dest, true) if err != nil { - return "", "", "", err + return "", err } if err := os.RemoveAll(dest); err != nil { - return "", "", "", xerrors.Errorf("removing dest: %w", err) + return "", xerrors.Errorf("removing dest: %w", err) } err = r.fetch(ctx, url, tempDest) @@ -179,17 +204,17 @@ func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, spt abi. } if err := move(tempDest, dest); err != nil { - return "", "", "", xerrors.Errorf("fetch move error (storage %s) %s -> %s: %w", info.ID, tempDest, dest, err) + return "", xerrors.Errorf("fetch move error (storage %s) %s -> %s: %w", info.ID, tempDest, dest, err) } if merr != nil { log.Warnw("acquireFromRemote encountered errors when fetching sector from remote", "errors", merr) } - return dest, ID(storageID), url, nil + return url, nil } } - return "", "", "", xerrors.Errorf("failed to acquire sector %v from remote (tried %v): %w", s, si, merr) + return "", xerrors.Errorf("failed to acquire sector %v from remote (tried %v): %w", s, si, merr) } func (r *Remote) fetch(ctx context.Context, url, outname string) error { From de3d3b48f71bbcb8e2b7bb9732d4ed7cc4ca1c7a Mon Sep 17 00:00:00 2001 From: yaohcn Date: Wed, 5 Aug 2020 20:36:49 +0800 Subject: [PATCH 198/199] fix 2806 --- manager.go | 50 ++++++++++++-------------------------------- sched_test.go | 2 +- selector_alloc.go | 2 +- selector_existing.go | 26 ++++++++++++++--------- 4 files changed, 31 insertions(+), 49 deletions(-) diff --git a/manager.go b/manager.go index 5f2b8e334..303df2169 100644 --- a/manager.go +++ b/manager.go @@ -213,12 +213,9 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect var selector WorkerSelector if len(best) == 0 { // new - selector = newAllocSelector(ctx, m.index, stores.FTUnsealed, stores.PathSealing) + selector = newAllocSelector(m.index, stores.FTUnsealed, stores.PathSealing) } else { // append to existing - selector, err = newExistingSelector(ctx, m.index, sector, stores.FTUnsealed, false) - } - if err != nil { - return xerrors.Errorf("creating unsealPiece selector: %w", err) + selector = newExistingSelector(m.index, sector, stores.FTUnsealed, false) } var readOk bool @@ -226,10 +223,7 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect if len(best) > 0 { // There is unsealed sector, see if we can read from it - selector, err = newExistingSelector(ctx, m.index, sector, stores.FTUnsealed, false) - if err != nil { - return xerrors.Errorf("creating readPiece selector: %w", err) - } + selector = newExistingSelector(m.index, sector, stores.FTUnsealed, false) err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, schedFetch(sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error { readOk, err = w.ReadPiece(ctx, sink, sector, offset, size) @@ -264,10 +258,7 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect return err } - selector, err = newExistingSelector(ctx, m.index, sector, stores.FTUnsealed, false) - if err != nil { - return xerrors.Errorf("creating readPiece selector: %w", err) - } + selector = newExistingSelector(m.index, sector, stores.FTUnsealed, false) err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, schedFetch(sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error { readOk, err = w.ReadPiece(ctx, sink, sector, offset, size) @@ -300,12 +291,9 @@ func (m *Manager) AddPiece(ctx context.Context, sector abi.SectorID, existingPie var selector WorkerSelector var err error if len(existingPieces) == 0 { // new - selector = newAllocSelector(ctx, m.index, stores.FTUnsealed, stores.PathSealing) + selector = newAllocSelector(m.index, stores.FTUnsealed, stores.PathSealing) } else { // use existing - selector, err = newExistingSelector(ctx, m.index, sector, stores.FTUnsealed, false) - } - if err != nil { - return abi.PieceInfo{}, xerrors.Errorf("creating path selector: %w", err) + selector = newExistingSelector(m.index, sector, stores.FTUnsealed, false) } var out abi.PieceInfo @@ -331,7 +319,7 @@ func (m *Manager) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke // TODO: also consider where the unsealed data sits - selector := newAllocSelector(ctx, m.index, stores.FTCache|stores.FTSealed, stores.PathSealing) + selector := newAllocSelector(m.index, stores.FTCache|stores.FTSealed, stores.PathSealing) err = m.sched.Schedule(ctx, sector, sealtasks.TTPreCommit1, selector, schedFetch(sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error { p, err := w.SealPreCommit1(ctx, sector, ticket, pieces) @@ -353,10 +341,7 @@ func (m *Manager) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase return storage.SectorCids{}, xerrors.Errorf("acquiring sector lock: %w", err) } - selector, err := newExistingSelector(ctx, m.index, sector, stores.FTCache|stores.FTSealed, true) - if err != nil { - return storage.SectorCids{}, xerrors.Errorf("creating path selector: %w", err) - } + selector := newExistingSelector(m.index, sector, stores.FTCache|stores.FTSealed, true) err = m.sched.Schedule(ctx, sector, sealtasks.TTPreCommit2, selector, schedFetch(sector, stores.FTCache|stores.FTSealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error { p, err := w.SealPreCommit2(ctx, sector, phase1Out) @@ -380,10 +365,7 @@ func (m *Manager) SealCommit1(ctx context.Context, sector abi.SectorID, ticket a // NOTE: We set allowFetch to false in so that we always execute on a worker // with direct access to the data. We want to do that because this step is // generally very cheap / fast, and transferring data is not worth the effort - selector, err := newExistingSelector(ctx, m.index, sector, stores.FTCache|stores.FTSealed, false) - if err != nil { - return storage.Commit1Out{}, xerrors.Errorf("creating path selector: %w", err) - } + selector := newExistingSelector(m.index, sector, stores.FTCache|stores.FTSealed, false) err = m.sched.Schedule(ctx, sector, sealtasks.TTCommit1, selector, schedFetch(sector, stores.FTCache|stores.FTSealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error { p, err := w.SealCommit1(ctx, sector, ticket, seed, pieces, cids) @@ -431,12 +413,9 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID, keepU } } - selector, err := newExistingSelector(ctx, m.index, sector, stores.FTCache|stores.FTSealed, false) - if err != nil { - return xerrors.Errorf("creating path selector: %w", err) - } + selector := newExistingSelector(m.index, sector, stores.FTCache|stores.FTSealed, false) - err = m.sched.Schedule(ctx, sector, sealtasks.TTFinalize, selector, + err := m.sched.Schedule(ctx, sector, sealtasks.TTFinalize, selector, schedFetch(sector, stores.FTCache|stores.FTSealed|unsealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error { return w.FinalizeSector(ctx, sector, keepUnsealed) @@ -445,7 +424,7 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID, keepU return err } - fetchSel := newAllocSelector(ctx, m.index, stores.FTCache|stores.FTSealed, stores.PathStorage) + fetchSel := newAllocSelector(m.index, stores.FTCache|stores.FTSealed, stores.PathStorage) moveUnsealed := unsealed { if len(keepUnsealed) == 0 { @@ -490,10 +469,7 @@ func (m *Manager) Remove(ctx context.Context, sector abi.SectorID) error { } } - selector, err := newExistingSelector(ctx, m.index, sector, stores.FTCache|stores.FTSealed, false) - if err != nil { - return xerrors.Errorf("creating selector: %w", err) - } + selector := newExistingSelector(m.index, sector, stores.FTCache|stores.FTSealed, false) return m.sched.Schedule(ctx, sector, sealtasks.TTFinalize, selector, schedFetch(sector, stores.FTCache|stores.FTSealed|unsealed, stores.PathStorage, stores.AcquireMove), diff --git a/sched_test.go b/sched_test.go index c96f7838c..6490e738e 100644 --- a/sched_test.go +++ b/sched_test.go @@ -207,7 +207,7 @@ func TestSched(t *testing.T) { done := make(chan struct{}) rm.done[taskName] = done - sel := newAllocSelector(ctx, index, stores.FTCache, stores.PathSealing) + sel := newAllocSelector(index, stores.FTCache, stores.PathSealing) rm.wg.Add(1) go func() { diff --git a/selector_alloc.go b/selector_alloc.go index 35221921f..cf7937587 100644 --- a/selector_alloc.go +++ b/selector_alloc.go @@ -17,7 +17,7 @@ type allocSelector struct { ptype stores.PathType } -func newAllocSelector(ctx context.Context, index stores.SectorIndex, alloc stores.SectorFileType, ptype stores.PathType) *allocSelector { +func newAllocSelector(index stores.SectorIndex, alloc stores.SectorFileType, ptype stores.PathType) *allocSelector { return &allocSelector{ index: index, alloc: alloc, diff --git a/selector_existing.go b/selector_existing.go index 3f99010cb..20cb1b209 100644 --- a/selector_existing.go +++ b/selector_existing.go @@ -12,18 +12,19 @@ import ( ) type existingSelector struct { - best []stores.SectorStorageInfo + index stores.SectorIndex + sector abi.SectorID + alloc stores.SectorFileType + allowFetch bool } -func newExistingSelector(ctx context.Context, index stores.SectorIndex, sector abi.SectorID, alloc stores.SectorFileType, allowFetch bool) (*existingSelector, error) { - best, err := index.StorageFindSector(ctx, sector, alloc, allowFetch) - if err != nil { - return nil, err - } - +func newExistingSelector(index stores.SectorIndex, sector abi.SectorID, alloc stores.SectorFileType, allowFetch bool) *existingSelector { return &existingSelector{ - best: best, - }, nil + index: index, + sector: sector, + alloc: alloc, + allowFetch: allowFetch, + } } func (s *existingSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredSealProof, whnd *workerHandle) (bool, error) { @@ -45,7 +46,12 @@ func (s *existingSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt have[path.ID] = struct{}{} } - for _, info := range s.best { + best, err := s.index.StorageFindSector(ctx, s.sector, s.alloc, s.allowFetch) + if err != nil { + return false, xerrors.Errorf("finding best storage: %w", err) + } + + for _, info := range best { if _, ok := have[info.ID]; ok { return true, nil } From 77e4adb5567e31b8ad164a1df6de36bc2eb96466 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Wed, 5 Aug 2020 22:11:53 +0200 Subject: [PATCH 199/199] gofmt --- selector_existing.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/selector_existing.go b/selector_existing.go index 20cb1b209..a11c39007 100644 --- a/selector_existing.go +++ b/selector_existing.go @@ -12,17 +12,17 @@ import ( ) type existingSelector struct { - index stores.SectorIndex - sector abi.SectorID - alloc stores.SectorFileType + index stores.SectorIndex + sector abi.SectorID + alloc stores.SectorFileType allowFetch bool } func newExistingSelector(index stores.SectorIndex, sector abi.SectorID, alloc stores.SectorFileType, allowFetch bool) *existingSelector { return &existingSelector{ - index: index, - sector: sector, - alloc: alloc, + index: index, + sector: sector, + alloc: alloc, allowFetch: allowFetch, } }