commit be1231fef4

extern/filecoin-ffi (vendored submodule)
@@ -1 +1 @@
-Subproject commit 5342c7c97d1a1df4650629d14f2823d52889edd9
+Subproject commit cddc56607e1d851ea6d09d49404bd7db70cb3c2e
@@ -43,9 +43,11 @@ func (m *Manager) CheckProvable(ctx context.Context, spt abi.RegisteredSealProof
 			return nil
 		}

-		lp, _, err := m.localStore.AcquireSector(ctx, sector, spt, stores.FTSealed|stores.FTCache, stores.FTNone, false, stores.AcquireMove)
+		lp, _, err := m.localStore.AcquireSector(ctx, sector, spt, stores.FTSealed|stores.FTCache, stores.FTNone, stores.PathStorage, stores.AcquireMove)
 		if err != nil {
-			return xerrors.Errorf("acquire sector in checkProvable: %w", err)
+			log.Warnw("CheckProvable Sector FAULT: acquire sector in checkProvable", "sector", sector, "error", err)
+			bad = append(bad, sector)
+			return nil
 		}

 		if lp.Sealed == "" || lp.Cache == "" {
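Note on the change above: the boolean "sealing" argument to AcquireSector is replaced throughout this diff by an explicit stores.PathType value. Only the PathStorage and PathSealing names appear in the diff; a minimal sketch of what such a type could look like (the concrete definition lives in the stores package, and the string values below are assumptions):

    // sketch only — assumed string-backed enum in the stores package
    type PathType string

    const (
    	PathStorage PathType = "storage"
    	PathSealing PathType = "sealing"
    )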
@@ -12,6 +12,7 @@ import (
 	rlepluslazy "github.com/filecoin-project/go-bitfield/rle"
 	"github.com/filecoin-project/specs-actors/actors/abi"

+	"github.com/filecoin-project/sector-storage/fsutil"
 	"github.com/filecoin-project/sector-storage/storiface"
 )

@@ -144,10 +145,15 @@ func openPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*partialFil
 			return xerrors.Errorf("getting trailer run iterator: %w", err)
 		}

-		lastSet, err := rlepluslazy.LastIndex(it, true)
+		f, err := rlepluslazy.Fill(it)
+		if err != nil {
+			return xerrors.Errorf("filling bitfield: %w", err)
+		}
+		lastSet, err := rlepluslazy.Count(f)
 		if err != nil {
 			return xerrors.Errorf("finding last set byte index: %w", err)
 		}

 		if lastSet > uint64(maxPieceSize) {
 			return xerrors.Errorf("last set byte at index higher than sector size: %d > %d", lastSet, maxPieceSize)
 		}
@@ -218,6 +224,28 @@ func (pf *partialFile) MarkAllocated(offset storiface.PaddedByteIndex, size abi.
 	return nil
 }

+func (pf *partialFile) Free(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) error {
+	have, err := pf.allocated.RunIterator()
+	if err != nil {
+		return err
+	}
+
+	if err := fsutil.Deallocate(pf.file, int64(offset), int64(size)); err != nil {
+		return xerrors.Errorf("deallocating: %w", err)
+	}
+
+	s, err := rlepluslazy.Subtract(have, pieceRun(offset, size))
+	if err != nil {
+		return err
+	}
+
+	if err := writeTrailer(int64(pf.maxPiece), pf.file, s); err != nil {
+		return xerrors.Errorf("writing trailer: %w", err)
+	}
+
+	return nil
+}
+
 func (pf *partialFile) Reader(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) (*os.File, error) {
 	if _, err := pf.file.Seek(int64(offset), io.SeekStart); err != nil {
 		return nil, xerrors.Errorf("seek piece start: %w", err)
@@ -251,6 +279,25 @@ func (pf *partialFile) Allocated() (rlepluslazy.RunIterator, error) {
 	return pf.allocated.RunIterator()
 }

+func (pf *partialFile) HasAllocated(offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) {
+	have, err := pf.Allocated()
+	if err != nil {
+		return false, err
+	}
+
+	u, err := rlepluslazy.And(have, pieceRun(offset.Padded(), size.Padded()))
+	if err != nil {
+		return false, err
+	}
+
+	uc, err := rlepluslazy.Count(u)
+	if err != nil {
+		return false, err
+	}
+
+	return abi.PaddedPieceSize(uc) == size.Padded(), nil
+}
+
 func pieceRun(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) rlepluslazy.RunIterator {
 	var runs []rlepluslazy.Run
 	if offset > 0 {
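Free and HasAllocated above both treat the partial file's allocation map as an RLE+ run iterator. A self-contained sketch of the same bookkeeping, using only the go-bitfield/rle calls visible in this diff (Run, RunSliceIterator, Subtract, Count); the byte counts are illustrative:

    package main

    import (
    	"fmt"

    	rlepluslazy "github.com/filecoin-project/go-bitfield/rle"
    )

    func main() {
    	// 1024 bytes allocated at the start of the file.
    	have := &rlepluslazy.RunSliceIterator{Runs: []rlepluslazy.Run{{Val: true, Len: 1024}}}

    	// Free the first 512 bytes, as partialFile.Free does after punching a hole.
    	freed := &rlepluslazy.RunSliceIterator{Runs: []rlepluslazy.Run{{Val: true, Len: 512}}}

    	rest, err := rlepluslazy.Subtract(have, freed)
    	if err != nil {
    		panic(err)
    	}

    	n, err := rlepluslazy.Count(rest)
    	if err != nil {
    		panic(err)
    	}

    	fmt.Println(n) // 512 bytes still marked allocated
    }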
@@ -15,6 +15,7 @@ import (
 	"golang.org/x/xerrors"

 	ffi "github.com/filecoin-project/filecoin-ffi"
+	rlepluslazy "github.com/filecoin-project/go-bitfield/rle"
 	commcid "github.com/filecoin-project/go-fil-commcid"
 	"github.com/filecoin-project/specs-actors/actors/abi"
 	"github.com/filecoin-project/specs-storage/storage"
@@ -81,7 +82,7 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie

 	var stagedPath stores.SectorPaths
 	if len(existingPieceSizes) == 0 {
-		stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, 0, stores.FTUnsealed, true)
+		stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, 0, stores.FTUnsealed, stores.PathSealing)
 		if err != nil {
 			return abi.PieceInfo{}, xerrors.Errorf("acquire unsealed sector: %w", err)
 		}
@@ -91,7 +92,7 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie
 			return abi.PieceInfo{}, xerrors.Errorf("creating unsealed sector file: %w", err)
 		}
 	} else {
-		stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, 0, true)
+		stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, 0, stores.PathSealing)
 		if err != nil {
 			return abi.PieceInfo{}, xerrors.Errorf("acquire unsealed sector: %w", err)
 		}
@@ -167,14 +168,14 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie
 		return abi.PieceInfo{}, xerrors.Errorf("generate unsealed CID: %w", err)
 	}

-	commp, err := commcid.CIDToDataCommitmentV1(pieceCID)
-	if err != nil {
+	// validate that the pieceCID was properly formed
+	if _, err := commcid.CIDToPieceCommitmentV1(pieceCID); err != nil {
 		return abi.PieceInfo{}, err
 	}

 	return abi.PieceInfo{
 		Size:     pieceSize.Padded(),
-		PieceCID: commcid.PieceCommitmentV1ToCID(commp),
+		PieceCID: pieceCID,
 	}, nil
 }

@@ -198,12 +199,12 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s
 	maxPieceSize := abi.PaddedPieceSize(sb.ssize)

 	// try finding existing
-	unsealedPath, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTNone, false)
+	unsealedPath, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTNone, stores.PathStorage)
 	var pf *partialFile

 	switch {
 	case xerrors.Is(err, storiface.ErrSectorNotFound):
-		unsealedPath, done, err = sb.sectors.AcquireSector(ctx, sector, stores.FTNone, stores.FTUnsealed, false)
+		unsealedPath, done, err = sb.sectors.AcquireSector(ctx, sector, stores.FTNone, stores.FTUnsealed, stores.PathStorage)
 		if err != nil {
 			return xerrors.Errorf("acquire unsealed sector path (allocate): %w", err)
 		}
@@ -240,7 +241,7 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s
 		return nil
 	}

-	srcPaths, srcDone, err := sb.sectors.AcquireSector(ctx, sector, stores.FTCache|stores.FTSealed, stores.FTNone, false)
+	srcPaths, srcDone, err := sb.sectors.AcquireSector(ctx, sector, stores.FTCache|stores.FTSealed, stores.FTNone, stores.PathStorage)
 	if err != nil {
 		return xerrors.Errorf("acquire sealed sector paths: %w", err)
 	}
@@ -253,7 +254,10 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s
 	defer sealed.Close()

 	var at, nextat abi.PaddedPieceSize
-	for {
+	first := true
+	for first || toUnseal.HasNext() {
+		first = false
+
 		piece, err := toUnseal.NextRun()
 		if err != nil {
 			return xerrors.Errorf("getting next range to unseal: %w", err)
@@ -357,10 +361,10 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s
 	return nil
 }

-func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error {
-	path, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTNone, false)
+func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) {
+	path, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTNone, stores.PathStorage)
 	if err != nil {
-		return xerrors.Errorf("acquire unsealed sector path: %w", err)
+		return false, xerrors.Errorf("acquire unsealed sector path: %w", err)
 	}
 	defer done()

@@ -368,34 +372,45 @@ func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector abi.Se

 	pf, err := openPartialFile(maxPieceSize, path.Unsealed)
 	if xerrors.Is(err, os.ErrNotExist) {
-		return xerrors.Errorf("opening partial file: %w", err)
+		return false, xerrors.Errorf("opening partial file: %w", err)
+	}
+
+	ok, err := pf.HasAllocated(offset, size)
+	if err != nil {
+		pf.Close()
+		return false, err
+	}
+
+	if !ok {
+		pf.Close()
+		return false, nil
 	}

 	f, err := pf.Reader(offset.Padded(), size.Padded())
 	if err != nil {
 		pf.Close()
-		return xerrors.Errorf("getting partial file reader: %w", err)
+		return false, xerrors.Errorf("getting partial file reader: %w", err)
 	}

 	upr, err := fr32.NewUnpadReader(f, size.Padded())
 	if err != nil {
-		return xerrors.Errorf("creating unpadded reader: %w", err)
+		return false, xerrors.Errorf("creating unpadded reader: %w", err)
 	}

 	if _, err := io.CopyN(writer, upr, int64(size)); err != nil {
 		pf.Close()
-		return xerrors.Errorf("reading unsealed file: %w", err)
+		return false, xerrors.Errorf("reading unsealed file: %w", err)
 	}

 	if err := pf.Close(); err != nil {
-		return xerrors.Errorf("closing partial file: %w", err)
+		return false, xerrors.Errorf("closing partial file: %w", err)
 	}

-	return nil
+	return false, nil
 }

 func (sb *Sealer) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) {
-	paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTSealed|stores.FTCache, true)
+	paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTSealed|stores.FTCache, stores.PathSealing)
 	if err != nil {
 		return nil, xerrors.Errorf("acquiring sector paths: %w", err)
 	}
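ReadPiece now reports a bool alongside the error; judging from the HasAllocated check above, it signals whether the requested range is actually present in the unsealed partial file. A hypothetical caller (only ReadPiece itself comes from this diff, the surrounding names are illustrative):

    have, err := sb.ReadPiece(ctx, out, sector, offset, size)
    if err != nil {
    	return xerrors.Errorf("reading piece: %w", err)
    }
    if !have {
    	// range not unsealed yet — run UnsealPiece for it and retry
    }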
@@ -452,7 +467,7 @@ func (sb *Sealer) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke
 }

 func (sb *Sealer) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.PreCommit1Out) (storage.SectorCids, error) {
-	paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTSealed|stores.FTCache, 0, true)
+	paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTSealed|stores.FTCache, 0, stores.PathSealing)
 	if err != nil {
 		return storage.SectorCids{}, xerrors.Errorf("acquiring sector paths: %w", err)
 	}
@@ -470,7 +485,7 @@ func (sb *Sealer) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase
 }

 func (sb *Sealer) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) {
-	paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTSealed|stores.FTCache, 0, true)
+	paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTSealed|stores.FTCache, 0, stores.PathSealing)
 	if err != nil {
 		return nil, xerrors.Errorf("acquire sector paths: %w", err)
 	}
@@ -502,10 +517,62 @@ func (sb *Sealer) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Ou

 func (sb *Sealer) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error {
 	if len(keepUnsealed) > 0 {
-		return xerrors.Errorf("keepUnsealed unsupported") // TODO: impl for fastretrieval copies
+		maxPieceSize := abi.PaddedPieceSize(sb.ssize)
+
+		sr := pieceRun(0, maxPieceSize)
+
+		for _, s := range keepUnsealed {
+			si := &rlepluslazy.RunSliceIterator{}
+			if s.Offset != 0 {
+				si.Runs = append(si.Runs, rlepluslazy.Run{Val: false, Len: uint64(s.Offset)})
+			}
+			si.Runs = append(si.Runs, rlepluslazy.Run{Val: true, Len: uint64(s.Size)})
+
+			var err error
+			sr, err = rlepluslazy.Subtract(sr, si)
+			if err != nil {
+				return err
+			}
+		}
+
+		paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, 0, stores.PathStorage)
+		if err != nil {
+			return xerrors.Errorf("acquiring sector cache path: %w", err)
+		}
+		defer done()
+
+		pf, err := openPartialFile(maxPieceSize, paths.Unsealed)
+		if xerrors.Is(err, os.ErrNotExist) {
+			return xerrors.Errorf("opening partial file: %w", err)
+		}
+
+		var at uint64
+		for sr.HasNext() {
+			r, err := sr.NextRun()
+			if err != nil {
+				_ = pf.Close()
+				return err
+			}
+
+			offset := at
+			at += r.Len
+			if !r.Val {
+				continue
+			}
+
+			err = pf.Free(storiface.PaddedByteIndex(abi.UnpaddedPieceSize(offset).Padded()), abi.UnpaddedPieceSize(r.Len).Padded())
+			if err != nil {
+				_ = pf.Close()
+				return xerrors.Errorf("free partial file range: %w", err)
+			}
+		}
+
+		if err := pf.Close(); err != nil {
+			return err
+		}
 	}

-	paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTCache, 0, false)
+	paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTCache, 0, stores.PathStorage)
 	if err != nil {
 		return xerrors.Errorf("acquiring sector cache path: %w", err)
 	}
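The run lengths FinalizeSector walks over are converted with Padded() right before calling Free, i.e. the offsets appear to be tracked in unpadded units. The conversion is the fixed 127:128 fr32 ratio; a purely illustrative check:

    fmt.Println(abi.UnpaddedPieceSize(1016).Padded())  // 1024
    fmt.Println(abi.PaddedPieceSize(2048).Unpadded())  // 2032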
@@ -542,33 +609,65 @@ func GeneratePieceCIDFromFile(proofType abi.RegisteredSealProof, piece io.Reader
 	return pieceCID, werr()
 }

-func GenerateUnsealedCID(proofType abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) {
+func GetRequiredPadding(oldLength abi.PaddedPieceSize, newPieceLength abi.PaddedPieceSize) ([]abi.PaddedPieceSize, abi.PaddedPieceSize) {

+	padPieces := make([]abi.PaddedPieceSize, 0)
+
+	toFill := uint64(-oldLength % newPieceLength)
+
+	n := bits.OnesCount64(toFill)
 	var sum abi.PaddedPieceSize
-	for _, p := range pieces {
-		sum += p.Size
+	for i := 0; i < n; i++ {
+		next := bits.TrailingZeros64(toFill)
+		psize := uint64(1) << uint(next)
+		toFill ^= psize
+
+		padded := abi.PaddedPieceSize(psize)
+		padPieces = append(padPieces, padded)
+		sum += padded
 	}

+	return padPieces, sum
+}
+
+func GenerateUnsealedCID(proofType abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) {
 	ssize, err := proofType.SectorSize()
 	if err != nil {
 		return cid.Undef, err
 	}

-	{
-		// pad remaining space with 0 CommPs
-		toFill := uint64(abi.PaddedPieceSize(ssize) - sum)
-		n := bits.OnesCount64(toFill)
-		for i := 0; i < n; i++ {
-			next := bits.TrailingZeros64(toFill)
-			psize := uint64(1) << uint(next)
-			toFill ^= psize
+	pssize := abi.PaddedPieceSize(ssize)
+	allPieces := make([]abi.PieceInfo, 0, len(pieces))
+	if len(pieces) == 0 {
+		allPieces = append(allPieces, abi.PieceInfo{
+			Size:     pssize,
+			PieceCID: zerocomm.ZeroPieceCommitment(pssize.Unpadded()),
+		})
+	} else {
+		var sum abi.PaddedPieceSize

-			unpadded := abi.PaddedPieceSize(psize).Unpadded()
-			pieces = append(pieces, abi.PieceInfo{
-				Size:     unpadded.Padded(),
-				PieceCID: zerocomm.ZeroPieceCommitment(unpadded),
-			})
+		padTo := func(pads []abi.PaddedPieceSize) {
+			for _, p := range pads {
+				allPieces = append(allPieces, abi.PieceInfo{
+					Size:     p,
+					PieceCID: zerocomm.ZeroPieceCommitment(p.Unpadded()),
+				})
+
+				sum += p
+			}
 		}
+
+		for _, p := range pieces {
+			ps, _ := GetRequiredPadding(sum, p.Size)
+			padTo(ps)
+
+			allPieces = append(allPieces, p)
+			sum += p.Size
+		}
+
+		ps, _ := GetRequiredPadding(sum, pssize)
+		padTo(ps)
 	}

-	return ffi.GenerateUnsealedCID(proofType, pieces)
+	return ffi.GenerateUnsealedCID(proofType, allPieces)
 }
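GetRequiredPadding leans on unsigned wrap-around: -oldLength % newPieceLength is the gap up to the next multiple of newPieceLength, and each set bit of that gap becomes one power-of-two zero piece. A worked example under those semantics (illustrative values only):

    // 256 bytes already written, a 512-byte piece comes next:
    // toFill = (-256) mod 512 = 256, which has one set bit,
    // so a single 256-byte zero piece is returned.
    pads, sum := GetRequiredPadding(abi.PaddedPieceSize(256), abi.PaddedPieceSize(512))
    fmt.Println(pads, sum) // [256] 256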
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"context"
 	"fmt"
+	"github.com/ipfs/go-cid"
 	"io"
 	"io/ioutil"
 	"math/rand"
@@ -110,7 +111,7 @@ func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si abi.Sec
 	defer done()

 	var b bytes.Buffer
-	err := sb.ReadPiece(context.TODO(), &b, si, 0, 1016)
+	_, err := sb.ReadPiece(context.TODO(), &b, si, 0, 1016)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -120,7 +121,7 @@ func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si abi.Sec
 		t.Fatal("read wrong bytes")
 	}

-	p, sd, err := sp.AcquireSector(context.TODO(), si, stores.FTUnsealed, stores.FTNone, false)
+	p, sd, err := sp.AcquireSector(context.TODO(), si, stores.FTUnsealed, stores.FTNone, stores.PathStorage)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -129,7 +130,7 @@ func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si abi.Sec
 	}
 	sd()

-	err = sb.ReadPiece(context.TODO(), &b, si, 0, 1016)
+	_, err = sb.ReadPiece(context.TODO(), &b, si, 0, 1016)
 	if err == nil {
 		t.Fatal("HOW?!")
 	}
@@ -140,7 +141,7 @@ func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si abi.Sec
 	}

 	b.Reset()
-	err = sb.ReadPiece(context.TODO(), &b, si, 0, 1016)
+	_, err = sb.ReadPiece(context.TODO(), &b, si, 0, 1016)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -149,14 +150,17 @@ func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si abi.Sec
 	require.Equal(t, expect, b.Bytes())

 	b.Reset()
-	err = sb.ReadPiece(context.TODO(), &b, si, 0, 2032)
+	have, err := sb.ReadPiece(context.TODO(), &b, si, 0, 2032)
 	if err != nil {
 		t.Fatal(err)
 	}

-	expect = append(expect, bytes.Repeat([]byte{0}, 1016)...)
-	if !bytes.Equal(b.Bytes(), expect) {
-		t.Fatal("read wrong bytes")
+	if have {
+		t.Errorf("didn't expect to read things")
+	}
+
+	if b.Len() != 0 {
+		t.Fatal("read bytes")
 	}
 }

@@ -488,3 +492,105 @@ func requireFDsClosed(t *testing.T, start int) {
 	log.Infow("open FDs", "start", start, "now", openNow)
 	require.Equal(t, start, openNow, "FDs shouldn't leak")
 }
+
+func TestGenerateUnsealedCID(t *testing.T) {
+	pt := abi.RegisteredSealProof_StackedDrg2KiBV1
+	ups := int(abi.PaddedPieceSize(2048).Unpadded())
+
+	commP := func(b []byte) cid.Cid {
+		pf, werr, err := ToReadableFile(bytes.NewReader(b), int64(len(b)))
+		require.NoError(t, err)
+
+		c, err := ffi.GeneratePieceCIDFromFile(pt, pf, abi.UnpaddedPieceSize(len(b)))
+		require.NoError(t, err)
+
+		require.NoError(t, werr())
+
+		return c
+	}
+
+	testCommEq := func(name string, in [][]byte, expect [][]byte) {
+		t.Run(name, func(t *testing.T) {
+			upi := make([]abi.PieceInfo, len(in))
+			for i, b := range in {
+				upi[i] = abi.PieceInfo{
+					Size:     abi.UnpaddedPieceSize(len(b)).Padded(),
+					PieceCID: commP(b),
+				}
+			}
+
+			sectorPi := []abi.PieceInfo{
+				{
+					Size:     2048,
+					PieceCID: commP(bytes.Join(expect, nil)),
+				},
+			}
+
+			expectCid, err := GenerateUnsealedCID(pt, sectorPi)
+			require.NoError(t, err)

+			actualCid, err := GenerateUnsealedCID(pt, upi)
+			require.NoError(t, err)
+
+			require.Equal(t, expectCid, actualCid)
+		})
+	}
+
+	barr := func(b byte, den int) []byte {
+		return bytes.Repeat([]byte{b}, ups/den)
+	}
+
+	// 0000
+	testCommEq("zero",
+		nil,
+		[][]byte{barr(0, 1)},
+	)
+
+	// 1111
+	testCommEq("one",
+		[][]byte{barr(1, 1)},
+		[][]byte{barr(1, 1)},
+	)
+
+	// 11 00
+	testCommEq("one|2",
+		[][]byte{barr(1, 2)},
+		[][]byte{barr(1, 2), barr(0, 2)},
+	)
+
+	// 1 0 00
+	testCommEq("one|4",
+		[][]byte{barr(1, 4)},
+		[][]byte{barr(1, 4), barr(0, 4), barr(0, 2)},
+	)
+
+	// 11 2 0
+	testCommEq("one|2-two|4",
+		[][]byte{barr(1, 2), barr(2, 4)},
+		[][]byte{barr(1, 2), barr(2, 4), barr(0, 4)},
+	)
+
+	// 1 0 22
+	testCommEq("one|4-two|2",
+		[][]byte{barr(1, 4), barr(2, 2)},
+		[][]byte{barr(1, 4), barr(0, 4), barr(2, 2)},
+	)
+
+	// 1 0 22 0000
+	testCommEq("one|8-two|4",
+		[][]byte{barr(1, 8), barr(2, 4)},
+		[][]byte{barr(1, 8), barr(0, 8), barr(2, 4), barr(0, 2)},
+	)
+
+	// 11 2 0 0000
+	testCommEq("one|4-two|8",
+		[][]byte{barr(1, 4), barr(2, 8)},
+		[][]byte{barr(1, 4), barr(2, 8), barr(0, 8), barr(0, 2)},
+	)
+
+	// 1 0 22 3 0 00 4444 5 0 00
+	testCommEq("one|16-two|8-three|16-four|4-five|16",
+		[][]byte{barr(1, 16), barr(2, 8), barr(3, 16), barr(4, 4), barr(5, 16)},
+		[][]byte{barr(1, 16), barr(0, 16), barr(2, 8), barr(3, 16), barr(0, 16), barr(0, 8), barr(4, 4), barr(5, 16), barr(0, 16), barr(0, 8)},
+	)
+}
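Reading one of the cases above: in "one|4-two|2" the first piece fills a quarter of the 2 KiB sector, so GenerateUnsealedCID has to insert a quarter-sector zero piece before the half-sector piece can start at an aligned offset (padded sizes):

    // expected unsealed layout for "one|4-two|2"
    //   [   0,  512)  piece 1 (0x01 bytes)
    //   [ 512, 1024)  zero piece inserted as padding
    //   [1024, 2048)  piece 2 (0x02 bytes)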
@@ -29,7 +29,7 @@ type Storage interface {
 	StorageSealer

 	UnsealPiece(ctx context.Context, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error
-	ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error
+	ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error)
 }

 type Verifier interface {
@@ -15,7 +15,7 @@ import (
 )

 func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, error) {
-	randomness[31] = 0 // TODO: Not correct, fixme
+	randomness[31] &= 0x3f
 	privsectors, skipped, done, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWinningPoStProof) // TODO: FAULTS?
 	if err != nil {
 		return nil, err
@@ -29,7 +29,7 @@ func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID,
 }

 func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, []abi.SectorID, error) {
-	randomness[31] = 0 // TODO: Not correct, fixme
+	randomness[31] &= 0x3f
 	privsectors, skipped, done, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWindowPoStProof)
 	if err != nil {
 		return nil, nil, xerrors.Errorf("gathering sector info: %w", err)
@@ -62,7 +62,7 @@ func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorIn

 		sid := abi.SectorID{Miner: mid, Number: s.SectorNumber}

-		paths, d, err := sb.sectors.AcquireSector(ctx, sid, stores.FTCache|stores.FTSealed, 0, false)
+		paths, d, err := sb.sectors.AcquireSector(ctx, sid, stores.FTCache|stores.FTSealed, 0, stores.PathStorage)
 		if err != nil {
 			log.Warnw("failed to acquire sector, skipping", "sector", sid, "error", err)
 			skipped = append(skipped, sid)
@@ -98,7 +98,7 @@ func (proofVerifier) VerifySeal(info abi.SealVerifyInfo) (bool, error) {
 }

 func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info abi.WinningPoStVerifyInfo) (bool, error) {
-	info.Randomness[31] = 0 // TODO: Not correct, fixme
+	info.Randomness[31] &= 0x3f
 	_, span := trace.StartSpan(ctx, "VerifyWinningPoSt")
 	defer span.End()

@@ -106,7 +106,7 @@ func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info abi.WinningPoSt
 }

 func (proofVerifier) VerifyWindowPoSt(ctx context.Context, info abi.WindowPoStVerifyInfo) (bool, error) {
-	info.Randomness[31] = 0 // TODO: Not correct, fixme
+	info.Randomness[31] &= 0x3f
 	_, span := trace.StartSpan(ctx, "VerifyWindowPoSt")
 	defer span.End()

@@ -114,6 +114,6 @@ func (proofVerifier) VerifyWindowPoSt(ctx context.Context, info abi.WindowPoStVe
 }

 func (proofVerifier) GenerateWinningPoStSectorChallenge(ctx context.Context, proofType abi.RegisteredPoStProof, minerID abi.ActorID, randomness abi.PoStRandomness, eligibleSectorCount uint64) ([]uint64, error) {
-	randomness[31] = 0 // TODO: Not correct, fixme
+	randomness[31] &= 0x3f
 	return ffi.GenerateWinningPoStSectorChallenge(proofType, minerID, randomness, eligibleSectorCount)
 }
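The PoSt call sites stop zeroing the whole last randomness byte and instead clear only its two high bits, which keeps the 32-byte value inside the scalar field the proof system expects while preserving the rest of the entropy. Illustration of the mask:

    r := make([]byte, 32)
    r[31] = 0xff
    r[31] &= 0x3f              // 0b0011_1111
    fmt.Printf("%#x\n", r[31]) // 0x3f — top two bits cleared, low six kept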
fsutil/dealloc_linux.go (new file)
@@ -0,0 +1,28 @@
+package fsutil
+
+import (
+	"os"
+	"syscall"
+
+	logging "github.com/ipfs/go-log/v2"
+)
+
+var log = logging.Logger("fsutil")
+
+const FallocFlPunchHole = 0x02 // linux/falloc.h
+
+func Deallocate(file *os.File, offset int64, length int64) error {
+	if length == 0 {
+		return nil
+	}
+
+	err := syscall.Fallocate(int(file.Fd()), FallocFlPunchHole, offset, length)
+	if errno, ok := err.(syscall.Errno); ok {
+		if errno == syscall.EOPNOTSUPP || errno == syscall.ENOSYS {
+			log.Warnf("could not deallocate space, ignoring: %v", errno)
+			err = nil // log and ignore
+		}
+	}
+
+	return err
+}

fsutil/dealloc_other.go (new file)
@@ -0,0 +1,17 @@
+// +build !linux
+
+package fsutil
+
+import (
+	"os"
+
+	logging "github.com/ipfs/go-log/v2"
+)
+
+var log = logging.Logger("fsutil")
+
+func Deallocate(file *os.File, offset int64, length int64) error {
+	log.Warnf("deallocating space not supported")
+
+	return nil
+}

fsutil/filesize_unix.go (new file)
@@ -0,0 +1,29 @@
+package fsutil
+
+import (
+	"os"
+	"syscall"
+
+	"golang.org/x/xerrors"
+)
+
+type SizeInfo struct {
+	OnDisk int64
+}
+
+// FileSize returns bytes used by a file on disk
+func FileSize(path string) (SizeInfo, error) {
+	var stat syscall.Stat_t
+	if err := syscall.Stat(path, &stat); err != nil {
+		if err == syscall.ENOENT {
+			return SizeInfo{}, os.ErrNotExist
+		}
+		return SizeInfo{}, xerrors.Errorf("stat: %w", err)
+	}
+
+	// NOTE: stat.Blocks is in 512B blocks, NOT in stat.Blksize
+	// See https://www.gnu.org/software/libc/manual/html_node/Attribute-Meanings.html
+	return SizeInfo{
+		int64(stat.Blocks) * 512, // NOTE: int64 cast is needed on osx
+	}, nil
+}

fsutil/statfs.go (new file)
@@ -0,0 +1,7 @@
+package fsutil
+
+type FsStat struct {
+	Capacity  int64
+	Available int64 // Available to use for sector storage
+	Reserved  int64
+}

fsutil/statfs_unix.go (new file)
@@ -0,0 +1,19 @@
+package fsutil
+
+import (
+	"syscall"
+
+	"golang.org/x/xerrors"
+)
+
+func Statfs(path string) (FsStat, error) {
+	var stat syscall.Statfs_t
+	if err := syscall.Statfs(path, &stat); err != nil {
+		return FsStat{}, xerrors.Errorf("statfs: %w", err)
+	}
+
+	return FsStat{
+		Capacity:  int64(stat.Blocks) * int64(stat.Bsize),
+		Available: int64(stat.Bavail) * int64(stat.Bsize),
+	}, nil
+}

fsutil/statfs_windows.go (new file)
@@ -0,0 +1,28 @@
+package fsutil
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+func Statfs(volumePath string) (FsStat, error) {
+	// From https://github.com/ricochet2200/go-disk-usage/blob/master/du/diskusage_windows.go
+
+	h := syscall.MustLoadDLL("kernel32.dll")
+	c := h.MustFindProc("GetDiskFreeSpaceExW")
+
+	var freeBytes int64
+	var totalBytes int64
+	var availBytes int64
+
+	c.Call(
+		uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(volumePath))),
+		uintptr(unsafe.Pointer(&freeBytes)),
+		uintptr(unsafe.Pointer(&totalBytes)),
+		uintptr(unsafe.Pointer(&availBytes)))
+
+	return FsStat{
+		Capacity:  totalBytes,
+		Available: availBytes,
+	}, nil
+}
go.mod
@@ -5,30 +5,30 @@ go 1.13
 require (
 	github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e
 	github.com/elastic/go-sysinfo v1.3.0
-	github.com/filecoin-project/filecoin-ffi v0.0.0-20200326153646-e899cc1dd072
-	github.com/filecoin-project/go-bitfield v0.0.2-0.20200518150651-562fdb554b6e
-	github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5
-	github.com/filecoin-project/go-paramfetch v0.0.1
-	github.com/filecoin-project/specs-actors v0.6.1
+	github.com/filecoin-project/filecoin-ffi v0.30.4-0.20200716204036-cddc56607e1d
+	github.com/filecoin-project/go-bitfield v0.1.2
+	github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f
+	github.com/filecoin-project/go-paramfetch v0.0.2-0.20200218225740-47c639bab663
+	github.com/filecoin-project/specs-actors v0.8.2
 	github.com/filecoin-project/specs-storage v0.1.1-0.20200622113353-88a9704877ea
 	github.com/google/uuid v1.1.1
 	github.com/gorilla/mux v1.7.4
 	github.com/hashicorp/go-multierror v1.0.0
-	github.com/ipfs/go-cid v0.0.5
+	github.com/ipfs/go-cid v0.0.6
 	github.com/ipfs/go-ipfs-files v0.0.7
-	github.com/ipfs/go-log v1.0.3
-	github.com/ipfs/go-log/v2 v2.0.3
+	github.com/ipfs/go-ipld-cbor v0.0.5-0.20200204214505-252690b78669 // indirect
+	github.com/ipfs/go-log v1.0.4
+	github.com/ipfs/go-log/v2 v2.0.5
 	github.com/mattn/go-isatty v0.0.9 // indirect
 	github.com/mitchellh/go-homedir v1.1.0
-	github.com/stretchr/testify v1.4.0
+	github.com/stretchr/testify v1.6.1
 	go.opencensus.io v0.22.3
-	go.uber.org/atomic v1.5.1 // indirect
-	go.uber.org/zap v1.13.0 // indirect
-	golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 // indirect
-	golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f // indirect
-	golang.org/x/sys v0.0.0-20200107162124-548cf772de50 // indirect
-	golang.org/x/tools v0.0.0-20200108195415-316d2f248479 // indirect
+	golang.org/x/crypto v0.0.0-20200317142112-1b76d66859c6 // indirect
+	golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect
+	golang.org/x/sys v0.0.0-20200317113312-5766fd39f98d // indirect
+	golang.org/x/tools v0.0.0-20200318150045-ba25ddc85566 // indirect
 	golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543
+	honnef.co/go/tools v0.0.1-2020.1.3 // indirect
 )

 replace github.com/filecoin-project/filecoin-ffi => ./extern/filecoin-ffi
go.sum
@@ -2,7 +2,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
 github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
-github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
 github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
 github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
 github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
@@ -31,20 +30,21 @@ github.com/filecoin-project/go-address v0.0.2-0.20200218010043-eb9bb40ed5be h1:T
 github.com/filecoin-project/go-address v0.0.2-0.20200218010043-eb9bb40ed5be/go.mod h1:SAOwJoakQ8EPjwNIsiakIQKsoKdkcbx8U3IapgCg9R0=
 github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200131012142-05d80eeccc5e/go.mod h1:boRtQhzmxNocrMxOXo1NYn4oUc1NGvR8tEa79wApNXg=
 github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200424220931-6263827e49f2/go.mod h1:boRtQhzmxNocrMxOXo1NYn4oUc1NGvR8tEa79wApNXg=
+github.com/filecoin-project/go-amt-ipld/v2 v2.1.0/go.mod h1:nfFPoGyX0CU9SkXX8EoCcSuHN1XcbN0c6KBh7yvP5fs=
 github.com/filecoin-project/go-bitfield v0.0.0-20200416002808-b3ee67ec9060/go.mod h1:iodsLxOFZnqKtjj2zkgqzoGNrv6vUqj69AT/J8DKXEw=
-github.com/filecoin-project/go-bitfield v0.0.1 h1:Xg/JnrqqE77aJVKdbEyR04n9FZQWhwrN+buDgQCVpZU=
 github.com/filecoin-project/go-bitfield v0.0.1/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY=
-github.com/filecoin-project/go-bitfield v0.0.2-0.20200518150651-562fdb554b6e h1:gkG/7G+iKy4He+IiQNeQn+nndFznb/vCoOR8iRQsm60=
-github.com/filecoin-project/go-bitfield v0.0.2-0.20200518150651-562fdb554b6e/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY=
+github.com/filecoin-project/go-bitfield v0.1.2 h1:TjLregCoyP1/5lm7WCM0axyV1myIHwbjGa21skuu5tk=
+github.com/filecoin-project/go-bitfield v0.1.2/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM=
 github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus=
 github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ=
-github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5 h1:yvQJCW9mmi9zy+51xA01Ea2X7/dL7r8eKDPuGUjRmbo=
-github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5/go.mod h1:JbkIgFF/Z9BDlvrJO1FuKkaWsH673/UdFaiVS6uIHlA=
+github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f h1:GxJzR3oRIMTPtpZ0b7QF8FKPK6/iPAc7trhlL5k/g+s=
+github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ=
-github.com/filecoin-project/go-paramfetch v0.0.1 h1:gV7bs5YaqlgpGFMiLxInGK2L1FyCXUE0rimz4L7ghoE=
-github.com/filecoin-project/go-paramfetch v0.0.1/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc=
+github.com/filecoin-project/go-paramfetch v0.0.2-0.20200218225740-47c639bab663 h1:eYxi6vI5CyeXD15X1bB3bledDXbqKxqf0wQzTLgwYwA=
+github.com/filecoin-project/go-paramfetch v0.0.2-0.20200218225740-47c639bab663/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc=
 github.com/filecoin-project/specs-actors v0.3.0/go.mod h1:nQYnFbQ7Y0bHZyq6HDEuVlCPR+U3z5Q3wMOQ+2aiV+Y=
-github.com/filecoin-project/specs-actors v0.6.1 h1:rhHlEzqcuuQU6oKc4csuq+/kQBDZ4EXtSomoN2XApCA=
 github.com/filecoin-project/specs-actors v0.6.1/go.mod h1:dRdy3cURykh2R8O/DKqy8olScl70rmIS7GrB4hB1IDY=
+github.com/filecoin-project/specs-actors v0.8.2 h1:fpAPOPqWqmzJCWHpm6P1XDRSpQrxyY5Pzh5H3doYs7Q=
+github.com/filecoin-project/specs-actors v0.8.2/go.mod h1:Q3ACV5kBLvqPaYbthc/J1lGMJ5OwogmD9pzdtPRMdCw=
 github.com/filecoin-project/specs-storage v0.1.1-0.20200622113353-88a9704877ea h1:iixjULRQFPn7Q9KlIqfwLJnlAXO10bbkI+xy5GKGdLY=
 github.com/filecoin-project/specs-storage v0.1.1-0.20200622113353-88a9704877ea/go.mod h1:Pr5ntAaxsh+sLG/LYiL4tKzvA83Vk5vLODYhfNwOg7k=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@@ -58,6 +58,7 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
@@ -82,7 +83,11 @@ github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUP
 github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
 github.com/ipfs/go-cid v0.0.5 h1:o0Ix8e/ql7Zb5UVUJEUfjsWCIY8t48++9lR8qi6oiJU=
 github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog=
+github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog=
+github.com/ipfs/go-cid v0.0.6 h1:go0y+GcDOGeJIV01FeBsta4FHngoA4Wz7KMeLkXAhMs=
+github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I=
 github.com/ipfs/go-hamt-ipld v0.0.15-0.20200131012125-dd88a59d3f2e/go.mod h1:9aQJu/i/TaRDW6jqB5U217dLIDopn50wxLdHXM2CTfE=
+github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk=
 github.com/ipfs/go-ipfs-files v0.0.7 h1:s5BRD12ndahqYifeH1S8Z73zqZhR+3IdKYAG9PiETs0=
 github.com/ipfs/go-ipfs-files v0.0.7/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs=
 github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50=
@@ -90,16 +95,18 @@ github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyB
 github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc=
 github.com/ipfs/go-ipld-cbor v0.0.4 h1:Aw3KPOKXjvrm6VjwJvFf1F1ekR/BH3jdof3Bk7OTiSA=
 github.com/ipfs/go-ipld-cbor v0.0.4/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4=
+github.com/ipfs/go-ipld-cbor v0.0.5-0.20200204214505-252690b78669 h1:jIVle1vGSzxyUhseYNEqd7qcDVRrIbJ7UxGwao70cF0=
+github.com/ipfs/go-ipld-cbor v0.0.5-0.20200204214505-252690b78669/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4=
 github.com/ipfs/go-ipld-format v0.0.1 h1:HCu4eB/Gh+KD/Q0M8u888RFkorTWNIL3da4oc5dwc80=
 github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms=
 github.com/ipfs/go-ipld-format v0.0.2 h1:OVAGlyYT6JPZ0pEfGntFPS40lfrDmaDbQwNHEY2G9Zs=
 github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k=
 github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM=
 github.com/ipfs/go-log v1.0.0/go.mod h1:JO7RzlMK6rA+CIxFMLOuB6Wf5b81GDiKElL7UPSIKjA=
-github.com/ipfs/go-log v1.0.3 h1:Gg7SUYSZ7BrqaKMwM+hRgcAkKv4QLfzP4XPQt5Sx/OI=
-github.com/ipfs/go-log v1.0.3/go.mod h1:OsLySYkwIbiSUR/yBTdv1qPtcE4FW3WPWk/ewz9Ru+A=
-github.com/ipfs/go-log/v2 v2.0.3 h1:Q2gXcBoCALyLN/pUQlz1qgu0x3uFV6FzP9oXhpfyJpc=
-github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0=
+github.com/ipfs/go-log v1.0.4 h1:6nLQdX4W8P9yZZFH7mO+X/PzjN8Laozm/lMJ6esdgzY=
+github.com/ipfs/go-log v1.0.4/go.mod h1:oDCg2FkjogeFOhqqb+N39l2RpTNPL6F/StPkB3kPgcs=
+github.com/ipfs/go-log/v2 v2.0.5 h1:fL4YI+1g5V/b1Yxr1qAiXTMg1H8z9vx/VmJxBuQMHvU=
+github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw=
 github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 h1:QG4CGBqCeuBo6aZlGAamSkxWdgWfZGeE49eUOWJPA4c=
 github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52/go.mod h1:fdg+/X9Gg4AsAIzWpEHwnqd+QY3b7lajxyjE1m4hkq4=
 github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA=
@@ -125,9 +132,7 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs=
-github.com/libp2p/go-libp2p-core v0.3.0 h1:F7PqduvrztDtFsAa/bcheQ3azmNo+Nq7m8hQY5GiUW8=
 github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw=
-github.com/libp2p/go-openssl v0.0.4 h1:d27YZvLoTyMhIN4njrkr8zMDOM4lfpHIp6A+TK9fovg=
 github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc=
 github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
@@ -157,15 +162,20 @@ github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc=
 github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
 github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI=
 github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA=
-github.com/multiformats/go-multiaddr v0.2.0 h1:lR52sFwcTCuQb6bTfnXF6zA2XfyYvyd+5a9qECv/J90=
+github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4=
+github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM=
 github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4=
 github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA=
 github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs=
+github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk=
+github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc=
 github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U=
 github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
 github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
 github.com/multiformats/go-multihash v0.0.13 h1:06x+mk/zj1FoMsgNejLpy6QTvJqlSt/BhLEy87zidlc=
 github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc=
+github.com/multiformats/go-multihash v0.0.14 h1:QoBceQYQQtNUuf6s7wHxnE2c8bhbMqhfGzNI032se/I=
+github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc=
 github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
 github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
 github.com/multiformats/go-varint v0.0.5 h1:XVZwSo04Cs3j/jS0uAEPpT3JY6DzMcVLLoWOSnCxOjg=
@@ -199,7 +209,6 @@ github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2
 github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 h1:WN9BUFbdyOsSH/XohnWpXOlq9NBD5sGAB2FciQMUEe8=
 github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||||
github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY=
|
github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY=
|
||||||
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU=
|
|
||||||
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc=
|
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc=
|
||||||
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
|
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
|
||||||
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||||
@ -210,6 +219,8 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf
|
|||||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||||
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
|
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
|
||||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||||
|
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
|
||||||
|
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436 h1:qOpVTI+BrstcjTZLm2Yz/3sOnqkzj3FQoh0g+E5s3Gc=
|
github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436 h1:qOpVTI+BrstcjTZLm2Yz/3sOnqkzj3FQoh0g+E5s3Gc=
|
||||||
github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw=
|
github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw=
|
||||||
github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830 h1:8kxMKmKzXXL4Ru1nyhvdms/JjWt+3YLpvRb/bAjO/y0=
|
github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830 h1:8kxMKmKzXXL4Ru1nyhvdms/JjWt+3YLpvRb/bAjO/y0=
|
||||||
@ -218,27 +229,33 @@ github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0/go.mod h1:x
|
|||||||
github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI=
|
github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI=
|
||||||
github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e h1:JY8o/ebUUrCYetWmjRCNghxC59cOEaili83rxPRQCLw=
|
github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e h1:JY8o/ebUUrCYetWmjRCNghxC59cOEaili83rxPRQCLw=
|
||||||
github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI=
|
github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI=
|
||||||
|
github.com/whyrusleeping/cbor-gen v0.0.0-20200504204219-64967432584d/go.mod h1:W5MvapuoHRP8rz4vxjwCK1pDqF1aQcWsV5PZ+AHbqdg=
|
||||||
|
github.com/whyrusleeping/cbor-gen v0.0.0-20200715143311-227fab5a2377 h1:LHFlP/ktDvOnCap7PsT87cs7Gwd0p+qv6Qm5g2ZPR+I=
|
||||||
|
github.com/whyrusleeping/cbor-gen v0.0.0-20200715143311-227fab5a2377/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ=
|
||||||
github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM=
|
github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM=
|
||||||
github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE=
|
github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE=
|
||||||
|
github.com/xorcare/golden v0.6.0/go.mod h1:7T39/ZMvaSEZlBPoYfVFmsBLmUl3uz9IuzWj/U6FtvQ=
|
||||||
|
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||||
go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=
|
go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=
|
||||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||||
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
|
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
|
||||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||||
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||||
go.uber.org/atomic v1.5.1 h1:rsqfU5vBkVknbhUGbAUwQKR2H4ItV8tjJ+6kJX4cxHM=
|
go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
|
||||||
go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||||
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
|
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
|
||||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||||
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
|
|
||||||
go.uber.org/multierr v1.4.0 h1:f3WCSC2KzAcBXGATIxAB1E2XuCpNU255wNKZ505qi3E=
|
go.uber.org/multierr v1.4.0 h1:f3WCSC2KzAcBXGATIxAB1E2XuCpNU255wNKZ505qi3E=
|
||||||
go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
|
go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
|
||||||
|
go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
|
||||||
|
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
|
||||||
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
|
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
|
||||||
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
|
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
|
||||||
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
|
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
|
||||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||||
go.uber.org/zap v1.13.0 h1:nR6NoDBgAf67s68NhaXbsojM+2gxp3S1hWkHDl27pVU=
|
go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo=
|
||||||
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
|
go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
|
||||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||||
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||||
@ -247,17 +264,19 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U
|
|||||||
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
|
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
|
||||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g=
|
golang.org/x/crypto v0.0.0-20200317142112-1b76d66859c6 h1:TjszyFsQsyZNHwdVdZ5m7bjmreu0znc2kRYsEml9/Ww=
|
||||||
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20200317142112-1b76d66859c6/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE=
|
golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
|
||||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
|
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||||
|
golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
|
||||||
|
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
@ -266,12 +285,14 @@ golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73r
|
|||||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sys v0.0.0-20180202135801-37707fdb30a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180202135801-37707fdb30a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
@ -287,8 +308,8 @@ golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||||||
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae h1:QoJmnb9uyPCrH8GIg9uRLn4Ta45yhcQtpymCd0AavO8=
|
golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae h1:QoJmnb9uyPCrH8GIg9uRLn4Ta45yhcQtpymCd0AavO8=
|
||||||
golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200107162124-548cf772de50 h1:YvQ10rzcqWXLlJZ3XCUoO25savxmscf4+SC+ZqiCHhA=
|
golang.org/x/sys v0.0.0-20200317113312-5766fd39f98d h1:62ap6LNOjDU6uGmKXHJbSfciMoV+FeI1sRXx/pLDL44=
|
||||||
golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200317113312-5766fd39f98d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
@ -302,9 +323,11 @@ golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3
|
|||||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||||
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
golang.org/x/tools v0.0.0-20200108195415-316d2f248479 h1:csuS+MHeEA2eWhyjQCMaPMq4z1+/PohkBSjJZHSIbOE=
|
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
golang.org/x/tools v0.0.0-20200108195415-316d2f248479/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200318150045-ba25ddc85566 h1:OXjomkWHhzUx4+HldlJ2TsMxJdWgEo5CTtspD1wdhdk=
|
||||||
|
golang.org/x/tools v0.0.0-20200318150045-ba25ddc85566/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||||
@ -330,8 +353,12 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|||||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
|
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
|
||||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||||
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
|
honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
|
||||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||||
|
honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U=
|
||||||
|
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||||
howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M=
|
howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M=
|
||||||
howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0=
|
howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0=
|
||||||
|
@ -61,14 +61,22 @@ type localWorkerPathProvider struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, sealing stores.PathType) (stores.SectorPaths, func(), error) {
|
func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, sealing stores.PathType) (stores.SectorPaths, func(), error) {
|
||||||
|
|
||||||
paths, storageIDs, err := l.w.storage.AcquireSector(ctx, sector, l.w.scfg.SealProofType, existing, allocate, sealing, l.op)
|
paths, storageIDs, err := l.w.storage.AcquireSector(ctx, sector, l.w.scfg.SealProofType, existing, allocate, sealing, l.op)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return stores.SectorPaths{}, nil, err
|
return stores.SectorPaths{}, nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
releaseStorage, err := l.w.localStore.Reserve(ctx, sector, l.w.scfg.SealProofType, allocate, storageIDs, stores.FSOverheadSeal)
|
||||||
|
if err != nil {
|
||||||
|
return stores.SectorPaths{}, nil, xerrors.Errorf("reserving storage space: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
log.Debugf("acquired sector %d (e:%d; a:%d): %v", sector, existing, allocate, paths)
|
log.Debugf("acquired sector %d (e:%d; a:%d): %v", sector, existing, allocate, paths)
|
||||||
|
|
||||||
return paths, func() {
|
return paths, func() {
|
||||||
|
releaseStorage()
|
||||||
|
|
||||||
for _, fileType := range pathTypes {
|
for _, fileType := range pathTypes {
|
||||||
if fileType&allocate == 0 {
|
if fileType&allocate == 0 {
|
||||||
continue
|
continue
|
||||||
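For orientation, a minimal self-contained sketch of the pattern this hunk introduces: reserve storage space before handing out sector paths, and fold the release of that reservation into the single cleanup closure returned to the caller. The reserve/acquire helpers and the fixed size below are illustrative stand-ins, not the real sector-storage API.

package main

import "fmt"

// reserve pretends to set aside `amount` units of storage and returns a
// function that gives the reservation back.
func reserve(amount int) (release func(), err error) {
	fmt.Println("reserved", amount)
	return func() { fmt.Println("released", amount) }, nil
}

// acquire mirrors the shape of the AcquireSector change above: reserve space
// up front, and fold the release into the cleanup closure handed back to the
// caller.
func acquire() (path string, done func(), err error) {
	releaseStorage, err := reserve(32)
	if err != nil {
		return "", nil, fmt.Errorf("reserving storage space: %w", err)
	}

	return "/tmp/sector", func() {
		releaseStorage() // give the space reservation back first
		fmt.Println("declare/move sector files here")
	}, nil
}

func main() {
	path, done, err := acquire()
	if err != nil {
		panic(err)
	}
	fmt.Println("sealing in", path)
	done()
}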
@ -171,8 +179,10 @@ func (l *LocalWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, k
|
|||||||
return xerrors.Errorf("finalizing sector: %w", err)
|
return xerrors.Errorf("finalizing sector: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := l.storage.Remove(ctx, sector, stores.FTUnsealed, true); err != nil {
|
if len(keepUnsealed) == 0 {
|
||||||
return xerrors.Errorf("removing unsealed data: %w", err)
|
if err := l.storage.Remove(ctx, sector, stores.FTUnsealed, true); err != nil {
|
||||||
|
return xerrors.Errorf("removing unsealed data: %w", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
@ -227,10 +237,10 @@ func (l *LocalWorker) UnsealPiece(ctx context.Context, sector abi.SectorID, inde
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *LocalWorker) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error {
|
func (l *LocalWorker) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) {
|
||||||
sb, err := l.sb()
|
sb, err := l.sb()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return sb.ReadPiece(ctx, writer, sector, index, size)
|
return sb.ReadPiece(ctx, writer, sector, index, size)
|
||||||
|
108
manager.go
@ -3,6 +3,7 @@ package sectorstorage
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
|
"github.com/filecoin-project/sector-storage/fsutil"
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
|
||||||
@ -33,7 +34,7 @@ type Worker interface {
|
|||||||
|
|
||||||
Fetch(ctx context.Context, s abi.SectorID, ft stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) error
|
Fetch(ctx context.Context, s abi.SectorID, ft stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) error
|
||||||
UnsealPiece(context.Context, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error
|
UnsealPiece(context.Context, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error
|
||||||
ReadPiece(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) error
|
ReadPiece(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) (bool, error)
|
||||||
|
|
||||||
TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error)
|
TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error)
|
||||||
|
|
||||||
@ -75,6 +76,8 @@ type Manager struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type SealerConfig struct {
|
type SealerConfig struct {
|
||||||
|
ParallelFetchLimit int
|
||||||
|
|
||||||
// Local worker config
|
// Local worker config
|
||||||
AllowPreCommit1 bool
|
AllowPreCommit1 bool
|
||||||
AllowPreCommit2 bool
|
AllowPreCommit2 bool
|
||||||
@ -95,7 +98,7 @@ func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg
|
|||||||
return nil, xerrors.Errorf("creating prover instance: %w", err)
|
return nil, xerrors.Errorf("creating prover instance: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
stor := stores.NewRemote(lstor, si, http.Header(sa))
|
stor := stores.NewRemote(lstor, si, http.Header(sa), sc.ParallelFetchLimit)
|
||||||
|
|
||||||
m := &Manager{
|
m := &Manager{
|
||||||
scfg: cfg,
|
scfg: cfg,
|
||||||
@ -165,7 +168,10 @@ func (m *Manager) AddWorker(ctx context.Context, w Worker) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
m.sched.newWorkers <- &workerHandle{
|
m.sched.newWorkers <- &workerHandle{
|
||||||
w: w,
|
w: w,
|
||||||
|
wt: &workTracker{
|
||||||
|
running: map[uint64]storiface.WorkerJob{},
|
||||||
|
},
|
||||||
info: info,
|
info: info,
|
||||||
preparing: &activeResources{},
|
preparing: &activeResources{},
|
||||||
active: &activeResources{},
|
active: &activeResources{},
|
||||||
@ -207,23 +213,38 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect
|
|||||||
|
|
||||||
var selector WorkerSelector
|
var selector WorkerSelector
|
||||||
if len(best) == 0 { // new
|
if len(best) == 0 { // new
|
||||||
selector, err = newAllocSelector(ctx, m.index, stores.FTUnsealed, stores.PathSealing)
|
selector = newAllocSelector(m.index, stores.FTUnsealed, stores.PathSealing)
|
||||||
} else { // append to existing
|
} else { // append to existing
|
||||||
selector, err = newExistingSelector(ctx, m.index, sector, stores.FTUnsealed, false)
|
selector = newExistingSelector(m.index, sector, stores.FTUnsealed, false)
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return xerrors.Errorf("creating unsealPiece selector: %w", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: Optimization: don't send unseal to a worker if the requested range is already unsealed
|
var readOk bool
|
||||||
|
|
||||||
|
if len(best) > 0 {
|
||||||
|
// There is an unsealed sector; see if we can read from it
|
||||||
|
|
||||||
|
selector = newExistingSelector(m.index, sector, stores.FTUnsealed, false)
|
||||||
|
|
||||||
|
err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, schedFetch(sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error {
|
||||||
|
readOk, err = w.ReadPiece(ctx, sink, sector, offset, size)
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("reading piece from sealed sector: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if readOk {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
unsealFetch := func(ctx context.Context, worker Worker) error {
|
unsealFetch := func(ctx context.Context, worker Worker) error {
|
||||||
if err := worker.Fetch(ctx, sector, stores.FTSealed|stores.FTCache, true, stores.AcquireCopy); err != nil {
|
if err := worker.Fetch(ctx, sector, stores.FTSealed|stores.FTCache, stores.PathSealing, stores.AcquireCopy); err != nil {
|
||||||
return xerrors.Errorf("copy sealed/cache sector data: %w", err)
|
return xerrors.Errorf("copy sealed/cache sector data: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(best) > 0 {
|
if len(best) > 0 {
|
||||||
if err := worker.Fetch(ctx, sector, stores.FTUnsealed, true, stores.AcquireMove); err != nil {
|
if err := worker.Fetch(ctx, sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove); err != nil {
|
||||||
return xerrors.Errorf("copy unsealed sector data: %w", err)
|
return xerrors.Errorf("copy unsealed sector data: %w", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -237,18 +258,20 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
selector, err = newExistingSelector(ctx, m.index, sector, stores.FTUnsealed, false)
|
selector = newExistingSelector(m.index, sector, stores.FTUnsealed, false)
|
||||||
if err != nil {
|
|
||||||
return xerrors.Errorf("creating readPiece selector: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, schedFetch(sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error {
|
err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, schedFetch(sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error {
|
||||||
return w.ReadPiece(ctx, sink, sector, offset, size)
|
readOk, err = w.ReadPiece(ctx, sink, sector, offset, size)
|
||||||
|
return err
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return xerrors.Errorf("reading piece from sealed sector: %w", err)
|
return xerrors.Errorf("reading piece from sealed sector: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if !readOk {
|
||||||
|
return xerrors.Errorf("failed to read unsealed piece")
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
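A rough sketch of the read flow the ReadPiece changes above set up, assuming hypothetical tryRead/unseal callbacks in place of the scheduler and worker calls: try an existing unsealed copy first, and only unseal (and retry the read) when that first attempt reports readOk == false.

package main

import (
	"errors"
	"fmt"
)

// readPiece first tries to read from an existing unsealed copy (if any), and
// only falls back to unsealing when that attempt reports readOk == false.
func readPiece(haveUnsealed bool, tryRead func() (bool, error), unseal func() error) error {
	if haveUnsealed {
		readOk, err := tryRead()
		if err != nil {
			return fmt.Errorf("reading piece from sealed sector: %w", err)
		}
		if readOk {
			return nil // fast path: the existing unsealed data was enough
		}
	}

	if err := unseal(); err != nil {
		return fmt.Errorf("unsealing: %w", err)
	}

	readOk, err := tryRead()
	if err != nil {
		return fmt.Errorf("reading piece from sealed sector: %w", err)
	}
	if !readOk {
		return errors.New("failed to read unsealed piece")
	}
	return nil
}

func main() {
	err := readPiece(false,
		func() (bool, error) { return true, nil }, // pretend the read succeeds
		func() error { return nil })               // pretend unsealing succeeds
	fmt.Println("err:", err)
}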
@ -268,12 +291,9 @@ func (m *Manager) AddPiece(ctx context.Context, sector abi.SectorID, existingPie
|
|||||||
var selector WorkerSelector
|
var selector WorkerSelector
|
||||||
var err error
|
var err error
|
||||||
if len(existingPieces) == 0 { // new
|
if len(existingPieces) == 0 { // new
|
||||||
selector, err = newAllocSelector(ctx, m.index, stores.FTUnsealed, stores.PathSealing)
|
selector = newAllocSelector(m.index, stores.FTUnsealed, stores.PathSealing)
|
||||||
} else { // use existing
|
} else { // use existing
|
||||||
selector, err = newExistingSelector(ctx, m.index, sector, stores.FTUnsealed, false)
|
selector = newExistingSelector(m.index, sector, stores.FTUnsealed, false)
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return abi.PieceInfo{}, xerrors.Errorf("creating path selector: %w", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var out abi.PieceInfo
|
var out abi.PieceInfo
|
||||||
@ -299,10 +319,7 @@ func (m *Manager) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke
|
|||||||
|
|
||||||
// TODO: also consider where the unsealed data sits
|
// TODO: also consider where the unsealed data sits
|
||||||
|
|
||||||
selector, err := newAllocSelector(ctx, m.index, stores.FTCache|stores.FTSealed, stores.PathSealing)
|
selector := newAllocSelector(m.index, stores.FTCache|stores.FTSealed, stores.PathSealing)
|
||||||
if err != nil {
|
|
||||||
return nil, xerrors.Errorf("creating path selector: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
err = m.sched.Schedule(ctx, sector, sealtasks.TTPreCommit1, selector, schedFetch(sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error {
|
err = m.sched.Schedule(ctx, sector, sealtasks.TTPreCommit1, selector, schedFetch(sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error {
|
||||||
p, err := w.SealPreCommit1(ctx, sector, ticket, pieces)
|
p, err := w.SealPreCommit1(ctx, sector, ticket, pieces)
|
||||||
@ -324,10 +341,7 @@ func (m *Manager) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase
|
|||||||
return storage.SectorCids{}, xerrors.Errorf("acquiring sector lock: %w", err)
|
return storage.SectorCids{}, xerrors.Errorf("acquiring sector lock: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
selector, err := newExistingSelector(ctx, m.index, sector, stores.FTCache|stores.FTSealed, true)
|
selector := newExistingSelector(m.index, sector, stores.FTCache|stores.FTSealed, true)
|
||||||
if err != nil {
|
|
||||||
return storage.SectorCids{}, xerrors.Errorf("creating path selector: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
err = m.sched.Schedule(ctx, sector, sealtasks.TTPreCommit2, selector, schedFetch(sector, stores.FTCache|stores.FTSealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error {
|
err = m.sched.Schedule(ctx, sector, sealtasks.TTPreCommit2, selector, schedFetch(sector, stores.FTCache|stores.FTSealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error {
|
||||||
p, err := w.SealPreCommit2(ctx, sector, phase1Out)
|
p, err := w.SealPreCommit2(ctx, sector, phase1Out)
|
||||||
@ -351,10 +365,7 @@ func (m *Manager) SealCommit1(ctx context.Context, sector abi.SectorID, ticket a
|
|||||||
// NOTE: We set allowFetch to false so that we always execute on a worker
|
// NOTE: We set allowFetch to false so that we always execute on a worker
|
||||||
// with direct access to the data. We want to do that because this step is
|
// with direct access to the data. We want to do that because this step is
|
||||||
// generally very cheap / fast, and transferring data is not worth the effort
|
// generally very cheap / fast, and transferring data is not worth the effort
|
||||||
selector, err := newExistingSelector(ctx, m.index, sector, stores.FTCache|stores.FTSealed, false)
|
selector := newExistingSelector(m.index, sector, stores.FTCache|stores.FTSealed, false)
|
||||||
if err != nil {
|
|
||||||
return storage.Commit1Out{}, xerrors.Errorf("creating path selector: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
err = m.sched.Schedule(ctx, sector, sealtasks.TTCommit1, selector, schedFetch(sector, stores.FTCache|stores.FTSealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error {
|
err = m.sched.Schedule(ctx, sector, sealtasks.TTCommit1, selector, schedFetch(sector, stores.FTCache|stores.FTSealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error {
|
||||||
p, err := w.SealCommit1(ctx, sector, ticket, seed, pieces, cids)
|
p, err := w.SealCommit1(ctx, sector, ticket, seed, pieces, cids)
|
||||||
@ -402,12 +413,9 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID, keepU
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
selector, err := newExistingSelector(ctx, m.index, sector, stores.FTCache|stores.FTSealed, false)
|
selector := newExistingSelector(m.index, sector, stores.FTCache|stores.FTSealed, false)
|
||||||
if err != nil {
|
|
||||||
return xerrors.Errorf("creating path selector: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
err = m.sched.Schedule(ctx, sector, sealtasks.TTFinalize, selector,
|
err := m.sched.Schedule(ctx, sector, sealtasks.TTFinalize, selector,
|
||||||
schedFetch(sector, stores.FTCache|stores.FTSealed|unsealed, stores.PathSealing, stores.AcquireMove),
|
schedFetch(sector, stores.FTCache|stores.FTSealed|unsealed, stores.PathSealing, stores.AcquireMove),
|
||||||
func(ctx context.Context, w Worker) error {
|
func(ctx context.Context, w Worker) error {
|
||||||
return w.FinalizeSector(ctx, sector, keepUnsealed)
|
return w.FinalizeSector(ctx, sector, keepUnsealed)
|
||||||
@ -416,11 +424,7 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID, keepU
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
fetchSel, err := newAllocSelector(ctx, m.index, stores.FTCache|stores.FTSealed, stores.PathStorage)
|
fetchSel := newAllocSelector(m.index, stores.FTCache|stores.FTSealed, stores.PathStorage)
|
||||||
if err != nil {
|
|
||||||
return xerrors.Errorf("creating fetchSel: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
moveUnsealed := unsealed
|
moveUnsealed := unsealed
|
||||||
{
|
{
|
||||||
if len(keepUnsealed) == 0 {
|
if len(keepUnsealed) == 0 {
|
||||||
@ -441,7 +445,8 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID, keepU
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (m *Manager) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error {
|
func (m *Manager) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error {
|
||||||
return xerrors.Errorf("implement me")
|
log.Warnw("ReleaseUnsealed todo")
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Manager) Remove(ctx context.Context, sector abi.SectorID) error {
|
func (m *Manager) Remove(ctx context.Context, sector abi.SectorID) error {
|
||||||
@ -464,10 +469,7 @@ func (m *Manager) Remove(ctx context.Context, sector abi.SectorID) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
selector, err := newExistingSelector(ctx, m.index, sector, stores.FTCache|stores.FTSealed, false)
|
selector := newExistingSelector(m.index, sector, stores.FTCache|stores.FTSealed, false)
|
||||||
if err != nil {
|
|
||||||
return xerrors.Errorf("creating selector: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return m.sched.Schedule(ctx, sector, sealtasks.TTFinalize, selector,
|
return m.sched.Schedule(ctx, sector, sealtasks.TTFinalize, selector,
|
||||||
schedFetch(sector, stores.FTCache|stores.FTSealed|unsealed, stores.PathStorage, stores.AcquireMove),
|
schedFetch(sector, stores.FTCache|stores.FTSealed|unsealed, stores.PathStorage, stores.AcquireMove),
|
||||||
@ -490,12 +492,16 @@ func (m *Manager) StorageLocal(ctx context.Context) (map[stores.ID]string, error
|
|||||||
return out, nil
|
return out, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Manager) FsStat(ctx context.Context, id stores.ID) (stores.FsStat, error) {
|
func (m *Manager) FsStat(ctx context.Context, id stores.ID) (fsutil.FsStat, error) {
|
||||||
return m.storage.FsStat(ctx, id)
|
return m.storage.FsStat(ctx, id)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Manager) Close() error {
|
func (m *Manager) SchedDiag(ctx context.Context) (interface{}, error) {
|
||||||
return m.sched.Close()
|
return m.sched.Info(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Manager) Close(ctx context.Context) error {
|
||||||
|
return m.sched.Close(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
var _ SectorManager = &Manager{}
|
var _ SectorManager = &Manager{}
|
||||||
|
@ -5,6 +5,7 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"github.com/filecoin-project/sector-storage/fsutil"
|
||||||
"github.com/filecoin-project/sector-storage/sealtasks"
|
"github.com/filecoin-project/sector-storage/sealtasks"
|
||||||
logging "github.com/ipfs/go-log"
|
logging "github.com/ipfs/go-log"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
@ -22,8 +23,16 @@ import (
|
|||||||
"github.com/filecoin-project/sector-storage/stores"
|
"github.com/filecoin-project/sector-storage/stores"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
logging.SetAllLoggers(logging.LevelDebug)
|
||||||
|
}
|
||||||
|
|
||||||
type testStorage stores.StorageConfig
|
type testStorage stores.StorageConfig
|
||||||
|
|
||||||
|
func (t testStorage) DiskUsage(path string) (int64, error) {
|
||||||
|
return 1, nil // close enough
|
||||||
|
}
|
||||||
|
|
||||||
func newTestStorage(t *testing.T) *testStorage {
|
func newTestStorage(t *testing.T) *testStorage {
|
||||||
tp, err := ioutil.TempDir(os.TempDir(), "sector-storage-test-")
|
tp, err := ioutil.TempDir(os.TempDir(), "sector-storage-test-")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@ -65,8 +74,8 @@ func (t *testStorage) SetStorage(f func(*stores.StorageConfig)) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *testStorage) Stat(path string) (stores.FsStat, error) {
|
func (t *testStorage) Stat(path string) (fsutil.FsStat, error) {
|
||||||
return stores.Stat(path)
|
return fsutil.Statfs(path)
|
||||||
}
|
}
|
||||||
|
|
||||||
var _ stores.LocalStorage = &testStorage{}
|
var _ stores.LocalStorage = &testStorage{}
|
||||||
@ -86,7 +95,7 @@ func newTestMgr(ctx context.Context, t *testing.T) (*Manager, *stores.Local, *st
|
|||||||
prover, err := ffiwrapper.New(&readonlyProvider{stor: lstor}, cfg)
|
prover, err := ffiwrapper.New(&readonlyProvider{stor: lstor}, cfg)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
stor := stores.NewRemote(lstor, si, nil)
|
stor := stores.NewRemote(lstor, si, nil, 6000)
|
||||||
|
|
||||||
m := &Manager{
|
m := &Manager{
|
||||||
scfg: cfg,
|
scfg: cfg,
|
||||||
|
117
mock/mock.go
@ -5,7 +5,6 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"math"
|
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
@ -34,14 +33,22 @@ type SectorMgr struct {
|
|||||||
|
|
||||||
type mockVerif struct{}
|
type mockVerif struct{}
|
||||||
|
|
||||||
func NewMockSectorMgr(ssize abi.SectorSize) *SectorMgr {
|
func NewMockSectorMgr(ssize abi.SectorSize, genesisSectors []abi.SectorID) *SectorMgr {
|
||||||
rt, err := ffiwrapper.SealProofTypeFromSectorSize(ssize)
|
rt, err := ffiwrapper.SealProofTypeFromSectorSize(ssize)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
sectors := make(map[abi.SectorID]*sectorState)
|
||||||
|
for _, sid := range genesisSectors {
|
||||||
|
sectors[sid] = &sectorState{
|
||||||
|
failed: false,
|
||||||
|
state: stateCommit,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return &SectorMgr{
|
return &SectorMgr{
|
||||||
sectors: make(map[abi.SectorID]*sectorState),
|
sectors: sectors,
|
||||||
pieces: map[cid.Cid][]byte{},
|
pieces: map[cid.Cid][]byte{},
|
||||||
sectorSize: ssize,
|
sectorSize: ssize,
|
||||||
nextSectorID: 5,
|
nextSectorID: 5,
|
||||||
@ -161,7 +168,7 @@ func (mgr *SectorMgr) SealPreCommit1(ctx context.Context, sid abi.SectorID, tick
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
cc, _, err := commcid.CIDToCommitment(commd)
|
_, _, cc, err := commcid.CIDToCommitment(commd)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
@ -175,14 +182,14 @@ func (mgr *SectorMgr) SealPreCommit2(ctx context.Context, sid abi.SectorID, phas
|
|||||||
db := []byte(string(phase1Out))
|
db := []byte(string(phase1Out))
|
||||||
db[0] ^= 'd'
|
db[0] ^= 'd'
|
||||||
|
|
||||||
d := commcid.DataCommitmentV1ToCID(db)
|
d, _ := commcid.DataCommitmentV1ToCID(db)
|
||||||
|
|
||||||
commr := make([]byte, 32)
|
commr := make([]byte, 32)
|
||||||
for i := range db {
|
for i := range db {
|
||||||
commr[32-(i+1)] = db[i]
|
commr[32-(i+1)] = db[i]
|
||||||
}
|
}
|
||||||
|
|
||||||
commR := commcid.ReplicaCommitmentV1ToCID(commr)
|
commR, _ := commcid.ReplicaCommitmentV1ToCID(commr)
|
||||||
|
|
||||||
return storage.SectorCids{
|
return storage.SectorCids{
|
||||||
Unsealed: d,
|
Unsealed: d,
|
||||||
@ -229,7 +236,7 @@ func (mgr *SectorMgr) SealCommit2(ctx context.Context, sid abi.SectorID, phase1O
|
|||||||
|
|
||||||
// Test Instrumentation Methods
|
// Test Instrumentation Methods
|
||||||
|
|
||||||
func (mgr *SectorMgr) FailSector(sid abi.SectorID) error {
|
func (mgr *SectorMgr) MarkFailed(sid abi.SectorID, failed bool) error {
|
||||||
mgr.lk.Lock()
|
mgr.lk.Lock()
|
||||||
defer mgr.lk.Unlock()
|
defer mgr.lk.Unlock()
|
||||||
ss, ok := mgr.sectors[sid]
|
ss, ok := mgr.sectors[sid]
|
||||||
@ -237,7 +244,7 @@ func (mgr *SectorMgr) FailSector(sid abi.SectorID) error {
|
|||||||
return fmt.Errorf("no such sector in storage")
|
return fmt.Errorf("no such sector in storage")
|
||||||
}
|
}
|
||||||
|
|
||||||
ss.failed = true
|
ss.failed = failed
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -258,27 +265,57 @@ func AddOpFinish(ctx context.Context) (context.Context, func()) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (mgr *SectorMgr) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, error) {
|
func (mgr *SectorMgr) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, error) {
|
||||||
return generateFakePoSt(sectorInfo, abi.RegisteredSealProof.RegisteredWinningPoStProof), nil
|
return generateFakePoSt(sectorInfo, abi.RegisteredSealProof.RegisteredWinningPoStProof, randomness), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, []abi.SectorID, error) {
|
func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, []abi.SectorID, error) {
|
||||||
return generateFakePoSt(sectorInfo, abi.RegisteredSealProof.RegisteredWindowPoStProof), nil, nil
|
si := make([]abi.SectorInfo, 0, len(sectorInfo))
|
||||||
|
var skipped []abi.SectorID
|
||||||
|
|
||||||
|
for _, info := range sectorInfo {
|
||||||
|
sid := abi.SectorID{
|
||||||
|
Miner: minerID,
|
||||||
|
Number: info.SectorNumber,
|
||||||
|
}
|
||||||
|
|
||||||
|
_, found := mgr.sectors[sid]
|
||||||
|
|
||||||
|
if found && !mgr.sectors[sid].failed {
|
||||||
|
si = append(si, info)
|
||||||
|
} else {
|
||||||
|
skipped = append(skipped, sid)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return generateFakePoSt(si, abi.RegisteredSealProof.RegisteredWindowPoStProof, randomness), skipped, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func generateFakePoSt(sectorInfo []abi.SectorInfo, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error)) []abi.PoStProof {
|
func generateFakePoSt(sectorInfo []abi.SectorInfo, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error), randomness abi.PoStRandomness) []abi.PoStProof {
|
||||||
se, err := sectorInfo[0].SealProof.WindowPoStPartitionSectors()
|
sectors := abi.NewBitField()
|
||||||
|
for _, info := range sectorInfo {
|
||||||
|
sectors.Set(uint64(info.SectorNumber))
|
||||||
|
}
|
||||||
|
|
||||||
|
wp, err := rpt(sectorInfo[0].SealProof)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
wp, err := rpt(sectorInfo[0].SealProof)
|
|
||||||
|
var proofBuf bytes.Buffer
|
||||||
|
|
||||||
|
_, err = proofBuf.Write(randomness)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if err := sectors.MarshalCBOR(&proofBuf); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
return []abi.PoStProof{
|
return []abi.PoStProof{
|
||||||
{
|
{
|
||||||
PoStProof: wp,
|
PoStProof: wp,
|
||||||
ProofBytes: make([]byte, 192*int(math.Ceil(float64(len(sectorInfo))/float64(se)))),
|
ProofBytes: proofBuf.Bytes(),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
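A simplified, self-contained sketch of the fake-proof format the mock now produces: the "proof" is just the PoSt randomness followed by an encoding of the proven sector numbers, so a mock verifier can later check both. The varint encoding below is a stand-in for the CBOR bitfield used in the real mock.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// fakeProof builds proof bytes as: randomness || varint(sector numbers).
// This mirrors the idea above (randomness + serialized sector set), not the
// exact CBOR layout.
func fakeProof(randomness []byte, sectorNumbers []uint64) []byte {
	var buf bytes.Buffer
	buf.Write(randomness)
	for _, n := range sectorNumbers {
		var tmp [binary.MaxVarintLen64]byte
		buf.Write(tmp[:binary.PutUvarint(tmp[:], n)])
	}
	return buf.Bytes()
}

func main() {
	fmt.Printf("%x\n", fakeProof([]byte("rand"), []uint64{1, 5, 7}))
}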
@ -320,7 +357,7 @@ func (mgr *SectorMgr) FinalizeSector(context.Context, abi.SectorID, []storage.Ra
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (mgr *SectorMgr) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error {
|
func (mgr *SectorMgr) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error {
|
||||||
panic("implement me")
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mgr *SectorMgr) Remove(ctx context.Context, sector abi.SectorID) error {
|
func (mgr *SectorMgr) Remove(ctx context.Context, sector abi.SectorID) error {
|
||||||
@ -335,8 +372,18 @@ func (mgr *SectorMgr) Remove(ctx context.Context, sector abi.SectorID) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mgr *SectorMgr) CheckProvable(context.Context, abi.RegisteredSealProof, []abi.SectorID) ([]abi.SectorID, error) {
|
func (mgr *SectorMgr) CheckProvable(ctx context.Context, spt abi.RegisteredSealProof, ids []abi.SectorID) ([]abi.SectorID, error) {
|
||||||
return nil, nil
|
var bad []abi.SectorID
|
||||||
|
|
||||||
|
for _, sid := range ids {
|
||||||
|
_, found := mgr.sectors[sid]
|
||||||
|
|
||||||
|
if !found || mgr.sectors[sid].failed {
|
||||||
|
bad = append(bad, sid)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return bad, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m mockVerif) VerifySeal(svi abi.SealVerifyInfo) (bool, error) {
|
func (m mockVerif) VerifySeal(svi abi.SealVerifyInfo) (bool, error) {
|
||||||
@ -358,6 +405,42 @@ func (m mockVerif) VerifyWinningPoSt(ctx context.Context, info abi.WinningPoStVe
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (m mockVerif) VerifyWindowPoSt(ctx context.Context, info abi.WindowPoStVerifyInfo) (bool, error) {
|
func (m mockVerif) VerifyWindowPoSt(ctx context.Context, info abi.WindowPoStVerifyInfo) (bool, error) {
|
||||||
|
if len(info.Proofs) != 1 {
|
||||||
|
return false, xerrors.Errorf("expected 1 proof entry")
|
||||||
|
}
|
||||||
|
|
||||||
|
proof := info.Proofs[0]
|
||||||
|
|
||||||
|
if !bytes.Equal(proof.ProofBytes[:len(info.Randomness)], info.Randomness) {
|
||||||
|
return false, xerrors.Errorf("bad randomness")
|
||||||
|
}
|
||||||
|
|
||||||
|
sectors := abi.NewBitField()
|
||||||
|
if err := sectors.UnmarshalCBOR(bytes.NewReader(proof.ProofBytes[len(info.Randomness):])); err != nil {
|
||||||
|
return false, xerrors.Errorf("unmarshaling sectors bitfield from \"proof\": %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
challenged := abi.NewBitField()
|
||||||
|
for _, sector := range info.ChallengedSectors {
|
||||||
|
challenged.Set(uint64(sector.SectorNumber))
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
b1, err := sectors.MarshalJSON()
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
b2, err := challenged.MarshalJSON()
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !bytes.Equal(b1, b2) {
|
||||||
|
return false, xerrors.Errorf("proven and challenged sector sets didn't match: %s != !s", string(b1), string(b2))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -9,7 +9,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestOpFinish(t *testing.T) {
|
func TestOpFinish(t *testing.T) {
|
||||||
sb := NewMockSectorMgr(2048)
|
sb := NewMockSectorMgr(2048, nil)
|
||||||
|
|
||||||
sid, pieces, err := sb.StageFakeData(123)
|
sid, pieces, err := sb.StageFakeData(123)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
package sectorstorage
|
package sectorstorage
|
||||||
|
|
||||||
import "container/heap"
|
import "sort"
|
||||||
|
|
||||||
type requestQueue []*workerRequest
|
type requestQueue []*workerRequest
|
||||||
|
|
||||||
@ -24,21 +24,22 @@ func (q requestQueue) Swap(i, j int) {
|
|||||||
q[j].index = j
|
q[j].index = j
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *requestQueue) Push(x interface{}) {
|
func (q *requestQueue) Push(x *workerRequest) {
|
||||||
n := len(*q)
|
n := len(*q)
|
||||||
item := x.(*workerRequest)
|
item := x
|
||||||
item.index = n
|
item.index = n
|
||||||
*q = append(*q, item)
|
*q = append(*q, item)
|
||||||
|
sort.Sort(q)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *requestQueue) Pop() interface{} {
|
func (q *requestQueue) Remove(i int) *workerRequest {
|
||||||
old := *q
|
old := *q
|
||||||
n := len(old)
|
n := len(old)
|
||||||
item := old[n-1]
|
item := old[i]
|
||||||
old[n-1] = nil // avoid memory leak
|
old[i] = old[n-1]
|
||||||
item.index = -1 // for safety
|
old[n-1] = nil
|
||||||
|
item.index = -1
|
||||||
*q = old[0 : n-1]
|
*q = old[0 : n-1]
|
||||||
|
sort.Sort(q)
|
||||||
return item
|
return item
|
||||||
}
|
}
|
||||||
|
|
||||||
var _ heap.Interface = &requestQueue{}
|
|
||||||
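The queue is now re-sorted with sort.Sort on every Push and Remove instead of maintaining heap invariants, which is what lets the scheduler inspect entries by index and remove arbitrary ones. The comparison function is not part of this hunk; a plausible sketch, consistent with TestRequestQueue below if the sealtasks ordering places PreCommit2 before PreCommit1 before AddPiece:

func (q requestQueue) Less(i, j int) bool {
	if q[i].priority != q[j].priority {
		return q[i].priority > q[j].priority // larger priority values run first
	}
	return q[i].taskType.Less(q[j].taskType) // then the sealtasks task-type order
}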
|
@ -1,7 +1,7 @@
|
|||||||
package sectorstorage
|
package sectorstorage
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"container/heap"
|
"fmt"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/filecoin-project/sector-storage/sealtasks"
|
"github.com/filecoin-project/sector-storage/sealtasks"
|
||||||
@ -10,19 +10,51 @@ import (
|
|||||||
func TestRequestQueue(t *testing.T) {
|
func TestRequestQueue(t *testing.T) {
|
||||||
rq := &requestQueue{}
|
rq := &requestQueue{}
|
||||||
|
|
||||||
heap.Push(rq, &workerRequest{taskType: sealtasks.TTAddPiece})
|
rq.Push(&workerRequest{taskType: sealtasks.TTAddPiece})
|
||||||
heap.Push(rq, &workerRequest{taskType: sealtasks.TTPreCommit1})
|
rq.Push(&workerRequest{taskType: sealtasks.TTPreCommit1})
|
||||||
heap.Push(rq, &workerRequest{taskType: sealtasks.TTPreCommit2})
|
rq.Push(&workerRequest{taskType: sealtasks.TTPreCommit2})
|
||||||
heap.Push(rq, &workerRequest{taskType: sealtasks.TTPreCommit1})
|
rq.Push(&workerRequest{taskType: sealtasks.TTPreCommit1})
|
||||||
heap.Push(rq, &workerRequest{taskType: sealtasks.TTAddPiece})
|
rq.Push(&workerRequest{taskType: sealtasks.TTAddPiece})
|
||||||
|
|
||||||
pt := heap.Pop(rq).(*workerRequest)
|
dump := func(s string) {
|
||||||
|
fmt.Println("---")
|
||||||
|
fmt.Println(s)
|
||||||
|
|
||||||
|
for sqi := 0; sqi < rq.Len(); sqi++ {
|
||||||
|
task := (*rq)[sqi]
|
||||||
|
fmt.Println(sqi, task.taskType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
dump("start")
|
||||||
|
|
||||||
|
pt := rq.Remove(0)
|
||||||
|
|
||||||
|
dump("pop 1")
|
||||||
|
|
||||||
if pt.taskType != sealtasks.TTPreCommit2 {
|
if pt.taskType != sealtasks.TTPreCommit2 {
|
||||||
t.Error("expected precommit2, got", pt.taskType)
|
t.Error("expected precommit2, got", pt.taskType)
|
||||||
}
|
}
|
||||||
|
|
||||||
pt = heap.Pop(rq).(*workerRequest)
|
pt = rq.Remove(0)
|
||||||
|
|
||||||
|
dump("pop 2")
|
||||||
|
|
||||||
|
if pt.taskType != sealtasks.TTPreCommit1 {
|
||||||
|
t.Error("expected precommit1, got", pt.taskType)
|
||||||
|
}
|
||||||
|
|
||||||
|
pt = rq.Remove(1)
|
||||||
|
|
||||||
|
dump("pop 3")
|
||||||
|
|
||||||
|
if pt.taskType != sealtasks.TTAddPiece {
|
||||||
|
t.Error("expected addpiece, got", pt.taskType)
|
||||||
|
}
|
||||||
|
|
||||||
|
pt = rq.Remove(0)
|
||||||
|
|
||||||
|
dump("pop 4")
|
||||||
|
|
||||||
if pt.taskType != sealtasks.TTPreCommit1 {
|
if pt.taskType != sealtasks.TTPreCommit1 {
|
||||||
t.Error("expected precommit1, got", pt.taskType)
|
t.Error("expected precommit1, got", pt.taskType)
|
||||||
|
737
sched.go
@ -1,13 +1,13 @@
|
|||||||
package sectorstorage
|
package sectorstorage
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"container/heap"
|
|
||||||
"context"
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"math/rand"
|
||||||
"sort"
|
"sort"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/hashicorp/go-multierror"
|
|
||||||
"golang.org/x/xerrors"
|
"golang.org/x/xerrors"
|
||||||
|
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi"
|
"github.com/filecoin-project/specs-actors/actors/abi"
|
||||||
@ -20,6 +20,11 @@ type schedPrioCtxKey int
|
|||||||
|
|
||||||
var SchedPriorityKey schedPrioCtxKey
|
var SchedPriorityKey schedPrioCtxKey
|
||||||
var DefaultSchedPriority = 0
|
var DefaultSchedPriority = 0
|
||||||
|
var SelectorTimeout = 5 * time.Second
|
||||||
|
|
||||||
|
var (
|
||||||
|
SchedWindows = 2
|
||||||
|
)
|
||||||
|
|
||||||
func getPriority(ctx context.Context) int {
|
func getPriority(ctx context.Context) int {
|
||||||
sp := ctx.Value(SchedPriorityKey)
|
sp := ctx.Value(SchedPriorityKey)
|
||||||
@ -47,7 +52,7 @@ type WorkerSelector interface {
|
|||||||
type scheduler struct {
|
type scheduler struct {
|
||||||
spt abi.RegisteredSealProof
|
spt abi.RegisteredSealProof
|
||||||
|
|
||||||
workersLk sync.Mutex
|
workersLk sync.RWMutex
|
||||||
nextWorker WorkerID
|
nextWorker WorkerID
|
||||||
workers map[WorkerID]*workerHandle
|
workers map[WorkerID]*workerHandle
|
||||||
|
|
||||||
@ -56,11 +61,77 @@ type scheduler struct {
|
|||||||
watchClosing chan WorkerID
|
watchClosing chan WorkerID
|
||||||
workerClosing chan WorkerID
|
workerClosing chan WorkerID
|
||||||
|
|
||||||
schedule chan *workerRequest
|
schedule chan *workerRequest
|
||||||
workerFree chan WorkerID
|
windowRequests chan *schedWindowRequest
|
||||||
closing chan struct{}
|
|
||||||
|
|
||||||
schedQueue *requestQueue
|
// owned by the sh.runSched goroutine
|
||||||
|
schedQueue *requestQueue
|
||||||
|
openWindows []*schedWindowRequest
|
||||||
|
|
||||||
|
info chan func(interface{})
|
||||||
|
|
||||||
|
closing chan struct{}
|
||||||
|
closed chan struct{}
|
||||||
|
testSync chan struct{} // used for testing
|
||||||
|
}
|
||||||
|
|
||||||
|
type workerHandle struct {
|
||||||
|
w Worker
|
||||||
|
|
||||||
|
info storiface.WorkerInfo
|
||||||
|
|
||||||
|
preparing *activeResources
|
||||||
|
active *activeResources
|
||||||
|
|
||||||
|
lk sync.Mutex
|
||||||
|
|
||||||
|
// stats / tracking
|
||||||
|
wt *workTracker
|
||||||
|
|
||||||
|
// for sync manager goroutine closing
|
||||||
|
cleanupStarted bool
|
||||||
|
closedMgr chan struct{}
|
||||||
|
closingMgr chan struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
type schedWindowRequest struct {
|
||||||
|
worker WorkerID
|
||||||
|
|
||||||
|
done chan *schedWindow
|
||||||
|
}
|
||||||
|
|
||||||
|
type schedWindow struct {
|
||||||
|
allocated activeResources
|
||||||
|
todo []*workerRequest
|
||||||
|
}
|
||||||
|
|
||||||
|
type activeResources struct {
|
||||||
|
memUsedMin uint64
|
||||||
|
memUsedMax uint64
|
||||||
|
gpuUsed bool
|
||||||
|
cpuUse uint64
|
||||||
|
|
||||||
|
cond *sync.Cond
|
||||||
|
}
|
||||||
|
|
||||||
|
type workerRequest struct {
|
||||||
|
sector abi.SectorID
|
||||||
|
taskType sealtasks.TaskType
|
||||||
|
priority int // larger values more important
|
||||||
|
sel WorkerSelector
|
||||||
|
|
||||||
|
prepare WorkerAction
|
||||||
|
work WorkerAction
|
||||||
|
|
||||||
|
index int // The index of the item in the request queue.
|
||||||
|
|
||||||
|
indexHeap int
|
||||||
|
ret chan<- workerResponse
|
||||||
|
ctx context.Context
|
||||||
|
}
|
||||||
|
|
||||||
|
type workerResponse struct {
|
||||||
|
err error
|
||||||
}
|
}
|
||||||
|
|
||||||
func newScheduler(spt abi.RegisteredSealProof) *scheduler {
|
func newScheduler(spt abi.RegisteredSealProof) *scheduler {
|
||||||
@ -75,11 +146,15 @@ func newScheduler(spt abi.RegisteredSealProof) *scheduler {
|
|||||||
watchClosing: make(chan WorkerID),
|
watchClosing: make(chan WorkerID),
|
||||||
workerClosing: make(chan WorkerID),
|
workerClosing: make(chan WorkerID),
|
||||||
|
|
||||||
schedule: make(chan *workerRequest),
|
schedule: make(chan *workerRequest),
|
||||||
workerFree: make(chan WorkerID),
|
windowRequests: make(chan *schedWindowRequest),
|
||||||
closing: make(chan struct{}),
|
|
||||||
|
|
||||||
schedQueue: &requestQueue{},
|
schedQueue: &requestQueue{},
|
||||||
|
|
||||||
|
info: make(chan func(interface{})),
|
||||||
|
|
||||||
|
closing: make(chan struct{}),
|
||||||
|
closed: make(chan struct{}),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -115,25 +190,6 @@ func (sh *scheduler) Schedule(ctx context.Context, sector abi.SectorID, taskType
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
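A hedged sketch of how a caller (the Manager, say) hands work to this scheduler; sector, ticket, pieces and m.index are placeholders, and only the Schedule signature plus the Worker and selector APIs shown elsewhere in this diff are assumed. The prepare action typically stages data on the chosen worker, the work action runs the sealing step.

sel := newAllocSelector(m.index, stores.FTCache|stores.FTSealed, stores.PathSealing)

err := m.sched.Schedule(ctx, sector, sealtasks.TTPreCommit1, sel,
	func(ctx context.Context, w Worker) error {
		// prepare: make sure the unsealed data is on the selected worker
		return w.Fetch(ctx, sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove)
	},
	func(ctx context.Context, w Worker) error {
		// work: run the actual PreCommit1 computation
		_, err := w.SealPreCommit1(ctx, sector, ticket, pieces)
		return err
	})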
type workerRequest struct {
|
|
||||||
sector abi.SectorID
|
|
||||||
taskType sealtasks.TaskType
|
|
||||||
priority int // larger values more important
|
|
||||||
sel WorkerSelector
|
|
||||||
|
|
||||||
prepare WorkerAction
|
|
||||||
work WorkerAction
|
|
||||||
|
|
||||||
index int // The index of the item in the heap.
|
|
||||||
|
|
||||||
ret chan<- workerResponse
|
|
||||||
ctx context.Context
|
|
||||||
}
|
|
||||||
|
|
||||||
type workerResponse struct {
|
|
||||||
err error
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *workerRequest) respond(err error) {
|
func (r *workerRequest) respond(err error) {
|
||||||
select {
|
select {
|
||||||
case r.ret <- workerResponse{err: err}:
|
case r.ret <- workerResponse{err: err}:
|
||||||
@ -142,46 +198,44 @@ func (r *workerRequest) respond(err error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
type activeResources struct {
|
type SchedDiagRequestInfo struct {
|
||||||
memUsedMin uint64
|
Sector abi.SectorID
|
||||||
memUsedMax uint64
|
TaskType sealtasks.TaskType
|
||||||
gpuUsed bool
|
Priority int
|
||||||
cpuUse uint64
|
|
||||||
|
|
||||||
cond *sync.Cond
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type workerHandle struct {
|
type SchedDiagInfo struct {
|
||||||
w Worker
|
Requests []SchedDiagRequestInfo
|
||||||
|
OpenWindows []WorkerID
|
||||||
info storiface.WorkerInfo
|
|
||||||
|
|
||||||
preparing *activeResources
|
|
||||||
active *activeResources
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sh *scheduler) runSched() {
|
func (sh *scheduler) runSched() {
|
||||||
|
defer close(sh.closed)
|
||||||
|
|
||||||
go sh.runWorkerWatcher()
|
go sh.runWorkerWatcher()
|
||||||
|
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case w := <-sh.newWorkers:
|
case w := <-sh.newWorkers:
|
||||||
sh.schedNewWorker(w)
|
sh.newWorker(w)
|
||||||
case wid := <-sh.workerClosing:
|
|
||||||
sh.schedDropWorker(wid)
|
case wid := <-sh.workerClosing:
|
||||||
case req := <-sh.schedule:
|
sh.dropWorker(wid)
|
||||||
scheduled, err := sh.maybeSchedRequest(req)
|
|
||||||
if err != nil {
|
case req := <-sh.schedule:
|
||||||
req.respond(err)
|
sh.schedQueue.Push(req)
|
||||||
continue
|
sh.trySched()
|
||||||
}
|
|
||||||
if scheduled {
|
if sh.testSync != nil {
|
||||||
continue
|
sh.testSync <- struct{}{}
|
||||||
}
|
}
|
||||||
|
case req := <-sh.windowRequests:
|
||||||
|
sh.openWindows = append(sh.openWindows, req)
|
||||||
|
sh.trySched()
|
||||||
|
|
||||||
|
case ireq := <-sh.info:
|
||||||
|
ireq(sh.diag())
|
||||||
|
|
||||||
heap.Push(sh.schedQueue, req)
|
|
||||||
case wid := <-sh.workerFree:
|
|
||||||
sh.onWorkerFreed(wid)
|
|
||||||
case <-sh.closing:
|
case <-sh.closing:
|
||||||
sh.schedClose()
|
sh.schedClose()
|
||||||
return
|
return
|
||||||
@ -189,119 +243,317 @@ func (sh *scheduler) runSched() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sh *scheduler) onWorkerFreed(wid WorkerID) {
|
func (sh *scheduler) diag() SchedDiagInfo {
|
||||||
sh.workersLk.Lock()
|
var out SchedDiagInfo
|
||||||
w, ok := sh.workers[wid]
|
|
||||||
sh.workersLk.Unlock()
|
for sqi := 0; sqi < sh.schedQueue.Len(); sqi++ {
|
||||||
if !ok {
|
task := (*sh.schedQueue)[sqi]
|
||||||
log.Warnf("onWorkerFreed on invalid worker %d", wid)
|
|
||||||
|
out.Requests = append(out.Requests, SchedDiagRequestInfo{
|
||||||
|
Sector: task.sector,
|
||||||
|
TaskType: task.taskType,
|
||||||
|
Priority: task.priority,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, window := range sh.openWindows {
|
||||||
|
out.OpenWindows = append(out.OpenWindows, window.worker)
|
||||||
|
}
|
||||||
|
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sh *scheduler) trySched() {
|
||||||
|
/*
|
||||||
|
This assigns tasks to workers based on:
|
||||||
|
- Task priority (achieved by handling sh.schedQueue in order, since it's already sorted by priority)
|
||||||
|
- Worker resource availability
|
||||||
|
- Task-specified worker preference (acceptableWindows array below sorted by this preference)
|
||||||
|
- Window request age
|
||||||
|
|
||||||
|
1. For each task in the schedQueue find windows which can handle them
|
||||||
|
1.1. Create list of windows capable of handling a task
|
||||||
|
1.2. Sort windows according to task selector preferences
|
||||||
|
2. Going through schedQueue again, assign task to first acceptable window
|
||||||
|
with resources available
|
||||||
|
3. Submit windows with scheduled tasks to workers
|
||||||
|
|
||||||
|
*/
|
||||||
|
|
||||||
|
windows := make([]schedWindow, len(sh.openWindows))
|
||||||
|
acceptableWindows := make([][]int, sh.schedQueue.Len())
|
||||||
|
|
||||||
|
log.Debugf("SCHED %d queued; %d open windows", sh.schedQueue.Len(), len(windows))
|
||||||
|
|
||||||
|
sh.workersLk.RLock()
|
||||||
|
defer sh.workersLk.RUnlock()
|
||||||
|
|
||||||
|
// Step 1
|
||||||
|
for sqi := 0; sqi < sh.schedQueue.Len(); sqi++ {
|
||||||
|
task := (*sh.schedQueue)[sqi]
|
||||||
|
needRes := ResourceTable[task.taskType][sh.spt]
|
||||||
|
|
||||||
|
task.indexHeap = sqi
|
||||||
|
for wnd, windowRequest := range sh.openWindows {
|
||||||
|
worker := sh.workers[windowRequest.worker]
|
||||||
|
|
||||||
|
// TODO: allow bigger windows
|
||||||
|
if !windows[wnd].allocated.canHandleRequest(needRes, windowRequest.worker, worker.info.Resources) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
rpcCtx, cancel := context.WithTimeout(task.ctx, SelectorTimeout)
|
||||||
|
ok, err := task.sel.Ok(rpcCtx, task.taskType, sh.spt, worker)
|
||||||
|
cancel()
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("trySched(1) req.sel.Ok error: %+v", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
acceptableWindows[sqi] = append(acceptableWindows[sqi], wnd)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(acceptableWindows[sqi]) == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pick the best worker (shuffle in case some workers are equally good)
|
||||||
|
rand.Shuffle(len(acceptableWindows[sqi]), func(i, j int) {
|
||||||
|
acceptableWindows[sqi][i], acceptableWindows[sqi][j] = acceptableWindows[sqi][j], acceptableWindows[sqi][i]
|
||||||
|
})
|
||||||
|
sort.SliceStable(acceptableWindows[sqi], func(i, j int) bool {
|
||||||
|
wii := sh.openWindows[acceptableWindows[sqi][i]].worker
|
||||||
|
wji := sh.openWindows[acceptableWindows[sqi][j]].worker
|
||||||
|
|
||||||
|
if wii == wji {
|
||||||
|
// for the same worker prefer older windows
|
||||||
|
return acceptableWindows[sqi][i] < acceptableWindows[sqi][j]
|
||||||
|
}
|
||||||
|
|
||||||
|
wi := sh.workers[wii]
|
||||||
|
wj := sh.workers[wji]
|
||||||
|
|
||||||
|
rpcCtx, cancel := context.WithTimeout(task.ctx, SelectorTimeout)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
r, err := task.sel.Cmp(rpcCtx, task.taskType, wi, wj)
|
||||||
|
if err != nil {
|
||||||
|
log.Error("selecting best worker: %s", err)
|
||||||
|
}
|
||||||
|
return r
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debugf("SCHED windows: %+v", windows)
|
||||||
|
log.Debugf("SCHED Acceptable win: %+v", acceptableWindows)
|
||||||
|
|
||||||
|
// Step 2
|
||||||
|
scheduled := 0
|
||||||
|
|
||||||
|
for sqi := 0; sqi < sh.schedQueue.Len(); sqi++ {
|
||||||
|
task := (*sh.schedQueue)[sqi]
|
||||||
|
needRes := ResourceTable[task.taskType][sh.spt]
|
||||||
|
|
||||||
|
selectedWindow := -1
|
||||||
|
for _, wnd := range acceptableWindows[task.indexHeap] {
|
||||||
|
wid := sh.openWindows[wnd].worker
|
||||||
|
wr := sh.workers[wid].info.Resources
|
||||||
|
|
||||||
|
log.Debugf("SCHED try assign sqi:%d sector %d to window %d", sqi, task.sector.Number, wnd)
|
||||||
|
|
||||||
|
// TODO: allow bigger windows
|
||||||
|
if !windows[wnd].allocated.canHandleRequest(needRes, wid, wr) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debugf("SCHED ASSIGNED sqi:%d sector %d to window %d", sqi, task.sector.Number, wnd)
|
||||||
|
|
||||||
|
windows[wnd].allocated.add(wr, needRes)
|
||||||
|
|
||||||
|
selectedWindow = wnd
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if selectedWindow < 0 {
|
||||||
|
// all windows full
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
windows[selectedWindow].todo = append(windows[selectedWindow].todo, task)
|
||||||
|
|
||||||
|
sh.schedQueue.Remove(sqi)
|
||||||
|
sqi--
|
||||||
|
scheduled++
|
||||||
|
}
|
||||||
|
|
||||||
|
// Step 3
|
||||||
|
|
||||||
|
if scheduled == 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := 0; i < sh.schedQueue.Len(); i++ {
|
scheduledWindows := map[int]struct{}{}
|
||||||
req := (*sh.schedQueue)[i]
|
for wnd, window := range windows {
|
||||||
|
if len(window.todo) == 0 {
|
||||||
ok, err := req.sel.Ok(req.ctx, req.taskType, sh.spt, w)
|
// Nothing scheduled here, keep the window open
|
||||||
if err != nil {
|
|
||||||
log.Errorf("onWorkerFreed req.sel.Ok error: %+v", err)
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if !ok {
|
scheduledWindows[wnd] = struct{}{}
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
scheduled, err := sh.maybeSchedRequest(req)
|
window := window // copy
|
||||||
if err != nil {
|
select {
|
||||||
req.respond(err)
|
case sh.openWindows[wnd].done <- &window:
|
||||||
continue
|
default:
|
||||||
}
|
log.Error("expected sh.openWindows[wnd].done to be buffered")
|
||||||
|
|
||||||
if scheduled {
|
|
||||||
heap.Remove(sh.schedQueue, i)
|
|
||||||
i--
|
|
||||||
continue
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Rewrite sh.openWindows array, removing scheduled windows
|
||||||
|
newOpenWindows := make([]*schedWindowRequest, 0, len(sh.openWindows)-len(scheduledWindows))
|
||||||
|
for wnd, window := range sh.openWindows {
|
||||||
|
if _, scheduled := scheduledWindows[wnd]; scheduled {
|
||||||
|
// keep unscheduled windows open
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
newOpenWindows = append(newOpenWindows, window)
|
||||||
|
}
|
||||||
|
|
||||||
|
sh.openWindows = newOpenWindows
|
||||||
}
|
}
|
||||||
|
|
||||||
var selectorTimeout = 5 * time.Second
|
func (sh *scheduler) runWorker(wid WorkerID) {
|
||||||
|
var ready sync.WaitGroup
|
||||||
func (sh *scheduler) maybeSchedRequest(req *workerRequest) (bool, error) {
|
ready.Add(1)
|
||||||
sh.workersLk.Lock()
|
defer ready.Wait()
|
||||||
defer sh.workersLk.Unlock()
|
|
||||||
|
|
||||||
tried := 0
|
|
||||||
var acceptable []WorkerID
|
|
||||||
|
|
||||||
needRes := ResourceTable[req.taskType][sh.spt]
|
|
||||||
|
|
||||||
for wid, worker := range sh.workers {
|
|
||||||
rpcCtx, cancel := context.WithTimeout(req.ctx, selectorTimeout)
|
|
||||||
ok, err := req.sel.Ok(rpcCtx, req.taskType, sh.spt, worker)
|
|
||||||
cancel()
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if !ok {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
tried++
|
|
||||||
|
|
||||||
if !canHandleRequest(needRes, wid, worker.info.Resources, worker.preparing) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
acceptable = append(acceptable, wid)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(acceptable) > 0 {
|
|
||||||
{
|
|
||||||
var serr error
|
|
||||||
|
|
||||||
sort.SliceStable(acceptable, func(i, j int) bool {
|
|
||||||
rpcCtx, cancel := context.WithTimeout(req.ctx, selectorTimeout)
|
|
||||||
defer cancel()
|
|
||||||
r, err := req.sel.Cmp(rpcCtx, req.taskType, sh.workers[acceptable[i]], sh.workers[acceptable[j]])
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
serr = multierror.Append(serr, err)
|
|
||||||
}
|
|
||||||
return r
|
|
||||||
})
|
|
||||||
|
|
||||||
if serr != nil {
|
|
||||||
return false, xerrors.Errorf("error(s) selecting best worker: %w", serr)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return true, sh.assignWorker(acceptable[0], sh.workers[acceptable[0]], req)
|
|
||||||
}
|
|
||||||
|
|
||||||
if tried == 0 {
|
|
||||||
return false, xerrors.New("maybeSchedRequest didn't find any good workers")
|
|
||||||
}
|
|
||||||
|
|
||||||
return false, nil // put in waiting queue
|
|
||||||
}
|
|
||||||
|
|
||||||
func (sh *scheduler) assignWorker(wid WorkerID, w *workerHandle, req *workerRequest) error {
|
|
||||||
needRes := ResourceTable[req.taskType][sh.spt]
|
|
||||||
|
|
||||||
w.preparing.add(w.info.Resources, needRes)
|
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
err := req.prepare(req.ctx, w.w)
|
sh.workersLk.RLock()
|
||||||
|
worker, found := sh.workers[wid]
|
||||||
|
sh.workersLk.RUnlock()
|
||||||
|
|
||||||
|
ready.Done()
|
||||||
|
|
||||||
|
if !found {
|
||||||
|
panic(fmt.Sprintf("worker %d not found", wid))
|
||||||
|
}
|
||||||
|
|
||||||
|
defer close(worker.closedMgr)
|
||||||
|
|
||||||
|
scheduledWindows := make(chan *schedWindow, SchedWindows)
|
||||||
|
taskDone := make(chan struct{}, 1)
|
||||||
|
windowsRequested := 0
|
||||||
|
|
||||||
|
var activeWindows []*schedWindow
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.TODO())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
workerClosing, err := worker.w.Closing(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
log.Warnw("Worker closing", "workerid", wid)
|
||||||
|
|
||||||
|
// TODO: close / return all queued tasks
|
||||||
|
}()
|
||||||
|
|
||||||
|
for {
|
||||||
|
// ask for more windows if we need them
|
||||||
|
for ; windowsRequested < SchedWindows; windowsRequested++ {
|
||||||
|
select {
|
||||||
|
case sh.windowRequests <- &schedWindowRequest{
|
||||||
|
worker: wid,
|
||||||
|
done: scheduledWindows,
|
||||||
|
}:
|
||||||
|
case <-sh.closing:
|
||||||
|
return
|
||||||
|
case <-workerClosing:
|
||||||
|
return
|
||||||
|
case <-worker.closingMgr:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case w := <-scheduledWindows:
|
||||||
|
activeWindows = append(activeWindows, w)
|
||||||
|
case <-taskDone:
|
||||||
|
log.Debugw("task done", "workerid", wid)
|
||||||
|
case <-sh.closing:
|
||||||
|
return
|
||||||
|
case <-workerClosing:
|
||||||
|
return
|
||||||
|
case <-worker.closingMgr:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
assignLoop:
|
||||||
|
// process windows in order
|
||||||
|
for len(activeWindows) > 0 {
|
||||||
|
// process tasks within a window in order
|
||||||
|
for len(activeWindows[0].todo) > 0 {
|
||||||
|
todo := activeWindows[0].todo[0]
|
||||||
|
needRes := ResourceTable[todo.taskType][sh.spt]
|
||||||
|
|
||||||
|
sh.workersLk.RLock()
|
||||||
|
worker.lk.Lock()
|
||||||
|
ok := worker.preparing.canHandleRequest(needRes, wid, worker.info.Resources)
|
||||||
|
worker.lk.Unlock()
|
||||||
|
|
||||||
|
if !ok {
|
||||||
|
sh.workersLk.RUnlock()
|
||||||
|
break assignLoop
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debugf("assign worker sector %d", todo.sector.Number)
|
||||||
|
err := sh.assignWorker(taskDone, wid, worker, todo)
|
||||||
|
sh.workersLk.RUnlock()
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
log.Error("assignWorker error: %+v", err)
|
||||||
|
go todo.respond(xerrors.Errorf("assignWorker error: %w", err))
|
||||||
|
}
|
||||||
|
|
||||||
|
activeWindows[0].todo = activeWindows[0].todo[1:]
|
||||||
|
}
|
||||||
|
|
||||||
|
copy(activeWindows, activeWindows[1:])
|
||||||
|
activeWindows[len(activeWindows)-1] = nil
|
||||||
|
activeWindows = activeWindows[:len(activeWindows)-1]
|
||||||
|
|
||||||
|
windowsRequested--
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sh *scheduler) assignWorker(taskDone chan struct{}, wid WorkerID, w *workerHandle, req *workerRequest) error {
|
||||||
|
needRes := ResourceTable[req.taskType][sh.spt]
|
||||||
|
|
||||||
|
w.lk.Lock()
|
||||||
|
w.preparing.add(w.info.Resources, needRes)
|
||||||
|
w.lk.Unlock()
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
err := req.prepare(req.ctx, w.wt.worker(w.w))
|
||||||
sh.workersLk.Lock()
|
sh.workersLk.Lock()
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
w.lk.Lock()
|
||||||
w.preparing.free(w.info.Resources, needRes)
|
w.preparing.free(w.info.Resources, needRes)
|
||||||
|
w.lk.Unlock()
|
||||||
sh.workersLk.Unlock()
|
sh.workersLk.Unlock()
|
||||||
|
|
||||||
select {
|
select {
|
||||||
case sh.workerFree <- wid:
|
case taskDone <- struct{}{}:
|
||||||
case <-sh.closing:
|
case <-sh.closing:
|
||||||
log.Warnf("scheduler closed while sending response (prepare error: %+v)", err)
|
log.Warnf("scheduler closed while sending response (prepare error: %+v)", err)
|
||||||
}
|
}
|
||||||
@ -317,16 +569,18 @@ func (sh *scheduler) assignWorker(wid WorkerID, w *workerHandle, req *workerRequ
|
|||||||
}
|
}
|
||||||
|
|
||||||
err = w.active.withResources(wid, w.info.Resources, needRes, &sh.workersLk, func() error {
|
err = w.active.withResources(wid, w.info.Resources, needRes, &sh.workersLk, func() error {
|
||||||
|
w.lk.Lock()
|
||||||
w.preparing.free(w.info.Resources, needRes)
|
w.preparing.free(w.info.Resources, needRes)
|
||||||
|
w.lk.Unlock()
|
||||||
sh.workersLk.Unlock()
|
sh.workersLk.Unlock()
|
||||||
defer sh.workersLk.Lock() // we MUST return locked from this function
|
defer sh.workersLk.Lock() // we MUST return locked from this function
|
||||||
|
|
||||||
select {
|
select {
|
||||||
case sh.workerFree <- wid:
|
case taskDone <- struct{}{}:
|
||||||
case <-sh.closing:
|
case <-sh.closing:
|
||||||
}
|
}
|
||||||
|
|
||||||
err = req.work(req.ctx, w.w)
|
err = req.work(req.ctx, w.wt.worker(w.w))
|
||||||
|
|
||||||
select {
|
select {
|
||||||
case req.ret <- workerResponse{err: err}:
|
case req.ret <- workerResponse{err: err}:
|
||||||
@ -350,110 +604,10 @@ func (sh *scheduler) assignWorker(wid WorkerID, w *workerHandle, req *workerRequ
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *activeResources) withResources(id WorkerID, wr storiface.WorkerResources, r Resources, locker sync.Locker, cb func() error) error {
|
func (sh *scheduler) newWorker(w *workerHandle) {
|
||||||
for !canHandleRequest(r, id, wr, a) {
|
w.closedMgr = make(chan struct{})
|
||||||
if a.cond == nil {
|
w.closingMgr = make(chan struct{})
|
||||||
a.cond = sync.NewCond(locker)
|
|
||||||
}
|
|
||||||
a.cond.Wait()
|
|
||||||
}
|
|
||||||
|
|
||||||
a.add(wr, r)
|
|
||||||
|
|
||||||
err := cb()
|
|
||||||
|
|
||||||
a.free(wr, r)
|
|
||||||
if a.cond != nil {
|
|
||||||
a.cond.Broadcast()
|
|
||||||
}
|
|
||||||
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *activeResources) add(wr storiface.WorkerResources, r Resources) {
|
|
||||||
a.gpuUsed = r.CanGPU
|
|
||||||
if r.MultiThread() {
|
|
||||||
a.cpuUse += wr.CPUs
|
|
||||||
} else {
|
|
||||||
a.cpuUse += uint64(r.Threads)
|
|
||||||
}
|
|
||||||
|
|
||||||
a.memUsedMin += r.MinMemory
|
|
||||||
a.memUsedMax += r.MaxMemory
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *activeResources) free(wr storiface.WorkerResources, r Resources) {
|
|
||||||
if r.CanGPU {
|
|
||||||
a.gpuUsed = false
|
|
||||||
}
|
|
||||||
if r.MultiThread() {
|
|
||||||
a.cpuUse -= wr.CPUs
|
|
||||||
} else {
|
|
||||||
a.cpuUse -= uint64(r.Threads)
|
|
||||||
}
|
|
||||||
|
|
||||||
a.memUsedMin -= r.MinMemory
|
|
||||||
a.memUsedMax -= r.MaxMemory
|
|
||||||
}
|
|
||||||
|
|
||||||
func canHandleRequest(needRes Resources, wid WorkerID, res storiface.WorkerResources, active *activeResources) bool {
|
|
||||||
|
|
||||||
// TODO: dedupe needRes.BaseMinMemory per task type (don't add if that task is already running)
|
|
||||||
minNeedMem := res.MemReserved + active.memUsedMin + needRes.MinMemory + needRes.BaseMinMemory
|
|
||||||
if minNeedMem > res.MemPhysical {
|
|
||||||
log.Debugf("sched: not scheduling on worker %d; not enough physical memory - need: %dM, have %dM", wid, minNeedMem/mib, res.MemPhysical/mib)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
maxNeedMem := res.MemReserved + active.memUsedMax + needRes.MaxMemory + needRes.BaseMinMemory
|
|
||||||
|
|
||||||
if maxNeedMem > res.MemSwap+res.MemPhysical {
|
|
||||||
log.Debugf("sched: not scheduling on worker %d; not enough virtual memory - need: %dM, have %dM", wid, maxNeedMem/mib, (res.MemSwap+res.MemPhysical)/mib)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if needRes.MultiThread() {
|
|
||||||
if active.cpuUse > 0 {
|
|
||||||
log.Debugf("sched: not scheduling on worker %d; multicore process needs %d threads, %d in use, target %d", wid, res.CPUs, active.cpuUse, res.CPUs)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if active.cpuUse+uint64(needRes.Threads) > res.CPUs {
|
|
||||||
log.Debugf("sched: not scheduling on worker %d; not enough threads, need %d, %d in use, target %d", wid, needRes.Threads, active.cpuUse, res.CPUs)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(res.GPUs) > 0 && needRes.CanGPU {
|
|
||||||
if active.gpuUsed {
|
|
||||||
log.Debugf("sched: not scheduling on worker %d; GPU in use", wid)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *activeResources) utilization(wr storiface.WorkerResources) float64 {
|
|
||||||
var max float64
|
|
||||||
|
|
||||||
cpu := float64(a.cpuUse) / float64(wr.CPUs)
|
|
||||||
max = cpu
|
|
||||||
|
|
||||||
memMin := float64(a.memUsedMin+wr.MemReserved) / float64(wr.MemPhysical)
|
|
||||||
if memMin > max {
|
|
||||||
max = memMin
|
|
||||||
}
|
|
||||||
|
|
||||||
memMax := float64(a.memUsedMax+wr.MemReserved) / float64(wr.MemPhysical+wr.MemSwap)
|
|
||||||
if memMax > max {
|
|
||||||
max = memMax
|
|
||||||
}
|
|
||||||
|
|
||||||
return max
|
|
||||||
}
|
|
||||||
|
|
||||||
func (sh *scheduler) schedNewWorker(w *workerHandle) {
|
|
||||||
sh.workersLk.Lock()
|
sh.workersLk.Lock()
|
||||||
|
|
||||||
id := sh.nextWorker
|
id := sh.nextWorker
|
||||||
@ -462,41 +616,88 @@ func (sh *scheduler) schedNewWorker(w *workerHandle) {
|
|||||||
|
|
||||||
sh.workersLk.Unlock()
|
sh.workersLk.Unlock()
|
||||||
|
|
||||||
|
sh.runWorker(id)
|
||||||
|
|
||||||
select {
|
select {
|
||||||
case sh.watchClosing <- id:
|
case sh.watchClosing <- id:
|
||||||
case <-sh.closing:
|
case <-sh.closing:
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
sh.onWorkerFreed(id)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sh *scheduler) schedDropWorker(wid WorkerID) {
|
func (sh *scheduler) dropWorker(wid WorkerID) {
|
||||||
sh.workersLk.Lock()
|
sh.workersLk.Lock()
|
||||||
defer sh.workersLk.Unlock()
|
defer sh.workersLk.Unlock()
|
||||||
|
|
||||||
w := sh.workers[wid]
|
w := sh.workers[wid]
|
||||||
delete(sh.workers, wid)
|
|
||||||
|
|
||||||
go func() {
|
sh.workerCleanup(wid, w)
|
||||||
if err := w.w.Close(); err != nil {
|
|
||||||
log.Warnf("closing worker %d: %+v", err)
|
delete(sh.workers, wid)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sh *scheduler) workerCleanup(wid WorkerID, w *workerHandle) {
|
||||||
|
if !w.cleanupStarted {
|
||||||
|
close(w.closingMgr)
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case <-w.closedMgr:
|
||||||
|
case <-time.After(time.Second):
|
||||||
|
log.Errorf("timeout closing worker manager goroutine %d", wid)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !w.cleanupStarted {
|
||||||
|
w.cleanupStarted = true
|
||||||
|
|
||||||
|
newWindows := make([]*schedWindowRequest, 0, len(sh.openWindows))
|
||||||
|
for _, window := range sh.openWindows {
|
||||||
|
if window.worker != wid {
|
||||||
|
newWindows = append(newWindows, window)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}()
|
sh.openWindows = newWindows
|
||||||
|
|
||||||
|
log.Debugf("dropWorker %d", wid)
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
if err := w.w.Close(); err != nil {
|
||||||
|
log.Warnf("closing worker %d: %+v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sh *scheduler) schedClose() {
|
func (sh *scheduler) schedClose() {
|
||||||
sh.workersLk.Lock()
|
sh.workersLk.Lock()
|
||||||
defer sh.workersLk.Unlock()
|
defer sh.workersLk.Unlock()
|
||||||
|
log.Debugf("closing scheduler")
|
||||||
|
|
||||||
for i, w := range sh.workers {
|
for i, w := range sh.workers {
|
||||||
if err := w.w.Close(); err != nil {
|
sh.workerCleanup(i, w)
|
||||||
log.Errorf("closing worker %d: %+v", i, err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sh *scheduler) Close() error {
|
func (sh *scheduler) Info(ctx context.Context) (interface{}, error) {
|
||||||
|
ch := make(chan interface{}, 1)
|
||||||
|
|
||||||
|
sh.info <- func(res interface{}) {
|
||||||
|
ch <- res
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case res := <-ch:
|
||||||
|
return res, nil
|
||||||
|
case <-ctx.Done():
|
||||||
|
return nil, ctx.Err()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sh *scheduler) Close(ctx context.Context) error {
|
||||||
close(sh.closing)
|
close(sh.closing)
|
||||||
|
select {
|
||||||
|
case <-sh.closed:
|
||||||
|
case <-ctx.Done():
|
||||||
|
return ctx.Err()
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
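A hedged sketch of a caller of the new Info method (for instance a diagnostics endpoint on the Manager): the value produced by the runSched goroutine is the SchedDiagInfo built in diag(), so it can be asserted back to that type.

raw, err := m.sched.Info(ctx)
if err != nil {
	return err
}
diag, ok := raw.(SchedDiagInfo)
if !ok {
	return xerrors.Errorf("unexpected scheduler info type %T", raw)
}
for _, r := range diag.Requests {
	log.Infow("queued", "sector", r.Sector.Number, "task", r.TaskType, "priority", r.Priority)
}
log.Infow("open windows", "count", len(diag.OpenWindows))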
|
110
sched_resources.go
Normal file
@ -0,0 +1,110 @@
|
|||||||
|
package sectorstorage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/sector-storage/storiface"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (a *activeResources) withResources(id WorkerID, wr storiface.WorkerResources, r Resources, locker sync.Locker, cb func() error) error {
|
||||||
|
for !a.canHandleRequest(r, id, wr) {
|
||||||
|
if a.cond == nil {
|
||||||
|
a.cond = sync.NewCond(locker)
|
||||||
|
}
|
||||||
|
a.cond.Wait()
|
||||||
|
}
|
||||||
|
|
||||||
|
a.add(wr, r)
|
||||||
|
|
||||||
|
err := cb()
|
||||||
|
|
||||||
|
a.free(wr, r)
|
||||||
|
if a.cond != nil {
|
||||||
|
a.cond.Broadcast()
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *activeResources) add(wr storiface.WorkerResources, r Resources) {
|
||||||
|
a.gpuUsed = r.CanGPU
|
||||||
|
if r.MultiThread() {
|
||||||
|
a.cpuUse += wr.CPUs
|
||||||
|
} else {
|
||||||
|
a.cpuUse += uint64(r.Threads)
|
||||||
|
}
|
||||||
|
|
||||||
|
a.memUsedMin += r.MinMemory
|
||||||
|
a.memUsedMax += r.MaxMemory
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *activeResources) free(wr storiface.WorkerResources, r Resources) {
|
||||||
|
if r.CanGPU {
|
||||||
|
a.gpuUsed = false
|
||||||
|
}
|
||||||
|
if r.MultiThread() {
|
||||||
|
a.cpuUse -= wr.CPUs
|
||||||
|
} else {
|
||||||
|
a.cpuUse -= uint64(r.Threads)
|
||||||
|
}
|
||||||
|
|
||||||
|
a.memUsedMin -= r.MinMemory
|
||||||
|
a.memUsedMax -= r.MaxMemory
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *activeResources) canHandleRequest(needRes Resources, wid WorkerID, res storiface.WorkerResources) bool {
|
||||||
|
|
||||||
|
// TODO: dedupe needRes.BaseMinMemory per task type (don't add if that task is already running)
|
||||||
|
minNeedMem := res.MemReserved + a.memUsedMin + needRes.MinMemory + needRes.BaseMinMemory
|
||||||
|
if minNeedMem > res.MemPhysical {
|
||||||
|
log.Debugf("sched: not scheduling on worker %d; not enough physical memory - need: %dM, have %dM", wid, minNeedMem/mib, res.MemPhysical/mib)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
maxNeedMem := res.MemReserved + a.memUsedMax + needRes.MaxMemory + needRes.BaseMinMemory
|
||||||
|
|
||||||
|
if maxNeedMem > res.MemSwap+res.MemPhysical {
|
||||||
|
log.Debugf("sched: not scheduling on worker %d; not enough virtual memory - need: %dM, have %dM", wid, maxNeedMem/mib, (res.MemSwap+res.MemPhysical)/mib)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if needRes.MultiThread() {
|
||||||
|
if a.cpuUse > 0 {
|
||||||
|
log.Debugf("sched: not scheduling on worker %d; multicore process needs %d threads, %d in use, target %d", wid, res.CPUs, a.cpuUse, res.CPUs)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if a.cpuUse+uint64(needRes.Threads) > res.CPUs {
|
||||||
|
log.Debugf("sched: not scheduling on worker %d; not enough threads, need %d, %d in use, target %d", wid, needRes.Threads, a.cpuUse, res.CPUs)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(res.GPUs) > 0 && needRes.CanGPU {
|
||||||
|
if a.gpuUsed {
|
||||||
|
log.Debugf("sched: not scheduling on worker %d; GPU in use", wid)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *activeResources) utilization(wr storiface.WorkerResources) float64 {
|
||||||
|
var max float64
|
||||||
|
|
||||||
|
cpu := float64(a.cpuUse) / float64(wr.CPUs)
|
||||||
|
max = cpu
|
||||||
|
|
||||||
|
memMin := float64(a.memUsedMin+wr.MemReserved) / float64(wr.MemPhysical)
|
||||||
|
if memMin > max {
|
||||||
|
max = memMin
|
||||||
|
}
|
||||||
|
|
||||||
|
memMax := float64(a.memUsedMax+wr.MemReserved) / float64(wr.MemPhysical+wr.MemSwap)
|
||||||
|
if memMax > max {
|
||||||
|
max = memMax
|
||||||
|
}
|
||||||
|
|
||||||
|
return max
|
||||||
|
}
|
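A worked illustration of the checks in canHandleRequest, written as a hypothetical helper inside this package; the worker resources match the test worker further down, while the task requirements and current usage are made-up numbers, not values from the real ResourceTable.

func exampleResourceCheck() bool {
	wr := storiface.WorkerResources{
		MemPhysical: 128 << 30, // 128 GiB
		MemSwap:     200 << 30,
		MemReserved: 2 << 30,
		CPUs:        32,
	}
	// pretend one task is already running: 16 threads, 56 GiB min / 96 GiB max memory
	active := &activeResources{cpuUse: 16, memUsedMin: 56 << 30, memUsedMax: 96 << 30}
	// and a new request needing 8 threads, 64 GiB min / 128 GiB max, 1 GiB base
	need := Resources{MinMemory: 64 << 30, MaxMemory: 128 << 30, Threads: 8, BaseMinMemory: 1 << 30}

	// min memory: 2 + 56 + 64 + 1  = 123 GiB <= 128 GiB physical   -> ok
	// max memory: 2 + 96 + 128 + 1 = 227 GiB <= 328 GiB phys+swap  -> ok
	// threads:    16 + 8 = 24      <= 32                           -> ok
	return active.canHandleRequest(need, 0, wr) // true with these numbers
}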
435
sched_test.go
@ -2,9 +2,23 @@ package sectorstorage
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"runtime"
|
||||||
|
"sync"
|
||||||
"testing"
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/ipfs/go-cid"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/specs-actors/actors/abi"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/sector-storage/fsutil"
|
||||||
|
"github.com/filecoin-project/sector-storage/sealtasks"
|
||||||
|
"github.com/filecoin-project/sector-storage/stores"
|
||||||
|
"github.com/filecoin-project/sector-storage/storiface"
|
||||||
|
"github.com/filecoin-project/specs-storage/storage"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestWithPriority(t *testing.T) {
|
func TestWithPriority(t *testing.T) {
|
||||||
@ -16,3 +30,424 @@ func TestWithPriority(t *testing.T) {
|
|||||||
|
|
||||||
require.Equal(t, 2222, getPriority(ctx))
|
require.Equal(t, 2222, getPriority(ctx))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type schedTestWorker struct {
|
||||||
|
name string
|
||||||
|
taskTypes map[sealtasks.TaskType]struct{}
|
||||||
|
paths []stores.StoragePath
|
||||||
|
|
||||||
|
closed bool
|
||||||
|
closing chan struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *schedTestWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) {
|
||||||
|
panic("implement me")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *schedTestWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storage.SectorCids, error) {
|
||||||
|
panic("implement me")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *schedTestWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) {
|
||||||
|
panic("implement me")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *schedTestWorker) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storage.Proof, error) {
|
||||||
|
panic("implement me")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *schedTestWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error {
|
||||||
|
panic("implement me")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *schedTestWorker) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error {
|
||||||
|
panic("implement me")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *schedTestWorker) Remove(ctx context.Context, sector abi.SectorID) error {
|
||||||
|
panic("implement me")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *schedTestWorker) NewSector(ctx context.Context, sector abi.SectorID) error {
|
||||||
|
panic("implement me")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *schedTestWorker) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) {
|
||||||
|
panic("implement me")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *schedTestWorker) MoveStorage(ctx context.Context, sector abi.SectorID) error {
|
||||||
|
panic("implement me")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *schedTestWorker) Fetch(ctx context.Context, id abi.SectorID, ft stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) error {
|
||||||
|
panic("implement me")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *schedTestWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) error {
|
||||||
|
panic("implement me")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *schedTestWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) {
|
||||||
|
panic("implement me")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *schedTestWorker) TaskTypes(ctx context.Context) (map[sealtasks.TaskType]struct{}, error) {
|
||||||
|
return s.taskTypes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *schedTestWorker) Paths(ctx context.Context) ([]stores.StoragePath, error) {
|
||||||
|
return s.paths, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *schedTestWorker) Info(ctx context.Context) (storiface.WorkerInfo, error) {
|
||||||
|
return storiface.WorkerInfo{
|
||||||
|
Hostname: s.name,
|
||||||
|
Resources: storiface.WorkerResources{
|
||||||
|
MemPhysical: 128 << 30,
|
||||||
|
MemSwap: 200 << 30,
|
||||||
|
MemReserved: 2 << 30,
|
||||||
|
CPUs: 32,
|
||||||
|
GPUs: []string{"a GPU"},
|
||||||
|
},
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *schedTestWorker) Closing(ctx context.Context) (<-chan struct{}, error) {
|
||||||
|
return s.closing, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *schedTestWorker) Close() error {
|
||||||
|
if !s.closed {
|
||||||
|
log.Info("close schedTestWorker")
|
||||||
|
s.closed = true
|
||||||
|
close(s.closing)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ Worker = &schedTestWorker{}
|
||||||
|
|
||||||
|
func addTestWorker(t *testing.T, sched *scheduler, index *stores.Index, name string, taskTypes map[sealtasks.TaskType]struct{}) {
|
||||||
|
w := &schedTestWorker{
|
||||||
|
name: name,
|
||||||
|
taskTypes: taskTypes,
|
||||||
|
paths: []stores.StoragePath{{ID: "bb-8", Weight: 2, LocalPath: "<octopus>food</octopus>", CanSeal: true, CanStore: true}},
|
||||||
|
|
||||||
|
closing: make(chan struct{}),
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, path := range w.paths {
|
||||||
|
err := index.StorageAttach(context.TODO(), stores.StorageInfo{
|
||||||
|
ID: path.ID,
|
||||||
|
URLs: nil,
|
||||||
|
Weight: path.Weight,
|
||||||
|
CanSeal: path.CanSeal,
|
||||||
|
CanStore: path.CanStore,
|
||||||
|
}, fsutil.FsStat{
|
||||||
|
Capacity: 1 << 40,
|
||||||
|
Available: 1 << 40,
|
||||||
|
Reserved: 3,
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
info, err := w.Info(context.TODO())
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
sched.newWorkers <- &workerHandle{
|
||||||
|
w: w,
|
||||||
|
wt: &workTracker{
|
||||||
|
running: map[uint64]storiface.WorkerJob{},
|
||||||
|
},
|
||||||
|
info: info,
|
||||||
|
preparing: &activeResources{},
|
||||||
|
active: &activeResources{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSchedStartStop(t *testing.T) {
|
||||||
|
spt := abi.RegisteredSealProof_StackedDrg32GiBV1
|
||||||
|
sched := newScheduler(spt)
|
||||||
|
go sched.runSched()
|
||||||
|
|
||||||
|
addTestWorker(t, sched, stores.NewIndex(), "fred", nil)
|
||||||
|
|
||||||
|
require.NoError(t, sched.Close(context.TODO()))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSched(t *testing.T) {
|
||||||
|
ctx, done := context.WithTimeout(context.Background(), 30*time.Second)
|
||||||
|
defer done()
|
||||||
|
|
||||||
|
spt := abi.RegisteredSealProof_StackedDrg32GiBV1
|
||||||
|
|
||||||
|
type workerSpec struct {
|
||||||
|
name string
|
||||||
|
taskTypes map[sealtasks.TaskType]struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
noopAction := func(ctx context.Context, w Worker) error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type runMeta struct {
|
||||||
|
done map[string]chan struct{}
|
||||||
|
|
||||||
|
wg sync.WaitGroup
|
||||||
|
}
|
||||||
|
|
||||||
|
type task func(*testing.T, *scheduler, *stores.Index, *runMeta)
|
||||||
|
|
||||||
|
sched := func(taskName, expectWorker string, sid abi.SectorNumber, taskType sealtasks.TaskType) task {
|
||||||
|
_, _, l, _ := runtime.Caller(1)
|
||||||
|
_, _, l2, _ := runtime.Caller(2)
|
||||||
|
|
||||||
|
return func(t *testing.T, sched *scheduler, index *stores.Index, rm *runMeta) {
|
||||||
|
done := make(chan struct{})
|
||||||
|
rm.done[taskName] = done
|
||||||
|
|
||||||
|
sel := newAllocSelector(index, stores.FTCache, stores.PathSealing)
|
||||||
|
|
||||||
|
rm.wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer rm.wg.Done()
|
||||||
|
|
||||||
|
sectorNum := abi.SectorID{
|
||||||
|
Miner: 8,
|
||||||
|
Number: sid,
|
||||||
|
}
|
||||||
|
|
||||||
|
err := sched.Schedule(ctx, sectorNum, taskType, sel, func(ctx context.Context, w Worker) error {
|
||||||
|
wi, err := w.Info(ctx)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, expectWorker, wi.Hostname)
|
||||||
|
|
||||||
|
log.Info("IN ", taskName)
|
||||||
|
|
||||||
|
for {
|
||||||
|
_, ok := <-done
|
||||||
|
if !ok {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Info("OUT ", taskName)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}, noopAction)
|
||||||
|
require.NoError(t, err, fmt.Sprint(l, l2))
|
||||||
|
}()
|
||||||
|
|
||||||
|
<-sched.testSync
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
taskStarted := func(name string) task {
|
||||||
|
_, _, l, _ := runtime.Caller(1)
|
||||||
|
_, _, l2, _ := runtime.Caller(2)
|
||||||
|
return func(t *testing.T, sched *scheduler, index *stores.Index, rm *runMeta) {
|
||||||
|
select {
|
||||||
|
case rm.done[name] <- struct{}{}:
|
||||||
|
case <-ctx.Done():
|
||||||
|
t.Fatal("ctx error", ctx.Err(), l, l2)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
taskDone := func(name string) task {
|
||||||
|
_, _, l, _ := runtime.Caller(1)
|
||||||
|
_, _, l2, _ := runtime.Caller(2)
|
||||||
|
return func(t *testing.T, sched *scheduler, index *stores.Index, rm *runMeta) {
|
||||||
|
select {
|
||||||
|
case rm.done[name] <- struct{}{}:
|
||||||
|
case <-ctx.Done():
|
||||||
|
t.Fatal("ctx error", ctx.Err(), l, l2)
|
||||||
|
}
|
||||||
|
close(rm.done[name])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
taskNotScheduled := func(name string) task {
|
||||||
|
_, _, l, _ := runtime.Caller(1)
|
||||||
|
_, _, l2, _ := runtime.Caller(2)
|
||||||
|
return func(t *testing.T, sched *scheduler, index *stores.Index, rm *runMeta) {
|
||||||
|
select {
|
||||||
|
case rm.done[name] <- struct{}{}:
|
||||||
|
t.Fatal("not expected", l, l2)
|
||||||
|
case <-time.After(10 * time.Millisecond): // TODO: better synchronization thingy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
testFunc := func(workers []workerSpec, tasks []task) func(t *testing.T) {
|
||||||
|
return func(t *testing.T) {
|
||||||
|
index := stores.NewIndex()
|
||||||
|
|
||||||
|
sched := newScheduler(spt)
|
||||||
|
sched.testSync = make(chan struct{})
|
||||||
|
|
||||||
|
go sched.runSched()
|
||||||
|
|
||||||
|
for _, worker := range workers {
|
||||||
|
addTestWorker(t, sched, index, worker.name, worker.taskTypes)
|
||||||
|
}
|
||||||
|
|
||||||
|
rm := runMeta{
|
||||||
|
done: map[string]chan struct{}{},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, task := range tasks {
|
||||||
|
task(t, sched, index, &rm)
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Info("wait for async stuff")
|
||||||
|
rm.wg.Wait()
|
||||||
|
|
||||||
|
require.NoError(t, sched.Close(context.TODO()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
multTask := func(tasks ...task) task {
|
||||||
|
return func(t *testing.T, s *scheduler, index *stores.Index, meta *runMeta) {
|
||||||
|
for _, tsk := range tasks {
|
||||||
|
tsk(t, s, index, meta)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("one-pc1", testFunc([]workerSpec{
|
||||||
|
{name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}},
|
||||||
|
}, []task{
|
||||||
|
sched("pc1-1", "fred", 8, sealtasks.TTPreCommit1),
|
||||||
|
taskDone("pc1-1"),
|
||||||
|
}))
|
||||||
|
|
||||||
|
t.Run("pc1-2workers-1", testFunc([]workerSpec{
|
||||||
|
{name: "fred2", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit2: {}}},
|
||||||
|
{name: "fred1", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}},
|
||||||
|
}, []task{
|
||||||
|
sched("pc1-1", "fred1", 8, sealtasks.TTPreCommit1),
|
||||||
|
taskDone("pc1-1"),
|
||||||
|
}))
|
||||||
|
|
||||||
|
t.Run("pc1-2workers-2", testFunc([]workerSpec{
|
||||||
|
{name: "fred1", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}},
|
||||||
|
{name: "fred2", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit2: {}}},
|
||||||
|
}, []task{
|
||||||
|
sched("pc1-1", "fred1", 8, sealtasks.TTPreCommit1),
|
||||||
|
taskDone("pc1-1"),
|
||||||
|
}))
|
||||||
|
|
||||||
|
t.Run("pc1-block-pc2", testFunc([]workerSpec{
|
||||||
|
{name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}},
|
||||||
|
}, []task{
|
||||||
|
sched("pc1", "fred", 8, sealtasks.TTPreCommit1),
|
||||||
|
taskStarted("pc1"),
|
||||||
|
|
||||||
|
sched("pc2", "fred", 8, sealtasks.TTPreCommit2),
|
||||||
|
taskNotScheduled("pc2"),
|
||||||
|
|
||||||
|
taskDone("pc1"),
|
||||||
|
taskDone("pc2"),
|
||||||
|
}))
|
||||||
|
|
||||||
|
t.Run("pc2-block-pc1", testFunc([]workerSpec{
|
||||||
|
{name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}},
|
||||||
|
}, []task{
|
||||||
|
sched("pc2", "fred", 8, sealtasks.TTPreCommit2),
|
||||||
|
taskStarted("pc2"),
|
||||||
|
|
||||||
|
sched("pc1", "fred", 8, sealtasks.TTPreCommit1),
|
||||||
|
taskNotScheduled("pc1"),
|
||||||
|
|
||||||
|
taskDone("pc2"),
|
||||||
|
taskDone("pc1"),
|
||||||
|
}))
|
||||||
|
|
||||||
|
t.Run("pc1-batching", testFunc([]workerSpec{
|
||||||
|
{name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}},
|
||||||
|
}, []task{
|
||||||
|
sched("t1", "fred", 8, sealtasks.TTPreCommit1),
|
||||||
|
taskStarted("t1"),
|
||||||
|
|
||||||
|
sched("t2", "fred", 8, sealtasks.TTPreCommit1),
|
||||||
|
taskStarted("t2"),
|
||||||
|
|
||||||
|
// with worker settings, we can only run 2 parallel PC1s
|
||||||
|
|
||||||
|
// start 2 more to fill fetch buffer
|
||||||
|
|
||||||
|
sched("t3", "fred", 8, sealtasks.TTPreCommit1),
|
||||||
|
taskNotScheduled("t3"),
|
||||||
|
|
||||||
|
sched("t4", "fred", 8, sealtasks.TTPreCommit1),
|
||||||
|
taskNotScheduled("t4"),
|
||||||
|
|
||||||
|
taskDone("t1"),
|
||||||
|
taskDone("t2"),
|
||||||
|
|
||||||
|
taskStarted("t3"),
|
||||||
|
taskStarted("t4"),
|
||||||
|
|
||||||
|
taskDone("t3"),
|
||||||
|
taskDone("t4"),
|
||||||
|
}))
|
||||||
|
|
||||||
|
twoPC1 := func(prefix string, sid abi.SectorNumber, schedAssert func(name string) task) task {
|
||||||
|
return multTask(
|
||||||
|
sched(prefix+"-a", "fred", sid, sealtasks.TTPreCommit1),
|
||||||
|
schedAssert(prefix+"-a"),
|
||||||
|
|
||||||
|
sched(prefix+"-b", "fred", sid+1, sealtasks.TTPreCommit1),
|
||||||
|
schedAssert(prefix+"-b"),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
twoPC1Act := func(prefix string, schedAssert func(name string) task) task {
|
||||||
|
return multTask(
|
||||||
|
schedAssert(prefix+"-a"),
|
||||||
|
schedAssert(prefix+"-b"),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// run this one a bunch of times, it had a very annoying tendency to fail randomly
|
||||||
|
for i := 0; i < 40; i++ {
|
||||||
|
t.Run("pc1-pc2-prio", testFunc([]workerSpec{
|
||||||
|
{name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}},
|
||||||
|
}, []task{
|
||||||
|
// fill queues
|
||||||
|
twoPC1("w0", 0, taskStarted),
|
||||||
|
twoPC1("w1", 2, taskNotScheduled),
|
||||||
|
|
||||||
|
// windowed
|
||||||
|
|
||||||
|
sched("t1", "fred", 8, sealtasks.TTPreCommit1),
|
||||||
|
taskNotScheduled("t1"),
|
||||||
|
|
||||||
|
sched("t2", "fred", 9, sealtasks.TTPreCommit1),
|
||||||
|
taskNotScheduled("t2"),
|
||||||
|
|
||||||
|
sched("t3", "fred", 10, sealtasks.TTPreCommit2),
|
||||||
|
taskNotScheduled("t3"),
|
||||||
|
|
||||||
|
twoPC1Act("w0", taskDone),
|
||||||
|
twoPC1Act("w1", taskStarted),
|
||||||
|
|
||||||
|
twoPC1Act("w1", taskDone),
|
||||||
|
|
||||||
|
taskStarted("t3"),
|
||||||
|
taskNotScheduled("t1"),
|
||||||
|
taskNotScheduled("t2"),
|
||||||
|
|
||||||
|
taskDone("t3"),
|
||||||
|
|
||||||
|
taskStarted("t1"),
|
||||||
|
taskStarted("t2"),
|
||||||
|
|
||||||
|
taskDone("t1"),
|
||||||
|
taskDone("t2"),
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
@ -74,7 +74,11 @@ func (sh *scheduler) runWorkerWatcher() {
|
|||||||
|
|
||||||
caseToWorker[toSet] = wid
|
caseToWorker[toSet] = wid
|
||||||
default:
|
default:
|
||||||
wid := caseToWorker[n]
|
wid, found := caseToWorker[n]
|
||||||
|
if !found {
|
||||||
|
log.Errorf("worker ID not found for case %d", n)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
delete(caseToWorker, n)
|
delete(caseToWorker, n)
|
||||||
cases[n] = reflect.SelectCase{
|
cases[n] = reflect.SelectCase{
|
||||||
|
@ -28,6 +28,30 @@ var order = map[TaskType]int{
|
|||||||
TTReadUnsealed: 0,
|
TTReadUnsealed: 0,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var shortNames = map[TaskType]string{
|
||||||
|
TTAddPiece: "AP ",
|
||||||
|
|
||||||
|
TTPreCommit1: "PC1",
|
||||||
|
TTPreCommit2: "PC2",
|
||||||
|
TTCommit1: "C1 ",
|
||||||
|
TTCommit2: "C2 ",
|
||||||
|
|
||||||
|
TTFinalize: "FIN",
|
||||||
|
|
||||||
|
TTFetch: "GET",
|
||||||
|
TTUnseal: "UNS",
|
||||||
|
TTReadUnsealed: "RD ",
|
||||||
|
}
|
||||||
|
|
||||||
func (a TaskType) Less(b TaskType) bool {
|
func (a TaskType) Less(b TaskType) bool {
|
||||||
return order[a] < order[b]
|
return order[a] < order[b]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (a TaskType) Short() string {
|
||||||
|
n, ok := shortNames[a]
|
||||||
|
if !ok {
|
||||||
|
return "UNK"
|
||||||
|
}
|
||||||
|
|
||||||
|
return n
|
||||||
|
}
|
||||||
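Short() gives fixed-width labels meant for compact listings (a jobs table, say); unknown task types come back as "UNK". A tiny usage sketch:

fmt.Println(sealtasks.TTPreCommit1.Short(), sealtasks.TTUnseal.Short()) // "PC1 UNS"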
|
@ -17,12 +17,12 @@ type allocSelector struct {
|
|||||||
ptype stores.PathType
|
ptype stores.PathType
|
||||||
}
|
}
|
||||||
|
|
||||||
func newAllocSelector(ctx context.Context, index stores.SectorIndex, alloc stores.SectorFileType, ptype stores.PathType) (*allocSelector, error) {
|
func newAllocSelector(index stores.SectorIndex, alloc stores.SectorFileType, ptype stores.PathType) *allocSelector {
|
||||||
return &allocSelector{
|
return &allocSelector{
|
||||||
index: index,
|
index: index,
|
||||||
alloc: alloc,
|
alloc: alloc,
|
||||||
ptype: ptype,
|
ptype: ptype,
|
||||||
}, nil
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *allocSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredSealProof, whnd *workerHandle) (bool, error) {
|
func (s *allocSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredSealProof, whnd *workerHandle) (bool, error) {
|
||||||
|
@@ -12,18 +12,19 @@ import (
 )

 type existingSelector struct {
-	best []stores.SectorStorageInfo
+	index      stores.SectorIndex
+	sector     abi.SectorID
+	alloc      stores.SectorFileType
+	allowFetch bool
 }

-func newExistingSelector(ctx context.Context, index stores.SectorIndex, sector abi.SectorID, alloc stores.SectorFileType, allowFetch bool) (*existingSelector, error) {
-	best, err := index.StorageFindSector(ctx, sector, alloc, allowFetch)
-	if err != nil {
-		return nil, err
-	}
-
+func newExistingSelector(index stores.SectorIndex, sector abi.SectorID, alloc stores.SectorFileType, allowFetch bool) *existingSelector {
 	return &existingSelector{
-		best: best,
-	}, nil
+		index:      index,
+		sector:     sector,
+		alloc:      alloc,
+		allowFetch: allowFetch,
+	}
 }

 func (s *existingSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredSealProof, whnd *workerHandle) (bool, error) {
@@ -45,7 +46,12 @@ func (s *existingSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt
 		have[path.ID] = struct{}{}
 	}

-	for _, info := range s.best {
+	best, err := s.index.StorageFindSector(ctx, s.sector, s.alloc, s.allowFetch)
+	if err != nil {
+		return false, xerrors.Errorf("finding best storage: %w", err)
+	}
+
+	for _, info := range best {
 		if _, ok := have[info.ID]; ok {
 			return true, nil
 		}
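Note the behavioural shift above: the selector used to snapshot StorageFindSector results when it was constructed, and now queries the index every time the scheduler probes a worker, so sectors that move between stores after a task was queued are still matched. A package-internal sketch of the two constructors after this change (hedged: the m.index field, sectorID and the chosen file types are assumptions for illustration, not taken from the diff):

    // Neither constructor can fail any more; the index is only consulted later, in Ok().
    sel := newExistingSelector(m.index, sectorID, stores.FTSealed|stores.FTCache, true)
    fetchSel := newAllocSelector(m.index, stores.FTUnsealed, stores.PathSealing)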
stats.go (13 changed lines)
@@ -20,3 +20,16 @@ func (m *Manager) WorkerStats() map[uint64]storiface.WorkerStats {

 	return out
 }
+
+func (m *Manager) WorkerJobs() map[uint64][]storiface.WorkerJob {
+	m.sched.workersLk.Lock()
+	defer m.sched.workersLk.Unlock()
+
+	out := map[uint64][]storiface.WorkerJob{}
+
+	for id, handle := range m.sched.workers {
+		out[uint64(id)] = handle.wt.Running()
+	}
+
+	return out
+}
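A minimal consumer sketch (not part of this diff) showing how the new WorkerJobs() map might be rendered, for example by a CLI; the formatting here is an assumption:

    package main

    import (
        "fmt"
        "time"

        "github.com/filecoin-project/sector-storage/storiface"
    )

    func printJobs(byWorker map[uint64][]storiface.WorkerJob) {
        for wid, jobs := range byWorker {
            for _, job := range jobs {
                // Task.Short() is the compact label added in sealtasks above.
                fmt.Printf("worker %d: %s sector %d-%d running for %s\n",
                    wid, job.Task.Short(), job.Sector.Miner, job.Sector.Number,
                    time.Since(job.Start).Truncate(time.Second))
            }
        }
    }

    func main() {
        printJobs(map[uint64][]storiface.WorkerJob{}) // normally: m.WorkerJobs()
    }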
@@ -19,15 +19,17 @@ const (
 	FTNone SectorFileType = 0
 )

+const FSOverheadDen = 10
+
 var FSOverheadSeal = map[SectorFileType]int{ // 10x overheads
-	FTUnsealed: 10,
-	FTSealed:   10,
-	FTCache:    70, // TODO: confirm for 32G
+	FTUnsealed: FSOverheadDen,
+	FTSealed:   FSOverheadDen,
+	FTCache:    141, // 11 layers + D(2x ssize) + C + R
 }

 var FsOverheadFinalized = map[SectorFileType]int{
-	FTUnsealed: 10,
-	FTSealed:   10,
+	FTUnsealed: FSOverheadDen,
+	FTSealed:   FSOverheadDen,
 	FTCache:    2,
 }

@@ -67,7 +69,7 @@ func (t SectorFileType) SealSpaceUse(spt abi.RegisteredSealProof) (uint64, error
 		return 0, xerrors.Errorf("no seal overhead info for %s", pathType)
 	}

-	need += uint64(oh) * uint64(ssize) / 10
+	need += uint64(oh) * uint64(ssize) / FSOverheadDen
 }

 return need, nil
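To make the new overhead numbers concrete, here is a rough worked example (an editorial sketch, not from the diff) of what SealSpaceUse amounts to for a 32 GiB sector when all three file types are requested; actual usage varies, and the cache entry is the dominant term:

    package main

    import "fmt"

    func main() {
        const ssize = uint64(32 << 30) // 32 GiB sector, for illustration
        const den = 10                 // FSOverheadDen

        sealing := (10 + 10 + 141) * ssize / den // unsealed + sealed + cache while sealing
        finalized := (10 + 10 + 2) * ssize / den // after finalization trims the cache

        fmt.Printf("sealing: ~%d GiB, finalized: ~%d GiB\n", sealing>>30, finalized>>30)
        // prints roughly: sealing: ~515 GiB, finalized: ~70 GiB
    }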
@@ -72,13 +72,15 @@ func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Requ
 	// The caller has a lock on this sector already, no need to get one here

 	// passing 0 spt because we don't allocate anything
-	paths, _, err := handler.Local.AcquireSector(r.Context(), id, 0, ft, FTNone, false, AcquireMove)
+	paths, _, err := handler.Local.AcquireSector(r.Context(), id, 0, ft, FTNone, PathStorage, AcquireMove)
 	if err != nil {
 		log.Error("%+v", err)
 		w.WriteHeader(500)
 		return
 	}

+	// TODO: reserve local storage here
+
 	path := PathByType(paths, ft)
 	if path == "" {
 		log.Error("acquired path was empty")
@@ -2,6 +2,7 @@ package stores

 import (
 	"context"
+	"github.com/filecoin-project/sector-storage/fsutil"
 	"net/url"
 	gopath "path"
 	"sort"
@@ -28,13 +29,10 @@ type StorageInfo struct {

 	CanSeal  bool
 	CanStore bool
-
-	LastHeartbeat time.Time
-	HeartbeatErr  error
 }

 type HealthReport struct {
-	Stat FsStat
+	Stat fsutil.FsStat
 	Err  error
 }

@@ -50,7 +48,7 @@ type SectorStorageInfo struct {
 }

 type SectorIndex interface { // part of storage-miner api
-	StorageAttach(context.Context, StorageInfo, FsStat) error
+	StorageAttach(context.Context, StorageInfo, fsutil.FsStat) error
 	StorageInfo(context.Context, ID) (StorageInfo, error)
 	StorageReportHealth(context.Context, ID, HealthReport) error

@@ -77,7 +75,7 @@ type declMeta struct {

 type storageEntry struct {
 	info *StorageInfo
-	fsi  FsStat
+	fsi  fsutil.FsStat

 	lastHeartbeat time.Time
 	heartbeatErr  error
@@ -130,7 +128,7 @@ func (i *Index) StorageList(ctx context.Context) (map[ID][]Decl, error) {
 	return out, nil
 }

-func (i *Index) StorageAttach(ctx context.Context, si StorageInfo, st FsStat) error {
+func (i *Index) StorageAttach(ctx context.Context, si StorageInfo, st fsutil.FsStat) error {
 	i.lk.Lock()
 	defer i.lk.Unlock()

@@ -361,7 +359,7 @@ func (i *Index) StorageBestAlloc(ctx context.Context, allocate SectorFileType, s
 			continue
 		}

-		if spaceReq > p.fsi.Available {
+		if spaceReq > uint64(p.fsi.Available) {
 			log.Debugf("not allocating on %s, out of space (available: %d, need: %d)", p.info.ID, p.fsi.Available, spaceReq)
 			continue
 		}
@@ -2,25 +2,22 @@ package stores

 import (
 	"context"
-	"syscall"
+	"github.com/filecoin-project/sector-storage/fsutil"

-	"golang.org/x/xerrors"
-
 	"github.com/filecoin-project/specs-actors/actors/abi"
 )

-type PathType bool
+type PathType string

 const (
-	PathStorage = false
-	PathSealing = true
+	PathStorage PathType = "storage"
+	PathSealing PathType = "sealing"
 )

 type AcquireMode string

 const (
-	AcquireMove = "move"
-	AcquireCopy = "copy"
+	AcquireMove AcquireMode = "move"
+	AcquireCopy AcquireMode = "copy"
 )

 type Store interface {
@@ -34,23 +31,5 @@ type Store interface {
 	// move sectors into storage
 	MoveStorage(ctx context.Context, s abi.SectorID, spt abi.RegisteredSealProof, types SectorFileType) error

-	FsStat(ctx context.Context, id ID) (FsStat, error)
-}
-
-func Stat(path string) (FsStat, error) {
-	var stat syscall.Statfs_t
-	if err := syscall.Statfs(path, &stat); err != nil {
-		return FsStat{}, xerrors.Errorf("statfs: %w", err)
-	}
-
-	return FsStat{
-		Capacity:  stat.Blocks * uint64(stat.Bsize),
-		Available: stat.Bavail * uint64(stat.Bsize),
-	}, nil
-}
-
-type FsStat struct {
-	Capacity  uint64
-	Available uint64 // Available to use for sector storage
-	Used      uint64
-}
+	FsStat(ctx context.Context, id ID) (fsutil.FsStat, error)
 }
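The switch of PathType from bool to string (and the typed AcquireMode constants) is mostly about call-site readability and compile-time safety: a bare true/false no longer satisfies these parameters, and the values print usefully in logs. A tiny standalone sketch (not part of the diff):

    package main

    import (
        "fmt"

        "github.com/filecoin-project/sector-storage/stores"
    )

    func describe(pt stores.PathType, am stores.AcquireMode) string {
        // Because both are string-based types, they are self-describing when logged.
        return fmt.Sprintf("path=%s mode=%s", pt, am)
    }

    func main() {
        fmt.Println(describe(stores.PathSealing, stores.AcquireCopy))
        // describe(true, stores.AcquireMove) would not compile any more:
        // a bare bool is no longer a PathType, which is the point of the change.
    }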
stores/local.go (144 changed lines)
@@ -13,6 +13,7 @@ import (

 	"golang.org/x/xerrors"

+	"github.com/filecoin-project/sector-storage/fsutil"
 	"github.com/filecoin-project/specs-actors/actors/abi"
 )

@@ -48,7 +49,11 @@ type LocalStorage interface {
 	GetStorage() (StorageConfig, error)
 	SetStorage(func(*StorageConfig)) error

-	Stat(path string) (FsStat, error)
+	Stat(path string) (fsutil.FsStat, error)
+
+	// returns real disk usage for a file/directory
+	// os.ErrNotExit when file doesn't exist
+	DiskUsage(path string) (int64, error)
 }

 const MetaFile = "sectorstore.json"
@@ -67,6 +72,60 @@ type Local struct {

 type path struct {
 	local string // absolute local path
+
+	reserved     int64
+	reservations map[abi.SectorID]SectorFileType
+}
+
+func (p *path) stat(ls LocalStorage) (fsutil.FsStat, error) {
+	stat, err := ls.Stat(p.local)
+	if err != nil {
+		return fsutil.FsStat{}, xerrors.Errorf("stat %s: %w", p.local, err)
+	}
+
+	stat.Reserved = p.reserved
+
+	for id, ft := range p.reservations {
+		for _, fileType := range PathTypes {
+			if fileType&ft == 0 {
+				continue
+			}
+
+			sp := p.sectorPath(id, fileType)
+
+			used, err := ls.DiskUsage(sp)
+			if err == os.ErrNotExist {
+				p, ferr := tempFetchDest(sp, false)
+				if ferr != nil {
+					return fsutil.FsStat{}, ferr
+				}
+
+				used, err = ls.DiskUsage(p)
+			}
+			if err != nil {
+				log.Errorf("getting disk usage of '%s': %+v", p.sectorPath(id, fileType), err)
+				continue
+			}
+
+			stat.Reserved -= used
+		}
+	}
+
+	if stat.Reserved < 0 {
+		log.Warnf("negative reserved storage: p.reserved=%d, reserved: %d", p.reserved, stat.Reserved)
+		stat.Reserved = 0
+	}
+
+	stat.Available -= stat.Reserved
+	if stat.Available < 0 {
+		stat.Available = 0
+	}
+
+	return stat, err
+}
+
+func (p *path) sectorPath(sid abi.SectorID, fileType SectorFileType) string {
+	return filepath.Join(p.local, fileType.String(), SectorName(sid))
 }

 func NewLocal(ctx context.Context, ls LocalStorage, index SectorIndex, urls []string) (*Local, error) {
@@ -98,9 +157,12 @@ func (st *Local) OpenPath(ctx context.Context, p string) error {

 	out := &path{
 		local: p,
+
+		reserved:     0,
+		reservations: map[abi.SectorID]SectorFileType{},
 	}

-	fst, err := st.localStorage.Stat(p)
+	fst, err := out.stat(st.localStorage)
 	if err != nil {
 		return err
 	}
@@ -130,6 +192,10 @@ func (st *Local) OpenPath(ctx context.Context, p string) error {
 	}

 	for _, ent := range ents {
+		if ent.Name() == FetchTempSubdir {
+			continue
+		}
+
 		sid, err := ParseSectorID(ent.Name())
 		if err != nil {
 			return xerrors.Errorf("parse sector id %s: %w", ent.Name(), err)
@@ -179,7 +245,7 @@ func (st *Local) reportHealth(ctx context.Context) {

 		toReport := map[ID]HealthReport{}
 		for id, p := range st.paths {
-			stat, err := st.localStorage.Stat(p.local)
+			stat, err := p.stat(st.localStorage)

 			toReport[id] = HealthReport{
 				Stat: stat,
@@ -197,6 +263,61 @@ func (st *Local) reportHealth(ctx context.Context) {
 	}
 }

+func (st *Local) Reserve(ctx context.Context, sid abi.SectorID, spt abi.RegisteredSealProof, ft SectorFileType, storageIDs SectorPaths, overheadTab map[SectorFileType]int) (func(), error) {
+	ssize, err := spt.SectorSize()
+	if err != nil {
+		return nil, xerrors.Errorf("getting sector size: %w", err)
+	}
+
+	st.localLk.Lock()
+
+	done := func() {}
+	deferredDone := func() { done() }
+	defer func() {
+		st.localLk.Unlock()
+		deferredDone()
+	}()
+
+	for _, fileType := range PathTypes {
+		if fileType&ft == 0 {
+			continue
+		}
+
+		id := ID(PathByType(storageIDs, fileType))
+
+		p, ok := st.paths[id]
+		if !ok {
+			return nil, errPathNotFound
+		}
+
+		stat, err := p.stat(st.localStorage)
+		if err != nil {
+			return nil, xerrors.Errorf("getting local storage stat: %w", err)
+		}
+
+		overhead := int64(overheadTab[fileType]) * int64(ssize) / FSOverheadDen
+
+		if stat.Available < overhead {
+			return nil, xerrors.Errorf("can't reserve %d bytes in '%s' (id:%s), only %d available", overhead, p.local, id, stat.Available)
+		}
+
+		p.reserved += overhead
+
+		prevDone := done
+		done = func() {
+			prevDone()
+
+			st.localLk.Lock()
+			defer st.localLk.Unlock()
+
+			p.reserved -= overhead
+		}
+	}
+
+	deferredDone = func() {}
+	return done, nil
+}
+
 func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, spt abi.RegisteredSealProof, existing SectorFileType, allocate SectorFileType, pathType PathType, op AcquireMode) (SectorPaths, SectorPaths, error) {
 	if existing|allocate != existing^allocate {
 		return SectorPaths{}, SectorPaths{}, xerrors.New("can't both find and allocate a sector")
@@ -229,7 +350,7 @@ func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, spt abi.Re
 			continue
 		}

-		spath := filepath.Join(p.local, fileType.String(), SectorName(sid))
+		spath := p.sectorPath(sid, fileType)
 		SetPathByType(&out, fileType, spath)
 		SetPathByType(&storageIDs, fileType, string(info.ID))

@@ -271,8 +392,9 @@ func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, spt abi.Re

 			// TODO: Check free space

-			best = filepath.Join(p.local, fileType.String(), SectorName(sid))
+			best = p.sectorPath(sid, fileType)
 			bestID = si.ID
+			break
 		}

 		if best == "" {
@@ -387,7 +509,7 @@ func (st *Local) removeSector(ctx context.Context, sid abi.SectorID, typ SectorF
 		return xerrors.Errorf("dropping sector from index: %w", err)
 	}

-	spath := filepath.Join(p.local, typ.String(), SectorName(sid))
+	spath := p.sectorPath(sid, typ)
 	log.Infof("remove %s", spath)

 	if err := os.RemoveAll(spath); err != nil {
@@ -398,12 +520,12 @@ func (st *Local) removeSector(ctx context.Context, sid abi.SectorID, typ SectorF
 }

 func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, spt abi.RegisteredSealProof, types SectorFileType) error {
-	dest, destIds, err := st.AcquireSector(ctx, s, spt, FTNone, types, false, AcquireMove)
+	dest, destIds, err := st.AcquireSector(ctx, s, spt, FTNone, types, PathStorage, AcquireMove)
 	if err != nil {
 		return xerrors.Errorf("acquire dest storage: %w", err)
 	}

-	src, srcIds, err := st.AcquireSector(ctx, s, spt, types, FTNone, false, AcquireMove)
+	src, srcIds, err := st.AcquireSector(ctx, s, spt, types, FTNone, PathStorage, AcquireMove)
 	if err != nil {
 		return xerrors.Errorf("acquire src storage: %w", err)
 	}
@@ -454,16 +576,16 @@ func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, spt abi.Regist

 var errPathNotFound = xerrors.Errorf("fsstat: path not found")

-func (st *Local) FsStat(ctx context.Context, id ID) (FsStat, error) {
+func (st *Local) FsStat(ctx context.Context, id ID) (fsutil.FsStat, error) {
 	st.localLk.RLock()
 	defer st.localLk.RUnlock()

 	p, ok := st.paths[id]
 	if !ok {
-		return FsStat{}, errPathNotFound
+		return fsutil.FsStat{}, errPathNotFound
 	}

-	return st.localStorage.Stat(p.local)
+	return p.stat(st.localStorage)
 }

 var _ Store = &Local{}
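Reserve/release is consumed further down in stores/remote.go before fetching; a condensed, hedged sketch of the intended calling pattern (function and variable names here are illustrative, not from the diff):

    package example

    import (
        "context"

        "golang.org/x/xerrors"

        "github.com/filecoin-project/sector-storage/stores"
        "github.com/filecoin-project/specs-actors/actors/abi"
    )

    // reserveThenWrite reserves worst-case space for the given file types before
    // creating them, and drops the reservation once the files exist on disk (at
    // which point DiskUsage accounts for them instead).
    func reserveThenWrite(ctx context.Context, local *stores.Local, sid abi.SectorID, spt abi.RegisteredSealProof, ft stores.SectorFileType, ids stores.SectorPaths) error {
        release, err := local.Reserve(ctx, sid, spt, ft, ids, stores.FSOverheadSeal)
        if err != nil {
            return xerrors.Errorf("reserving storage space: %w", err)
        }
        defer release()

        // ... fetch or generate the sector files here ...
        return nil
    }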
@@ -3,6 +3,7 @@ package stores
 import (
 	"context"
 	"encoding/json"
+	"github.com/filecoin-project/sector-storage/fsutil"
 	"github.com/google/uuid"
 	"io/ioutil"
 	"os"
@@ -19,6 +20,10 @@ type TestingLocalStorage struct {
 	c StorageConfig
 }

+func (t *TestingLocalStorage) DiskUsage(path string) (int64, error) {
+	return 1, nil
+}
+
 func (t *TestingLocalStorage) GetStorage() (StorageConfig, error) {
 	return t.c, nil
 }
@@ -28,11 +33,10 @@ func (t *TestingLocalStorage) SetStorage(f func(*StorageConfig)) error {
 	return nil
 }

-func (t *TestingLocalStorage) Stat(path string) (FsStat, error) {
-	return FsStat{
+func (t *TestingLocalStorage) Stat(path string) (fsutil.FsStat, error) {
+	return fsutil.FsStat{
 		Capacity:  pathSize,
 		Available: pathSize,
-		Used:      0,
 	}, nil
 }
stores/remote.go (133 changed lines)
@@ -3,6 +3,7 @@ package stores
 import (
 	"context"
 	"encoding/json"
+	"github.com/filecoin-project/sector-storage/fsutil"
 	"io/ioutil"
 	"math/bits"
 	"mime"
@@ -10,6 +11,7 @@ import (
 	"net/url"
 	"os"
 	gopath "path"
+	"path/filepath"
 	"sort"
 	"sync"

@@ -23,11 +25,15 @@ import (
 	"github.com/filecoin-project/sector-storage/tarutil"
 )

+var FetchTempSubdir = "fetching"
+
 type Remote struct {
 	local *Local
 	index SectorIndex
 	auth  http.Header

+	limit chan struct{}
+
 	fetchLk  sync.Mutex
 	fetching map[abi.SectorID]chan struct{}
 }
@@ -40,12 +46,14 @@ func (r *Remote) RemoveCopies(ctx context.Context, s abi.SectorID, types SectorF
 	return r.local.RemoveCopies(ctx, s, types)
 }

-func NewRemote(local *Local, index SectorIndex, auth http.Header) *Remote {
+func NewRemote(local *Local, index SectorIndex, auth http.Header, fetchLimit int) *Remote {
 	return &Remote{
 		local: local,
 		index: index,
 		auth:  auth,

+		limit: make(chan struct{}, fetchLimit),
+
 		fetching: map[abi.SectorID]chan struct{}{},
 	}
 }
@@ -87,6 +95,33 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, spt abi.Regi
 		return SectorPaths{}, SectorPaths{}, xerrors.Errorf("local acquire error: %w", err)
 	}

+	var toFetch SectorFileType
+	for _, fileType := range PathTypes {
+		if fileType&existing == 0 {
+			continue
+		}
+
+		if PathByType(paths, fileType) == "" {
+			toFetch |= fileType
+		}
+	}
+
+	apaths, ids, err := r.local.AcquireSector(ctx, s, spt, FTNone, toFetch, pathType, op)
+	if err != nil {
+		return SectorPaths{}, SectorPaths{}, xerrors.Errorf("allocate local sector for fetching: %w", err)
+	}
+
+	odt := FSOverheadSeal
+	if pathType == PathStorage {
+		odt = FsOverheadFinalized
+	}
+
+	releaseStorage, err := r.local.Reserve(ctx, s, spt, toFetch, ids, odt)
+	if err != nil {
+		return SectorPaths{}, SectorPaths{}, xerrors.Errorf("reserving storage space: %w", err)
+	}
+	defer releaseStorage()
+
 	for _, fileType := range PathTypes {
 		if fileType&existing == 0 {
 			continue
@@ -96,15 +131,18 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, spt abi.Regi
 			continue
 		}

-		ap, storageID, url, err := r.acquireFromRemote(ctx, s, spt, fileType, pathType, op)
+		dest := PathByType(apaths, fileType)
+		storageID := PathByType(ids, fileType)
+
+		url, err := r.acquireFromRemote(ctx, s, fileType, dest)
 		if err != nil {
 			return SectorPaths{}, SectorPaths{}, err
 		}

-		SetPathByType(&paths, fileType, ap)
-		SetPathByType(&stores, fileType, string(storageID))
+		SetPathByType(&paths, fileType, dest)
+		SetPathByType(&stores, fileType, storageID)

-		if err := r.index.StorageDeclareSector(ctx, storageID, s, fileType, op == AcquireMove); err != nil {
+		if err := r.index.StorageDeclareSector(ctx, ID(storageID), s, fileType, op == AcquireMove); err != nil {
 			log.Warnf("declaring sector %v in %s failed: %+v", s, storageID, err)
 			continue
 		}
@@ -119,51 +157,84 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, spt abi.Regi
 	return paths, stores, nil
 }

-func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, spt abi.RegisteredSealProof, fileType SectorFileType, pathType PathType, op AcquireMode) (string, ID, string, error) {
+func tempFetchDest(spath string, create bool) (string, error) {
+	st, b := filepath.Split(spath)
+	tempdir := filepath.Join(st, FetchTempSubdir)
+	if create {
+		if err := os.MkdirAll(tempdir, 0755); err != nil {
+			return "", xerrors.Errorf("creating temp fetch dir: %w", err)
+		}
+	}
+
+	return filepath.Join(tempdir, b), nil
+}
+
+func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, fileType SectorFileType, dest string) (string, error) {
 	si, err := r.index.StorageFindSector(ctx, s, fileType, false)
 	if err != nil {
-		return "", "", "", err
+		return "", err
 	}

 	if len(si) == 0 {
-		return "", "", "", xerrors.Errorf("failed to acquire sector %v from remote(%d): %w", s, fileType, storiface.ErrSectorNotFound)
+		return "", xerrors.Errorf("failed to acquire sector %v from remote(%d): %w", s, fileType, storiface.ErrSectorNotFound)
 	}

 	sort.Slice(si, func(i, j int) bool {
 		return si[i].Weight < si[j].Weight
 	})

-	apaths, ids, err := r.local.AcquireSector(ctx, s, spt, FTNone, fileType, pathType, op)
-	if err != nil {
-		return "", "", "", xerrors.Errorf("allocate local sector for fetching: %w", err)
-	}
-	dest := PathByType(apaths, fileType)
-	storageID := PathByType(ids, fileType)
-
 	var merr error
 	for _, info := range si {
 		// TODO: see what we have local, prefer that

 		for _, url := range info.URLs {
-			err := r.fetch(ctx, url, dest)
+			tempDest, err := tempFetchDest(dest, true)
 			if err != nil {
-				merr = multierror.Append(merr, xerrors.Errorf("fetch error %s (storage %s) -> %s: %w", url, info.ID, dest, err))
+				return "", err
+			}
+
+			if err := os.RemoveAll(dest); err != nil {
+				return "", xerrors.Errorf("removing dest: %w", err)
+			}
+
+			err = r.fetch(ctx, url, tempDest)
+			if err != nil {
+				merr = multierror.Append(merr, xerrors.Errorf("fetch error %s (storage %s) -> %s: %w", url, info.ID, tempDest, err))
 				continue
 			}

+			if err := move(tempDest, dest); err != nil {
+				return "", xerrors.Errorf("fetch move error (storage %s) %s -> %s: %w", info.ID, tempDest, dest, err)
+			}
+
 			if merr != nil {
 				log.Warnw("acquireFromRemote encountered errors when fetching sector from remote", "errors", merr)
 			}
-			return dest, ID(storageID), url, nil
+			return url, nil
 		}
 	}

-	return "", "", "", xerrors.Errorf("failed to acquire sector %v from remote (tried %v): %w", s, si, merr)
+	return "", xerrors.Errorf("failed to acquire sector %v from remote (tried %v): %w", s, si, merr)
 }

 func (r *Remote) fetch(ctx context.Context, url, outname string) error {
 	log.Infof("Fetch %s -> %s", url, outname)

+	if len(r.limit) >= cap(r.limit) {
+		log.Infof("Throttling fetch, %d already running", len(r.limit))
+	}
+
+	// TODO: Smarter throttling
+	//  * Priority (just going sequentially is still pretty good)
+	//  * Per interface
+	//  * Aware of remote load
+	select {
+	case r.limit <- struct{}{}:
+		defer func() { <-r.limit }()
+	case <-ctx.Done():
+		return xerrors.Errorf("context error while waiting for fetch limiter: %w", ctx.Err())
+	}
+
 	req, err := http.NewRequest("GET", url, nil)
 	if err != nil {
 		return xerrors.Errorf("request: %w", err)
@@ -270,7 +341,7 @@ func (r *Remote) deleteFromRemote(ctx context.Context, url string) error {
 	return nil
 }

-func (r *Remote) FsStat(ctx context.Context, id ID) (FsStat, error) {
+func (r *Remote) FsStat(ctx context.Context, id ID) (fsutil.FsStat, error) {
 	st, err := r.local.FsStat(ctx, id)
 	switch err {
 	case nil:
@@ -278,53 +349,53 @@ func (r *Remote) FsStat(ctx context.Context, id ID) (FsStat, error) {
 	case errPathNotFound:
 		break
 	default:
-		return FsStat{}, xerrors.Errorf("local stat: %w", err)
+		return fsutil.FsStat{}, xerrors.Errorf("local stat: %w", err)
 	}

 	si, err := r.index.StorageInfo(ctx, id)
 	if err != nil {
-		return FsStat{}, xerrors.Errorf("getting remote storage info: %w", err)
+		return fsutil.FsStat{}, xerrors.Errorf("getting remote storage info: %w", err)
 	}

 	if len(si.URLs) == 0 {
-		return FsStat{}, xerrors.Errorf("no known URLs for remote storage %s", id)
+		return fsutil.FsStat{}, xerrors.Errorf("no known URLs for remote storage %s", id)
 	}

 	rl, err := url.Parse(si.URLs[0])
 	if err != nil {
-		return FsStat{}, xerrors.Errorf("failed to parse url: %w", err)
+		return fsutil.FsStat{}, xerrors.Errorf("failed to parse url: %w", err)
 	}

 	rl.Path = gopath.Join(rl.Path, "stat", string(id))

 	req, err := http.NewRequest("GET", rl.String(), nil)
 	if err != nil {
-		return FsStat{}, xerrors.Errorf("request: %w", err)
+		return fsutil.FsStat{}, xerrors.Errorf("request: %w", err)
 	}
 	req.Header = r.auth
 	req = req.WithContext(ctx)

 	resp, err := http.DefaultClient.Do(req)
 	if err != nil {
-		return FsStat{}, xerrors.Errorf("do request: %w", err)
+		return fsutil.FsStat{}, xerrors.Errorf("do request: %w", err)
 	}
 	switch resp.StatusCode {
 	case 200:
 		break
 	case 404:
-		return FsStat{}, errPathNotFound
+		return fsutil.FsStat{}, errPathNotFound
 	case 500:
 		b, err := ioutil.ReadAll(resp.Body)
 		if err != nil {
-			return FsStat{}, xerrors.Errorf("fsstat: got http 500, then failed to read the error: %w", err)
+			return fsutil.FsStat{}, xerrors.Errorf("fsstat: got http 500, then failed to read the error: %w", err)
 		}

-		return FsStat{}, xerrors.Errorf("fsstat: got http 500: %s", string(b))
+		return fsutil.FsStat{}, xerrors.Errorf("fsstat: got http 500: %s", string(b))
 	}

-	var out FsStat
+	var out fsutil.FsStat
 	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
-		return FsStat{}, xerrors.Errorf("decoding fsstat: %w", err)
+		return fsutil.FsStat{}, xerrors.Errorf("decoding fsstat: %w", err)
 	}

 	defer resp.Body.Close()
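Two things are worth calling out in the remote store changes above: fetches now land in a temporary "fetching" directory and are moved into place only on success, and concurrent fetches are throttled by a buffered channel sized by the new fetchLimit argument. The limiter is a plain counting semaphore; a stripped-down, standalone illustration (not the project's code):

    package main

    import (
        "context"
        "fmt"
        "sync"
        "time"
    )

    func main() {
        limit := make(chan struct{}, 2) // cap = fetchLimit
        ctx := context.Background()

        var wg sync.WaitGroup
        for i := 0; i < 5; i++ {
            wg.Add(1)
            go func(i int) {
                defer wg.Done()
                select {
                case limit <- struct{}{}: // acquire a slot
                    defer func() { <-limit }() // release it when done
                case <-ctx.Done():
                    return
                }
                fmt.Println("fetching", i)
                time.Sleep(100 * time.Millisecond) // stand-in for the HTTP transfer
            }(i)
        }
        wg.Wait()
    }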
@@ -1,5 +1,12 @@
 package storiface

+import (
+	"time"
+
+	"github.com/filecoin-project/sector-storage/sealtasks"
+	"github.com/filecoin-project/specs-actors/actors/abi"
+)
+
 type WorkerInfo struct {
 	Hostname string

@@ -24,3 +31,11 @@ type WorkerStats struct {
 	GpuUsed bool
 	CpuUse  uint64
 }
+
+type WorkerJob struct {
+	ID     uint64
+	Sector abi.SectorID
+	Task   sealtasks.TaskType
+
+	Start time.Time
+}
@@ -37,7 +37,7 @@ func newTestWorker(wcfg WorkerConfig, lstor *stores.Local) *testWorker {
 		acceptTasks: acceptTasks,
 		lstor:       lstor,

-		mockSeal: mock.NewMockSectorMgr(ssize),
+		mockSeal: mock.NewMockSectorMgr(ssize, nil),
 	}
 }

@@ -53,7 +53,7 @@ func (t *testWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index sto
 	panic("implement me")
 }

-func (t *testWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error {
+func (t *testWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) {
 	panic("implement me")
 }
work_tracker.go (new file, 129 lines)
@@ -0,0 +1,129 @@
+package sectorstorage
+
+import (
+	"context"
+	"io"
+	"sync"
+	"time"
+
+	"github.com/ipfs/go-cid"
+
+	"github.com/filecoin-project/specs-actors/actors/abi"
+	"github.com/filecoin-project/specs-storage/storage"
+
+	"github.com/filecoin-project/sector-storage/sealtasks"
+	"github.com/filecoin-project/sector-storage/stores"
+	"github.com/filecoin-project/sector-storage/storiface"
+)
+
+type workTracker struct {
+	lk sync.Mutex
+
+	ctr     uint64
+	running map[uint64]storiface.WorkerJob
+
+	// TODO: done, aggregate stats, queue stats, scheduler feedback
+}
+
+func (wt *workTracker) track(sid abi.SectorID, task sealtasks.TaskType) func() {
+	wt.lk.Lock()
+	defer wt.lk.Unlock()
+
+	id := wt.ctr
+	wt.ctr++
+
+	wt.running[id] = storiface.WorkerJob{
+		ID:     id,
+		Sector: sid,
+		Task:   task,
+		Start:  time.Now(),
+	}
+
+	return func() {
+		wt.lk.Lock()
+		defer wt.lk.Unlock()
+
+		delete(wt.running, id)
+	}
+}
+
+func (wt *workTracker) worker(w Worker) Worker {
+	return &trackedWorker{
+		Worker:  w,
+		tracker: wt,
+	}
+}
+
+func (wt *workTracker) Running() []storiface.WorkerJob {
+	wt.lk.Lock()
+	defer wt.lk.Unlock()
+
+	out := make([]storiface.WorkerJob, 0, len(wt.running))
+	for _, job := range wt.running {
+		out = append(out, job)
+	}
+
+	return out
+}
+
+type trackedWorker struct {
+	Worker
+
+	tracker *workTracker
+}
+
+func (t *trackedWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) {
+	defer t.tracker.track(sector, sealtasks.TTPreCommit1)()
+
+	return t.Worker.SealPreCommit1(ctx, sector, ticket, pieces)
+}
+
+func (t *trackedWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storage.SectorCids, error) {
+	defer t.tracker.track(sector, sealtasks.TTPreCommit2)()
+
+	return t.Worker.SealPreCommit2(ctx, sector, pc1o)
+}
+
+func (t *trackedWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) {
+	defer t.tracker.track(sector, sealtasks.TTCommit1)()
+
+	return t.Worker.SealCommit1(ctx, sector, ticket, seed, pieces, cids)
+}
+
+func (t *trackedWorker) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storage.Proof, error) {
+	defer t.tracker.track(sector, sealtasks.TTCommit2)()
+
+	return t.Worker.SealCommit2(ctx, sector, c1o)
+}
+
+func (t *trackedWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error {
+	defer t.tracker.track(sector, sealtasks.TTFinalize)()
+
+	return t.Worker.FinalizeSector(ctx, sector, keepUnsealed)
+}
+
+func (t *trackedWorker) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) {
+	defer t.tracker.track(sector, sealtasks.TTAddPiece)()
+
+	return t.Worker.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData)
+}
+
+func (t *trackedWorker) Fetch(ctx context.Context, s abi.SectorID, ft stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) error {
+	defer t.tracker.track(s, sealtasks.TTFetch)()
+
+	return t.Worker.Fetch(ctx, s, ft, ptype, am)
+}
+
+func (t *trackedWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) error {
+	defer t.tracker.track(id, sealtasks.TTUnseal)()
+
+	return t.Worker.UnsealPiece(ctx, id, index, size, randomness, cid)
+}
+
+func (t *trackedWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) {
+	defer t.tracker.track(id, sealtasks.TTReadUnsealed)()
+
+	return t.Worker.ReadPiece(ctx, writer, id, index, size)
+}
+
+var _ Worker = &trackedWorker{}
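A package-internal sketch (hedged: realWorker and the print call are placeholders, not from the diff) of how the tracker is meant to be wired up; the scheduler wraps each Worker with wt.worker(...), and Manager.WorkerJobs() in stats.go surfaces the result:

    wt := &workTracker{running: map[uint64]storiface.WorkerJob{}}
    w := wt.worker(realWorker) // use w in place of realWorker for all sealing calls

    // Later, e.g. from an API handler:
    for _, job := range wt.Running() {
        fmt.Printf("%s sector %v started %s ago\n", job.Task.Short(), job.Sector, time.Since(job.Start))
    }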
@@ -51,5 +51,6 @@ var PieceComms = [Levels - Skip][32]byte{

 func ZeroPieceCommitment(sz abi.UnpaddedPieceSize) cid.Cid {
 	level := bits.TrailingZeros64(uint64(sz.Padded())) - Skip - 5 // 2^5 = 32
-	return commcid.PieceCommitmentV1ToCID(PieceComms[level][:])
+	commP, _ := commcid.PieceCommitmentV1ToCID(PieceComms[level][:])
+	return commP
 }