From a8997dc35c60db2ac6c9b9e74f14e20ab6fa8933 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 25 Jun 2020 21:53:51 +0200 Subject: [PATCH 01/51] ffiwrapper: Insert alignment between pieces --- ffiwrapper/sealer_cgo.go | 46 ++++++++++------- ffiwrapper/sealer_test.go | 103 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 132 insertions(+), 17 deletions(-) diff --git a/ffiwrapper/sealer_cgo.go b/ffiwrapper/sealer_cgo.go index 6510f81cc..177ddeae0 100644 --- a/ffiwrapper/sealer_cgo.go +++ b/ffiwrapper/sealer_cgo.go @@ -543,8 +543,35 @@ func GeneratePieceCIDFromFile(proofType abi.RegisteredSealProof, piece io.Reader } func GenerateUnsealedCID(proofType abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) { + allPieces := make([]abi.PieceInfo, 0, len(pieces)) var sum abi.PaddedPieceSize + + padTo := func(s abi.PaddedPieceSize, trailing bool) { + // pad remaining space with 0 CommPs + toFill := uint64(-sum % s) + if trailing && sum == 0 { + toFill = uint64(s) + } + + n := bits.OnesCount64(toFill) + for i := 0; i < n; i++ { + next := bits.TrailingZeros64(toFill) + psize := uint64(1) << uint(next) + toFill ^= psize + + padded := abi.PaddedPieceSize(psize) + allPieces = append(allPieces, abi.PieceInfo{ + Size: padded, + PieceCID: zerocomm.ZeroPieceCommitment(padded.Unpadded()), + }) + sum += padded + } + } + for _, p := range pieces { + padTo(p.Size, false) + + allPieces = append(allPieces, p) sum += p.Size } @@ -553,22 +580,7 @@ func GenerateUnsealedCID(proofType abi.RegisteredSealProof, pieces []abi.PieceIn return cid.Undef, err } - { - // pad remaining space with 0 CommPs - toFill := uint64(abi.PaddedPieceSize(ssize) - sum) - n := bits.OnesCount64(toFill) - for i := 0; i < n; i++ { - next := bits.TrailingZeros64(toFill) - psize := uint64(1) << uint(next) - toFill ^= psize + padTo(abi.PaddedPieceSize(ssize), true) - unpadded := abi.PaddedPieceSize(psize).Unpadded() - pieces = append(pieces, abi.PieceInfo{ - Size: unpadded.Padded(), - PieceCID: zerocomm.ZeroPieceCommitment(unpadded), - }) - } - } - - return ffi.GenerateUnsealedCID(proofType, pieces) + return ffi.GenerateUnsealedCID(proofType, allPieces) } diff --git a/ffiwrapper/sealer_test.go b/ffiwrapper/sealer_test.go index 5e6f02cd2..e9628c2dd 100644 --- a/ffiwrapper/sealer_test.go +++ b/ffiwrapper/sealer_test.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "fmt" + "github.com/ipfs/go-cid" "io" "io/ioutil" "math/rand" @@ -484,3 +485,105 @@ func requireFDsClosed(t *testing.T, start int) { log.Infow("open FDs", "start", start, "now", openNow) require.Equal(t, start, openNow, "FDs shouldn't leak") } + +func TestGenerateUnsealedCID(t *testing.T) { + pt := abi.RegisteredSealProof_StackedDrg2KiBV1 + ups := int(abi.PaddedPieceSize(2048).Unpadded()) + + commP := func(b []byte) cid.Cid { + pf, werr, err := ToReadableFile(bytes.NewReader(b), int64(len(b))) + require.NoError(t, err) + + c, err := ffi.GeneratePieceCIDFromFile(pt, pf, abi.UnpaddedPieceSize(len(b))) + require.NoError(t, err) + + require.NoError(t, werr()) + + return c + } + + testCommEq := func(name string, in [][]byte, expect [][]byte) { + t.Run(name, func(t *testing.T) { + upi := make([]abi.PieceInfo, len(in)) + for i, b := range in { + upi[i] = abi.PieceInfo{ + Size: abi.UnpaddedPieceSize(len(b)).Padded(), + PieceCID: commP(b), + } + } + + sectorPi := []abi.PieceInfo{ + { + Size: 2048, + PieceCID: commP(bytes.Join(expect, nil)), + }, + } + + expectCid, err := GenerateUnsealedCID(pt, sectorPi) + require.NoError(t, err) + + actualCid, err := 
GenerateUnsealedCID(pt, upi) + require.NoError(t, err) + + require.Equal(t, expectCid, actualCid) + }) + } + + barr := func(b byte, den int) []byte { + return bytes.Repeat([]byte{b}, ups/den) + } + + // 0000 + testCommEq("zero", + nil, + [][]byte{barr(0, 1)}, + ) + + // 1111 + testCommEq("one", + [][]byte{barr(1, 1)}, + [][]byte{barr(1, 1)}, + ) + + // 11 00 + testCommEq("one|2", + [][]byte{barr(1, 2)}, + [][]byte{barr(1, 2), barr(0, 2)}, + ) + + // 1 0 00 + testCommEq("one|4", + [][]byte{barr(1, 4)}, + [][]byte{barr(1, 4), barr(0, 4), barr(0, 2)}, + ) + + // 11 2 0 + testCommEq("one|2-two|4", + [][]byte{barr(1, 2), barr(2, 4)}, + [][]byte{barr(1, 2), barr(2, 4), barr(0, 4)}, + ) + + // 1 0 22 + testCommEq("one|4-two|2", + [][]byte{barr(1, 4), barr(2, 2)}, + [][]byte{barr(1, 4), barr(0, 4), barr(2, 2)}, + ) + + // 1 0 22 0000 + testCommEq("one|8-two|4", + [][]byte{barr(1, 8), barr(2, 4)}, + [][]byte{barr(1, 8), barr(0, 8), barr(2, 4), barr(0, 2)}, + ) + + // 11 2 0 0000 + testCommEq("one|4-two|8", + [][]byte{barr(1, 4), barr(2, 8)}, + [][]byte{barr(1, 4), barr(2, 8), barr(0, 8), barr(0, 2)}, + ) + + // 1 0 22 3 0 00 4444 5 0 00 + testCommEq("one|16-two|8-three|16-four|4-five|16", + [][]byte{barr(1, 16), barr(2, 8), barr(3, 16), barr(4, 4), barr(5, 16)}, + [][]byte{barr(1, 16), barr(0, 16), barr(2, 8), barr(3, 16), barr(0, 16), barr(0, 8), barr(4, 4), barr(5, 16), barr(0, 16), barr(0, 8)}, + ) +} From 31d9abfc8cac192758b62c27945e7480fc5fe328 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 3 Jul 2020 21:52:31 +0200 Subject: [PATCH 02/51] Implement FinalizeSector keepUnsealed --- ffiwrapper/partialfile.go | 23 ++++++++++++++++ ffiwrapper/sealer_cgo.go | 56 ++++++++++++++++++++++++++++++++++++++- fsutil/dealloc_linux.go | 28 ++++++++++++++++++++ fsutil/dealloc_other.go | 18 +++++++++++++ go.mod | 2 +- go.sum | 2 ++ localworker.go | 6 +++-- 7 files changed, 131 insertions(+), 4 deletions(-) create mode 100644 fsutil/dealloc_linux.go create mode 100644 fsutil/dealloc_other.go diff --git a/ffiwrapper/partialfile.go b/ffiwrapper/partialfile.go index a2c1f1151..8c4fdcc72 100644 --- a/ffiwrapper/partialfile.go +++ b/ffiwrapper/partialfile.go @@ -12,6 +12,7 @@ import ( rlepluslazy "github.com/filecoin-project/go-bitfield/rle" "github.com/filecoin-project/specs-actors/actors/abi" + "github.com/filecoin-project/sector-storage/fsutil" "github.com/filecoin-project/sector-storage/storiface" ) @@ -218,6 +219,28 @@ func (pf *partialFile) MarkAllocated(offset storiface.PaddedByteIndex, size abi. 
 	return nil
 }
 
+func (pf *partialFile) Free(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) error {
+	have, err := pf.allocated.RunIterator()
+	if err != nil {
+		return err
+	}
+
+	if err := fsutil.Deallocate(pf.file, int64(offset), int64(size)); err != nil {
+		return xerrors.Errorf("deallocating: %w", err)
+	}
+
+	s, err := rlepluslazy.Subtract(have, pieceRun(offset, size))
+	if err != nil {
+		return err
+	}
+
+	if err := writeTrailer(int64(pf.maxPiece), pf.file, s); err != nil {
+		return xerrors.Errorf("writing trailer: %w", err)
+	}
+
+	return nil
+}
+
 func (pf *partialFile) Reader(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) (*os.File, error) {
 	if _, err := pf.file.Seek(int64(offset), io.SeekStart); err != nil {
 		return nil, xerrors.Errorf("seek piece start: %w", err)
 	}
diff --git a/ffiwrapper/sealer_cgo.go b/ffiwrapper/sealer_cgo.go
index 177ddeae0..58d9d8c5b 100644
--- a/ffiwrapper/sealer_cgo.go
+++ b/ffiwrapper/sealer_cgo.go
@@ -15,6 +15,7 @@ import (
 	"golang.org/x/xerrors"
 
 	ffi "github.com/filecoin-project/filecoin-ffi"
+	rlepluslazy "github.com/filecoin-project/go-bitfield/rle"
 	commcid "github.com/filecoin-project/go-fil-commcid"
 	"github.com/filecoin-project/specs-actors/actors/abi"
 	"github.com/filecoin-project/specs-storage/storage"
@@ -502,7 +503,60 @@ func (sb *Sealer) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Ou
 
 func (sb *Sealer) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error {
 	if len(keepUnsealed) > 0 {
-		return xerrors.Errorf("keepUnsealed unsupported") // TODO: impl for fastretrieval copies
+		maxPieceSize := abi.PaddedPieceSize(sb.ssize)
+
+		sr := pieceRun(0, maxPieceSize)
+
+		for _, s := range keepUnsealed {
+			si := &rlepluslazy.RunSliceIterator{}
+			if s.Offset != 0 {
+				si.Runs = append(si.Runs, rlepluslazy.Run{Val: false, Len: uint64(s.Offset)})
+			}
+			si.Runs = append(si.Runs, rlepluslazy.Run{Val: true, Len: uint64(s.Size)})
+
+			var err error
+			sr, err = rlepluslazy.Subtract(sr, si)
+			if err != nil {
+				return err
+			}
+		}
+
+
+		paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, 0, false)
+		if err != nil {
+			return xerrors.Errorf("acquiring sector cache path: %w", err)
+		}
+		defer done()
+
+		pf, err := openPartialFile(maxPieceSize, paths.Unsealed)
+		if err != nil {
+			return xerrors.Errorf("opening partial file: %w", err)
+		}
+
+		var at uint64
+		for sr.HasNext() {
+			r, err := sr.NextRun()
+			if err != nil {
+				_ = pf.Close()
+				return err
+			}
+
+			offset := at
+			at += r.Len
+			if !r.Val {
+				continue
+			}
+
+			err = pf.Free(storiface.PaddedByteIndex(abi.UnpaddedPieceSize(offset).Padded()), abi.UnpaddedPieceSize(r.Len).Padded())
+			if err != nil {
+				_ = pf.Close()
+				return xerrors.Errorf("free partial file range: %w", err)
+			}
+		}
+
+		if err := pf.Close(); err != nil {
+			return err
+		}
 	}
 
 	paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTCache, 0, false)
diff --git a/fsutil/dealloc_linux.go b/fsutil/dealloc_linux.go
new file mode 100644
index 000000000..0b20c568d
--- /dev/null
+++ b/fsutil/dealloc_linux.go
@@ -0,0 +1,28 @@
+package fsutil
+
+import (
+	"os"
+	"syscall"
+
+	logging "github.com/ipfs/go-log/v2"
+)
+
+var log = logging.Logger("fsutil")
+
+const FallocFlPunchHole = 0x02 // linux/falloc.h
+
+func Deallocate(file *os.File, offset int64, length int64) error {
+	if length == 0 {
+		return nil
+	}
+
+	err := syscall.Fallocate(int(file.Fd()), FallocFlPunchHole, offset, length)
+	if errno, ok := err.(syscall.Errno); ok {
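+		// EOPNOTSUPP here means the backing filesystem doesn't support
+		// FALLOC_FL_PUNCH_HOLE; ENOSYS means the kernel lacks fallocate
+		// entirely. Freeing the range is only a space optimization, so both
+		// cases are logged and ignored rather than failing the caller.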
+		if errno == syscall.EOPNOTSUPP || errno == syscall.ENOSYS {
+			log.Warnf("could not deallocate space, ignoring: %v", errno)
+			err = nil // log and ignore
+		}
+	}
+
+	return err
+}
diff --git a/fsutil/dealloc_other.go b/fsutil/dealloc_other.go
new file mode 100644
index 000000000..721116af1
--- /dev/null
+++ b/fsutil/dealloc_other.go
@@ -0,0 +1,18 @@
+// +build !linux
+
+package fsutil
+
+import (
+	"os"
+
+	logging "github.com/ipfs/go-log/v2"
+)
+
+var log = logging.Logger("fsutil")
+
+
+func Deallocate(file *os.File, offset int64, length int64) error {
+	log.Warnf("deallocating space not supported")
+
+	return nil
+}
diff --git a/go.mod b/go.mod
index 9e51c0445..83424841f 100644
--- a/go.mod
+++ b/go.mod
@@ -6,7 +6,7 @@ require (
 	github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e
 	github.com/elastic/go-sysinfo v1.3.0
 	github.com/filecoin-project/filecoin-ffi v0.0.0-20200326153646-e899cc1dd072
-	github.com/filecoin-project/go-bitfield v0.0.2-0.20200518150651-562fdb554b6e
+	github.com/filecoin-project/go-bitfield v0.0.4-0.20200703174658-f4a5758051a1
 	github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5
 	github.com/filecoin-project/go-paramfetch v0.0.1
 	github.com/filecoin-project/specs-actors v0.6.1
diff --git a/go.sum b/go.sum
index 2f97216e3..330b97579 100644
--- a/go.sum
+++ b/go.sum
@@ -36,6 +36,8 @@ github.com/filecoin-project/go-bitfield v0.0.1 h1:Xg/JnrqqE77aJVKdbEyR04n9FZQWhw
 github.com/filecoin-project/go-bitfield v0.0.1/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY=
 github.com/filecoin-project/go-bitfield v0.0.2-0.20200518150651-562fdb554b6e h1:gkG/7G+iKy4He+IiQNeQn+nndFznb/vCoOR8iRQsm60=
 github.com/filecoin-project/go-bitfield v0.0.2-0.20200518150651-562fdb554b6e/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY=
+github.com/filecoin-project/go-bitfield v0.0.4-0.20200703174658-f4a5758051a1 h1:xuHlrdznafh7ul5t4xEncnA4qgpQvJZEw+mr98eqHXw=
+github.com/filecoin-project/go-bitfield v0.0.4-0.20200703174658-f4a5758051a1/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY=
 github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus=
 github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ=
 github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5 h1:yvQJCW9mmi9zy+51xA01Ea2X7/dL7r8eKDPuGUjRmbo=
diff --git a/localworker.go b/localworker.go
index a1d82209a..a6042826a 100644
--- a/localworker.go
+++ b/localworker.go
@@ -171,8 +171,10 @@ func (l *LocalWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, k
 		return xerrors.Errorf("finalizing sector: %w", err)
 	}
 
-	if err := l.storage.Remove(ctx, sector, stores.FTUnsealed, true); err != nil {
-		return xerrors.Errorf("removing unsealed data: %w", err)
+	if len(keepUnsealed) == 0 {
+		if err := l.storage.Remove(ctx, sector, stores.FTUnsealed, true); err != nil {
+			return xerrors.Errorf("removing unsealed data: %w", err)
+		}
 	}
 
 	return nil

From 636bf90f842d7532adda8bf6ea45ffcd1d350ff4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C5=81ukasz=20Magiera?=
Date: Fri, 3 Jul 2020 22:23:36 +0200
Subject: [PATCH 03/51] Don't error in ReleaseUnsealed

---
 manager.go | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/manager.go b/manager.go
index caea09cd0..767e87cf9 100644
--- a/manager.go
+++ b/manager.go
@@ -441,7 +441,8 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID, keepU
 }
 
 func (m *Manager) ReleaseUnsealed(ctx 
context.Context, sector abi.SectorID, safeToFree []storage.Range) error { - return xerrors.Errorf("implement me") + log.Warnw("ReleaseUnsealed todo") + return nil } func (m *Manager) Remove(ctx context.Context, sector abi.SectorID) error { From 0fd142153a2b844e17676e26d29c084fd1c5708f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 3 Jul 2020 22:24:47 +0200 Subject: [PATCH 04/51] mod tidy --- go.sum | 2 -- 1 file changed, 2 deletions(-) diff --git a/go.sum b/go.sum index 330b97579..508d985d7 100644 --- a/go.sum +++ b/go.sum @@ -34,8 +34,6 @@ github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200424220931-6263827e49f2/ github.com/filecoin-project/go-bitfield v0.0.0-20200416002808-b3ee67ec9060/go.mod h1:iodsLxOFZnqKtjj2zkgqzoGNrv6vUqj69AT/J8DKXEw= github.com/filecoin-project/go-bitfield v0.0.1 h1:Xg/JnrqqE77aJVKdbEyR04n9FZQWhwrN+buDgQCVpZU= github.com/filecoin-project/go-bitfield v0.0.1/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY= -github.com/filecoin-project/go-bitfield v0.0.2-0.20200518150651-562fdb554b6e h1:gkG/7G+iKy4He+IiQNeQn+nndFznb/vCoOR8iRQsm60= -github.com/filecoin-project/go-bitfield v0.0.2-0.20200518150651-562fdb554b6e/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY= github.com/filecoin-project/go-bitfield v0.0.4-0.20200703174658-f4a5758051a1 h1:xuHlrdznafh7ul5t4xEncnA4qgpQvJZEw+mr98eqHXw= github.com/filecoin-project/go-bitfield v0.0.4-0.20200703174658-f4a5758051a1/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus= From c5a96fdd08d3fd32ca8b19425adaa903cd9b344e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 6 Jul 2020 16:13:42 +0200 Subject: [PATCH 05/51] Change PathType to string --- faults.go | 6 ++++-- ffiwrapper/sealer_cgo.go | 20 ++++++++++---------- ffiwrapper/sealer_test.go | 2 +- ffiwrapper/verifier_cgo.go | 2 +- manager.go | 4 ++-- stores/filetype.go | 2 +- stores/http_handler.go | 2 +- stores/interface.go | 6 +++--- stores/local.go | 4 ++-- 9 files changed, 25 insertions(+), 23 deletions(-) diff --git a/faults.go b/faults.go index 11c1c3df2..0eebc42f0 100644 --- a/faults.go +++ b/faults.go @@ -43,9 +43,11 @@ func (m *Manager) CheckProvable(ctx context.Context, spt abi.RegisteredSealProof return nil } - lp, _, err := m.localStore.AcquireSector(ctx, sector, spt, stores.FTSealed|stores.FTCache, stores.FTNone, false, stores.AcquireMove) + lp, _, err := m.localStore.AcquireSector(ctx, sector, spt, stores.FTSealed|stores.FTCache, stores.FTNone, stores.PathStorage, stores.AcquireMove) if err != nil { - return xerrors.Errorf("acquire sector in checkProvable: %w", err) + log.Warnw("CheckProvable Sector FAULT: acquire sector in checkProvable", "sector", sector, "error", err) + bad = append(bad, sector) + return nil } if lp.Sealed == "" || lp.Cache == "" { diff --git a/ffiwrapper/sealer_cgo.go b/ffiwrapper/sealer_cgo.go index 177ddeae0..0ff8c8f2f 100644 --- a/ffiwrapper/sealer_cgo.go +++ b/ffiwrapper/sealer_cgo.go @@ -81,7 +81,7 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie var stagedPath stores.SectorPaths if len(existingPieceSizes) == 0 { - stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, 0, stores.FTUnsealed, true) + stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, 0, stores.FTUnsealed, stores.PathSealing) if err != nil { return abi.PieceInfo{}, xerrors.Errorf("acquire unsealed sector: %w", err) } @@ -91,7 +91,7 @@ 
func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie return abi.PieceInfo{}, xerrors.Errorf("creating unsealed sector file: %w", err) } } else { - stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, 0, true) + stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, 0, stores.PathSealing) if err != nil { return abi.PieceInfo{}, xerrors.Errorf("acquire unsealed sector: %w", err) } @@ -198,12 +198,12 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s maxPieceSize := abi.PaddedPieceSize(sb.ssize) // try finding existing - unsealedPath, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTNone, false) + unsealedPath, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTNone, stores.PathStorage) var pf *partialFile switch { case xerrors.Is(err, storiface.ErrSectorNotFound): - unsealedPath, done, err = sb.sectors.AcquireSector(ctx, sector, stores.FTNone, stores.FTUnsealed, false) + unsealedPath, done, err = sb.sectors.AcquireSector(ctx, sector, stores.FTNone, stores.FTUnsealed, stores.PathStorage) if err != nil { return xerrors.Errorf("acquire unsealed sector path (allocate): %w", err) } @@ -240,7 +240,7 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s return nil } - srcPaths, srcDone, err := sb.sectors.AcquireSector(ctx, sector, stores.FTCache|stores.FTSealed, stores.FTNone, false) + srcPaths, srcDone, err := sb.sectors.AcquireSector(ctx, sector, stores.FTCache|stores.FTSealed, stores.FTNone, stores.PathStorage) if err != nil { return xerrors.Errorf("acquire sealed sector paths: %w", err) } @@ -358,7 +358,7 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s } func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error { - path, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTNone, false) + path, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTNone, stores.PathStorage) if err != nil { return xerrors.Errorf("acquire unsealed sector path: %w", err) } @@ -395,7 +395,7 @@ func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector abi.Se } func (sb *Sealer) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) { - paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTSealed|stores.FTCache, true) + paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTSealed|stores.FTCache, stores.PathSealing) if err != nil { return nil, xerrors.Errorf("acquiring sector paths: %w", err) } @@ -452,7 +452,7 @@ func (sb *Sealer) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke } func (sb *Sealer) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.PreCommit1Out) (storage.SectorCids, error) { - paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTSealed|stores.FTCache, 0, true) + paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTSealed|stores.FTCache, 0, stores.PathSealing) if err != nil { return storage.SectorCids{}, xerrors.Errorf("acquiring sector paths: %w", err) } @@ -470,7 +470,7 @@ func (sb *Sealer) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase } func (sb *Sealer) 
SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) { - paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTSealed|stores.FTCache, 0, true) + paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTSealed|stores.FTCache, 0, stores.PathSealing) if err != nil { return nil, xerrors.Errorf("acquire sector paths: %w", err) } @@ -505,7 +505,7 @@ func (sb *Sealer) FinalizeSector(ctx context.Context, sector abi.SectorID, keepU return xerrors.Errorf("keepUnsealed unsupported") // TODO: impl for fastretrieval copies } - paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTCache, 0, false) + paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTCache, 0, stores.PathStorage) if err != nil { return xerrors.Errorf("acquiring sector cache path: %w", err) } diff --git a/ffiwrapper/sealer_test.go b/ffiwrapper/sealer_test.go index 5b9c3d1ac..0b5018d84 100644 --- a/ffiwrapper/sealer_test.go +++ b/ffiwrapper/sealer_test.go @@ -121,7 +121,7 @@ func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si abi.Sec t.Fatal("read wrong bytes") } - p, sd, err := sp.AcquireSector(context.TODO(), si, stores.FTUnsealed, stores.FTNone, false) + p, sd, err := sp.AcquireSector(context.TODO(), si, stores.FTUnsealed, stores.FTNone, stores.PathStorage) if err != nil { t.Fatal(err) } diff --git a/ffiwrapper/verifier_cgo.go b/ffiwrapper/verifier_cgo.go index e3e8dd886..60d56dddc 100644 --- a/ffiwrapper/verifier_cgo.go +++ b/ffiwrapper/verifier_cgo.go @@ -62,7 +62,7 @@ func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorIn sid := abi.SectorID{Miner: mid, Number: s.SectorNumber} - paths, d, err := sb.sectors.AcquireSector(ctx, sid, stores.FTCache|stores.FTSealed, 0, false) + paths, d, err := sb.sectors.AcquireSector(ctx, sid, stores.FTCache|stores.FTSealed, 0, stores.PathStorage) if err != nil { log.Warnw("failed to acquire sector, skipping", "sector", sid, "error", err) skipped = append(skipped, sid) diff --git a/manager.go b/manager.go index caea09cd0..6c1b93ced 100644 --- a/manager.go +++ b/manager.go @@ -218,12 +218,12 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect // TODO: Optimization: don't send unseal to a worker if the requested range is already unsealed unsealFetch := func(ctx context.Context, worker Worker) error { - if err := worker.Fetch(ctx, sector, stores.FTSealed|stores.FTCache, true, stores.AcquireCopy); err != nil { + if err := worker.Fetch(ctx, sector, stores.FTSealed|stores.FTCache, stores.PathSealing, stores.AcquireCopy); err != nil { return xerrors.Errorf("copy sealed/cache sector data: %w", err) } if len(best) > 0 { - if err := worker.Fetch(ctx, sector, stores.FTUnsealed, true, stores.AcquireMove); err != nil { + if err := worker.Fetch(ctx, sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove); err != nil { return xerrors.Errorf("copy unsealed sector data: %w", err) } } diff --git a/stores/filetype.go b/stores/filetype.go index c31dfefb2..60c47d1f7 100644 --- a/stores/filetype.go +++ b/stores/filetype.go @@ -22,7 +22,7 @@ const ( var FSOverheadSeal = map[SectorFileType]int{ // 10x overheads FTUnsealed: 10, FTSealed: 10, - FTCache: 70, // TODO: confirm for 32G + FTCache: 141, // 11 layers + D(2x ssize) + C + R } var FsOverheadFinalized = map[SectorFileType]int{ diff --git a/stores/http_handler.go b/stores/http_handler.go index 
60f8a41c5..93fb94637 100644 --- a/stores/http_handler.go +++ b/stores/http_handler.go @@ -72,7 +72,7 @@ func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Requ // The caller has a lock on this sector already, no need to get one here // passing 0 spt because we don't allocate anything - paths, _, err := handler.Local.AcquireSector(r.Context(), id, 0, ft, FTNone, false, AcquireMove) + paths, _, err := handler.Local.AcquireSector(r.Context(), id, 0, ft, FTNone, PathStorage, AcquireMove) if err != nil { log.Error("%+v", err) w.WriteHeader(500) diff --git a/stores/interface.go b/stores/interface.go index 54aaec90c..b61980125 100644 --- a/stores/interface.go +++ b/stores/interface.go @@ -9,11 +9,11 @@ import ( "github.com/filecoin-project/specs-actors/actors/abi" ) -type PathType bool +type PathType string const ( - PathStorage = false - PathSealing = true + PathStorage = "storage" + PathSealing = "sealing" ) type AcquireMode string diff --git a/stores/local.go b/stores/local.go index 26b7ccb75..ac63ae0dd 100644 --- a/stores/local.go +++ b/stores/local.go @@ -398,12 +398,12 @@ func (st *Local) removeSector(ctx context.Context, sid abi.SectorID, typ SectorF } func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, spt abi.RegisteredSealProof, types SectorFileType) error { - dest, destIds, err := st.AcquireSector(ctx, s, spt, FTNone, types, false, AcquireMove) + dest, destIds, err := st.AcquireSector(ctx, s, spt, FTNone, types, PathStorage, AcquireMove) if err != nil { return xerrors.Errorf("acquire dest storage: %w", err) } - src, srcIds, err := st.AcquireSector(ctx, s, spt, types, FTNone, false, AcquireMove) + src, srcIds, err := st.AcquireSector(ctx, s, spt, types, FTNone, PathStorage, AcquireMove) if err != nil { return xerrors.Errorf("acquire src storage: %w", err) } From 8099621cd0b2a73f96751431ddd861e27498fae1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 6 Jul 2020 18:36:44 +0200 Subject: [PATCH 06/51] stores: Allow reserving local storage --- localworker.go | 8 ++++ stores/filetype.go | 12 +++--- stores/http_handler.go | 2 + stores/index.go | 2 +- stores/interface.go | 11 +++--- stores/local.go | 83 ++++++++++++++++++++++++++++++++++++++++-- 6 files changed, 104 insertions(+), 14 deletions(-) diff --git a/localworker.go b/localworker.go index a1d82209a..d03ace359 100644 --- a/localworker.go +++ b/localworker.go @@ -61,14 +61,22 @@ type localWorkerPathProvider struct { } func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, sealing stores.PathType) (stores.SectorPaths, func(), error) { + paths, storageIDs, err := l.w.storage.AcquireSector(ctx, sector, l.w.scfg.SealProofType, existing, allocate, sealing, l.op) if err != nil { return stores.SectorPaths{}, nil, err } + releaseStorage, err := l.w.localStore.Reserve(ctx, sector, l.w.scfg.SealProofType, allocate, storageIDs, stores.FSOverheadSeal) + if err != nil { + return stores.SectorPaths{}, nil, xerrors.Errorf("reserving storage space: %w", err) + } + log.Debugf("acquired sector %d (e:%d; a:%d): %v", sector, existing, allocate, paths) return paths, func() { + releaseStorage() + for _, fileType := range pathTypes { if fileType&allocate == 0 { continue diff --git a/stores/filetype.go b/stores/filetype.go index 60c47d1f7..650b92f71 100644 --- a/stores/filetype.go +++ b/stores/filetype.go @@ -19,15 +19,17 @@ const ( FTNone SectorFileType = 0 ) +const FSOverheadDen = 10 + var 
FSOverheadSeal = map[SectorFileType]int{ // 10x overheads - FTUnsealed: 10, - FTSealed: 10, + FTUnsealed: FSOverheadDen, + FTSealed: FSOverheadDen, FTCache: 141, // 11 layers + D(2x ssize) + C + R } var FsOverheadFinalized = map[SectorFileType]int{ - FTUnsealed: 10, - FTSealed: 10, + FTUnsealed: FSOverheadDen, + FTSealed: FSOverheadDen, FTCache: 2, } @@ -67,7 +69,7 @@ func (t SectorFileType) SealSpaceUse(spt abi.RegisteredSealProof) (uint64, error return 0, xerrors.Errorf("no seal overhead info for %s", pathType) } - need += uint64(oh) * uint64(ssize) / 10 + need += uint64(oh) * uint64(ssize) / FSOverheadDen } return need, nil diff --git a/stores/http_handler.go b/stores/http_handler.go index 93fb94637..4f0556138 100644 --- a/stores/http_handler.go +++ b/stores/http_handler.go @@ -79,6 +79,8 @@ func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Requ return } + // TODO: reserve local storage here + path := PathByType(paths, ft) if path == "" { log.Error("acquired path was empty") diff --git a/stores/index.go b/stores/index.go index 049e2dc20..e48ae02bb 100644 --- a/stores/index.go +++ b/stores/index.go @@ -361,7 +361,7 @@ func (i *Index) StorageBestAlloc(ctx context.Context, allocate SectorFileType, s continue } - if spaceReq > p.fsi.Available { + if spaceReq > uint64(p.fsi.Available) { log.Debugf("not allocating on %s, out of space (available: %d, need: %d)", p.info.ID, p.fsi.Available, spaceReq) continue } diff --git a/stores/interface.go b/stores/interface.go index b61980125..6fd4a7ad7 100644 --- a/stores/interface.go +++ b/stores/interface.go @@ -44,13 +44,14 @@ func Stat(path string) (FsStat, error) { } return FsStat{ - Capacity: stat.Blocks * uint64(stat.Bsize), - Available: stat.Bavail * uint64(stat.Bsize), + Capacity: int64(stat.Blocks) * stat.Bsize, + Available: int64(stat.Bavail) * stat.Bsize, }, nil } type FsStat struct { - Capacity uint64 - Available uint64 // Available to use for sector storage - Used uint64 + Capacity int64 + Available int64 // Available to use for sector storage + Used int64 + Reserved int64 } diff --git a/stores/local.go b/stores/local.go index ac63ae0dd..a21909d69 100644 --- a/stores/local.go +++ b/stores/local.go @@ -67,6 +67,25 @@ type Local struct { type path struct { local string // absolute local path + + reserved int64 + reservations map[abi.SectorID]SectorFileType +} + +type statFn func(path string) (FsStat, error) +func (p *path) stat(st statFn) (FsStat, error) { + stat, err := st(p.local) + if err != nil { + return FsStat{}, err + } + + stat.Reserved = p.reserved + stat.Available -= p.reserved + if stat.Available < 0 { + stat.Available = 0 + } + + return stat, err } func NewLocal(ctx context.Context, ls LocalStorage, index SectorIndex, urls []string) (*Local, error) { @@ -98,9 +117,12 @@ func (st *Local) OpenPath(ctx context.Context, p string) error { out := &path{ local: p, + + reserved: 0, + reservations: map[abi.SectorID]SectorFileType{}, } - fst, err := st.localStorage.Stat(p) + fst, err := out.stat(st.localStorage.Stat) if err != nil { return err } @@ -179,7 +201,7 @@ func (st *Local) reportHealth(ctx context.Context) { toReport := map[ID]HealthReport{} for id, p := range st.paths { - stat, err := st.localStorage.Stat(p.local) + stat, err := p.stat(st.localStorage.Stat) toReport[id] = HealthReport{ Stat: stat, @@ -197,6 +219,61 @@ func (st *Local) reportHealth(ctx context.Context) { } } +func (st *Local) Reserve(ctx context.Context, sid abi.SectorID, spt abi.RegisteredSealProof, ft SectorFileType, storageIDs 
SectorPaths, overheadTab map[SectorFileType]int) (func(), error) { + ssize, err := spt.SectorSize() + if err != nil { + return nil, xerrors.Errorf("getting sector size: %w", err) + } + + st.localLk.Lock() + + done := func(){} + deferredDone := func() { done() } + defer func() { + st.localLk.Unlock() + deferredDone() + }() + + for _, fileType := range PathTypes { + if fileType&ft == 0 { + continue + } + + id := ID(PathByType(storageIDs, fileType)) + + p, ok := st.paths[id] + if !ok { + return nil, errPathNotFound + } + + stat, err := p.stat(st.localStorage.Stat) + if err != nil { + return nil, err + } + + overhead := int64(overheadTab[fileType]) * int64(ssize) / FSOverheadDen + + if stat.Available < overhead { + return nil, xerrors.Errorf("can't reserve %d bytes in '%s' (id:%s), only %d available", overhead, p.local, id, stat.Available) + } + + p.reserved += overhead + + prevDone := done + done = func() { + prevDone() + + st.localLk.Lock() + defer st.localLk.Unlock() + + p.reserved -= overhead + } + } + + deferredDone = func() {} + return done, nil +} + func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, spt abi.RegisteredSealProof, existing SectorFileType, allocate SectorFileType, pathType PathType, op AcquireMode) (SectorPaths, SectorPaths, error) { if existing|allocate != existing^allocate { return SectorPaths{}, SectorPaths{}, xerrors.New("can't both find and allocate a sector") @@ -463,7 +540,7 @@ func (st *Local) FsStat(ctx context.Context, id ID) (FsStat, error) { return FsStat{}, errPathNotFound } - return st.localStorage.Stat(p.local) + return p.stat(st.localStorage.Stat) } var _ Store = &Local{} From 7279a80dfafdce363883434002431d98e8cdfeee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 6 Jul 2020 18:56:46 +0200 Subject: [PATCH 07/51] localstorage: don't double count reserved storage --- stores/local.go | 48 +++++++++++++++++++++++++++++++++++++----------- 1 file changed, 37 insertions(+), 11 deletions(-) diff --git a/stores/local.go b/stores/local.go index a21909d69..4e91748f8 100644 --- a/stores/local.go +++ b/stores/local.go @@ -49,6 +49,7 @@ type LocalStorage interface { SetStorage(func(*StorageConfig)) error Stat(path string) (FsStat, error) + DiskUsage(path string) (int64, error) // returns real disk usage for a file/directory } const MetaFile = "sectorstore.json" @@ -72,15 +73,36 @@ type path struct { reservations map[abi.SectorID]SectorFileType } -type statFn func(path string) (FsStat, error) -func (p *path) stat(st statFn) (FsStat, error) { - stat, err := st(p.local) +func (p *path) stat(ls LocalStorage) (FsStat, error) { + stat, err := ls.Stat(p.local) if err != nil { return FsStat{}, err } stat.Reserved = p.reserved - stat.Available -= p.reserved + + for id, ft := range p.reservations { + for _, fileType := range PathTypes { + if fileType&ft == 0 { + continue + } + + used, err := ls.DiskUsage(p.sectorPath(id, fileType)) + if err != nil { + log.Errorf("getting disk usage of '%s': %+v", p.sectorPath(id, fileType), err) + continue + } + + stat.Reserved -= used + } + } + + if stat.Reserved < 0 { + log.Warnf("negative reserved storage: p.reserved=%d, reserved: %d", p.reserved, stat.Reserved) + stat.Reserved = 0 + } + + stat.Available -= stat.Reserved if stat.Available < 0 { stat.Available = 0 } @@ -88,6 +110,10 @@ func (p *path) stat(st statFn) (FsStat, error) { return stat, err } +func (p *path) sectorPath(sid abi.SectorID, fileType SectorFileType) string { + return filepath.Join(p.local, fileType.String(), SectorName(sid)) +} 
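
// A sketch of how Reserve is meant to be used by a caller, mirroring the
// localworker.go change in the previous commit (the surrounding variable
// names here are illustrative, not taken from the patch):
//
//	release, err := localStore.Reserve(ctx, sector, spt,
//		allocate,               // SectorFileType bitmask about to be written
//		storageIDs,             // storage IDs returned by AcquireSector
//		stores.FSOverheadSeal)  // per-type sealing overhead table
//	if err != nil {
//		return xerrors.Errorf("reserving storage space: %w", err)
//	}
//	defer release()
//
// While the reservation is held, path.stat (above) subtracts the bytes that
// have already landed on disk (via DiskUsage) from the reserved amount, so
// space is not counted twice as both reserved and used.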
+ func NewLocal(ctx context.Context, ls LocalStorage, index SectorIndex, urls []string) (*Local, error) { l := &Local{ localStorage: ls, @@ -122,7 +148,7 @@ func (st *Local) OpenPath(ctx context.Context, p string) error { reservations: map[abi.SectorID]SectorFileType{}, } - fst, err := out.stat(st.localStorage.Stat) + fst, err := out.stat(st.localStorage) if err != nil { return err } @@ -201,7 +227,7 @@ func (st *Local) reportHealth(ctx context.Context) { toReport := map[ID]HealthReport{} for id, p := range st.paths { - stat, err := p.stat(st.localStorage.Stat) + stat, err := p.stat(st.localStorage) toReport[id] = HealthReport{ Stat: stat, @@ -246,7 +272,7 @@ func (st *Local) Reserve(ctx context.Context, sid abi.SectorID, spt abi.Register return nil, errPathNotFound } - stat, err := p.stat(st.localStorage.Stat) + stat, err := p.stat(st.localStorage) if err != nil { return nil, err } @@ -306,7 +332,7 @@ func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, spt abi.Re continue } - spath := filepath.Join(p.local, fileType.String(), SectorName(sid)) + spath := p.sectorPath(sid, fileType) SetPathByType(&out, fileType, spath) SetPathByType(&storageIDs, fileType, string(info.ID)) @@ -348,7 +374,7 @@ func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, spt abi.Re // TODO: Check free space - best = filepath.Join(p.local, fileType.String(), SectorName(sid)) + best = p.sectorPath(sid, fileType) bestID = si.ID } @@ -464,7 +490,7 @@ func (st *Local) removeSector(ctx context.Context, sid abi.SectorID, typ SectorF return xerrors.Errorf("dropping sector from index: %w", err) } - spath := filepath.Join(p.local, typ.String(), SectorName(sid)) + spath := p.sectorPath(sid, typ) log.Infof("remove %s", spath) if err := os.RemoveAll(spath); err != nil { @@ -540,7 +566,7 @@ func (st *Local) FsStat(ctx context.Context, id ID) (FsStat, error) { return FsStat{}, errPathNotFound } - return p.stat(st.localStorage.Stat) + return p.stat(st.localStorage) } var _ Store = &Local{} From 63c62c49cecb5aa8dec5c4f51368e1676382d86c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 6 Jul 2020 19:19:13 +0200 Subject: [PATCH 08/51] Fix tests --- manager_test.go | 4 ++++ stores/local_test.go | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/manager_test.go b/manager_test.go index ae318b487..b9198a2b3 100644 --- a/manager_test.go +++ b/manager_test.go @@ -24,6 +24,10 @@ import ( type testStorage stores.StorageConfig +func (t testStorage) DiskUsage(path string) (int64, error) { + return 1, nil // close enough +} + func newTestStorage(t *testing.T) *testStorage { tp, err := ioutil.TempDir(os.TempDir(), "sector-storage-test-") require.NoError(t, err) diff --git a/stores/local_test.go b/stores/local_test.go index 8e654d725..e748d061b 100644 --- a/stores/local_test.go +++ b/stores/local_test.go @@ -19,6 +19,10 @@ type TestingLocalStorage struct { c StorageConfig } +func (t *TestingLocalStorage) DiskUsage(path string) (int64, error) { + return 1, nil +} + func (t *TestingLocalStorage) GetStorage() (StorageConfig, error) { return t.c, nil } From 63ba9bd01836bcf2ed10242f3562bd17c38e2438 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 6 Jul 2020 19:19:24 +0200 Subject: [PATCH 09/51] gofmt --- stores/local.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stores/local.go b/stores/local.go index 4e91748f8..cf52540ad 100644 --- a/stores/local.go +++ b/stores/local.go @@ -69,7 +69,7 @@ type Local struct { type path struct { local 
string // absolute local path - reserved int64 + reserved int64 reservations map[abi.SectorID]SectorFileType } @@ -144,7 +144,7 @@ func (st *Local) OpenPath(ctx context.Context, p string) error { out := &path{ local: p, - reserved: 0, + reserved: 0, reservations: map[abi.SectorID]SectorFileType{}, } @@ -253,7 +253,7 @@ func (st *Local) Reserve(ctx context.Context, sid abi.SectorID, spt abi.Register st.localLk.Lock() - done := func(){} + done := func() {} deferredDone := func() { done() } defer func() { st.localLk.Unlock() From 0bc41d562dd3ba0a824e825dd9ea79939742ca11 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Wed, 8 Jul 2020 16:58:09 +0200 Subject: [PATCH 10/51] Move statfs to fsutil --- ffiwrapper/sealer_cgo.go | 3 +-- fsutil/dealloc_other.go | 1 - fsutil/statfs.go | 7 +++++++ fsutil/statfs_unix.go | 19 +++++++++++++++++++ fsutil/statfs_windows.go | 28 ++++++++++++++++++++++++++++ manager.go | 3 ++- manager_test.go | 5 +++-- stores/index.go | 9 +++++---- stores/interface.go | 26 ++------------------------ stores/local.go | 11 ++++++----- stores/local_test.go | 6 +++--- stores/remote.go | 25 +++++++++++++------------ 12 files changed, 89 insertions(+), 54 deletions(-) create mode 100644 fsutil/statfs.go create mode 100644 fsutil/statfs_unix.go create mode 100644 fsutil/statfs_windows.go diff --git a/ffiwrapper/sealer_cgo.go b/ffiwrapper/sealer_cgo.go index d3abe1063..c766f5555 100644 --- a/ffiwrapper/sealer_cgo.go +++ b/ffiwrapper/sealer_cgo.go @@ -521,8 +521,7 @@ func (sb *Sealer) FinalizeSector(ctx context.Context, sector abi.SectorID, keepU } } - - paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, 0, false) + paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, 0, stores.PathStorage) if err != nil { return xerrors.Errorf("acquiring sector cache path: %w", err) } diff --git a/fsutil/dealloc_other.go b/fsutil/dealloc_other.go index 721116af1..3ae8973ff 100644 --- a/fsutil/dealloc_other.go +++ b/fsutil/dealloc_other.go @@ -10,7 +10,6 @@ import ( var log = logging.Logger("fsutil") - func Deallocate(file *os.File, offset int64, length int64) error { log.Warnf("deallocating space not supported") diff --git a/fsutil/statfs.go b/fsutil/statfs.go new file mode 100644 index 000000000..2a00ccb9a --- /dev/null +++ b/fsutil/statfs.go @@ -0,0 +1,7 @@ +package fsutil + +type FsStat struct { + Capacity int64 + Available int64 // Available to use for sector storage + Reserved int64 +} diff --git a/fsutil/statfs_unix.go b/fsutil/statfs_unix.go new file mode 100644 index 000000000..3e69d5a8e --- /dev/null +++ b/fsutil/statfs_unix.go @@ -0,0 +1,19 @@ +package fsutil + +import ( + "syscall" + + "golang.org/x/xerrors" +) + +func Statfs(path string) (FsStat, error) { + var stat syscall.Statfs_t + if err := syscall.Statfs(path, &stat); err != nil { + return FsStat{}, xerrors.Errorf("statfs: %w", err) + } + + return FsStat{ + Capacity: int64(stat.Blocks) * stat.Bsize, + Available: int64(stat.Bavail) * stat.Bsize, + }, nil +} diff --git a/fsutil/statfs_windows.go b/fsutil/statfs_windows.go new file mode 100644 index 000000000..d78565182 --- /dev/null +++ b/fsutil/statfs_windows.go @@ -0,0 +1,28 @@ +package fsutil + +import ( + "syscall" + "unsafe" +) + +func Statfs(volumePath string) (FsStat, error) { + // From https://github.com/ricochet2200/go-disk-usage/blob/master/du/diskusage_windows.go + + h := syscall.MustLoadDLL("kernel32.dll") + c := h.MustFindProc("GetDiskFreeSpaceExW") + + var freeBytes int64 + var totalBytes int64 + var 
availBytes int64 + + c.Call( + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(volumePath))), + uintptr(unsafe.Pointer(&freeBytes)), + uintptr(unsafe.Pointer(&totalBytes)), + uintptr(unsafe.Pointer(&availBytes))) + + return FsStat{ + Capacity: totalBytes, + Available: availBytes, + }, nil +} diff --git a/manager.go b/manager.go index a7053c102..0c18645ac 100644 --- a/manager.go +++ b/manager.go @@ -3,6 +3,7 @@ package sectorstorage import ( "context" "errors" + "github.com/filecoin-project/sector-storage/fsutil" "io" "net/http" @@ -491,7 +492,7 @@ func (m *Manager) StorageLocal(ctx context.Context) (map[stores.ID]string, error return out, nil } -func (m *Manager) FsStat(ctx context.Context, id stores.ID) (stores.FsStat, error) { +func (m *Manager) FsStat(ctx context.Context, id stores.ID) (fsutil.FsStat, error) { return m.storage.FsStat(ctx, id) } diff --git a/manager_test.go b/manager_test.go index b9198a2b3..8539f8918 100644 --- a/manager_test.go +++ b/manager_test.go @@ -5,6 +5,7 @@ import ( "context" "encoding/json" "fmt" + "github.com/filecoin-project/sector-storage/fsutil" "github.com/filecoin-project/sector-storage/sealtasks" logging "github.com/ipfs/go-log" "io/ioutil" @@ -69,8 +70,8 @@ func (t *testStorage) SetStorage(f func(*stores.StorageConfig)) error { return nil } -func (t *testStorage) Stat(path string) (stores.FsStat, error) { - return stores.Stat(path) +func (t *testStorage) Stat(path string) (fsutil.FsStat, error) { + return fsutil.Statfs(path) } var _ stores.LocalStorage = &testStorage{} diff --git a/stores/index.go b/stores/index.go index e48ae02bb..c85dc125e 100644 --- a/stores/index.go +++ b/stores/index.go @@ -2,6 +2,7 @@ package stores import ( "context" + "github.com/filecoin-project/sector-storage/fsutil" "net/url" gopath "path" "sort" @@ -34,7 +35,7 @@ type StorageInfo struct { } type HealthReport struct { - Stat FsStat + Stat fsutil.FsStat Err error } @@ -50,7 +51,7 @@ type SectorStorageInfo struct { } type SectorIndex interface { // part of storage-miner api - StorageAttach(context.Context, StorageInfo, FsStat) error + StorageAttach(context.Context, StorageInfo, fsutil.FsStat) error StorageInfo(context.Context, ID) (StorageInfo, error) StorageReportHealth(context.Context, ID, HealthReport) error @@ -77,7 +78,7 @@ type declMeta struct { type storageEntry struct { info *StorageInfo - fsi FsStat + fsi fsutil.FsStat lastHeartbeat time.Time heartbeatErr error @@ -130,7 +131,7 @@ func (i *Index) StorageList(ctx context.Context) (map[ID][]Decl, error) { return out, nil } -func (i *Index) StorageAttach(ctx context.Context, si StorageInfo, st FsStat) error { +func (i *Index) StorageAttach(ctx context.Context, si StorageInfo, st fsutil.FsStat) error { i.lk.Lock() defer i.lk.Unlock() diff --git a/stores/interface.go b/stores/interface.go index 6fd4a7ad7..836705f40 100644 --- a/stores/interface.go +++ b/stores/interface.go @@ -2,10 +2,7 @@ package stores import ( "context" - "syscall" - - "golang.org/x/xerrors" - + "github.com/filecoin-project/sector-storage/fsutil" "github.com/filecoin-project/specs-actors/actors/abi" ) @@ -34,24 +31,5 @@ type Store interface { // move sectors into storage MoveStorage(ctx context.Context, s abi.SectorID, spt abi.RegisteredSealProof, types SectorFileType) error - FsStat(ctx context.Context, id ID) (FsStat, error) -} - -func Stat(path string) (FsStat, error) { - var stat syscall.Statfs_t - if err := syscall.Statfs(path, &stat); err != nil { - return FsStat{}, xerrors.Errorf("statfs: %w", err) - } - - return FsStat{ - Capacity: 
int64(stat.Blocks) * stat.Bsize, - Available: int64(stat.Bavail) * stat.Bsize, - }, nil -} - -type FsStat struct { - Capacity int64 - Available int64 // Available to use for sector storage - Used int64 - Reserved int64 + FsStat(ctx context.Context, id ID) (fsutil.FsStat, error) } diff --git a/stores/local.go b/stores/local.go index cf52540ad..cbc9dbae1 100644 --- a/stores/local.go +++ b/stores/local.go @@ -3,6 +3,7 @@ package stores import ( "context" "encoding/json" + "github.com/filecoin-project/sector-storage/fsutil" "io/ioutil" "math/bits" "math/rand" @@ -48,7 +49,7 @@ type LocalStorage interface { GetStorage() (StorageConfig, error) SetStorage(func(*StorageConfig)) error - Stat(path string) (FsStat, error) + Stat(path string) (fsutil.FsStat, error) DiskUsage(path string) (int64, error) // returns real disk usage for a file/directory } @@ -73,10 +74,10 @@ type path struct { reservations map[abi.SectorID]SectorFileType } -func (p *path) stat(ls LocalStorage) (FsStat, error) { +func (p *path) stat(ls LocalStorage) (fsutil.FsStat, error) { stat, err := ls.Stat(p.local) if err != nil { - return FsStat{}, err + return fsutil.FsStat{}, err } stat.Reserved = p.reserved @@ -557,13 +558,13 @@ func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, spt abi.Regist var errPathNotFound = xerrors.Errorf("fsstat: path not found") -func (st *Local) FsStat(ctx context.Context, id ID) (FsStat, error) { +func (st *Local) FsStat(ctx context.Context, id ID) (fsutil.FsStat, error) { st.localLk.RLock() defer st.localLk.RUnlock() p, ok := st.paths[id] if !ok { - return FsStat{}, errPathNotFound + return fsutil.FsStat{}, errPathNotFound } return p.stat(st.localStorage) diff --git a/stores/local_test.go b/stores/local_test.go index e748d061b..56ac7c020 100644 --- a/stores/local_test.go +++ b/stores/local_test.go @@ -3,6 +3,7 @@ package stores import ( "context" "encoding/json" + "github.com/filecoin-project/sector-storage/fsutil" "github.com/google/uuid" "io/ioutil" "os" @@ -32,11 +33,10 @@ func (t *TestingLocalStorage) SetStorage(f func(*StorageConfig)) error { return nil } -func (t *TestingLocalStorage) Stat(path string) (FsStat, error) { - return FsStat{ +func (t *TestingLocalStorage) Stat(path string) (fsutil.FsStat, error) { + return fsutil.FsStat{ Capacity: pathSize, Available: pathSize, - Used: 0, }, nil } diff --git a/stores/remote.go b/stores/remote.go index 30fe3abf9..c78f026f4 100644 --- a/stores/remote.go +++ b/stores/remote.go @@ -3,6 +3,7 @@ package stores import ( "context" "encoding/json" + "github.com/filecoin-project/sector-storage/fsutil" "io/ioutil" "math/bits" "mime" @@ -270,7 +271,7 @@ func (r *Remote) deleteFromRemote(ctx context.Context, url string) error { return nil } -func (r *Remote) FsStat(ctx context.Context, id ID) (FsStat, error) { +func (r *Remote) FsStat(ctx context.Context, id ID) (fsutil.FsStat, error) { st, err := r.local.FsStat(ctx, id) switch err { case nil: @@ -278,53 +279,53 @@ func (r *Remote) FsStat(ctx context.Context, id ID) (FsStat, error) { case errPathNotFound: break default: - return FsStat{}, xerrors.Errorf("local stat: %w", err) + return fsutil.FsStat{}, xerrors.Errorf("local stat: %w", err) } si, err := r.index.StorageInfo(ctx, id) if err != nil { - return FsStat{}, xerrors.Errorf("getting remote storage info: %w", err) + return fsutil.FsStat{}, xerrors.Errorf("getting remote storage info: %w", err) } if len(si.URLs) == 0 { - return FsStat{}, xerrors.Errorf("no known URLs for remote storage %s", id) + return fsutil.FsStat{}, xerrors.Errorf("no known 
URLs for remote storage %s", id) } rl, err := url.Parse(si.URLs[0]) if err != nil { - return FsStat{}, xerrors.Errorf("failed to parse url: %w", err) + return fsutil.FsStat{}, xerrors.Errorf("failed to parse url: %w", err) } rl.Path = gopath.Join(rl.Path, "stat", string(id)) req, err := http.NewRequest("GET", rl.String(), nil) if err != nil { - return FsStat{}, xerrors.Errorf("request: %w", err) + return fsutil.FsStat{}, xerrors.Errorf("request: %w", err) } req.Header = r.auth req = req.WithContext(ctx) resp, err := http.DefaultClient.Do(req) if err != nil { - return FsStat{}, xerrors.Errorf("do request: %w", err) + return fsutil.FsStat{}, xerrors.Errorf("do request: %w", err) } switch resp.StatusCode { case 200: break case 404: - return FsStat{}, errPathNotFound + return fsutil.FsStat{}, errPathNotFound case 500: b, err := ioutil.ReadAll(resp.Body) if err != nil { - return FsStat{}, xerrors.Errorf("fsstat: got http 500, then failed to read the error: %w", err) + return fsutil.FsStat{}, xerrors.Errorf("fsstat: got http 500, then failed to read the error: %w", err) } - return FsStat{}, xerrors.Errorf("fsstat: got http 500: %s", string(b)) + return fsutil.FsStat{}, xerrors.Errorf("fsstat: got http 500: %s", string(b)) } - var out FsStat + var out fsutil.FsStat if err := json.NewDecoder(resp.Body).Decode(&out); err != nil { - return FsStat{}, xerrors.Errorf("decoding fsstat: %w", err) + return fsutil.FsStat{}, xerrors.Errorf("decoding fsstat: %w", err) } defer resp.Body.Close() From c0a242a1eb664dbbc32e6454c26a4890eb95d5dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Wed, 8 Jul 2020 17:09:35 +0200 Subject: [PATCH 11/51] fsutil: FileSize util --- fsutil/filesize_unix.go | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 fsutil/filesize_unix.go diff --git a/fsutil/filesize_unix.go b/fsutil/filesize_unix.go new file mode 100644 index 000000000..e45ccca17 --- /dev/null +++ b/fsutil/filesize_unix.go @@ -0,0 +1,25 @@ +package fsutil + +import ( + "syscall" + + "golang.org/x/xerrors" +) + +type SizeInfo struct { + OnDisk int64 +} + +// FileSize returns bytes used by a file on disk +func FileSize(path string) (SizeInfo, error) { + var stat syscall.Stat_t + if err := syscall.Stat(path, &stat); err != nil { + return SizeInfo{}, xerrors.Errorf("stat: %w", err) + } + + // NOTE: stat.Blocks is in 512B blocks, NOT in stat.Blksize + // See https://www.gnu.org/software/libc/manual/html_node/Attribute-Meanings.html + return SizeInfo{ + stat.Blocks * 512, + }, nil +} \ No newline at end of file From 56570a22005f77a5eb744109a3bd845c3a5def0f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Wed, 8 Jul 2020 17:39:58 +0200 Subject: [PATCH 12/51] mock: Implemet ReleaseUnsealed correctly --- mock/mock.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mock/mock.go b/mock/mock.go index cbc3a1f99..7c9ed57f0 100644 --- a/mock/mock.go +++ b/mock/mock.go @@ -320,7 +320,7 @@ func (mgr *SectorMgr) FinalizeSector(context.Context, abi.SectorID, []storage.Ra } func (mgr *SectorMgr) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error { - panic("implement me") + return nil } func (mgr *SectorMgr) Remove(ctx context.Context, sector abi.SectorID) error { From 9af64c9b217e6b118ec29a669be7fb455bfe54e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Wed, 8 Jul 2020 19:51:26 +0200 Subject: [PATCH 13/51] ffiwrapper: Fix UnsealPiece --- ffiwrapper/sealer_cgo.go | 5 ++++- 
fsutil/filesize_unix.go | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/ffiwrapper/sealer_cgo.go b/ffiwrapper/sealer_cgo.go index c766f5555..88218921c 100644 --- a/ffiwrapper/sealer_cgo.go +++ b/ffiwrapper/sealer_cgo.go @@ -254,7 +254,10 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s defer sealed.Close() var at, nextat abi.PaddedPieceSize - for { + first := true + for first || toUnseal.HasNext() { + first = false + piece, err := toUnseal.NextRun() if err != nil { return xerrors.Errorf("getting next range to unseal: %w", err) diff --git a/fsutil/filesize_unix.go b/fsutil/filesize_unix.go index e45ccca17..d596e4be7 100644 --- a/fsutil/filesize_unix.go +++ b/fsutil/filesize_unix.go @@ -22,4 +22,4 @@ func FileSize(path string) (SizeInfo, error) { return SizeInfo{ stat.Blocks * 512, }, nil -} \ No newline at end of file +} From ac7dc28cfb2c1439e40cae38e4ed5757e696b39a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 9 Jul 2020 12:58:52 +0200 Subject: [PATCH 14/51] sched: WIP Windows --- sched.go | 436 ++++++++++++++++++++++++++++--------------------------- 1 file changed, 222 insertions(+), 214 deletions(-) diff --git a/sched.go b/sched.go index 9fddd7bd9..af6981b08 100644 --- a/sched.go +++ b/sched.go @@ -3,11 +3,11 @@ package sectorstorage import ( "container/heap" "context" + "math/rand" "sort" "sync" "time" - "github.com/hashicorp/go-multierror" "golang.org/x/xerrors" "github.com/filecoin-project/specs-actors/actors/abi" @@ -20,6 +20,11 @@ type schedPrioCtxKey int var SchedPriorityKey schedPrioCtxKey var DefaultSchedPriority = 0 +var SelectorTimeout = 5 * time.Second + +var ( + SchedWindows = 2 +) func getPriority(ctx context.Context) int { sp := ctx.Value(SchedPriorityKey) @@ -56,11 +61,63 @@ type scheduler struct { watchClosing chan WorkerID workerClosing chan WorkerID - schedule chan *workerRequest - workerFree chan WorkerID - closing chan struct{} + schedule chan *workerRequest + windowRequests chan *schedWindowRequest - schedQueue *requestQueue + // owned by the sh.runSched goroutine + schedQueue *requestQueue + openWindows []*schedWindowRequest + + closing chan struct{} +} + +type workerHandle struct { + w Worker + + info storiface.WorkerInfo + + preparing *activeResources + active *activeResources +} + +type schedWindowRequest struct { + worker WorkerID + + done chan *schedWindow +} + +type schedWindow struct { + worker WorkerID + allocated *activeResources + todo []*workerRequest +} + +type activeResources struct { + memUsedMin uint64 + memUsedMax uint64 + gpuUsed bool + cpuUse uint64 + + cond *sync.Cond +} + +type workerRequest struct { + sector abi.SectorID + taskType sealtasks.TaskType + priority int // larger values more important + sel WorkerSelector + + prepare WorkerAction + work WorkerAction + + index int // The index of the item in the heap. 
+
+	ret chan<- workerResponse
+	ctx context.Context
+}
+
+type workerResponse struct {
+	err error
+}
 
 func newScheduler(spt abi.RegisteredSealProof) *scheduler {
@@ -75,9 +132,9 @@ func newScheduler(spt abi.RegisteredSealProof) *scheduler {
 		watchClosing:  make(chan WorkerID),
 		workerClosing: make(chan WorkerID),
 
-		schedule:   make(chan *workerRequest),
-		workerFree: make(chan WorkerID),
-		closing:    make(chan struct{}),
+		schedule:       make(chan *workerRequest),
+		windowRequests: make(chan *schedWindowRequest),
+		closing:        make(chan struct{}),
 
 		schedQueue: &requestQueue{},
 	}
@@ -115,25 +171,6 @@ func (sh *scheduler) Schedule(ctx context.Context, sector abi.SectorID, taskType
 	}
 }
 
-type workerRequest struct {
-	sector   abi.SectorID
-	taskType sealtasks.TaskType
-	priority int // larger values more important
-	sel      WorkerSelector
-
-	prepare WorkerAction
-	work    WorkerAction
-
-	index int // The index of the item in the heap.
-
-	ret chan<- workerResponse
-	ctx context.Context
-}
-
-type workerResponse struct {
-	err error
-}
-
 func (r *workerRequest) respond(err error) {
 	select {
 	case r.ret <- workerResponse{err: err}:
@@ -142,46 +179,25 @@ func (r *workerRequest) respond(err error) {
 	}
 }
 
-type activeResources struct {
-	memUsedMin uint64
-	memUsedMax uint64
-	gpuUsed    bool
-	cpuUse     uint64
-
-	cond *sync.Cond
-}
-
-type workerHandle struct {
-	w Worker
-
-	info storiface.WorkerInfo
-
-	preparing *activeResources
-	active    *activeResources
-}
-
 func (sh *scheduler) runSched() {
 	go sh.runWorkerWatcher()
 
 	for {
 		select {
 		case w := <-sh.newWorkers:
-			sh.schedNewWorker(w)
-		case wid := <-sh.workerClosing:
-			sh.schedDropWorker(wid)
-		case req := <-sh.schedule:
-			scheduled, err := sh.maybeSchedRequest(req)
-			if err != nil {
-				req.respond(err)
-				continue
-			}
-			if scheduled {
-				continue
-			}
+			sh.newWorker(w)
+
+		case wid := <-sh.workerClosing:
+			sh.dropWorker(wid)
+
+		case req := <-sh.schedule:
 			heap.Push(sh.schedQueue, req)
+			sh.trySched()
+
+		case req := <-sh.windowRequests:
+			sh.openWindows = append(sh.openWindows, req)
+			sh.trySched()
+
 		case <-sh.closing:
 			sh.schedClose()
 			return
@@ -189,169 +205,161 @@
 	}
 }
 
-func (sh *scheduler) onWorkerFreed(wid WorkerID) {
-	sh.workersLk.Lock()
-	w, ok := sh.workers[wid]
-	sh.workersLk.Unlock()
-	if !ok {
-		log.Warnf("onWorkerFreed on invalid worker %d", wid)
+func (sh *scheduler) trySched() {
+	/*
+		This assigns tasks to workers based on:
+		- Task priority (achieved by handling sh.schedQueue in order, since it's already sorted by priority)
+		- Worker resource availability
+		- Task-specified worker preference (acceptableWindows array below sorted by this preference)
+		- Window request age
+
+		1. For each task in the schedQueue find windows which can handle them
+		1.1. Create list of windows capable of handling a task
+		1.2. Sort windows according to task selector preferences
+		2. Going through schedQueue again, assign task to first acceptable window
+			with resources available
+		3. Submit windows with scheduled tasks to workers
Submit windows with scheduled tasks to workers + + */ + + windows := make([]schedWindow, len(sh.openWindows)) + acceptableWindows := make([][]int, sh.schedQueue.Len()) + + // Step 1 + for sqi := 0; sqi < sh.schedQueue.Len(); sqi++ { + task := (*sh.schedQueue)[sqi] + needRes := ResourceTable[task.taskType][sh.spt] + + for wnd, windowRequest := range sh.openWindows { + worker := sh.workers[windowRequest.worker] + + // TODO: allow bigger windows + if !windows[wnd].allocated.canHandleRequest(needRes, windowRequest.worker, worker.info.Resources) { + continue + } + + ok, err := task.sel.Ok(task.ctx, task.taskType, sh.spt, worker) + if err != nil { + log.Errorf("trySched(1) req.sel.Ok error: %+v", err) + continue + } + + if !ok { + continue + } + + acceptableWindows[sqi] = append(acceptableWindows[sqi], wnd) + } + + if len(acceptableWindows[sqi]) == 0 { + continue + } + + // Pick best worker (shuffle in case some workers are equally as good) + rand.Shuffle(len(acceptableWindows[sqi]), func(i, j int) { + acceptableWindows[sqi][i], acceptableWindows[sqi][j] = acceptableWindows[sqi][j], acceptableWindows[sqi][i] + }) + sort.SliceStable(acceptableWindows, func(i, j int) bool { + wii := sh.openWindows[acceptableWindows[sqi][i]].worker + wji := sh.openWindows[acceptableWindows[sqi][j]].worker + + if wii == wji { + // for the same worker prefer older windows + return acceptableWindows[sqi][i] < acceptableWindows[sqi][j] + } + + wi := sh.workers[wii] + wj := sh.workers[wji] + + rpcCtx, cancel := context.WithTimeout(task.ctx, SelectorTimeout) + defer cancel() + + r, err := task.sel.Cmp(rpcCtx, task.taskType, wi, wj) + if err != nil { + log.Error("selecting best worker: %s", err) + } + return r + }) + } + + // Step 2 + scheduled := 0 + + for sqi := 0; sqi < sh.schedQueue.Len(); sqi++ { + task := (*sh.schedQueue)[sqi] + needRes := ResourceTable[task.taskType][sh.spt] + + selectedWindow := -1 + for _, wnd := range acceptableWindows[sqi+scheduled] { + wid := sh.openWindows[wnd].worker + wr := sh.workers[wid].info.Resources + + // TODO: allow bigger windows + if windows[wnd].allocated.canHandleRequest(needRes, wid, wr) { + continue + } + + windows[wnd].allocated.add(wr, needRes) + + selectedWindow = wnd + break + } + + windows[selectedWindow].todo = append(windows[selectedWindow].todo, task) + + heap.Remove(sh.schedQueue, sqi) + sqi-- + scheduled++ + } + + // Step 3 + + if scheduled == 0 { return } - for i := 0; i < sh.schedQueue.Len(); i++ { - req := (*sh.schedQueue)[i] - - ok, err := req.sel.Ok(req.ctx, req.taskType, sh.spt, w) - if err != nil { - log.Errorf("onWorkerFreed req.sel.Ok error: %+v", err) + scheduledWindows := map[int]struct{}{} + for wnd, window := range windows { + if len(window.todo) == 0 { + // Nothing scheduled here, keep the window open continue } - if !ok { - continue - } + scheduledWindows[wnd] = struct{}{} - scheduled, err := sh.maybeSchedRequest(req) - if err != nil { - req.respond(err) - continue - } - - if scheduled { - heap.Remove(sh.schedQueue, i) - i-- - continue + select { + case sh.openWindows[wnd].done <- &window: + default: + log.Error("expected sh.openWindows[wnd].done to be buffered") } } + + // Rewrite sh.openWindows array, removing scheduled windows + newOpenWindows := make([]*schedWindowRequest, 0, len(sh.openWindows)-len(scheduledWindows)) + for wnd, window := range sh.openWindows { + if _, scheduled := scheduledWindows[wnd]; !scheduled { + // keep unscheduled windows open + continue + } + + newOpenWindows = append(newOpenWindows, window) + } + + sh.openWindows = 
newOpenWindows } -var selectorTimeout = 5 * time.Second - -func (sh *scheduler) maybeSchedRequest(req *workerRequest) (bool, error) { - sh.workersLk.Lock() - defer sh.workersLk.Unlock() - - tried := 0 - var acceptable []WorkerID - - needRes := ResourceTable[req.taskType][sh.spt] - - for wid, worker := range sh.workers { - rpcCtx, cancel := context.WithTimeout(req.ctx, selectorTimeout) - ok, err := req.sel.Ok(rpcCtx, req.taskType, sh.spt, worker) - cancel() - - if err != nil { - return false, err - } - - if !ok { - continue - } - tried++ - - if !canHandleRequest(needRes, wid, worker.info.Resources, worker.preparing) { - continue - } - - acceptable = append(acceptable, wid) - } - - if len(acceptable) > 0 { - { - var serr error - - sort.SliceStable(acceptable, func(i, j int) bool { - rpcCtx, cancel := context.WithTimeout(req.ctx, selectorTimeout) - defer cancel() - r, err := req.sel.Cmp(rpcCtx, req.taskType, sh.workers[acceptable[i]], sh.workers[acceptable[j]]) - - if err != nil { - serr = multierror.Append(serr, err) - } - return r - }) - - if serr != nil { - return false, xerrors.Errorf("error(s) selecting best worker: %w", serr) - } - } - - return true, sh.assignWorker(acceptable[0], sh.workers[acceptable[0]], req) - } - - if tried == 0 { - return false, xerrors.New("maybeSchedRequest didn't find any good workers") - } - - return false, nil // put in waiting queue -} - -func (sh *scheduler) assignWorker(wid WorkerID, w *workerHandle, req *workerRequest) error { - needRes := ResourceTable[req.taskType][sh.spt] - - w.preparing.add(w.info.Resources, needRes) +func (sh *scheduler) runWorker(wid WorkerID) { + w := sh.workers[wid] go func() { - err := req.prepare(req.ctx, w.w) - sh.workersLk.Lock() + for { - if err != nil { - w.preparing.free(w.info.Resources, needRes) - sh.workersLk.Unlock() - - select { - case sh.workerFree <- wid: - case <-sh.closing: - log.Warnf("scheduler closed while sending response (prepare error: %+v)", err) - } - - select { - case req.ret <- workerResponse{err: err}: - case <-req.ctx.Done(): - log.Warnf("request got cancelled before we could respond (prepare error: %+v)", err) - case <-sh.closing: - log.Warnf("scheduler closed while sending response (prepare error: %+v)", err) - } - return - } - - err = w.active.withResources(wid, w.info.Resources, needRes, &sh.workersLk, func() error { - w.preparing.free(w.info.Resources, needRes) - sh.workersLk.Unlock() - defer sh.workersLk.Lock() // we MUST return locked from this function - - select { - case sh.workerFree <- wid: - case <-sh.closing: - } - - err = req.work(req.ctx, w.w) - - select { - case req.ret <- workerResponse{err: err}: - case <-req.ctx.Done(): - log.Warnf("request got cancelled before we could respond") - case <-sh.closing: - log.Warnf("scheduler closed while sending response") - } - - return nil - }) - - sh.workersLk.Unlock() - - // This error should always be nil, since nothing is setting it, but just to be safe: - if err != nil { - log.Errorf("error executing worker (withResources): %+v", err) } }() - - return nil } func (a *activeResources) withResources(id WorkerID, wr storiface.WorkerResources, r Resources, locker sync.Locker, cb func() error) error { - for !canHandleRequest(r, id, wr, a) { + for !a.canHandleRequest(r, id, wr) { if a.cond == nil { a.cond = sync.NewCond(locker) } @@ -396,16 +404,16 @@ func (a *activeResources) free(wr storiface.WorkerResources, r Resources) { a.memUsedMax -= r.MaxMemory } -func canHandleRequest(needRes Resources, wid WorkerID, res storiface.WorkerResources, active 
*activeResources) bool { +func (a *activeResources) canHandleRequest(needRes Resources, wid WorkerID, res storiface.WorkerResources) bool { // TODO: dedupe needRes.BaseMinMemory per task type (don't add if that task is already running) - minNeedMem := res.MemReserved + active.memUsedMin + needRes.MinMemory + needRes.BaseMinMemory + minNeedMem := res.MemReserved + a.memUsedMin + needRes.MinMemory + needRes.BaseMinMemory if minNeedMem > res.MemPhysical { log.Debugf("sched: not scheduling on worker %d; not enough physical memory - need: %dM, have %dM", wid, minNeedMem/mib, res.MemPhysical/mib) return false } - maxNeedMem := res.MemReserved + active.memUsedMax + needRes.MaxMemory + needRes.BaseMinMemory + maxNeedMem := res.MemReserved + a.memUsedMax + needRes.MaxMemory + needRes.BaseMinMemory if maxNeedMem > res.MemSwap+res.MemPhysical { log.Debugf("sched: not scheduling on worker %d; not enough virtual memory - need: %dM, have %dM", wid, maxNeedMem/mib, (res.MemSwap+res.MemPhysical)/mib) @@ -413,19 +421,19 @@ func canHandleRequest(needRes Resources, wid WorkerID, res storiface.WorkerResou } if needRes.MultiThread() { - if active.cpuUse > 0 { - log.Debugf("sched: not scheduling on worker %d; multicore process needs %d threads, %d in use, target %d", wid, res.CPUs, active.cpuUse, res.CPUs) + if a.cpuUse > 0 { + log.Debugf("sched: not scheduling on worker %d; multicore process needs %d threads, %d in use, target %d", wid, res.CPUs, a.cpuUse, res.CPUs) return false } } else { - if active.cpuUse+uint64(needRes.Threads) > res.CPUs { - log.Debugf("sched: not scheduling on worker %d; not enough threads, need %d, %d in use, target %d", wid, needRes.Threads, active.cpuUse, res.CPUs) + if a.cpuUse+uint64(needRes.Threads) > res.CPUs { + log.Debugf("sched: not scheduling on worker %d; not enough threads, need %d, %d in use, target %d", wid, needRes.Threads, a.cpuUse, res.CPUs) return false } } if len(res.GPUs) > 0 && needRes.CanGPU { - if active.gpuUsed { + if a.gpuUsed { log.Debugf("sched: not scheduling on worker %d; GPU in use", wid) return false } @@ -453,7 +461,7 @@ func (a *activeResources) utilization(wr storiface.WorkerResources) float64 { return max } -func (sh *scheduler) schedNewWorker(w *workerHandle) { +func (sh *scheduler) newWorker(w *workerHandle) { sh.workersLk.Lock() id := sh.nextWorker @@ -468,10 +476,10 @@ func (sh *scheduler) schedNewWorker(w *workerHandle) { return } - sh.onWorkerFreed(id) + sh.runWorker(id) } -func (sh *scheduler) schedDropWorker(wid WorkerID) { +func (sh *scheduler) dropWorker(wid WorkerID) { sh.workersLk.Lock() defer sh.workersLk.Unlock() From da96f06202c7dd6d4482396fcf9f2e0e22287a16 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 9 Jul 2020 13:49:01 +0200 Subject: [PATCH 15/51] sched: implement runWorker --- sched.go | 222 ++++++++++++++++++++++++++------------------- sched_resources.go | 110 ++++++++++++++++++++++ 2 files changed, 238 insertions(+), 94 deletions(-) create mode 100644 sched_resources.go diff --git a/sched.go b/sched.go index af6981b08..966bf2c46 100644 --- a/sched.go +++ b/sched.go @@ -349,116 +349,150 @@ func (sh *scheduler) trySched() { } func (sh *scheduler) runWorker(wid WorkerID) { - w := sh.workers[wid] - go func() { - for { + worker := sh.workers[wid] + scheduledWindows := make(chan *schedWindow, SchedWindows) + taskDone := make(chan struct{}, 1) + windowsRequested := 0 + var activeWindows []*schedWindow + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + workerClosing, err := 
worker.w.Closing(ctx) + if err != nil { + return + } + + defer func() { + log.Warnw("Worker closing", "workerid", wid) + + // TODO: close / return all queued tasks + }() + + for { + // ask for more windows if we need them + for ; windowsRequested < SchedWindows; windowsRequested++ { + select { + case sh.windowRequests <- &schedWindowRequest{ + worker: wid, + done: scheduledWindows, + }: + case <-sh.closing: + return + case <-workerClosing: + return + } + } + + select { + case w := <-scheduledWindows: + activeWindows = append(activeWindows, w) + case <-taskDone: + case <-sh.closing: + return + case <-workerClosing: + return + } + + assignLoop: + // process windows in order + for len(activeWindows) > 0 { + // process tasks within a window in order + for len(activeWindows[0].todo) > 0 { + todo := activeWindows[0].todo[0] + needRes := ResourceTable[todo.taskType][sh.spt] + + sh.workersLk.Lock() + ok := worker.preparing.canHandleRequest(needRes, wid, worker.info.Resources) + if !ok { + sh.workersLk.Unlock() + break assignLoop + } + + err := sh.assignWorker(taskDone, wid, worker, todo) + sh.workersLk.Unlock() + + if err != nil { + log.Error("assignWorker error: %+v", err) + go todo.respond(xerrors.Errorf("assignWorker error: %w", err)) + } + + activeWindows[0].todo = activeWindows[0].todo[1:] + } + + copy(activeWindows, activeWindows[1:]) + activeWindows[len(activeWindows)-1] = nil + activeWindows = activeWindows[:len(activeWindows)-1] + + windowsRequested-- + } } }() } -func (a *activeResources) withResources(id WorkerID, wr storiface.WorkerResources, r Resources, locker sync.Locker, cb func() error) error { - for !a.canHandleRequest(r, id, wr) { - if a.cond == nil { - a.cond = sync.NewCond(locker) +func (sh *scheduler) assignWorker(taskDone chan struct{}, wid WorkerID, w *workerHandle, req *workerRequest) error { + needRes := ResourceTable[req.taskType][sh.spt] + + w.preparing.add(w.info.Resources, needRes) + + go func() { + err := req.prepare(req.ctx, w.w) + sh.workersLk.Lock() + + if err != nil { + w.preparing.free(w.info.Resources, needRes) + sh.workersLk.Unlock() + + select { + case taskDone <- struct{}{}: + case <-sh.closing: + log.Warnf("scheduler closed while sending response (prepare error: %+v)", err) + } + + select { + case req.ret <- workerResponse{err: err}: + case <-req.ctx.Done(): + log.Warnf("request got cancelled before we could respond (prepare error: %+v)", err) + case <-sh.closing: + log.Warnf("scheduler closed while sending response (prepare error: %+v)", err) + } + return } - a.cond.Wait() - } - a.add(wr, r) + err = w.active.withResources(wid, w.info.Resources, needRes, &sh.workersLk, func() error { + w.preparing.free(w.info.Resources, needRes) + sh.workersLk.Unlock() + defer sh.workersLk.Lock() // we MUST return locked from this function - err := cb() + select { + case taskDone <- struct{}{}: + case <-sh.closing: + } - a.free(wr, r) - if a.cond != nil { - a.cond.Broadcast() - } + err = req.work(req.ctx, w.w) - return err -} + select { + case req.ret <- workerResponse{err: err}: + case <-req.ctx.Done(): + log.Warnf("request got cancelled before we could respond") + case <-sh.closing: + log.Warnf("scheduler closed while sending response") + } -func (a *activeResources) add(wr storiface.WorkerResources, r Resources) { - a.gpuUsed = r.CanGPU - if r.MultiThread() { - a.cpuUse += wr.CPUs - } else { - a.cpuUse += uint64(r.Threads) - } + return nil + }) - a.memUsedMin += r.MinMemory - a.memUsedMax += r.MaxMemory -} + sh.workersLk.Unlock() -func (a *activeResources) free(wr 
storiface.WorkerResources, r Resources) { - if r.CanGPU { - a.gpuUsed = false - } - if r.MultiThread() { - a.cpuUse -= wr.CPUs - } else { - a.cpuUse -= uint64(r.Threads) - } - - a.memUsedMin -= r.MinMemory - a.memUsedMax -= r.MaxMemory -} - -func (a *activeResources) canHandleRequest(needRes Resources, wid WorkerID, res storiface.WorkerResources) bool { - - // TODO: dedupe needRes.BaseMinMemory per task type (don't add if that task is already running) - minNeedMem := res.MemReserved + a.memUsedMin + needRes.MinMemory + needRes.BaseMinMemory - if minNeedMem > res.MemPhysical { - log.Debugf("sched: not scheduling on worker %d; not enough physical memory - need: %dM, have %dM", wid, minNeedMem/mib, res.MemPhysical/mib) - return false - } - - maxNeedMem := res.MemReserved + a.memUsedMax + needRes.MaxMemory + needRes.BaseMinMemory - - if maxNeedMem > res.MemSwap+res.MemPhysical { - log.Debugf("sched: not scheduling on worker %d; not enough virtual memory - need: %dM, have %dM", wid, maxNeedMem/mib, (res.MemSwap+res.MemPhysical)/mib) - return false - } - - if needRes.MultiThread() { - if a.cpuUse > 0 { - log.Debugf("sched: not scheduling on worker %d; multicore process needs %d threads, %d in use, target %d", wid, res.CPUs, a.cpuUse, res.CPUs) - return false + // This error should always be nil, since nothing is setting it, but just to be safe: + if err != nil { + log.Errorf("error executing worker (withResources): %+v", err) } - } else { - if a.cpuUse+uint64(needRes.Threads) > res.CPUs { - log.Debugf("sched: not scheduling on worker %d; not enough threads, need %d, %d in use, target %d", wid, needRes.Threads, a.cpuUse, res.CPUs) - return false - } - } + }() - if len(res.GPUs) > 0 && needRes.CanGPU { - if a.gpuUsed { - log.Debugf("sched: not scheduling on worker %d; GPU in use", wid) - return false - } - } - - return true -} - -func (a *activeResources) utilization(wr storiface.WorkerResources) float64 { - var max float64 - - cpu := float64(a.cpuUse) / float64(wr.CPUs) - max = cpu - - memMin := float64(a.memUsedMin+wr.MemReserved) / float64(wr.MemPhysical) - if memMin > max { - max = memMin - } - - memMax := float64(a.memUsedMax+wr.MemReserved) / float64(wr.MemPhysical+wr.MemSwap) - if memMax > max { - max = memMax - } - - return max + return nil } func (sh *scheduler) newWorker(w *workerHandle) { diff --git a/sched_resources.go b/sched_resources.go new file mode 100644 index 000000000..0ba9d1f66 --- /dev/null +++ b/sched_resources.go @@ -0,0 +1,110 @@ +package sectorstorage + +import ( + "sync" + + "github.com/filecoin-project/sector-storage/storiface" +) + +func (a *activeResources) withResources(id WorkerID, wr storiface.WorkerResources, r Resources, locker sync.Locker, cb func() error) error { + for !a.canHandleRequest(r, id, wr) { + if a.cond == nil { + a.cond = sync.NewCond(locker) + } + a.cond.Wait() + } + + a.add(wr, r) + + err := cb() + + a.free(wr, r) + if a.cond != nil { + a.cond.Broadcast() + } + + return err +} + +func (a *activeResources) add(wr storiface.WorkerResources, r Resources) { + a.gpuUsed = r.CanGPU + if r.MultiThread() { + a.cpuUse += wr.CPUs + } else { + a.cpuUse += uint64(r.Threads) + } + + a.memUsedMin += r.MinMemory + a.memUsedMax += r.MaxMemory +} + +func (a *activeResources) free(wr storiface.WorkerResources, r Resources) { + if r.CanGPU { + a.gpuUsed = false + } + if r.MultiThread() { + a.cpuUse -= wr.CPUs + } else { + a.cpuUse -= uint64(r.Threads) + } + + a.memUsedMin -= r.MinMemory + a.memUsedMax -= r.MaxMemory +} + +func (a *activeResources) 
canHandleRequest(needRes Resources, wid WorkerID, res storiface.WorkerResources) bool { + + // TODO: dedupe needRes.BaseMinMemory per task type (don't add if that task is already running) + minNeedMem := res.MemReserved + a.memUsedMin + needRes.MinMemory + needRes.BaseMinMemory + if minNeedMem > res.MemPhysical { + log.Debugf("sched: not scheduling on worker %d; not enough physical memory - need: %dM, have %dM", wid, minNeedMem/mib, res.MemPhysical/mib) + return false + } + + maxNeedMem := res.MemReserved + a.memUsedMax + needRes.MaxMemory + needRes.BaseMinMemory + + if maxNeedMem > res.MemSwap+res.MemPhysical { + log.Debugf("sched: not scheduling on worker %d; not enough virtual memory - need: %dM, have %dM", wid, maxNeedMem/mib, (res.MemSwap+res.MemPhysical)/mib) + return false + } + + if needRes.MultiThread() { + if a.cpuUse > 0 { + log.Debugf("sched: not scheduling on worker %d; multicore process needs %d threads, %d in use, target %d", wid, res.CPUs, a.cpuUse, res.CPUs) + return false + } + } else { + if a.cpuUse+uint64(needRes.Threads) > res.CPUs { + log.Debugf("sched: not scheduling on worker %d; not enough threads, need %d, %d in use, target %d", wid, needRes.Threads, a.cpuUse, res.CPUs) + return false + } + } + + if len(res.GPUs) > 0 && needRes.CanGPU { + if a.gpuUsed { + log.Debugf("sched: not scheduling on worker %d; GPU in use", wid) + return false + } + } + + return true +} + +func (a *activeResources) utilization(wr storiface.WorkerResources) float64 { + var max float64 + + cpu := float64(a.cpuUse) / float64(wr.CPUs) + max = cpu + + memMin := float64(a.memUsedMin+wr.MemReserved) / float64(wr.MemPhysical) + if memMin > max { + max = memMin + } + + memMax := float64(a.memUsedMax+wr.MemReserved) / float64(wr.MemPhysical+wr.MemSwap) + if memMax > max { + max = memMax + } + + return max +} From 903731adaf924a89d3b0ae41fd64a0fcf030fee4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 9 Jul 2020 14:40:53 +0200 Subject: [PATCH 16/51] sched: Fix tests --- manager_test.go | 4 ++++ sched.go | 17 +++++++++++++---- stores/index.go | 4 ++-- stores/local.go | 1 + 4 files changed, 20 insertions(+), 6 deletions(-) diff --git a/manager_test.go b/manager_test.go index ae318b487..19d9e3895 100644 --- a/manager_test.go +++ b/manager_test.go @@ -22,6 +22,10 @@ import ( "github.com/filecoin-project/sector-storage/stores" ) +func init() { + logging.SetAllLoggers(logging.LevelDebug) +} + type testStorage stores.StorageConfig func newTestStorage(t *testing.T) *testStorage { diff --git a/sched.go b/sched.go index 966bf2c46..d1ec33884 100644 --- a/sched.go +++ b/sched.go @@ -88,7 +88,7 @@ type schedWindowRequest struct { type schedWindow struct { worker WorkerID - allocated *activeResources + allocated activeResources todo []*workerRequest } @@ -132,10 +132,12 @@ func newScheduler(spt abi.RegisteredSealProof) *scheduler { watchClosing: make(chan WorkerID), workerClosing: make(chan WorkerID), - schedule: make(chan *workerRequest), - closing: make(chan struct{}), + schedule: make(chan *workerRequest), + windowRequests: make(chan *schedWindowRequest), schedQueue: &requestQueue{}, + + closing: make(chan struct{}), } } @@ -295,7 +297,7 @@ func (sh *scheduler) trySched() { wr := sh.workers[wid].info.Resources // TODO: allow bigger windows - if windows[wnd].allocated.canHandleRequest(needRes, wid, wr) { + if !windows[wnd].allocated.canHandleRequest(needRes, wid, wr) { continue } @@ -305,6 +307,11 @@ func (sh *scheduler) trySched() { break } + if selectedWindow < 0 { + // all windows 
full + continue + } + windows[selectedWindow].todo = append(windows[selectedWindow].todo, task) heap.Remove(sh.schedQueue, sqi) @@ -327,6 +334,7 @@ func (sh *scheduler) trySched() { scheduledWindows[wnd] = struct{}{} + window := window // copy select { case sh.openWindows[wnd].done <- &window: default: @@ -390,6 +398,7 @@ func (sh *scheduler) runWorker(wid WorkerID) { case w := <-scheduledWindows: activeWindows = append(activeWindows, w) case <-taskDone: + log.Debugw("task done", "workerid", wid) case <-sh.closing: return case <-workerClosing: diff --git a/stores/index.go b/stores/index.go index 049e2dc20..fda973124 100644 --- a/stores/index.go +++ b/stores/index.go @@ -384,8 +384,8 @@ func (i *Index) StorageBestAlloc(ctx context.Context, allocate SectorFileType, s } sort.Slice(candidates, func(i, j int) bool { - iw := big.Mul(big.NewInt(int64(candidates[i].fsi.Available)), big.NewInt(int64(candidates[i].info.Weight))) - jw := big.Mul(big.NewInt(int64(candidates[j].fsi.Available)), big.NewInt(int64(candidates[j].info.Weight))) + iw := big.Mul(big.NewInt(candidates[i].fsi.Available), big.NewInt(int64(candidates[i].info.Weight))) + jw := big.Mul(big.NewInt(candidates[j].fsi.Available), big.NewInt(int64(candidates[j].info.Weight))) return iw.GreaterThan(jw) }) diff --git a/stores/local.go b/stores/local.go index 26b7ccb75..92b777307 100644 --- a/stores/local.go +++ b/stores/local.go @@ -13,6 +13,7 @@ import ( "golang.org/x/xerrors" + "github.com/filecoin-project/sector-storage/fsutil" "github.com/filecoin-project/specs-actors/actors/abi" ) From 45c1b268f1294088d936e0a639511fc2b9cdc120 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 9 Jul 2020 15:09:38 +0200 Subject: [PATCH 17/51] sched: Remove unused worker field --- sched.go | 1 - 1 file changed, 1 deletion(-) diff --git a/sched.go b/sched.go index d1ec33884..b038eff1d 100644 --- a/sched.go +++ b/sched.go @@ -87,7 +87,6 @@ type schedWindowRequest struct { } type schedWindow struct { - worker WorkerID allocated activeResources todo []*workerRequest } From 5c5fe09990830f4619fefc414f14fe219b068f3e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 9 Jul 2020 15:18:45 +0200 Subject: [PATCH 18/51] post-rebase fixes --- stores/index.go | 4 ++-- stores/local.go | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/stores/index.go b/stores/index.go index fda973124..049e2dc20 100644 --- a/stores/index.go +++ b/stores/index.go @@ -384,8 +384,8 @@ func (i *Index) StorageBestAlloc(ctx context.Context, allocate SectorFileType, s } sort.Slice(candidates, func(i, j int) bool { - iw := big.Mul(big.NewInt(candidates[i].fsi.Available), big.NewInt(int64(candidates[i].info.Weight))) - jw := big.Mul(big.NewInt(candidates[j].fsi.Available), big.NewInt(int64(candidates[j].info.Weight))) + iw := big.Mul(big.NewInt(int64(candidates[i].fsi.Available)), big.NewInt(int64(candidates[i].info.Weight))) + jw := big.Mul(big.NewInt(int64(candidates[j].fsi.Available)), big.NewInt(int64(candidates[j].info.Weight))) return iw.GreaterThan(jw) }) diff --git a/stores/local.go b/stores/local.go index 92b777307..26b7ccb75 100644 --- a/stores/local.go +++ b/stores/local.go @@ -13,7 +13,6 @@ import ( "golang.org/x/xerrors" - "github.com/filecoin-project/sector-storage/fsutil" "github.com/filecoin-project/specs-actors/actors/abi" ) From 7f115954fd7b977f59564cdbff7e2a61107d6de4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 9 Jul 2020 19:17:15 +0200 Subject: [PATCH 19/51] sched: More fixes 
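Note on the second hunk: after a scheduling pass, windows that received tasks
are consumed and their workers must request fresh ones, so only untouched
windows should survive the rewrite of sh.openWindows. A minimal sketch of the
intended filtering (the helper name is illustrative, not part of this patch):

func keepOpenWindows(open []*schedWindowRequest, consumed map[int]struct{}) []*schedWindowRequest {
	kept := make([]*schedWindowRequest, 0, len(open))
	for i, w := range open {
		if _, ok := consumed[i]; ok {
			// this window was handed to its worker and must be re-requested
			continue
		}
		kept = append(kept, w) // unscheduled windows stay open
	}
	return kept
}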
---
 sched.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/sched.go b/sched.go
index b038eff1d..d926e753a 100644
--- a/sched.go
+++ b/sched.go
@@ -260,7 +260,7 @@ func (sh *scheduler) trySched() {
 		rand.Shuffle(len(acceptableWindows[sqi]), func(i, j int) {
 			acceptableWindows[sqi][i], acceptableWindows[sqi][j] = acceptableWindows[sqi][j], acceptableWindows[sqi][i]
 		})
-		sort.SliceStable(acceptableWindows, func(i, j int) bool {
+		sort.SliceStable(acceptableWindows[sqi], func(i, j int) bool {
 			wii := sh.openWindows[acceptableWindows[sqi][i]].worker
 			wji := sh.openWindows[acceptableWindows[sqi][j]].worker
 
@@ -344,7 +344,7 @@ func (sh *scheduler) trySched() {
 	// Rewrite sh.openWindows array, removing scheduled windows
 	newOpenWindows := make([]*schedWindowRequest, 0, len(sh.openWindows)-len(scheduledWindows))
 	for wnd, window := range sh.openWindows {
-		if _, scheduled := scheduledWindows[wnd]; !scheduled {
+		if _, scheduled := scheduledWindows[wnd]; scheduled {
 			// keep unscheduled windows open
 			continue
 		}

From da96f06202c7dd6d4482396fcf9f2e0e22287a16 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C5=81ukasz=20Magiera?=
Date: Thu, 9 Jul 2020 19:29:05 +0200
Subject: [PATCH 20/51] Fix build on osx

---
 fsutil/dealloc_other.go | 2 +-
 fsutil/filesize_unix.go | 2 +-
 fsutil/statfs_unix.go   | 4 ++--
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/fsutil/dealloc_other.go b/fsutil/dealloc_other.go
index 3ae8973ff..4f8347951 100644
--- a/fsutil/dealloc_other.go
+++ b/fsutil/dealloc_other.go
@@ -13,5 +13,5 @@ var log = logging.Logger("fsutil")
 func Deallocate(file *os.File, offset int64, length int64) error {
 	log.Warnf("deallocating space not supported")
 
-	return err
+	return nil
 }
diff --git a/fsutil/filesize_unix.go b/fsutil/filesize_unix.go
index d596e4be7..41b62daf6 100644
--- a/fsutil/filesize_unix.go
+++ b/fsutil/filesize_unix.go
@@ -20,6 +20,6 @@ func FileSize(path string) (SizeInfo, error) {
 	// NOTE: stat.Blocks is in 512B blocks, NOT in stat.Blksize
 	// See https://www.gnu.org/software/libc/manual/html_node/Attribute-Meanings.html
 	return SizeInfo{
-		stat.Blocks * 512,
+		int64(stat.Blocks) * 512,
 	}, nil
 }
diff --git a/fsutil/statfs_unix.go b/fsutil/statfs_unix.go
index 3e69d5a8e..7fcb8af37 100644
--- a/fsutil/statfs_unix.go
+++ b/fsutil/statfs_unix.go
@@ -13,7 +13,7 @@ func Statfs(path string) (FsStat, error) {
 	}
 
 	return FsStat{
-		Capacity:  int64(stat.Blocks) * stat.Bsize,
-		Available: int64(stat.Bavail) * stat.Bsize,
+		Capacity:  int64(stat.Blocks) * int64(stat.Bsize),
+		Available: int64(stat.Bavail) * int64(stat.Bsize),
 	}, nil
 }

From 045e5977875f4a7ffb571b42401543b9d78bac80 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C5=81ukasz=20Magiera?=
Date: Sat, 11 Jul 2020 01:21:48 +0200
Subject: [PATCH 21/51] remove open windows when dropping workers

---
 sched.go | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/sched.go b/sched.go
index d926e753a..241440beb 100644
--- a/sched.go
+++ b/sched.go
@@ -528,6 +528,16 @@ func (sh *scheduler) dropWorker(wid WorkerID) {
 	w := sh.workers[wid]
 	delete(sh.workers, wid)
 
+	newWindows := make([]*schedWindowRequest, 0, len(sh.openWindows))
+	for _, window := range sh.openWindows {
+		if window.worker != wid {
+			newWindows = append(newWindows, window)
+		}
+	}
+	sh.openWindows = newWindows
+
+	// TODO: sync close worker goroutine
+
 	go func() {
 		if err := w.w.Close(); err != nil {
 			log.Warnf("closing worker %d: %+v", wid, err)
 		}

From 1d67dcfa3c156dc04ca09c8a4f8efe70522f72ef Mon Sep 17 00:00:00 2001
From: Aayush Rajasekaran
Date: Sat, 11 Jul 2020 21:30:16 
-0400 Subject: [PATCH 22/51] extract GetRequiredPadding --- ffiwrapper/sealer_cgo.go | 78 +++++++++++++++++++++++++--------------- 1 file changed, 49 insertions(+), 29 deletions(-) diff --git a/ffiwrapper/sealer_cgo.go b/ffiwrapper/sealer_cgo.go index 88218921c..5c6e40ef9 100644 --- a/ffiwrapper/sealer_cgo.go +++ b/ffiwrapper/sealer_cgo.go @@ -598,45 +598,65 @@ func GeneratePieceCIDFromFile(proofType abi.RegisteredSealProof, piece io.Reader return pieceCID, werr() } -func GenerateUnsealedCID(proofType abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) { - allPieces := make([]abi.PieceInfo, 0, len(pieces)) +func GetRequiredPadding(oldLength abi.PaddedPieceSize, newPieceLength abi.PaddedPieceSize) ([]abi.PaddedPieceSize, abi.PaddedPieceSize) { + + padPieces := make([]abi.PaddedPieceSize, 0) + + toFill := uint64(-oldLength % newPieceLength) + + n := bits.OnesCount64(toFill) var sum abi.PaddedPieceSize + for i := 0; i < n; i++ { + next := bits.TrailingZeros64(toFill) + psize := uint64(1) << uint(next) + toFill ^= psize - padTo := func(s abi.PaddedPieceSize, trailing bool) { - // pad remaining space with 0 CommPs - toFill := uint64(-sum % s) - if trailing && sum == 0 { - toFill = uint64(s) - } - - n := bits.OnesCount64(toFill) - for i := 0; i < n; i++ { - next := bits.TrailingZeros64(toFill) - psize := uint64(1) << uint(next) - toFill ^= psize - - padded := abi.PaddedPieceSize(psize) - allPieces = append(allPieces, abi.PieceInfo{ - Size: padded, - PieceCID: zerocomm.ZeroPieceCommitment(padded.Unpadded()), - }) - sum += padded - } + padded := abi.PaddedPieceSize(psize) + padPieces = append(padPieces, padded) + sum += padded } - for _, p := range pieces { - padTo(p.Size, false) - - allPieces = append(allPieces, p) - sum += p.Size - } + return padPieces, sum +} +func GenerateUnsealedCID(proofType abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) { ssize, err := proofType.SectorSize() if err != nil { return cid.Undef, err } - padTo(abi.PaddedPieceSize(ssize), true) + pssize := abi.PaddedPieceSize(ssize) + allPieces := make([]abi.PieceInfo, 0, len(pieces)) + if len(pieces) == 0 { + allPieces = append(allPieces, abi.PieceInfo{ + Size: pssize, + PieceCID: zerocomm.ZeroPieceCommitment(pssize.Unpadded()), + }) + } else { + var sum abi.PaddedPieceSize + + padTo := func(pads []abi.PaddedPieceSize) { + for _, p := range pads { + allPieces = append(allPieces, abi.PieceInfo{ + Size: p, + PieceCID: zerocomm.ZeroPieceCommitment(p.Unpadded()), + }) + + sum += p + } + } + + for _, p := range pieces { + ps, _ := GetRequiredPadding(sum, p.Size) + padTo(ps) + + allPieces = append(allPieces, p) + sum += p.Size + } + + ps, _ := GetRequiredPadding(sum, pssize) + padTo(ps) + } return ffi.GenerateUnsealedCID(proofType, allPieces) } From 4f8015b58a9da612f1a49479d9fa299459da787d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 16 Jul 2020 18:18:22 +0200 Subject: [PATCH 23/51] Correctly turn randomness into fr32 values --- ffiwrapper/verifier_cgo.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/ffiwrapper/verifier_cgo.go b/ffiwrapper/verifier_cgo.go index 60d56dddc..1fecf9598 100644 --- a/ffiwrapper/verifier_cgo.go +++ b/ffiwrapper/verifier_cgo.go @@ -15,7 +15,7 @@ import ( ) func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, error) { - randomness[31] = 0 // TODO: Not correct, fixme + randomness[31] &= 0x3f privsectors, skipped, 
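	// (Rationale, a hedged note rather than part of the original diff: fr32
	// values are 32-byte little-endian field elements for BLS12-381, whose
	// scalar modulus lies just below 2^255; masking byte 31 with 0x3f clears
	// the top two bits so the randomness always fits in the field, without
	// zeroing the whole high byte as the old `randomness[31] = 0` line did.)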
done, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWinningPoStProof) // TODO: FAULTS? if err != nil { return nil, err @@ -29,7 +29,7 @@ func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, } func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, []abi.SectorID, error) { - randomness[31] = 0 // TODO: Not correct, fixme + randomness[31] &= 0x3f privsectors, skipped, done, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWindowPoStProof) if err != nil { return nil, nil, xerrors.Errorf("gathering sector info: %w", err) @@ -98,7 +98,7 @@ func (proofVerifier) VerifySeal(info abi.SealVerifyInfo) (bool, error) { } func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info abi.WinningPoStVerifyInfo) (bool, error) { - info.Randomness[31] = 0 // TODO: Not correct, fixme + info.Randomness[31] &= 0x3f _, span := trace.StartSpan(ctx, "VerifyWinningPoSt") defer span.End() @@ -106,7 +106,7 @@ func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info abi.WinningPoSt } func (proofVerifier) VerifyWindowPoSt(ctx context.Context, info abi.WindowPoStVerifyInfo) (bool, error) { - info.Randomness[31] = 0 // TODO: Not correct, fixme + info.Randomness[31] &= 0x3f _, span := trace.StartSpan(ctx, "VerifyWindowPoSt") defer span.End() @@ -114,6 +114,6 @@ func (proofVerifier) VerifyWindowPoSt(ctx context.Context, info abi.WindowPoStVe } func (proofVerifier) GenerateWinningPoStSectorChallenge(ctx context.Context, proofType abi.RegisteredPoStProof, minerID abi.ActorID, randomness abi.PoStRandomness, eligibleSectorCount uint64) ([]uint64, error) { - randomness[31] = 0 // TODO: Not correct, fixme + randomness[31] &= 0x3f return ffi.GenerateWinningPoStSectorChallenge(proofType, minerID, randomness, eligibleSectorCount) } From d244749f1a64ba1e764538e0d719c831928f1f93 Mon Sep 17 00:00:00 2001 From: Peter Rabbitson Date: Thu, 16 Jul 2020 01:53:13 +0200 Subject: [PATCH 24/51] Bump fil-commcid and filecoin-ffi deps Propagates correct on-chain commX CIDs --- extern/filecoin-ffi | 2 +- ffiwrapper/sealer_cgo.go | 6 +++--- go.mod | 6 +++--- go.sum | 12 ++++++++++-- mock/mock.go | 6 +++--- zerocomm/zerocomm.go | 3 ++- 6 files changed, 22 insertions(+), 13 deletions(-) diff --git a/extern/filecoin-ffi b/extern/filecoin-ffi index 5342c7c97..cddc56607 160000 --- a/extern/filecoin-ffi +++ b/extern/filecoin-ffi @@ -1 +1 @@ -Subproject commit 5342c7c97d1a1df4650629d14f2823d52889edd9 +Subproject commit cddc56607e1d851ea6d09d49404bd7db70cb3c2e diff --git a/ffiwrapper/sealer_cgo.go b/ffiwrapper/sealer_cgo.go index 5c6e40ef9..416bfa70b 100644 --- a/ffiwrapper/sealer_cgo.go +++ b/ffiwrapper/sealer_cgo.go @@ -168,14 +168,14 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie return abi.PieceInfo{}, xerrors.Errorf("generate unsealed CID: %w", err) } - commp, err := commcid.CIDToDataCommitmentV1(pieceCID) - if err != nil { + // validate that the pieceCID was properly formed + if _, err := commcid.CIDToPieceCommitmentV1(pieceCID); err != nil { return abi.PieceInfo{}, err } return abi.PieceInfo{ Size: pieceSize.Padded(), - PieceCID: commcid.PieceCommitmentV1ToCID(commp), + PieceCID: pieceCID, }, nil } diff --git a/go.mod b/go.mod index 83424841f..994a99a3f 100644 --- a/go.mod +++ b/go.mod @@ -5,16 +5,16 @@ go 1.13 require ( github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e 
github.com/elastic/go-sysinfo v1.3.0 - github.com/filecoin-project/filecoin-ffi v0.0.0-20200326153646-e899cc1dd072 + github.com/filecoin-project/filecoin-ffi v0.30.4-0.20200716204036-cddc56607e1d github.com/filecoin-project/go-bitfield v0.0.4-0.20200703174658-f4a5758051a1 - github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5 + github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f github.com/filecoin-project/go-paramfetch v0.0.1 github.com/filecoin-project/specs-actors v0.6.1 github.com/filecoin-project/specs-storage v0.1.1-0.20200622113353-88a9704877ea github.com/google/uuid v1.1.1 github.com/gorilla/mux v1.7.4 github.com/hashicorp/go-multierror v1.0.0 - github.com/ipfs/go-cid v0.0.5 + github.com/ipfs/go-cid v0.0.6 github.com/ipfs/go-ipfs-files v0.0.7 github.com/ipfs/go-log v1.0.3 github.com/ipfs/go-log/v2 v2.0.3 diff --git a/go.sum b/go.sum index 508d985d7..67fa4d38d 100644 --- a/go.sum +++ b/go.sum @@ -38,8 +38,8 @@ github.com/filecoin-project/go-bitfield v0.0.4-0.20200703174658-f4a5758051a1 h1: github.com/filecoin-project/go-bitfield v0.0.4-0.20200703174658-f4a5758051a1/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= -github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5 h1:yvQJCW9mmi9zy+51xA01Ea2X7/dL7r8eKDPuGUjRmbo= -github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5/go.mod h1:JbkIgFF/Z9BDlvrJO1FuKkaWsH673/UdFaiVS6uIHlA= +github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f h1:GxJzR3oRIMTPtpZ0b7QF8FKPK6/iPAc7trhlL5k/g+s= +github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-paramfetch v0.0.1 h1:gV7bs5YaqlgpGFMiLxInGK2L1FyCXUE0rimz4L7ghoE= github.com/filecoin-project/go-paramfetch v0.0.1/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= github.com/filecoin-project/specs-actors v0.3.0/go.mod h1:nQYnFbQ7Y0bHZyq6HDEuVlCPR+U3z5Q3wMOQ+2aiV+Y= @@ -82,6 +82,8 @@ github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUP github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.5 h1:o0Ix8e/ql7Zb5UVUJEUfjsWCIY8t48++9lR8qi6oiJU= github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= +github.com/ipfs/go-cid v0.0.6 h1:go0y+GcDOGeJIV01FeBsta4FHngoA4Wz7KMeLkXAhMs= +github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= github.com/ipfs/go-hamt-ipld v0.0.15-0.20200131012125-dd88a59d3f2e/go.mod h1:9aQJu/i/TaRDW6jqB5U217dLIDopn50wxLdHXM2CTfE= github.com/ipfs/go-ipfs-files v0.0.7 h1:s5BRD12ndahqYifeH1S8Z73zqZhR+3IdKYAG9PiETs0= github.com/ipfs/go-ipfs-files v0.0.7/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs= @@ -157,15 +159,21 @@ github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc= github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= 
+github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= github.com/multiformats/go-multiaddr v0.2.0 h1:lR52sFwcTCuQb6bTfnXF6zA2XfyYvyd+5a9qECv/J90= github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA= github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk= +github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.13 h1:06x+mk/zj1FoMsgNejLpy6QTvJqlSt/BhLEy87zidlc= github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-multihash v0.0.14 h1:QoBceQYQQtNUuf6s7wHxnE2c8bhbMqhfGzNI032se/I= +github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.5 h1:XVZwSo04Cs3j/jS0uAEPpT3JY6DzMcVLLoWOSnCxOjg= diff --git a/mock/mock.go b/mock/mock.go index 7c9ed57f0..55c38967c 100644 --- a/mock/mock.go +++ b/mock/mock.go @@ -161,7 +161,7 @@ func (mgr *SectorMgr) SealPreCommit1(ctx context.Context, sid abi.SectorID, tick return nil, err } - cc, _, err := commcid.CIDToCommitment(commd) + _, _, cc, err := commcid.CIDToCommitment(commd) if err != nil { panic(err) } @@ -175,14 +175,14 @@ func (mgr *SectorMgr) SealPreCommit2(ctx context.Context, sid abi.SectorID, phas db := []byte(string(phase1Out)) db[0] ^= 'd' - d := commcid.DataCommitmentV1ToCID(db) + d, _ := commcid.DataCommitmentV1ToCID(db) commr := make([]byte, 32) for i := range db { commr[32-(i+1)] = db[i] } - commR := commcid.ReplicaCommitmentV1ToCID(commr) + commR, _ := commcid.ReplicaCommitmentV1ToCID(commr) return storage.SectorCids{ Unsealed: d, diff --git a/zerocomm/zerocomm.go b/zerocomm/zerocomm.go index 7d6308549..9b59723a0 100644 --- a/zerocomm/zerocomm.go +++ b/zerocomm/zerocomm.go @@ -51,5 +51,6 @@ var PieceComms = [Levels - Skip][32]byte{ func ZeroPieceCommitment(sz abi.UnpaddedPieceSize) cid.Cid { level := bits.TrailingZeros64(uint64(sz.Padded())) - Skip - 5 // 2^5 = 32 - return commcid.PieceCommitmentV1ToCID(PieceComms[level][:]) + commP, _ := commcid.PieceCommitmentV1ToCID(PieceComms[level][:]) + return commP } From 0a6c939a7390e632f486e7e05738611dcd9a0dff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 16 Jul 2020 23:40:54 +0200 Subject: [PATCH 25/51] Drop unused SectorInfo fields --- stores/index.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/stores/index.go b/stores/index.go index c85dc125e..acad2abaa 100644 --- a/stores/index.go +++ b/stores/index.go @@ -29,9 +29,6 @@ type StorageInfo struct { CanSeal bool CanStore bool - - LastHeartbeat time.Time - HeartbeatErr error } type HealthReport struct { From be6b88f4064ac8df22e3efcfce6424008fa9dc10 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 16 Jul 2020 23:41:04 +0200 Subject: [PATCH 26/51] Some sched tests --- manager.go | 15 +- sched_test.go | 408 ++++++++++++++++++++++++++++++++++++++++++++++ selector_alloc.go | 4 +- 3 files changed, 414 insertions(+), 13 deletions(-) diff --git a/manager.go b/manager.go index 0c18645ac..0cd081d92 100644 --- a/manager.go +++ b/manager.go @@ -208,7 +208,7 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect var selector WorkerSelector if len(best) == 0 { // new - selector, err = newAllocSelector(ctx, m.index, stores.FTUnsealed, stores.PathSealing) + selector = newAllocSelector(ctx, m.index, stores.FTUnsealed, stores.PathSealing) } else { // append to existing selector, err = newExistingSelector(ctx, m.index, sector, stores.FTUnsealed, false) } @@ -269,7 +269,7 @@ func (m *Manager) AddPiece(ctx context.Context, sector abi.SectorID, existingPie var selector WorkerSelector var err error if len(existingPieces) == 0 { // new - selector, err = newAllocSelector(ctx, m.index, stores.FTUnsealed, stores.PathSealing) + selector = newAllocSelector(ctx, m.index, stores.FTUnsealed, stores.PathSealing) } else { // use existing selector, err = newExistingSelector(ctx, m.index, sector, stores.FTUnsealed, false) } @@ -300,10 +300,7 @@ func (m *Manager) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke // TODO: also consider where the unsealed data sits - selector, err := newAllocSelector(ctx, m.index, stores.FTCache|stores.FTSealed, stores.PathSealing) - if err != nil { - return nil, xerrors.Errorf("creating path selector: %w", err) - } + selector := newAllocSelector(ctx, m.index, stores.FTCache|stores.FTSealed, stores.PathSealing) err = m.sched.Schedule(ctx, sector, sealtasks.TTPreCommit1, selector, schedFetch(sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error { p, err := w.SealPreCommit1(ctx, sector, ticket, pieces) @@ -417,11 +414,7 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID, keepU return err } - fetchSel, err := newAllocSelector(ctx, m.index, stores.FTCache|stores.FTSealed, stores.PathStorage) - if err != nil { - return xerrors.Errorf("creating fetchSel: %w", err) - } - + fetchSel := newAllocSelector(ctx, m.index, stores.FTCache|stores.FTSealed, stores.PathStorage) moveUnsealed := unsealed { if len(keepUnsealed) == 0 { diff --git a/sched_test.go b/sched_test.go index d0d0e7ca9..e810b6a0d 100644 --- a/sched_test.go +++ b/sched_test.go @@ -2,9 +2,21 @@ package sectorstorage import ( "context" + "io" + "sync" "testing" + "time" + "github.com/ipfs/go-cid" "github.com/stretchr/testify/require" + + "github.com/filecoin-project/specs-actors/actors/abi" + + "github.com/filecoin-project/sector-storage/fsutil" + "github.com/filecoin-project/sector-storage/sealtasks" + "github.com/filecoin-project/sector-storage/stores" + "github.com/filecoin-project/sector-storage/storiface" + "github.com/filecoin-project/specs-storage/storage" ) func TestWithPriority(t *testing.T) { @@ -16,3 +28,399 @@ func TestWithPriority(t *testing.T) { require.Equal(t, 2222, getPriority(ctx)) } + +type schedTestWorker struct { + name string + taskTypes map[sealtasks.TaskType]struct{} + paths []stores.StoragePath + + closed bool + closing chan struct{} +} + +func (s *schedTestWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) { + panic("implement me") +} + +func (s 
*schedTestWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storage.SectorCids, error) { + panic("implement me") +} + +func (s *schedTestWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) { + panic("implement me") +} + +func (s *schedTestWorker) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storage.Proof, error) { + panic("implement me") +} + +func (s *schedTestWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error { + panic("implement me") +} + +func (s *schedTestWorker) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error { + panic("implement me") +} + +func (s *schedTestWorker) Remove(ctx context.Context, sector abi.SectorID) error { + panic("implement me") +} + +func (s *schedTestWorker) NewSector(ctx context.Context, sector abi.SectorID) error { + panic("implement me") +} + +func (s *schedTestWorker) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) { + panic("implement me") +} + +func (s *schedTestWorker) MoveStorage(ctx context.Context, sector abi.SectorID) error { + panic("implement me") +} + +func (s *schedTestWorker) Fetch(ctx context.Context, id abi.SectorID, ft stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) error { + panic("implement me") +} + +func (s *schedTestWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) error { + panic("implement me") +} + +func (s *schedTestWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error { + panic("implement me") +} + +func (s *schedTestWorker) TaskTypes(ctx context.Context) (map[sealtasks.TaskType]struct{}, error) { + return s.taskTypes, nil +} + +func (s *schedTestWorker) Paths(ctx context.Context) ([]stores.StoragePath, error) { + return s.paths, nil +} + +func (s *schedTestWorker) Info(ctx context.Context) (storiface.WorkerInfo, error) { + return storiface.WorkerInfo{ + Hostname: s.name, + Resources: storiface.WorkerResources{ + MemPhysical: 128 << 30, + MemSwap: 200 << 30, + MemReserved: 2 << 30, + CPUs: 32, + GPUs: []string{"a GPU"}, + }, + }, nil +} + +func (s *schedTestWorker) Closing(ctx context.Context) (<-chan struct{}, error) { + return s.closing, nil +} + +func (s *schedTestWorker) Close() error { + if !s.closed { + s.closed = true + close(s.closing) + } + return nil +} + +var _ Worker = &schedTestWorker{} + +func addTestWorker(t *testing.T, sched *scheduler, index *stores.Index, name string, taskTypes map[sealtasks.TaskType]struct{}) { + w := &schedTestWorker{ + name: name, + taskTypes: taskTypes, + paths: []stores.StoragePath{{ID: "bb-8", Weight: 2, LocalPath: "food", CanSeal: true, CanStore: true}}, + + closing: make(chan struct{}), + } + + for _, path := range w.paths { + err := index.StorageAttach(context.TODO(), stores.StorageInfo{ + ID: path.ID, + URLs: nil, + Weight: path.Weight, + CanSeal: path.CanSeal, + CanStore: path.CanStore, + }, fsutil.FsStat{ + Capacity: 1 << 40, + Available: 1 << 40, + Reserved: 3, + }) + require.NoError(t, err) + } + + info, err := 
w.Info(context.TODO()) + require.NoError(t, err) + + sched.newWorkers <- &workerHandle{ + w: w, + info: info, + preparing: &activeResources{}, + active: &activeResources{}, + } +} + +func TestSchedStartStop(t *testing.T) { + spt := abi.RegisteredSealProof_StackedDrg32GiBV1 + sched := newScheduler(spt) + go sched.runSched() + + addTestWorker(t, sched, stores.NewIndex(), "fred", nil) + + sched.schedClose() +} + +func TestSched(t *testing.T) { + ctx := context.Background() + spt := abi.RegisteredSealProof_StackedDrg32GiBV1 + + sectorAte := abi.SectorID{ + Miner: 8, + Number: 8, + } + + type workerSpec struct { + name string + taskTypes map[sealtasks.TaskType]struct{} + } + + noopPrepare := func(ctx context.Context, w Worker) error { + return nil + } + + type runMeta struct { + done map[string]chan struct{} + + wg sync.WaitGroup + } + + type task func(*testing.T, *scheduler, *stores.Index, *runMeta) + + sched := func(taskName, expectWorker string, taskType sealtasks.TaskType) task { + return func(t *testing.T, sched *scheduler, index *stores.Index, rm *runMeta) { + done := make(chan struct{}) + rm.done[taskName] = done + + sel := newAllocSelector(ctx, index, stores.FTCache, stores.PathSealing) + + rm.wg.Add(1) + go func() { + defer rm.wg.Done() + + err := sched.Schedule(ctx, sectorAte, taskType, sel, noopPrepare, func(ctx context.Context, w Worker) error { + wi, err := w.Info(ctx) + require.NoError(t, err) + + require.Equal(t, expectWorker, wi.Hostname) + + log.Info("IN ", taskName) + + for { + _, ok := <-done + if !ok { + break + } + } + + log.Info("OUT ", taskName) + + return nil + }) + require.NoError(t, err) + }() + } + } + + taskStarted := func(name string) task { + return func(t *testing.T, sched *scheduler, index *stores.Index, rm *runMeta) { + rm.done[name] <- struct{}{} + } + } + + taskDone := func(name string) task { + return func(t *testing.T, sched *scheduler, index *stores.Index, rm *runMeta) { + rm.done[name] <- struct{}{} + close(rm.done[name]) + } + } + + taskNotScheduled := func(name string) task { + return func(t *testing.T, sched *scheduler, index *stores.Index, rm *runMeta) { + select { + case rm.done[name] <- struct{}{}: + t.Fatal("not expected") + case <-time.After(10 * time.Millisecond): // TODO: better synchronization thingy + } + } + } + + testFunc := func(workers []workerSpec, tasks []task) func(t *testing.T) { + return func(t *testing.T) { + index := stores.NewIndex() + + sched := newScheduler(spt) + go sched.runSched() + + for _, worker := range workers { + addTestWorker(t, sched, index, worker.name, worker.taskTypes) + } + + rm := runMeta{ + done: map[string]chan struct{}{}, + } + + for _, task := range tasks { + task(t, sched, index, &rm) + } + + log.Info("wait for async stuff") + rm.wg.Wait() + + sched.schedClose() + } + } + + multTask := func(tasks ...task) task { + return func(t *testing.T, s *scheduler, index *stores.Index, meta *runMeta) { + for _, tsk := range tasks { + tsk(t, s, index, meta) + } + } + } + + t.Run("one-pc1", testFunc([]workerSpec{ + {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1:{}}}, + }, []task{ + sched("pc1-1", "fred", sealtasks.TTPreCommit1), + taskDone("pc1-1"), + })) + + t.Run("pc1-2workers-1", testFunc([]workerSpec{ + {name: "fred2", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit2:{}}}, + {name: "fred1", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1:{}}}, + }, []task{ + sched("pc1-1", "fred1", sealtasks.TTPreCommit1), + taskDone("pc1-1"), + })) + + 
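+	// Same scenario with the worker registration order flipped: the scheduler
+	// must still route the task to the worker that actually supports
+	// TTPreCommit1, so both variants assert the "fred1" hostname.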
t.Run("pc1-2workers-2", testFunc([]workerSpec{ + {name: "fred1", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1:{}}}, + {name: "fred2", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit2:{}}}, + }, []task{ + sched("pc1-1", "fred1", sealtasks.TTPreCommit1), + taskDone("pc1-1"), + })) + + t.Run("pc1-block-pc2", testFunc([]workerSpec{ + {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1:{}, sealtasks.TTPreCommit2:{}}}, + }, []task{ + sched("pc1", "fred", sealtasks.TTPreCommit1), + taskStarted("pc1"), + + sched("pc2", "fred", sealtasks.TTPreCommit2), + taskNotScheduled("pc2"), + + taskDone("pc1"), + taskDone("pc2"), + })) + + t.Run("pc2-block-pc1", testFunc([]workerSpec{ + {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1:{}, sealtasks.TTPreCommit2:{}}}, + }, []task{ + sched("pc2", "fred", sealtasks.TTPreCommit2), + taskStarted("pc2"), + + sched("pc1", "fred", sealtasks.TTPreCommit1), + taskNotScheduled("pc1"), + + taskDone("pc2"), + taskDone("pc1"), + })) + + t.Run("pc1-batching", testFunc([]workerSpec{ + {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1:{}}}, + }, []task{ + sched("t1", "fred", sealtasks.TTPreCommit1), + taskStarted("t1"), + + sched("t2", "fred", sealtasks.TTPreCommit1), + taskStarted("t2"), + + // with worker settings, we can only run 2 parallel PC1s + + // start 2 more to fill fetch buffer + + sched("t3", "fred", sealtasks.TTPreCommit1), + taskNotScheduled("t3"), + + sched("t4", "fred", sealtasks.TTPreCommit1), + taskNotScheduled("t4"), + + taskDone("t1"), + taskDone("t2"), + + taskStarted("t3"), + taskStarted("t4"), + + taskDone("t3"), + taskDone("t4"), + })) + + twoPC1 := func(prefix string, schedAssert func(name string) task) task { + return multTask( + sched(prefix + "-a", "fred", sealtasks.TTPreCommit1), + schedAssert(prefix + "-a"), + + sched(prefix + "-b", "fred", sealtasks.TTPreCommit1), + schedAssert(prefix + "-b"), + ) + } + + twoPC1Done := func(prefix string) task { + return multTask( + taskDone(prefix + "-1"), + taskDone(prefix + "-b"), + ) + } + + t.Run("pc1-pc2-prio", testFunc([]workerSpec{ + {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1:{}, sealtasks.TTPreCommit2: {}}}, + }, []task{ + // fill exec/fetch buffers + twoPC1("w0", taskStarted), + twoPC1("w1", taskNotScheduled), + + // fill worker windows + twoPC1("w2", taskNotScheduled), + twoPC1("w3", taskNotScheduled), + + // windowed + + sched("t1", "fred", sealtasks.TTPreCommit1), + taskNotScheduled("t1"), + + sched("t2", "fred", sealtasks.TTPreCommit1), + taskNotScheduled("t2"), + + sched("t3", "fred", sealtasks.TTPreCommit2), + taskNotScheduled("t3"), + + twoPC1Done("w0"), + twoPC1Done("w1"), + twoPC1Done("w2"), + twoPC1Done("w3"), + + taskStarted("t1"), + taskNotScheduled("t2"), + taskNotScheduled("t3"), + + taskDone("t1"), + + taskStarted("t2"), + taskStarted("t3"), + + taskDone("t2"), + taskDone("t3"), + })) +} + diff --git a/selector_alloc.go b/selector_alloc.go index 874bf7bb0..53e121737 100644 --- a/selector_alloc.go +++ b/selector_alloc.go @@ -17,12 +17,12 @@ type allocSelector struct { ptype stores.PathType } -func newAllocSelector(ctx context.Context, index stores.SectorIndex, alloc stores.SectorFileType, ptype stores.PathType) (*allocSelector, error) { +func newAllocSelector(ctx context.Context, index stores.SectorIndex, alloc stores.SectorFileType, ptype stores.PathType) (*allocSelector) { return &allocSelector{ index: index, alloc: alloc, 
ptype: ptype, - }, nil + } } func (s *allocSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredSealProof, whnd *workerHandle) (bool, error) { From 2e557573f4864fdb431f205b59b4cb7574dc19db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 16 Jul 2020 23:41:15 +0200 Subject: [PATCH 27/51] gofmt --- sched_test.go | 59 +++++++++++++++++++++++------------------------ selector_alloc.go | 2 +- 2 files changed, 30 insertions(+), 31 deletions(-) diff --git a/sched_test.go b/sched_test.go index e810b6a0d..1c7b88905 100644 --- a/sched_test.go +++ b/sched_test.go @@ -30,11 +30,11 @@ func TestWithPriority(t *testing.T) { } type schedTestWorker struct { - name string + name string taskTypes map[sealtasks.TaskType]struct{} - paths []stores.StoragePath + paths []stores.StoragePath - closed bool + closed bool closing chan struct{} } @@ -100,7 +100,7 @@ func (s *schedTestWorker) Paths(ctx context.Context) ([]stores.StoragePath, erro func (s *schedTestWorker) Info(ctx context.Context) (storiface.WorkerInfo, error) { return storiface.WorkerInfo{ - Hostname: s.name, + Hostname: s.name, Resources: storiface.WorkerResources{ MemPhysical: 128 << 30, MemSwap: 200 << 30, @@ -127,20 +127,20 @@ var _ Worker = &schedTestWorker{} func addTestWorker(t *testing.T, sched *scheduler, index *stores.Index, name string, taskTypes map[sealtasks.TaskType]struct{}) { w := &schedTestWorker{ - name: name, + name: name, taskTypes: taskTypes, - paths: []stores.StoragePath{{ID: "bb-8", Weight: 2, LocalPath: "food", CanSeal: true, CanStore: true}}, + paths: []stores.StoragePath{{ID: "bb-8", Weight: 2, LocalPath: "food", CanSeal: true, CanStore: true}}, closing: make(chan struct{}), } for _, path := range w.paths { err := index.StorageAttach(context.TODO(), stores.StorageInfo{ - ID: path.ID, - URLs: nil, - Weight: path.Weight, - CanSeal: path.CanSeal, - CanStore: path.CanStore, + ID: path.ID, + URLs: nil, + Weight: path.Weight, + CanSeal: path.CanSeal, + CanStore: path.CanStore, }, fsutil.FsStat{ Capacity: 1 << 40, Available: 1 << 40, @@ -180,7 +180,7 @@ func TestSched(t *testing.T) { } type workerSpec struct { - name string + name string taskTypes map[sealtasks.TaskType]struct{} } @@ -289,30 +289,30 @@ func TestSched(t *testing.T) { } t.Run("one-pc1", testFunc([]workerSpec{ - {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1:{}}}, + {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}}, }, []task{ sched("pc1-1", "fred", sealtasks.TTPreCommit1), taskDone("pc1-1"), })) t.Run("pc1-2workers-1", testFunc([]workerSpec{ - {name: "fred2", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit2:{}}}, - {name: "fred1", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1:{}}}, + {name: "fred2", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit2: {}}}, + {name: "fred1", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}}, }, []task{ sched("pc1-1", "fred1", sealtasks.TTPreCommit1), taskDone("pc1-1"), })) t.Run("pc1-2workers-2", testFunc([]workerSpec{ - {name: "fred1", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1:{}}}, - {name: "fred2", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit2:{}}}, + {name: "fred1", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}}, + {name: "fred2", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit2: {}}}, }, []task{ sched("pc1-1", "fred1", sealtasks.TTPreCommit1), 
taskDone("pc1-1"), })) t.Run("pc1-block-pc2", testFunc([]workerSpec{ - {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1:{}, sealtasks.TTPreCommit2:{}}}, + {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}}, }, []task{ sched("pc1", "fred", sealtasks.TTPreCommit1), taskStarted("pc1"), @@ -325,7 +325,7 @@ func TestSched(t *testing.T) { })) t.Run("pc2-block-pc1", testFunc([]workerSpec{ - {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1:{}, sealtasks.TTPreCommit2:{}}}, + {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}}, }, []task{ sched("pc2", "fred", sealtasks.TTPreCommit2), taskStarted("pc2"), @@ -338,7 +338,7 @@ func TestSched(t *testing.T) { })) t.Run("pc1-batching", testFunc([]workerSpec{ - {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1:{}}}, + {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}}, }, []task{ sched("t1", "fred", sealtasks.TTPreCommit1), taskStarted("t1"), @@ -368,23 +368,23 @@ func TestSched(t *testing.T) { twoPC1 := func(prefix string, schedAssert func(name string) task) task { return multTask( - sched(prefix + "-a", "fred", sealtasks.TTPreCommit1), - schedAssert(prefix + "-a"), + sched(prefix+"-a", "fred", sealtasks.TTPreCommit1), + schedAssert(prefix+"-a"), - sched(prefix + "-b", "fred", sealtasks.TTPreCommit1), - schedAssert(prefix + "-b"), - ) + sched(prefix+"-b", "fred", sealtasks.TTPreCommit1), + schedAssert(prefix+"-b"), + ) } twoPC1Done := func(prefix string) task { return multTask( - taskDone(prefix + "-1"), - taskDone(prefix + "-b"), - ) + taskDone(prefix+"-1"), + taskDone(prefix+"-b"), + ) } t.Run("pc1-pc2-prio", testFunc([]workerSpec{ - {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1:{}, sealtasks.TTPreCommit2: {}}}, + {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}}, }, []task{ // fill exec/fetch buffers twoPC1("w0", taskStarted), @@ -423,4 +423,3 @@ func TestSched(t *testing.T) { taskDone("t3"), })) } - diff --git a/selector_alloc.go b/selector_alloc.go index 53e121737..35221921f 100644 --- a/selector_alloc.go +++ b/selector_alloc.go @@ -17,7 +17,7 @@ type allocSelector struct { ptype stores.PathType } -func newAllocSelector(ctx context.Context, index stores.SectorIndex, alloc stores.SectorFileType, ptype stores.PathType) (*allocSelector) { +func newAllocSelector(ctx context.Context, index stores.SectorIndex, alloc stores.SectorFileType, ptype stores.PathType) *allocSelector { return &allocSelector{ index: index, alloc: alloc, From cab0c74e08b35b3687c60f24a7a8e2724e5f4379 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 17 Jul 2020 01:26:55 +0200 Subject: [PATCH 28/51] more sched test debugging --- sched.go | 11 ++++ sched_test.go | 152 +++++++++++++++++++++++++++++++------------------- 2 files changed, 105 insertions(+), 58 deletions(-) diff --git a/sched.go b/sched.go index 241440beb..d89dad3f5 100644 --- a/sched.go +++ b/sched.go @@ -69,6 +69,7 @@ type scheduler struct { openWindows []*schedWindowRequest closing chan struct{} + testSync chan struct{} // used for testing } type workerHandle struct { @@ -195,6 +196,9 @@ func (sh *scheduler) runSched() { heap.Push(sh.schedQueue, req) sh.trySched() + if sh.testSync != nil { + sh.testSync <- struct{}{} + } case req := 
<-sh.windowRequests: sh.openWindows = append(sh.openWindows, req) sh.trySched() @@ -226,6 +230,8 @@ func (sh *scheduler) trySched() { windows := make([]schedWindow, len(sh.openWindows)) acceptableWindows := make([][]int, sh.schedQueue.Len()) + log.Debugf("trySched %d queued; %d open windows", sh.schedQueue.Len(), len(windows)) + // Step 1 for sqi := 0; sqi < sh.schedQueue.Len(); sqi++ { task := (*sh.schedQueue)[sqi] @@ -295,11 +301,15 @@ func (sh *scheduler) trySched() { wid := sh.openWindows[wnd].worker wr := sh.workers[wid].info.Resources + log.Debugf("trySched try assign sqi:%d sector %d to window %d", sqi, task.sector.Number, wnd) + // TODO: allow bigger windows if !windows[wnd].allocated.canHandleRequest(needRes, wid, wr) { continue } + log.Debugf("trySched ASSIGNED sqi:%d sector %d to window %d", sqi, task.sector.Number, wnd) + windows[wnd].allocated.add(wr, needRes) selectedWindow = wnd @@ -419,6 +429,7 @@ func (sh *scheduler) runWorker(wid WorkerID) { break assignLoop } + log.Debugf("assign worker sector %d", todo.sector.Number) err := sh.assignWorker(taskDone, wid, worker, todo) sh.workersLk.Unlock() diff --git a/sched_test.go b/sched_test.go index 1c7b88905..26961a4f6 100644 --- a/sched_test.go +++ b/sched_test.go @@ -2,7 +2,9 @@ package sectorstorage import ( "context" + "fmt" "io" + "runtime" "sync" "testing" "time" @@ -171,13 +173,10 @@ func TestSchedStartStop(t *testing.T) { } func TestSched(t *testing.T) { - ctx := context.Background() - spt := abi.RegisteredSealProof_StackedDrg32GiBV1 + ctx, done := context.WithTimeout(context.Background(), 20 * time.Second) + defer done() - sectorAte := abi.SectorID{ - Miner: 8, - Number: 8, - } + spt := abi.RegisteredSealProof_StackedDrg32GiBV1 type workerSpec struct { name string @@ -196,7 +195,10 @@ func TestSched(t *testing.T) { type task func(*testing.T, *scheduler, *stores.Index, *runMeta) - sched := func(taskName, expectWorker string, taskType sealtasks.TaskType) task { + sched := func(taskName, expectWorker string, sid abi.SectorNumber, taskType sealtasks.TaskType) task { + _, _, l, _ := runtime.Caller(1) + _, _, l2, _ := runtime.Caller(2) + return func(t *testing.T, sched *scheduler, index *stores.Index, rm *runMeta) { done := make(chan struct{}) rm.done[taskName] = done @@ -207,7 +209,12 @@ func TestSched(t *testing.T) { go func() { defer rm.wg.Done() - err := sched.Schedule(ctx, sectorAte, taskType, sel, noopPrepare, func(ctx context.Context, w Worker) error { + sectorNum := abi.SectorID{ + Miner: 8, + Number: sid, + } + + err := sched.Schedule(ctx, sectorNum, taskType, sel, noopPrepare, func(ctx context.Context, w Worker) error { wi, err := w.Info(ctx) require.NoError(t, err) @@ -226,29 +233,45 @@ func TestSched(t *testing.T) { return nil }) - require.NoError(t, err) + require.NoError(t, err, fmt.Sprint(l, l2)) }() + + <-sched.testSync } } taskStarted := func(name string) task { + _, _, l, _ := runtime.Caller(1) + _, _, l2, _ := runtime.Caller(2) return func(t *testing.T, sched *scheduler, index *stores.Index, rm *runMeta) { - rm.done[name] <- struct{}{} + select { + case rm.done[name] <- struct{}{}: + case <-ctx.Done(): + t.Fatal("ctx error", ctx.Err(), l, l2) + } } } taskDone := func(name string) task { + _, _, l, _ := runtime.Caller(1) + _, _, l2, _ := runtime.Caller(2) return func(t *testing.T, sched *scheduler, index *stores.Index, rm *runMeta) { - rm.done[name] <- struct{}{} + select { + case rm.done[name] <- struct{}{}: + case <-ctx.Done(): + t.Fatal("ctx error", ctx.Err(), l, l2) + } close(rm.done[name]) } } 
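	// Editor's note, not part of the original patch: a sketch of the
	// handshake these helpers implement. The worker callback registered by
	// sched() keeps receiving from rm.done[name] until the channel is
	// closed, so a send only succeeds while the task is actually running on
	// a worker. taskStarted proves that with a single send, taskDone sends
	// once more and then closes the channel to let the callback return, and
	// taskNotScheduled (below) asserts that no receiver appears within a
	// short timeout. Roughly, the callback's wait loop is equivalent to:
	//
	//	for range rm.done[name] {
	//	} // returns once taskDone closes the channel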
taskNotScheduled := func(name string) task { + _, _, l, _ := runtime.Caller(1) + _, _, l2, _ := runtime.Caller(2) return func(t *testing.T, sched *scheduler, index *stores.Index, rm *runMeta) { select { case rm.done[name] <- struct{}{}: - t.Fatal("not expected") + t.Fatal("not expected", l, l2) case <-time.After(10 * time.Millisecond): // TODO: better synchronization thingy } } @@ -259,6 +282,8 @@ func TestSched(t *testing.T) { index := stores.NewIndex() sched := newScheduler(spt) + sched.testSync = make(chan struct{}) + go sched.runSched() for _, worker := range workers { @@ -291,7 +316,7 @@ func TestSched(t *testing.T) { t.Run("one-pc1", testFunc([]workerSpec{ {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}}, }, []task{ - sched("pc1-1", "fred", sealtasks.TTPreCommit1), + sched("pc1-1", "fred", 8, sealtasks.TTPreCommit1), taskDone("pc1-1"), })) @@ -299,7 +324,7 @@ func TestSched(t *testing.T) { {name: "fred2", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit2: {}}}, {name: "fred1", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}}, }, []task{ - sched("pc1-1", "fred1", sealtasks.TTPreCommit1), + sched("pc1-1", "fred1", 8, sealtasks.TTPreCommit1), taskDone("pc1-1"), })) @@ -307,17 +332,17 @@ func TestSched(t *testing.T) { {name: "fred1", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}}, {name: "fred2", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit2: {}}}, }, []task{ - sched("pc1-1", "fred1", sealtasks.TTPreCommit1), + sched("pc1-1", "fred1", 8, sealtasks.TTPreCommit1), taskDone("pc1-1"), })) t.Run("pc1-block-pc2", testFunc([]workerSpec{ {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}}, }, []task{ - sched("pc1", "fred", sealtasks.TTPreCommit1), + sched("pc1", "fred", 8, sealtasks.TTPreCommit1), taskStarted("pc1"), - sched("pc2", "fred", sealtasks.TTPreCommit2), + sched("pc2", "fred", 8, sealtasks.TTPreCommit2), taskNotScheduled("pc2"), taskDone("pc1"), @@ -327,10 +352,10 @@ func TestSched(t *testing.T) { t.Run("pc2-block-pc1", testFunc([]workerSpec{ {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}}, }, []task{ - sched("pc2", "fred", sealtasks.TTPreCommit2), + sched("pc2", "fred", 8, sealtasks.TTPreCommit2), taskStarted("pc2"), - sched("pc1", "fred", sealtasks.TTPreCommit1), + sched("pc1", "fred", 8, sealtasks.TTPreCommit1), taskNotScheduled("pc1"), taskDone("pc2"), @@ -340,20 +365,20 @@ func TestSched(t *testing.T) { t.Run("pc1-batching", testFunc([]workerSpec{ {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}}, }, []task{ - sched("t1", "fred", sealtasks.TTPreCommit1), + sched("t1", "fred", 8, sealtasks.TTPreCommit1), taskStarted("t1"), - sched("t2", "fred", sealtasks.TTPreCommit1), + sched("t2", "fred", 8, sealtasks.TTPreCommit1), taskStarted("t2"), // with worker settings, we can only run 2 parallel PC1s // start 2 more to fill fetch buffer - sched("t3", "fred", sealtasks.TTPreCommit1), + sched("t3", "fred", 8, sealtasks.TTPreCommit1), taskNotScheduled("t3"), - sched("t4", "fred", sealtasks.TTPreCommit1), + sched("t4", "fred", 8, sealtasks.TTPreCommit1), taskNotScheduled("t4"), taskDone("t1"), @@ -366,60 +391,71 @@ func TestSched(t *testing.T) { taskDone("t4"), })) - twoPC1 := func(prefix string, schedAssert func(name string) task) task { + twoPC1 := func(prefix string, sid abi.SectorNumber, schedAssert 
func(name string) task) task { return multTask( - sched(prefix+"-a", "fred", sealtasks.TTPreCommit1), + sched(prefix+"-a", "fred", sid, sealtasks.TTPreCommit1), schedAssert(prefix+"-a"), - sched(prefix+"-b", "fred", sealtasks.TTPreCommit1), + sched(prefix+"-b", "fred", sid + 1, sealtasks.TTPreCommit1), schedAssert(prefix+"-b"), ) } - twoPC1Done := func(prefix string) task { + twoPC1Act := func(prefix string, schedAssert func(name string) task) task { return multTask( - taskDone(prefix+"-1"), - taskDone(prefix+"-b"), + schedAssert(prefix+"-a"), + schedAssert(prefix+"-b"), ) } - t.Run("pc1-pc2-prio", testFunc([]workerSpec{ - {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}}, - }, []task{ - // fill exec/fetch buffers - twoPC1("w0", taskStarted), - twoPC1("w1", taskNotScheduled), + for i := 0; i < 100; i++ { + t.Run("pc1-pc2-prio", testFunc([]workerSpec{ + {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}}, + }, []task{ + // fill exec/fetch buffers + twoPC1("w0", 0, taskStarted), + twoPC1("w1", 2, taskNotScheduled), - // fill worker windows - twoPC1("w2", taskNotScheduled), - twoPC1("w3", taskNotScheduled), + // fill worker windows + twoPC1("w2", 4, taskNotScheduled), + //twoPC1("w3", taskNotScheduled), - // windowed + // windowed - sched("t1", "fred", sealtasks.TTPreCommit1), - taskNotScheduled("t1"), + sched("t1", "fred", 6, sealtasks.TTPreCommit1), + taskNotScheduled("t1"), - sched("t2", "fred", sealtasks.TTPreCommit1), - taskNotScheduled("t2"), + sched("t2", "fred", 7, sealtasks.TTPreCommit1), + taskNotScheduled("t2"), - sched("t3", "fred", sealtasks.TTPreCommit2), - taskNotScheduled("t3"), + sched("t3", "fred", 8, sealtasks.TTPreCommit2), + taskNotScheduled("t3"), - twoPC1Done("w0"), - twoPC1Done("w1"), - twoPC1Done("w2"), - twoPC1Done("w3"), + twoPC1Act("w0", taskDone), + twoPC1Act("w1", taskStarted), + twoPC1Act("w2", taskNotScheduled), + //twoPC1Act("w3", taskNotScheduled), - taskStarted("t1"), - taskNotScheduled("t2"), - taskNotScheduled("t3"), + twoPC1Act("w1", taskDone), + twoPC1Act("w2", taskStarted), + //twoPC1Act("w3", taskNotScheduled), - taskDone("t1"), + twoPC1Act("w2", taskDone), + //twoPC1Act("w3", taskStarted), - taskStarted("t2"), - taskStarted("t3"), + //twoPC1Act("w3", taskDone), - taskDone("t2"), - taskDone("t3"), - })) + taskStarted("t3"), + taskNotScheduled("t1"), + taskNotScheduled("t2"), + + taskDone("t3"), + + taskStarted("t1"), + taskStarted("t2"), + + taskDone("t1"), + taskDone("t2"), + })) + } } From bf315e63d77f7eaa5d87058d32ca507bef904ca4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 17 Jul 2020 01:32:49 +0200 Subject: [PATCH 29/51] sched: working tests --- sched.go | 2 +- sched_test.go | 34 +++++++++++----------------------- 2 files changed, 12 insertions(+), 24 deletions(-) diff --git a/sched.go b/sched.go index d89dad3f5..44e62f6c2 100644 --- a/sched.go +++ b/sched.go @@ -68,7 +68,7 @@ type scheduler struct { schedQueue *requestQueue openWindows []*schedWindowRequest - closing chan struct{} + closing chan struct{} testSync chan struct{} // used for testing } diff --git a/sched_test.go b/sched_test.go index 26961a4f6..e6bd8d220 100644 --- a/sched_test.go +++ b/sched_test.go @@ -173,7 +173,7 @@ func TestSchedStartStop(t *testing.T) { } func TestSched(t *testing.T) { - ctx, done := context.WithTimeout(context.Background(), 20 * time.Second) + ctx, done := context.WithTimeout(context.Background(), 
20*time.Second) defer done() spt := abi.RegisteredSealProof_StackedDrg32GiBV1 @@ -183,7 +183,7 @@ func TestSched(t *testing.T) { taskTypes map[sealtasks.TaskType]struct{} } - noopPrepare := func(ctx context.Context, w Worker) error { + noopAction := func(ctx context.Context, w Worker) error { return nil } @@ -214,7 +214,7 @@ func TestSched(t *testing.T) { Number: sid, } - err := sched.Schedule(ctx, sectorNum, taskType, sel, noopPrepare, func(ctx context.Context, w Worker) error { + err := sched.Schedule(ctx, sectorNum, taskType, sel, func(ctx context.Context, w Worker) error { wi, err := w.Info(ctx) require.NoError(t, err) @@ -232,7 +232,7 @@ func TestSched(t *testing.T) { log.Info("OUT ", taskName) return nil - }) + }, noopAction) require.NoError(t, err, fmt.Sprint(l, l2)) }() @@ -396,7 +396,7 @@ func TestSched(t *testing.T) { sched(prefix+"-a", "fred", sid, sealtasks.TTPreCommit1), schedAssert(prefix+"-a"), - sched(prefix+"-b", "fred", sid + 1, sealtasks.TTPreCommit1), + sched(prefix+"-b", "fred", sid+1, sealtasks.TTPreCommit1), schedAssert(prefix+"-b"), ) } @@ -408,42 +408,30 @@ func TestSched(t *testing.T) { ) } - for i := 0; i < 100; i++ { + // run this one a bunch of times, it had a very annoying tendency to fail randomly + for i := 0; i < 40; i++ { t.Run("pc1-pc2-prio", testFunc([]workerSpec{ {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}}, }, []task{ - // fill exec/fetch buffers + // fill queues twoPC1("w0", 0, taskStarted), twoPC1("w1", 2, taskNotScheduled), - // fill worker windows - twoPC1("w2", 4, taskNotScheduled), - //twoPC1("w3", taskNotScheduled), - // windowed - sched("t1", "fred", 6, sealtasks.TTPreCommit1), + sched("t1", "fred", 8, sealtasks.TTPreCommit1), taskNotScheduled("t1"), - sched("t2", "fred", 7, sealtasks.TTPreCommit1), + sched("t2", "fred", 9, sealtasks.TTPreCommit1), taskNotScheduled("t2"), - sched("t3", "fred", 8, sealtasks.TTPreCommit2), + sched("t3", "fred", 10, sealtasks.TTPreCommit2), taskNotScheduled("t3"), twoPC1Act("w0", taskDone), twoPC1Act("w1", taskStarted), - twoPC1Act("w2", taskNotScheduled), - //twoPC1Act("w3", taskNotScheduled), twoPC1Act("w1", taskDone), - twoPC1Act("w2", taskStarted), - //twoPC1Act("w3", taskNotScheduled), - - twoPC1Act("w2", taskDone), - //twoPC1Act("w3", taskStarted), - - //twoPC1Act("w3", taskDone), taskStarted("t3"), taskNotScheduled("t1"), From 908d47305bc8aa7fb63725c3991dafe57e1da23d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 17 Jul 2020 01:46:59 +0200 Subject: [PATCH 30/51] fix race in runWorker --- sched.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/sched.go b/sched.go index 44e62f6c2..caf67c678 100644 --- a/sched.go +++ b/sched.go @@ -367,7 +367,10 @@ func (sh *scheduler) trySched() { func (sh *scheduler) runWorker(wid WorkerID) { go func() { + sh.workersLk.Lock() worker := sh.workers[wid] + sh.workersLk.Unlock() + scheduledWindows := make(chan *schedWindow, SchedWindows) taskDone := make(chan struct{}, 1) windowsRequested := 0 From f1b38371866bec524ea8c5603b9e38d1a3391161 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 17 Jul 2020 12:59:12 +0200 Subject: [PATCH 31/51] fix worker setup/cleanup raciness --- manager.go | 4 +-- sched.go | 87 +++++++++++++++++++++++++++++++++++++++----------- sched_test.go | 7 ++-- sched_watch.go | 6 +++- 4 files changed, 80 insertions(+), 24 deletions(-) diff --git a/manager.go b/manager.go index 0cd081d92..fc3be18c1 100644 --- a/manager.go +++ 
b/manager.go @@ -489,8 +489,8 @@ func (m *Manager) FsStat(ctx context.Context, id stores.ID) (fsutil.FsStat, erro return m.storage.FsStat(ctx, id) } -func (m *Manager) Close() error { - return m.sched.Close() +func (m *Manager) Close(ctx context.Context) error { + return m.sched.Close(ctx) } var _ SectorManager = &Manager{} diff --git a/sched.go b/sched.go index caf67c678..bec5ee0c5 100644 --- a/sched.go +++ b/sched.go @@ -3,6 +3,7 @@ package sectorstorage import ( "container/heap" "context" + "fmt" "math/rand" "sort" "sync" @@ -69,6 +70,7 @@ type scheduler struct { openWindows []*schedWindowRequest closing chan struct{} + closed chan struct{} testSync chan struct{} // used for testing } @@ -79,6 +81,11 @@ type workerHandle struct { preparing *activeResources active *activeResources + + // for sync manager goroutine closing + cleanupStarted bool + closedMgr chan struct{} + closingMgr chan struct{} } type schedWindowRequest struct { @@ -138,6 +145,7 @@ func newScheduler(spt abi.RegisteredSealProof) *scheduler { schedQueue: &requestQueue{}, closing: make(chan struct{}), + closed: make(chan struct{}), } } @@ -182,6 +190,8 @@ func (r *workerRequest) respond(err error) { } func (sh *scheduler) runSched() { + defer close(sh.closed) + go sh.runWorkerWatcher() for { @@ -366,11 +376,23 @@ func (sh *scheduler) trySched() { } func (sh *scheduler) runWorker(wid WorkerID) { + var ready sync.WaitGroup + ready.Add(1) + defer ready.Wait() + go func() { sh.workersLk.Lock() - worker := sh.workers[wid] + worker, found := sh.workers[wid] sh.workersLk.Unlock() + ready.Done() + + if !found { + panic(fmt.Sprintf("worker %d not found", wid)) + } + + defer close(worker.closedMgr) + scheduledWindows := make(chan *schedWindow, SchedWindows) taskDone := make(chan struct{}, 1) windowsRequested := 0 @@ -403,6 +425,8 @@ func (sh *scheduler) runWorker(wid WorkerID) { return case <-workerClosing: return + case <-worker.closingMgr: + return } } @@ -415,6 +439,8 @@ func (sh *scheduler) runWorker(wid WorkerID) { return case <-workerClosing: return + case <-worker.closingMgr: + return } assignLoop: @@ -518,6 +544,9 @@ func (sh *scheduler) assignWorker(taskDone chan struct{}, wid WorkerID, w *worke } func (sh *scheduler) newWorker(w *workerHandle) { + w.closedMgr = make(chan struct{}) + w.closingMgr = make(chan struct{}) + sh.workersLk.Lock() id := sh.nextWorker @@ -526,13 +555,13 @@ func (sh *scheduler) newWorker(w *workerHandle) { sh.workersLk.Unlock() + sh.runWorker(id) + select { case sh.watchClosing <- id: case <-sh.closing: return } - - sh.runWorker(id) } func (sh *scheduler) dropWorker(wid WorkerID) { @@ -540,37 +569,59 @@ func (sh *scheduler) dropWorker(wid WorkerID) { defer sh.workersLk.Unlock() w := sh.workers[wid] + + sh.workerCleanup(wid, w) + delete(sh.workers, wid) +} - newWindows := make([]*schedWindowRequest, 0, len(sh.openWindows)) - for _, window := range sh.openWindows { - if window.worker != wid { - newWindows = append(newWindows, window) - } +func (sh *scheduler) workerCleanup(wid WorkerID, w *workerHandle) { + if !w.cleanupStarted { + close(w.closingMgr) + } + select { + case <-w.closedMgr: + case <-time.After(time.Second): + log.Errorf("timeout closing worker manager goroutine %d", wid) } - sh.openWindows = newWindows - // TODO: sync close worker goroutine + if !w.cleanupStarted { + w.cleanupStarted = true - go func() { - if err := w.w.Close(); err != nil { - log.Warnf("closing worker %d: %+v", err) + newWindows := make([]*schedWindowRequest, 0, len(sh.openWindows)) + for _, window := range 
sh.openWindows {
+			if window.worker != wid {
+				newWindows = append(newWindows, window)
+			}
 		}
+	sh.openWindows = newWindows
+
+	log.Debugf("dropWorker %d", wid)
+
+	go func() {
+		if err := w.w.Close(); err != nil {
+			log.Warnf("closing worker %d: %+v", wid, err)
+		}
+	}()
+	}
 }
 
 func (sh *scheduler) schedClose() {
 	sh.workersLk.Lock()
 	defer sh.workersLk.Unlock()
+	log.Debugf("closing scheduler")
 
 	for i, w := range sh.workers {
-		if err := w.w.Close(); err != nil {
-			log.Errorf("closing worker %d: %+v", i, err)
-		}
+		sh.workerCleanup(i, w)
 	}
 }
 
-func (sh *scheduler) Close() error {
+func (sh *scheduler) Close(ctx context.Context) error {
 	close(sh.closing)
+	select {
+	case <-sh.closed:
+	case <-ctx.Done():
+		return ctx.Err()
+	}
 	return nil
 }
diff --git a/sched_test.go b/sched_test.go
index e6bd8d220..67a5eeed3 100644
--- a/sched_test.go
+++ b/sched_test.go
@@ -119,6 +119,7 @@ func (s *schedTestWorker) Closing(ctx context.Context) (<-chan struct{}, error)
 
 func (s *schedTestWorker) Close() error {
 	if !s.closed {
+		log.Info("close schedTestWorker")
 		s.closed = true
 		close(s.closing)
 	}
@@ -169,11 +170,11 @@ func TestSchedStartStop(t *testing.T) {
 
 	addTestWorker(t, sched, stores.NewIndex(), "fred", nil)
 
-	sched.schedClose()
+	require.NoError(t, sched.Close(context.TODO()))
 }
 
 func TestSched(t *testing.T) {
-	ctx, done := context.WithTimeout(context.Background(), 20*time.Second)
+	ctx, done := context.WithTimeout(context.Background(), 30*time.Second)
 	defer done()
 
 	spt := abi.RegisteredSealProof_StackedDrg32GiBV1
@@ -301,7 +302,7 @@ func TestSched(t *testing.T) {
 		log.Info("wait for async stuff")
 		rm.wg.Wait()
 
-		sched.schedClose()
+		require.NoError(t, sched.Close(context.TODO()))
 	}
 }
 
diff --git a/sched_watch.go b/sched_watch.go
index 214489083..d93cf1af3 100644
--- a/sched_watch.go
+++ b/sched_watch.go
@@ -74,7 +74,11 @@ func (sh *scheduler) runWorkerWatcher() {
 			caseToWorker[toSet] = wid
 
 		default:
-			wid := caseToWorker[n]
+			wid, found := caseToWorker[n]
+			if !found {
+				log.Errorf("worker ID not found for case %d", n)
+				continue
+			}
 
 			delete(caseToWorker, n)
 
 			cases[n] = reflect.SelectCase{
From c7da20e53cfaa712a1aad22f4426b747f381c13c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C5=81ukasz=20Magiera?=
Date: Tue, 21 Jul 2020 20:01:25 +0200
Subject: [PATCH 32/51] Add api to get active tasks

---
 manager.go          |   5 +-
 sched.go            |   7 ++-
 sched_test.go       |   5 +-
 sealtasks/task.go   |  24 +++++++++
 stats.go            |  13 +++++
 storiface/worker.go |  15 ++++++
 work_tracker.go     | 129 ++++++++++++++++++++++++++++++++++++++++++++
 7 files changed, 194 insertions(+), 4 deletions(-)
 create mode 100644 work_tracker.go

diff --git a/manager.go b/manager.go
index fc3be18c1..063456fa9 100644
--- a/manager.go
+++ b/manager.go
@@ -166,7 +166,10 @@ func (m *Manager) AddWorker(ctx context.Context, w Worker) error {
 	}
 
 	m.sched.newWorkers <- &workerHandle{
-		w:    w,
+		w: w,
+		wt: &workTracker{
+			running: map[uint64]storiface.WorkerJob{},
+		},
 		info: info,
 		preparing: &activeResources{},
 		active:    &activeResources{},
diff --git a/sched.go b/sched.go
index bec5ee0c5..ed48d097b 100644
--- a/sched.go
+++ b/sched.go
@@ -82,6 +82,9 @@ type workerHandle struct {
 	preparing *activeResources
 	active    *activeResources
 
+	// stats / tracking
+	wt *workTracker
+
 	// for sync manager goroutine closing
 	cleanupStarted bool
 	closedMgr      chan struct{}
@@ -486,7 +489,7 @@ func (sh *scheduler) assignWorker(taskDone chan struct{}, wid WorkerID, w *worke
 	w.preparing.add(w.info.Resources, needRes)
 
 	go func() {
-		err := req.prepare(req.ctx, w.w)
+		err := req.prepare(req.ctx, w.wt.worker(w.w))
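		// Editor's note, not part of the original patch: w.wt.worker(w.w)
		// wraps the Worker in the trackedWorker defined in work_tracker.go
		// below; every sealing call on the wrapper records a
		// storiface.WorkerJob in workTracker.running for its duration. That
		// is what the new Manager.WorkerJobs() API reports, e.g.
		// (hypothetical caller):
		//
		//	for wid, jobs := range mgr.WorkerJobs() {
		//		for _, j := range jobs {
		//			fmt.Println(wid, j.Sector, j.Task.Short(), time.Since(j.Start))
		//		}
		//	}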
sh.workersLk.Lock() if err != nil { @@ -519,7 +522,7 @@ func (sh *scheduler) assignWorker(taskDone chan struct{}, wid WorkerID, w *worke case <-sh.closing: } - err = req.work(req.ctx, w.w) + err = req.work(req.ctx, w.wt.worker(w.w)) select { case req.ret <- workerResponse{err: err}: diff --git a/sched_test.go b/sched_test.go index 67a5eeed3..caf7f0b4b 100644 --- a/sched_test.go +++ b/sched_test.go @@ -156,7 +156,10 @@ func addTestWorker(t *testing.T, sched *scheduler, index *stores.Index, name str require.NoError(t, err) sched.newWorkers <- &workerHandle{ - w: w, + w: w, + wt: &workTracker{ + running: map[uint64]storiface.WorkerJob{}, + }, info: info, preparing: &activeResources{}, active: &activeResources{}, diff --git a/sealtasks/task.go b/sealtasks/task.go index 978107c85..ad5ce01bb 100644 --- a/sealtasks/task.go +++ b/sealtasks/task.go @@ -28,6 +28,30 @@ var order = map[TaskType]int{ TTReadUnsealed: 0, } +var shortNames = map[TaskType]string{ + TTAddPiece: "AP ", + + TTPreCommit1: "PC1", + TTPreCommit2: "PC2", + TTCommit1: "C1 ", + TTCommit2: "C2 ", + + TTFinalize: "FIN", + + TTFetch: "GET", + TTUnseal: "UNS", + TTReadUnsealed: "RD ", +} + func (a TaskType) Less(b TaskType) bool { return order[a] < order[b] } + +func (a TaskType) Short() string { + n, ok := shortNames[a] + if !ok { + return "UNK" + } + + return n +} diff --git a/stats.go b/stats.go index dbbee07f3..ee88898a4 100644 --- a/stats.go +++ b/stats.go @@ -20,3 +20,16 @@ func (m *Manager) WorkerStats() map[uint64]storiface.WorkerStats { return out } + +func (m *Manager) WorkerJobs() map[uint64][]storiface.WorkerJob { + m.sched.workersLk.Lock() + defer m.sched.workersLk.Unlock() + + out := map[uint64][]storiface.WorkerJob{} + + for id, handle := range m.sched.workers { + out[uint64(id)] = handle.wt.Running() + } + + return out +} diff --git a/storiface/worker.go b/storiface/worker.go index 0f49e8971..01ef59d36 100644 --- a/storiface/worker.go +++ b/storiface/worker.go @@ -1,5 +1,12 @@ package storiface +import ( + "time" + + "github.com/filecoin-project/sector-storage/sealtasks" + "github.com/filecoin-project/specs-actors/actors/abi" +) + type WorkerInfo struct { Hostname string @@ -24,3 +31,11 @@ type WorkerStats struct { GpuUsed bool CpuUse uint64 } + +type WorkerJob struct { + ID uint64 + Sector abi.SectorID + Task sealtasks.TaskType + + Start time.Time +} diff --git a/work_tracker.go b/work_tracker.go new file mode 100644 index 000000000..f1e243ed2 --- /dev/null +++ b/work_tracker.go @@ -0,0 +1,129 @@ +package sectorstorage + +import ( + "context" + "io" + "sync" + "time" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/specs-actors/actors/abi" + "github.com/filecoin-project/specs-storage/storage" + + "github.com/filecoin-project/sector-storage/sealtasks" + "github.com/filecoin-project/sector-storage/stores" + "github.com/filecoin-project/sector-storage/storiface" +) + +type workTracker struct { + lk sync.Mutex + + ctr uint64 + running map[uint64]storiface.WorkerJob + + // TODO: done, aggregate stats, queue stats, scheduler feedback +} + +func (wt *workTracker) track(sid abi.SectorID, task sealtasks.TaskType) func() { + wt.lk.Lock() + defer wt.lk.Unlock() + + id := wt.ctr + wt.ctr++ + + wt.running[id] = storiface.WorkerJob{ + ID: id, + Sector: sid, + Task: task, + Start: time.Now(), + } + + return func() { + wt.lk.Lock() + defer wt.lk.Unlock() + + delete(wt.running, id) + } +} + +func (wt *workTracker) worker(w Worker) Worker { + return &trackedWorker{ + Worker: w, + tracker: wt, + } +} + +func (wt 
*workTracker) Running() []storiface.WorkerJob { + wt.lk.Lock() + defer wt.lk.Unlock() + + out := make([]storiface.WorkerJob, 0, len(wt.running)) + for _, job := range wt.running { + out = append(out, job) + } + + return out +} + +type trackedWorker struct { + Worker + + tracker *workTracker +} + +func (t *trackedWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) { + defer t.tracker.track(sector, sealtasks.TTPreCommit1)() + + return t.Worker.SealPreCommit1(ctx, sector, ticket, pieces) +} + +func (t *trackedWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storage.SectorCids, error) { + defer t.tracker.track(sector, sealtasks.TTPreCommit2)() + + return t.Worker.SealPreCommit2(ctx, sector, pc1o) +} + +func (t *trackedWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) { + defer t.tracker.track(sector, sealtasks.TTCommit1)() + + return t.Worker.SealCommit1(ctx, sector, ticket, seed, pieces, cids) +} + +func (t *trackedWorker) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storage.Proof, error) { + defer t.tracker.track(sector, sealtasks.TTCommit2)() + + return t.Worker.SealCommit2(ctx, sector, c1o) +} + +func (t *trackedWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error { + defer t.tracker.track(sector, sealtasks.TTFinalize)() + + return t.Worker.FinalizeSector(ctx, sector, keepUnsealed) +} + +func (t *trackedWorker) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) { + defer t.tracker.track(sector, sealtasks.TTAddPiece)() + + return t.Worker.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData) +} + +func (t *trackedWorker) Fetch(ctx context.Context, s abi.SectorID, ft stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) error { + defer t.tracker.track(s, sealtasks.TTFetch)() + + return t.Worker.Fetch(ctx, s, ft, ptype, am) +} + +func (t *trackedWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) error { + defer t.tracker.track(id, sealtasks.TTUnseal)() + + return t.Worker.UnsealPiece(ctx, id, index, size, randomness, cid) +} + +func (t *trackedWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error { + defer t.tracker.track(id, sealtasks.TTReadUnsealed)() + + return t.Worker.ReadPiece(ctx, writer, id, index, size) +} + +var _ Worker = &trackedWorker{} From fafecdfd17395af434e6053396b08afa71568706 Mon Sep 17 00:00:00 2001 From: yaohcn Date: Thu, 23 Jul 2020 20:34:27 +0800 Subject: [PATCH 33/51] add RPC timeout --- sched.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/sched.go b/sched.go index ed48d097b..af28b1902 100644 --- a/sched.go +++ b/sched.go @@ -258,7 +258,9 @@ func (sh *scheduler) trySched() { continue } - ok, err := task.sel.Ok(task.ctx, task.taskType, sh.spt, worker) + rpcCtx, cancel := context.WithTimeout(task.ctx, SelectorTimeout) + ok, err := task.sel.Ok(rpcCtx, task.taskType, sh.spt, worker) + cancel() if err != nil { log.Errorf("trySched(1) req.sel.Ok 
error: %+v", err)
 			continue
 		}
From b2fe2c0e3b38b13040539cfa87df6d3522520fd0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C5=81ukasz=20Magiera?=
Date: Thu, 23 Jul 2020 19:00:20 +0200
Subject: [PATCH 34/51] mock: More accurate WindowPost

---
 mock/mock.go | 105 +++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 94 insertions(+), 11 deletions(-)

diff --git a/mock/mock.go b/mock/mock.go
index 55c38967c..55b103ab8 100644
--- a/mock/mock.go
+++ b/mock/mock.go
@@ -5,7 +5,6 @@ import (
 	"context"
 	"fmt"
 	"io"
-	"math"
 	"math/rand"
 	"sync"
 
@@ -34,14 +33,22 @@ type SectorMgr struct {
 
 type mockVerif struct{}
 
-func NewMockSectorMgr(ssize abi.SectorSize) *SectorMgr {
+func NewMockSectorMgr(ssize abi.SectorSize, genesisSectors []abi.SectorID) *SectorMgr {
 	rt, err := ffiwrapper.SealProofTypeFromSectorSize(ssize)
 	if err != nil {
 		panic(err)
 	}
 
+	sectors := make(map[abi.SectorID]*sectorState)
+	for _, sid := range genesisSectors {
+		sectors[sid] = &sectorState{
+			failed: false,
+			state:  stateCommit,
+		}
+	}
+
 	return &SectorMgr{
-		sectors:      make(map[abi.SectorID]*sectorState),
+		sectors:      sectors,
 		pieces:       map[cid.Cid][]byte{},
 		sectorSize:   ssize,
 		nextSectorID: 5,
@@ -258,27 +265,57 @@ func AddOpFinish(ctx context.Context) (context.Context, func()) {
 }
 
 func (mgr *SectorMgr) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, error) {
-	return generateFakePoSt(sectorInfo, abi.RegisteredSealProof.RegisteredWinningPoStProof), nil
+	return generateFakePoSt(sectorInfo, abi.RegisteredSealProof.RegisteredWinningPoStProof, randomness), nil
 }
 
 func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, []abi.SectorID, error) {
-	return generateFakePoSt(sectorInfo, abi.RegisteredSealProof.RegisteredWindowPoStProof), nil, nil
+	si := make([]abi.SectorInfo, 0, len(sectorInfo))
+	var skipped []abi.SectorID
+
+	for _, info := range sectorInfo {
+		sid := abi.SectorID{
+			Miner:  minerID,
+			Number: info.SectorNumber,
+		}
+
+		_, found := mgr.sectors[sid]
+
+		if found && !mgr.sectors[sid].failed {
+			si = append(si, info)
+		} else {
+			skipped = append(skipped, sid)
+		}
+	}
+
+	return generateFakePoSt(si, abi.RegisteredSealProof.RegisteredWindowPoStProof, randomness), skipped, nil
 }
 
-func generateFakePoSt(sectorInfo []abi.SectorInfo, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error)) []abi.PoStProof {
-	se, err := sectorInfo[0].SealProof.WindowPoStPartitionSectors()
+func generateFakePoSt(sectorInfo []abi.SectorInfo, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error), randomness abi.PoStRandomness) []abi.PoStProof {
+	sectors := abi.NewBitField()
+	for _, info := range sectorInfo {
+		sectors.Set(uint64(info.SectorNumber))
+	}
+
+	wp, err := rpt(sectorInfo[0].SealProof)
 	if err != nil {
 		panic(err)
 	}
-	wp, err := rpt(sectorInfo[0].SealProof)
+
+	var proofBuf bytes.Buffer
+
+	_, err = proofBuf.Write(randomness)
 	if err != nil {
 		panic(err)
 	}
 
+	if err := sectors.MarshalCBOR(&proofBuf); err != nil {
+		panic(err)
+	}
+
 	return []abi.PoStProof{
 		{
 			PoStProof:  wp,
-			ProofBytes: make([]byte, 192*int(math.Ceil(float64(len(sectorInfo))/float64(se)))),
+			ProofBytes: proofBuf.Bytes(),
 		},
 	}
 }
@@ -335,8 +372,18 @@ func (mgr *SectorMgr) Remove(ctx context.Context, sector abi.SectorID) error {
 	return nil
 }
 
-func (mgr *SectorMgr) CheckProvable(context.Context, abi.RegisteredSealProof, []abi.SectorID) ([]abi.SectorID, error) {
-	return nil, 
nil
+func (mgr *SectorMgr) CheckProvable(ctx context.Context, spt abi.RegisteredSealProof, ids []abi.SectorID) ([]abi.SectorID, error) {
+	var bad []abi.SectorID
+
+	for _, sid := range ids {
+		_, found := mgr.sectors[sid]
+
+		if !found || mgr.sectors[sid].failed {
+			bad = append(bad, sid)
+		}
+	}
+
+	return bad, nil
 }
 
 func (m mockVerif) VerifySeal(svi abi.SealVerifyInfo) (bool, error) {
@@ -358,6 +405,42 @@ func (m mockVerif) VerifyWinningPoSt(ctx context.Context, info abi.WinningPoStVe
 }
 
 func (m mockVerif) VerifyWindowPoSt(ctx context.Context, info abi.WindowPoStVerifyInfo) (bool, error) {
+	if len(info.Proofs) != 1 {
+		return false, xerrors.Errorf("expected 1 proof entry")
+	}
+
+	proof := info.Proofs[0]
+
+	if !bytes.Equal(proof.ProofBytes[:len(info.Randomness)], info.Randomness) {
+		return false, xerrors.Errorf("bad randomness")
+	}
+
+	sectors := abi.NewBitField()
+	if err := sectors.UnmarshalCBOR(bytes.NewReader(proof.ProofBytes[len(info.Randomness):])); err != nil {
+		return false, xerrors.Errorf("unmarshaling sectors bitfield from \"proof\": %w", err)
+	}
+
+	challenged := abi.NewBitField()
+	for _, sector := range info.ChallengedSectors {
+		challenged.Set(uint64(sector.SectorNumber))
+	}
+
+	{
+		b1, err := sectors.MarshalJSON()
+		if err != nil {
+			return false, err
+		}
+
+		b2, err := challenged.MarshalJSON()
+		if err != nil {
+			return false, err
+		}
+
+		if !bytes.Equal(b1, b2) {
+			return false, xerrors.Errorf("proven and challenged sector sets didn't match: %s != %s", string(b1), string(b2))
+		}
+	}
+
 	return true, nil
 }
 
From 3791008b011c4aaa2572460f52eb5aa1771e7704 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C5=81ukasz=20Magiera?=
Date: Thu, 23 Jul 2020 19:46:51 +0200
Subject: [PATCH 35/51] mock: Fix tests

---
 mock/mock_test.go  | 2 +-
 testworker_test.go | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/mock/mock_test.go b/mock/mock_test.go
index 5f4b9c428..c7d43e8b9 100644
--- a/mock/mock_test.go
+++ b/mock/mock_test.go
@@ -9,7 +9,7 @@ import (
 )
 
 func TestOpFinish(t *testing.T) {
-	sb := NewMockSectorMgr(2048)
+	sb := NewMockSectorMgr(2048, nil)
 
 	sid, pieces, err := sb.StageFakeData(123)
 	if err != nil {
diff --git a/testworker_test.go b/testworker_test.go
index 5ca51b771..bdfff1915 100644
--- a/testworker_test.go
+++ b/testworker_test.go
@@ -37,7 +37,7 @@ func newTestWorker(wcfg WorkerConfig, lstor *stores.Local) *testWorker {
 		acceptTasks: acceptTasks,
 		lstor:       lstor,
 
-		mockSeal: mock.NewMockSectorMgr(ssize),
+		mockSeal: mock.NewMockSectorMgr(ssize, nil),
 	}
 }
 
From ff9ffddd5769696b6769e9a89aa7869985ad3b03 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C5=81ukasz=20Magiera?=
Date: Fri, 24 Jul 2020 16:43:41 +0200
Subject: [PATCH 36/51] remote: Limit parallel fetches

---
 manager.go       |  4 +++-
 manager_test.go  |  2 +-
 stores/remote.go | 21 ++++++++++++++++++++-
 3 files changed, 24 insertions(+), 3 deletions(-)

diff --git a/manager.go b/manager.go
index 063456fa9..64dd2dcbc 100644
--- a/manager.go
+++ b/manager.go
@@ -76,6 +76,8 @@ type Manager struct {
 }
 
 type SealerConfig struct {
+	ParallelFetchLimit int
+
 	// Local worker config
 	AllowPreCommit1 bool
 	AllowPreCommit2 bool
@@ -96,7 +98,7 @@ func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg
 		return nil, xerrors.Errorf("creating prover instance: %w", err)
 	}
 
-	stor := stores.NewRemote(lstor, si, http.Header(sa))
+	stor := stores.NewRemote(lstor, si, http.Header(sa), sc.ParallelFetchLimit)
 
 	m := &Manager{
 		scfg: cfg,
diff --git a/manager_test.go b/manager_test.go
index 9cee303c5..10e6a5020 100644
--- 
a/manager_test.go +++ b/manager_test.go @@ -95,7 +95,7 @@ func newTestMgr(ctx context.Context, t *testing.T) (*Manager, *stores.Local, *st prover, err := ffiwrapper.New(&readonlyProvider{stor: lstor}, cfg) require.NoError(t, err) - stor := stores.NewRemote(lstor, si, nil) + stor := stores.NewRemote(lstor, si, nil, 6000) m := &Manager{ scfg: cfg, diff --git a/stores/remote.go b/stores/remote.go index c78f026f4..ee68b5ef6 100644 --- a/stores/remote.go +++ b/stores/remote.go @@ -29,6 +29,8 @@ type Remote struct { index SectorIndex auth http.Header + limit chan struct{} + fetchLk sync.Mutex fetching map[abi.SectorID]chan struct{} } @@ -41,12 +43,14 @@ func (r *Remote) RemoveCopies(ctx context.Context, s abi.SectorID, types SectorF return r.local.RemoveCopies(ctx, s, types) } -func NewRemote(local *Local, index SectorIndex, auth http.Header) *Remote { +func NewRemote(local *Local, index SectorIndex, auth http.Header, fetchLimit int) *Remote { return &Remote{ local: local, index: index, auth: auth, + limit: make(chan struct{}, fetchLimit), + fetching: map[abi.SectorID]chan struct{}{}, } } @@ -165,6 +169,21 @@ func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, spt abi. func (r *Remote) fetch(ctx context.Context, url, outname string) error { log.Infof("Fetch %s -> %s", url, outname) + if len(r.limit) >= cap(r.limit) { + log.Infof("Throttling fetch, %d already running", len(r.limit)) + } + + // TODO: Smarter throttling + // * Priority (just going sequentially is still pretty good) + // * Per interface + // * Aware of remote load + select { + case r.limit <- struct{}{}: + defer func() { <-r.limit }() + case <-ctx.Done(): + return xerrors.Errorf("context error while waiting for fetch limiter: %w", ctx.Err()) + } + req, err := http.NewRequest("GET", url, nil) if err != nil { return xerrors.Errorf("request: %w", err) From 94e20ffee5d2278efe14418539b3e8dd4fb39755 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 24 Jul 2020 16:54:00 +0200 Subject: [PATCH 37/51] remote: Fetch into temp files --- stores/remote.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/stores/remote.go b/stores/remote.go index ee68b5ef6..2b6b19384 100644 --- a/stores/remote.go +++ b/stores/remote.go @@ -150,12 +150,18 @@ func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, spt abi. 
// TODO: see what we have local, prefer that for _, url := range info.URLs { - err := r.fetch(ctx, url, dest) + tempDest := dest + ".fetch" + + err := r.fetch(ctx, url, tempDest) if err != nil { - merr = multierror.Append(merr, xerrors.Errorf("fetch error %s (storage %s) -> %s: %w", url, info.ID, dest, err)) + merr = multierror.Append(merr, xerrors.Errorf("fetch error %s (storage %s) -> %s: %w", url, info.ID, tempDest, err)) continue } + if err := move(tempDest, dest); err != nil { + return "", "", "", xerrors.Errorf("fetch move error (storage %s) %s -> %s: %w", info.ID, tempDest, dest, err) + } + if merr != nil { log.Warnw("acquireFromRemote encountered errors when fetching sector from remote", "errors", merr) } From 295300ff4671290c9751fdbbccef9b17fc182cae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Fri, 24 Jul 2020 19:39:25 +0200 Subject: [PATCH 38/51] stores: Fix remote fetch move --- go.mod | 15 ++++++++------- go.sum | 47 +++++++++++++++++++++++++++++++---------------- stores/local.go | 6 +++++- stores/remote.go | 24 ++++++++++++++++++++++-- 4 files changed, 66 insertions(+), 26 deletions(-) diff --git a/go.mod b/go.mod index 994a99a3f..4912e5d1d 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/filecoin-project/filecoin-ffi v0.30.4-0.20200716204036-cddc56607e1d github.com/filecoin-project/go-bitfield v0.0.4-0.20200703174658-f4a5758051a1 github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f - github.com/filecoin-project/go-paramfetch v0.0.1 + github.com/filecoin-project/go-paramfetch v0.0.2-0.20200218225740-47c639bab663 github.com/filecoin-project/specs-actors v0.6.1 github.com/filecoin-project/specs-storage v0.1.1-0.20200622113353-88a9704877ea github.com/google/uuid v1.1.1 @@ -16,19 +16,20 @@ require ( github.com/hashicorp/go-multierror v1.0.0 github.com/ipfs/go-cid v0.0.6 github.com/ipfs/go-ipfs-files v0.0.7 + github.com/ipfs/go-ipld-cbor v0.0.5-0.20200204214505-252690b78669 // indirect github.com/ipfs/go-log v1.0.3 github.com/ipfs/go-log/v2 v2.0.3 github.com/mattn/go-isatty v0.0.9 // indirect github.com/mitchellh/go-homedir v1.1.0 github.com/stretchr/testify v1.4.0 go.opencensus.io v0.22.3 - go.uber.org/atomic v1.5.1 // indirect - go.uber.org/zap v1.13.0 // indirect - golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 // indirect - golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f // indirect - golang.org/x/sys v0.0.0-20200107162124-548cf772de50 // indirect - golang.org/x/tools v0.0.0-20200108195415-316d2f248479 // indirect + go.uber.org/zap v1.14.1 // indirect + golang.org/x/crypto v0.0.0-20200317142112-1b76d66859c6 // indirect + golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect + golang.org/x/sys v0.0.0-20200317113312-5766fd39f98d // indirect + golang.org/x/tools v0.0.0-20200318150045-ba25ddc85566 // indirect golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 + honnef.co/go/tools v0.0.1-2020.1.3 // indirect ) replace github.com/filecoin-project/filecoin-ffi => ./extern/filecoin-ffi diff --git a/go.sum b/go.sum index 67fa4d38d..a1d803274 100644 --- a/go.sum +++ b/go.sum @@ -30,6 +30,7 @@ github.com/fatih/color v1.8.0/go.mod h1:3l45GVGkyrnYNl9HoIjnp2NnNWvh6hLAqD8yTfGj github.com/filecoin-project/go-address v0.0.2-0.20200218010043-eb9bb40ed5be h1:TooKBwR/g8jG0hZ3lqe9S5sy2vTUcLOZLlz3M5wGn2E= github.com/filecoin-project/go-address v0.0.2-0.20200218010043-eb9bb40ed5be/go.mod h1:SAOwJoakQ8EPjwNIsiakIQKsoKdkcbx8U3IapgCg9R0= github.com/filecoin-project/go-amt-ipld/v2 
v2.0.1-0.20200131012142-05d80eeccc5e/go.mod h1:boRtQhzmxNocrMxOXo1NYn4oUc1NGvR8tEa79wApNXg= +github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200424220931-6263827e49f2 h1:jamfsxfK0Q9yCMHt8MPWx7Aa/O9k2Lve8eSc6FILYGQ= github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200424220931-6263827e49f2/go.mod h1:boRtQhzmxNocrMxOXo1NYn4oUc1NGvR8tEa79wApNXg= github.com/filecoin-project/go-bitfield v0.0.0-20200416002808-b3ee67ec9060/go.mod h1:iodsLxOFZnqKtjj2zkgqzoGNrv6vUqj69AT/J8DKXEw= github.com/filecoin-project/go-bitfield v0.0.1 h1:Xg/JnrqqE77aJVKdbEyR04n9FZQWhwrN+buDgQCVpZU= @@ -40,8 +41,8 @@ github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMX github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f h1:GxJzR3oRIMTPtpZ0b7QF8FKPK6/iPAc7trhlL5k/g+s= github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= -github.com/filecoin-project/go-paramfetch v0.0.1 h1:gV7bs5YaqlgpGFMiLxInGK2L1FyCXUE0rimz4L7ghoE= -github.com/filecoin-project/go-paramfetch v0.0.1/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= +github.com/filecoin-project/go-paramfetch v0.0.2-0.20200218225740-47c639bab663 h1:eYxi6vI5CyeXD15X1bB3bledDXbqKxqf0wQzTLgwYwA= +github.com/filecoin-project/go-paramfetch v0.0.2-0.20200218225740-47c639bab663/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= github.com/filecoin-project/specs-actors v0.3.0/go.mod h1:nQYnFbQ7Y0bHZyq6HDEuVlCPR+U3z5Q3wMOQ+2aiV+Y= github.com/filecoin-project/specs-actors v0.6.1 h1:rhHlEzqcuuQU6oKc4csuq+/kQBDZ4EXtSomoN2XApCA= github.com/filecoin-project/specs-actors v0.6.1/go.mod h1:dRdy3cURykh2R8O/DKqy8olScl70rmIS7GrB4hB1IDY= @@ -58,6 +59,7 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= @@ -92,6 +94,8 @@ github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyB github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= github.com/ipfs/go-ipld-cbor v0.0.4 h1:Aw3KPOKXjvrm6VjwJvFf1F1ekR/BH3jdof3Bk7OTiSA= github.com/ipfs/go-ipld-cbor v0.0.4/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= +github.com/ipfs/go-ipld-cbor v0.0.5-0.20200204214505-252690b78669 h1:jIVle1vGSzxyUhseYNEqd7qcDVRrIbJ7UxGwao70cF0= +github.com/ipfs/go-ipld-cbor v0.0.5-0.20200204214505-252690b78669/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= github.com/ipfs/go-ipld-format v0.0.1 h1:HCu4eB/Gh+KD/Q0M8u888RFkorTWNIL3da4oc5dwc80= github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= github.com/ipfs/go-ipld-format v0.0.2 h1:OVAGlyYT6JPZ0pEfGntFPS40lfrDmaDbQwNHEY2G9Zs= @@ -105,6 +109,7 @@ github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBW github.com/ipsn/go-secp256k1 
v0.0.0-20180726113642-9d62b9f0bc52 h1:QG4CGBqCeuBo6aZlGAamSkxWdgWfZGeE49eUOWJPA4c= github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52/go.mod h1:fdg+/X9Gg4AsAIzWpEHwnqd+QY3b7lajxyjE1m4hkq4= github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= +github.com/jbenet/goprocess v0.1.3 h1:YKyIEECS/XvcfHtBzxtjBBbWK+MbvA6dG8ASiqwvr10= github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -228,25 +233,27 @@ github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e h1:JY8o/ebU github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.5.1 h1:rsqfU5vBkVknbhUGbAUwQKR2H4ItV8tjJ+6kJX4cxHM= -go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.4.0 h1:f3WCSC2KzAcBXGATIxAB1E2XuCpNU255wNKZ505qi3E= go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0 h1:nR6NoDBgAf67s68NhaXbsojM+2gxp3S1hWkHDl27pVU= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo= +go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod 
h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -255,17 +262,19 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200317142112-1b76d66859c6 h1:TjszyFsQsyZNHwdVdZ5m7bjmreu0znc2kRYsEml9/Ww= +golang.org/x/crypto v0.0.0-20200317142112-1b76d66859c6/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -274,12 +283,14 @@ golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180202135801-37707fdb30a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -295,8 +306,8 @@ golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae h1:QoJmnb9uyPCrH8GIg9uRLn4Ta45yhcQtpymCd0AavO8= golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200107162124-548cf772de50 h1:YvQ10rzcqWXLlJZ3XCUoO25savxmscf4+SC+ZqiCHhA= -golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200317113312-5766fd39f98d h1:62ap6LNOjDU6uGmKXHJbSfciMoV+FeI1sRXx/pLDL44= +golang.org/x/sys v0.0.0-20200317113312-5766fd39f98d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -310,9 +321,11 @@ golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200108195415-316d2f248479 h1:csuS+MHeEA2eWhyjQCMaPMq4z1+/PohkBSjJZHSIbOE= -golang.org/x/tools v0.0.0-20200108195415-316d2f248479/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200318150045-ba25ddc85566 h1:OXjomkWHhzUx4+HldlJ2TsMxJdWgEo5CTtspD1wdhdk= +golang.org/x/tools v0.0.0-20200318150045-ba25ddc85566/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= @@ -341,5 +354,7 @@ gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M= howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= diff --git a/stores/local.go b/stores/local.go index cbc9dbae1..f1affb397 100644 --- a/stores/local.go +++ b/stores/local.go @@ -3,7 +3,6 @@ package stores import ( "context" "encoding/json" - "github.com/filecoin-project/sector-storage/fsutil" "io/ioutil" "math/bits" "math/rand" @@ -14,6 +13,7 @@ import ( "golang.org/x/xerrors" + "github.com/filecoin-project/sector-storage/fsutil" "github.com/filecoin-project/specs-actors/actors/abi" ) @@ -179,6 +179,10 @@ func (st *Local) OpenPath(ctx context.Context, p string) error { } for _, ent := range ents { + if ent.Name() == FetchTempSubdir { + continue + } + sid, err := ParseSectorID(ent.Name()) if err != nil { return xerrors.Errorf("parse sector id %s: %w", ent.Name(), err) diff --git a/stores/remote.go b/stores/remote.go index 2b6b19384..42b730b40 100644 --- a/stores/remote.go +++ b/stores/remote.go @@ -11,6 +11,7 @@ import ( "net/url" "os" gopath "path" + "path/filepath" "sort" "sync" @@ -24,6 +25,8 @@ import ( "github.com/filecoin-project/sector-storage/tarutil" ) +var FetchTempSubdir = "fetching" + type Remote struct { local *Local index SectorIndex @@ -124,6 +127,16 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, spt abi.Regi return paths, stores, nil } +func tempDest(spath string) (string, error) { + st, b := filepath.Split(spath) + tempdir := filepath.Join(st, FetchTempSubdir) + if err := os.MkdirAll(tempdir, 755); err != nil { + return "", xerrors.Errorf("creating temp fetch dir: %w", err) + } + + return filepath.Join(tempdir, b), nil +} + func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, spt abi.RegisteredSealProof, fileType SectorFileType, pathType PathType, op AcquireMode) (string, ID, string, error) { si, err := r.index.StorageFindSector(ctx, s, fileType, false) if err != nil { @@ -150,9 +163,16 @@ func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, spt abi. // TODO: see what we have local, prefer that for _, url := range info.URLs { - tempDest := dest + ".fetch" + tempDest, err := tempDest(dest) + if err != nil { + return "", "", "", err + } - err := r.fetch(ctx, url, tempDest) + if err := os.RemoveAll(dest); err != nil { + return "", "", "", xerrors.Errorf("removing dest: %w", err) + } + + err = r.fetch(ctx, url, tempDest) if err != nil { merr = multierror.Append(merr, xerrors.Errorf("fetch error %s (storage %s) -> %s: %w", url, info.ID, tempDest, err)) continue From 7aa1ccf726afa9a4ab68908d9f89c7be190d81a2 Mon Sep 17 00:00:00 2001 From: ocean <3408598@qq.com> Date: Mon, 27 Jul 2020 14:21:29 +0800 Subject: [PATCH 39/51] modify workerRequest --- sched.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/sched.go b/sched.go index af28b1902..a7a6d3e86 100644 --- a/sched.go +++ b/sched.go @@ -122,6 +122,7 @@ type workerRequest struct { index int // The index of the item in the heap. 
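+	// indexHeap records this request's position in the scheduling queue at the
+	// moment trySched snapshots it, so the second scheduling pass can look up
+	// its per-request window list even after earlier queue entries are removed.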
+ indexHeap int ret chan<- workerResponse ctx context.Context } @@ -250,6 +251,7 @@ func (sh *scheduler) trySched() { task := (*sh.schedQueue)[sqi] needRes := ResourceTable[task.taskType][sh.spt] + task.indexHeap = sqi for wnd, windowRequest := range sh.openWindows { worker := sh.workers[windowRequest.worker] @@ -312,7 +314,7 @@ func (sh *scheduler) trySched() { needRes := ResourceTable[task.taskType][sh.spt] selectedWindow := -1 - for _, wnd := range acceptableWindows[sqi+scheduled] { + for _, wnd := range acceptableWindows[task.indexHeap] { wid := sh.openWindows[wnd].worker wr := sh.workers[wid].info.Resources From 9fd91bb70aeb0fc7048f6fa1e0e10380573a8948 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 27 Jul 2020 12:17:09 +0200 Subject: [PATCH 40/51] sched: function to dump scheduler state --- manager.go | 4 ++++ sched.go | 62 +++++++++++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 63 insertions(+), 3 deletions(-) diff --git a/manager.go b/manager.go index 64dd2dcbc..4791eb5e6 100644 --- a/manager.go +++ b/manager.go @@ -494,6 +494,10 @@ func (m *Manager) FsStat(ctx context.Context, id stores.ID) (fsutil.FsStat, erro return m.storage.FsStat(ctx, id) } +func (m *Manager) SchedDiag(ctx context.Context) (interface{}, error) { + return m.sched.Info(ctx) +} + func (m *Manager) Close(ctx context.Context) error { return m.sched.Close(ctx) } diff --git a/sched.go b/sched.go index a7a6d3e86..239b52063 100644 --- a/sched.go +++ b/sched.go @@ -69,6 +69,8 @@ type scheduler struct { schedQueue *requestQueue openWindows []*schedWindowRequest + info chan func(interface{}) + closing chan struct{} closed chan struct{} testSync chan struct{} // used for testing @@ -148,6 +150,8 @@ func newScheduler(spt abi.RegisteredSealProof) *scheduler { schedQueue: &requestQueue{}, + info: make(chan func(interface{})), + closing: make(chan struct{}), closed: make(chan struct{}), } @@ -193,6 +197,17 @@ func (r *workerRequest) respond(err error) { } } +type SchedDiagRequestInfo struct { + Sector abi.SectorID + TaskType sealtasks.TaskType + Priority int +} + +type SchedDiagInfo struct { + Requests []SchedDiagRequestInfo + OpenWindows []WorkerID +} + func (sh *scheduler) runSched() { defer close(sh.closed) @@ -217,6 +232,9 @@ func (sh *scheduler) runSched() { sh.openWindows = append(sh.openWindows, req) sh.trySched() + case ireq := <-sh.info: + ireq(sh.diag()) + case <-sh.closing: sh.schedClose() return @@ -224,6 +242,26 @@ func (sh *scheduler) runSched() { } } +func (sh *scheduler) diag() SchedDiagInfo { + var out SchedDiagInfo + + for sqi := 0; sqi < sh.schedQueue.Len(); sqi++ { + task := (*sh.schedQueue)[sqi] + + out.Requests = append(out.Requests, SchedDiagRequestInfo{ + Sector: task.sector, + TaskType: task.taskType, + Priority: task.priority, + }) + } + + for _, window := range sh.openWindows { + out.OpenWindows = append(out.OpenWindows, window.worker) + } + + return out +} + func (sh *scheduler) trySched() { /* This assigns tasks to workers based on: @@ -244,7 +282,7 @@ func (sh *scheduler) trySched() { windows := make([]schedWindow, len(sh.openWindows)) acceptableWindows := make([][]int, sh.schedQueue.Len()) - log.Debugf("trySched %d queued; %d open windows", sh.schedQueue.Len(), len(windows)) + log.Debugf("SCHED %d queued; %d open windows", sh.schedQueue.Len(), len(windows)) // Step 1 for sqi := 0; sqi < sh.schedQueue.Len(); sqi++ { @@ -306,6 +344,9 @@ func (sh *scheduler) trySched() { }) } + log.Debugf("SCHED windows: %+v", windows) + log.Debugf("SCHED Acceptable win: 
%+v", acceptableWindows) + // Step 2 scheduled := 0 @@ -318,14 +359,14 @@ func (sh *scheduler) trySched() { wid := sh.openWindows[wnd].worker wr := sh.workers[wid].info.Resources - log.Debugf("trySched try assign sqi:%d sector %d to window %d", sqi, task.sector.Number, wnd) + log.Debugf("SCHED try assign sqi:%d sector %d to window %d", sqi, task.sector.Number, wnd) // TODO: allow bigger windows if !windows[wnd].allocated.canHandleRequest(needRes, wid, wr) { continue } - log.Debugf("trySched ASSIGNED sqi:%d sector %d to window %d", sqi, task.sector.Number, wnd) + log.Debugf("SCHED ASSIGNED sqi:%d sector %d to window %d", sqi, task.sector.Number, wnd) windows[wnd].allocated.add(wr, needRes) @@ -623,6 +664,21 @@ func (sh *scheduler) schedClose() { } } +func (sh *scheduler) Info(ctx context.Context) (interface{}, error) { + ch := make(chan interface{}, 1) + + sh.info <- func(res interface{}) { + ch <- res + } + + select { + case res := <- ch: + return res, nil + case <-ctx.Done(): + return nil, ctx.Err() + } +} + func (sh *scheduler) Close(ctx context.Context) error { close(sh.closing) select { From 510897a4979877b4d51fecbfaac9f9436f201ee6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 27 Jul 2020 13:20:18 +0200 Subject: [PATCH 41/51] container/heap doesn't provide sorted array --- request_queue.go | 19 +++++++++-------- request_queue_test.go | 48 +++++++++++++++++++++++++++++++++++-------- sched.go | 5 ++--- 3 files changed, 52 insertions(+), 20 deletions(-) diff --git a/request_queue.go b/request_queue.go index 0d35e9f1d..b40375dc8 100644 --- a/request_queue.go +++ b/request_queue.go @@ -1,6 +1,6 @@ package sectorstorage -import "container/heap" +import "sort" type requestQueue []*workerRequest @@ -24,21 +24,22 @@ func (q requestQueue) Swap(i, j int) { q[j].index = j } -func (q *requestQueue) Push(x interface{}) { +func (q *requestQueue) Push(x *workerRequest) { n := len(*q) - item := x.(*workerRequest) + item := x item.index = n *q = append(*q, item) + sort.Sort(q) } -func (q *requestQueue) Pop() interface{} { +func (q *requestQueue) Remove(i int) *workerRequest { old := *q n := len(old) - item := old[n-1] - old[n-1] = nil // avoid memory leak - item.index = -1 // for safety + item := old[i] + old[i] = old[n - 1] + old[n - 1] = nil + item.index = -1 *q = old[0 : n-1] + sort.Sort(q) return item } - -var _ heap.Interface = &requestQueue{} diff --git a/request_queue_test.go b/request_queue_test.go index 9bf231e39..cb4a5d5dd 100644 --- a/request_queue_test.go +++ b/request_queue_test.go @@ -1,7 +1,7 @@ package sectorstorage import ( - "container/heap" + "fmt" "testing" "github.com/filecoin-project/sector-storage/sealtasks" @@ -10,19 +10,51 @@ import ( func TestRequestQueue(t *testing.T) { rq := &requestQueue{} - heap.Push(rq, &workerRequest{taskType: sealtasks.TTAddPiece}) - heap.Push(rq, &workerRequest{taskType: sealtasks.TTPreCommit1}) - heap.Push(rq, &workerRequest{taskType: sealtasks.TTPreCommit2}) - heap.Push(rq, &workerRequest{taskType: sealtasks.TTPreCommit1}) - heap.Push(rq, &workerRequest{taskType: sealtasks.TTAddPiece}) + rq.Push(&workerRequest{taskType: sealtasks.TTAddPiece}) + rq.Push(&workerRequest{taskType: sealtasks.TTPreCommit1}) + rq.Push(&workerRequest{taskType: sealtasks.TTPreCommit2}) + rq.Push(&workerRequest{taskType: sealtasks.TTPreCommit1}) + rq.Push(&workerRequest{taskType: sealtasks.TTAddPiece}) - pt := heap.Pop(rq).(*workerRequest) + dump := func(s string) { + fmt.Println("---") + fmt.Println(s) + + for sqi := 0; sqi < rq.Len(); sqi++ { + 
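+			// walk the queue in its current, sorted order and print each entry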
task := (*rq)[sqi]
+			fmt.Println(sqi, task.taskType)
+		}
+	}
+
+	dump("start")
+
+	pt := rq.Remove(0)
+
+	dump("pop 1")
 
 	if pt.taskType != sealtasks.TTPreCommit2 {
 		t.Error("expected precommit2, got", pt.taskType)
 	}
 
-	pt = heap.Pop(rq).(*workerRequest)
+	pt = rq.Remove(0)
+
+	dump("pop 2")
+
+	if pt.taskType != sealtasks.TTPreCommit1 {
+		t.Error("expected precommit1, got", pt.taskType)
+	}
+
+	pt = rq.Remove(1)
+
+	dump("pop 3")
+
+	if pt.taskType != sealtasks.TTAddPiece {
+		t.Error("expected addpiece, got", pt.taskType)
+	}
+
+	pt = rq.Remove(0)
+
+	dump("pop 4")
 
 	if pt.taskType != sealtasks.TTPreCommit1 {
 		t.Error("expected precommit1, got", pt.taskType)
diff --git a/sched.go b/sched.go
index 239b52063..b549eb7c9 100644
--- a/sched.go
+++ b/sched.go
@@ -1,7 +1,6 @@
 package sectorstorage
 
 import (
-	"container/heap"
 	"context"
 	"fmt"
 	"math/rand"
@@ -222,7 +221,7 @@ func (sh *scheduler) runSched() {
 			sh.dropWorker(wid)
 
 		case req := <-sh.schedule:
-			heap.Push(sh.schedQueue, req)
+			sh.schedQueue.Push(req)
 			sh.trySched()
 
 			if sh.testSync != nil {
@@ -381,7 +380,7 @@ func (sh *scheduler) trySched() {
 
 		windows[selectedWindow].todo = append(windows[selectedWindow].todo, task)
 
-		heap.Remove(sh.schedQueue, sqi)
+		sh.schedQueue.Remove(sqi)
 		sqi--
 		scheduled++
 	}

From 9377cb376d25f79dfd6b8bbf4141da8d17ffdd32 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C5=81ukasz=20Magiera?=
Date: Mon, 27 Jul 2020 13:21:36 +0200
Subject: [PATCH 42/51] gofmt

---
 request_queue.go | 4 ++--
 sched.go         | 8 ++++----
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/request_queue.go b/request_queue.go
index b40375dc8..85d3abf46 100644
--- a/request_queue.go
+++ b/request_queue.go
@@ -36,8 +36,8 @@ func (q *requestQueue) Remove(i int) *workerRequest {
 	old := *q
 	n := len(old)
 	item := old[i]
-	old[i] = old[n - 1]
-	old[n - 1] = nil
+	old[i] = old[n-1]
+	old[n-1] = nil
 	item.index = -1
 	*q = old[0 : n-1]
 	sort.Sort(q)
diff --git a/sched.go b/sched.go
index b549eb7c9..ed648bf19 100644
--- a/sched.go
+++ b/sched.go
@@ -124,8 +124,8 @@ type workerRequest struct {
 	index int // The index of the item in the heap.
 	indexHeap int
 
-	ret chan<- workerResponse
-	ctx context.Context
+	ret       chan<- workerResponse
+	ctx       context.Context
 }
 
 type workerResponse struct {
@@ -203,7 +203,7 @@ type SchedDiagRequestInfo struct {
 }
 
 type SchedDiagInfo struct {
-	Requests []SchedDiagRequestInfo
+	Requests    []SchedDiagRequestInfo
 	OpenWindows []WorkerID
 }
 

From 366de97ab5530f60d681b0afc3373c36a6eabca4 Mon Sep 17 00:00:00 2001
From: jackoelv
Date: Tue, 28 Jul 2020 18:20:58 +0800
Subject: [PATCH 43/51] Update local.go

StorageBestAlloc returns a slice of storage paths sorted from best to worst.
Without a break, the loop keeps overwriting its selection and ends up
allocating the worst (last) path instead of the best (first) one.
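A minimal, self-contained sketch of the selection pattern the added break
restores (hypothetical helper, not code from this repository; the candidate
slice is assumed to be sorted best-first, as StorageBestAlloc's output is):

	// pickFirstAcceptable returns the first candidate passing the check.
	// Because the input is sorted best-first, stopping at the first match
	// selects the best path; letting the loop run to the end would keep
	// overwriting the choice with ever-worse candidates.
	func pickFirstAcceptable(sorted []string, acceptable func(string) bool) string {
		for _, p := range sorted {
			if acceptable(p) {
				return p
			}
		}
		return ""
	}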
--- stores/local.go | 1 + 1 file changed, 1 insertion(+) diff --git a/stores/local.go b/stores/local.go index 26b7ccb75..21819cd7a 100644 --- a/stores/local.go +++ b/stores/local.go @@ -273,6 +273,7 @@ func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, spt abi.Re best = filepath.Join(p.local, fileType.String(), SectorName(sid)) bestID = si.ID + break } if best == "" { From 3ee28c3b6d9ad17d9a9783801f7a39d49fc95c11 Mon Sep 17 00:00:00 2001 From: Aayush Rajasekaran Date: Thu, 30 Jul 2020 00:54:28 -0400 Subject: [PATCH 44/51] Update go-bitfield and specs-actors --- ffiwrapper/partialfile.go | 7 ++++++- go.mod | 11 +++++------ go.sum | 34 +++++++++++++++++++--------------- 3 files changed, 30 insertions(+), 22 deletions(-) diff --git a/ffiwrapper/partialfile.go b/ffiwrapper/partialfile.go index 8c4fdcc72..b1ab8c53c 100644 --- a/ffiwrapper/partialfile.go +++ b/ffiwrapper/partialfile.go @@ -145,10 +145,15 @@ func openPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*partialFil return xerrors.Errorf("getting trailer run iterator: %w", err) } - lastSet, err := rlepluslazy.LastIndex(it, true) + f, err := rlepluslazy.Fill(it) + if err != nil { + return xerrors.Errorf("filling bitfield: %w", err) + } + lastSet, err := rlepluslazy.Count(f) if err != nil { return xerrors.Errorf("finding last set byte index: %w", err) } + if lastSet > uint64(maxPieceSize) { return xerrors.Errorf("last set byte at index higher than sector size: %d > %d", lastSet, maxPieceSize) } diff --git a/go.mod b/go.mod index 4912e5d1d..8398c98a3 100644 --- a/go.mod +++ b/go.mod @@ -6,10 +6,10 @@ require ( github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e github.com/elastic/go-sysinfo v1.3.0 github.com/filecoin-project/filecoin-ffi v0.30.4-0.20200716204036-cddc56607e1d - github.com/filecoin-project/go-bitfield v0.0.4-0.20200703174658-f4a5758051a1 + github.com/filecoin-project/go-bitfield v0.1.2 github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f github.com/filecoin-project/go-paramfetch v0.0.2-0.20200218225740-47c639bab663 - github.com/filecoin-project/specs-actors v0.6.1 + github.com/filecoin-project/specs-actors v0.8.2 github.com/filecoin-project/specs-storage v0.1.1-0.20200622113353-88a9704877ea github.com/google/uuid v1.1.1 github.com/gorilla/mux v1.7.4 @@ -17,13 +17,12 @@ require ( github.com/ipfs/go-cid v0.0.6 github.com/ipfs/go-ipfs-files v0.0.7 github.com/ipfs/go-ipld-cbor v0.0.5-0.20200204214505-252690b78669 // indirect - github.com/ipfs/go-log v1.0.3 - github.com/ipfs/go-log/v2 v2.0.3 + github.com/ipfs/go-log v1.0.4 + github.com/ipfs/go-log/v2 v2.0.5 github.com/mattn/go-isatty v0.0.9 // indirect github.com/mitchellh/go-homedir v1.1.0 - github.com/stretchr/testify v1.4.0 + github.com/stretchr/testify v1.6.1 go.opencensus.io v0.22.3 - go.uber.org/zap v1.14.1 // indirect golang.org/x/crypto v0.0.0-20200317142112-1b76d66859c6 // indirect golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect golang.org/x/sys v0.0.0-20200317113312-5766fd39f98d // indirect diff --git a/go.sum b/go.sum index a1d803274..942ff725c 100644 --- a/go.sum +++ b/go.sum @@ -2,7 +2,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= -github.com/btcsuite/btcd v0.20.1-beta 
h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= @@ -30,13 +29,12 @@ github.com/fatih/color v1.8.0/go.mod h1:3l45GVGkyrnYNl9HoIjnp2NnNWvh6hLAqD8yTfGj github.com/filecoin-project/go-address v0.0.2-0.20200218010043-eb9bb40ed5be h1:TooKBwR/g8jG0hZ3lqe9S5sy2vTUcLOZLlz3M5wGn2E= github.com/filecoin-project/go-address v0.0.2-0.20200218010043-eb9bb40ed5be/go.mod h1:SAOwJoakQ8EPjwNIsiakIQKsoKdkcbx8U3IapgCg9R0= github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200131012142-05d80eeccc5e/go.mod h1:boRtQhzmxNocrMxOXo1NYn4oUc1NGvR8tEa79wApNXg= -github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200424220931-6263827e49f2 h1:jamfsxfK0Q9yCMHt8MPWx7Aa/O9k2Lve8eSc6FILYGQ= github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200424220931-6263827e49f2/go.mod h1:boRtQhzmxNocrMxOXo1NYn4oUc1NGvR8tEa79wApNXg= +github.com/filecoin-project/go-amt-ipld/v2 v2.1.0/go.mod h1:nfFPoGyX0CU9SkXX8EoCcSuHN1XcbN0c6KBh7yvP5fs= github.com/filecoin-project/go-bitfield v0.0.0-20200416002808-b3ee67ec9060/go.mod h1:iodsLxOFZnqKtjj2zkgqzoGNrv6vUqj69AT/J8DKXEw= -github.com/filecoin-project/go-bitfield v0.0.1 h1:Xg/JnrqqE77aJVKdbEyR04n9FZQWhwrN+buDgQCVpZU= github.com/filecoin-project/go-bitfield v0.0.1/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY= -github.com/filecoin-project/go-bitfield v0.0.4-0.20200703174658-f4a5758051a1 h1:xuHlrdznafh7ul5t4xEncnA4qgpQvJZEw+mr98eqHXw= -github.com/filecoin-project/go-bitfield v0.0.4-0.20200703174658-f4a5758051a1/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY= +github.com/filecoin-project/go-bitfield v0.1.2 h1:TjLregCoyP1/5lm7WCM0axyV1myIHwbjGa21skuu5tk= +github.com/filecoin-project/go-bitfield v0.1.2/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f h1:GxJzR3oRIMTPtpZ0b7QF8FKPK6/iPAc7trhlL5k/g+s= @@ -44,8 +42,9 @@ github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go github.com/filecoin-project/go-paramfetch v0.0.2-0.20200218225740-47c639bab663 h1:eYxi6vI5CyeXD15X1bB3bledDXbqKxqf0wQzTLgwYwA= github.com/filecoin-project/go-paramfetch v0.0.2-0.20200218225740-47c639bab663/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= github.com/filecoin-project/specs-actors v0.3.0/go.mod h1:nQYnFbQ7Y0bHZyq6HDEuVlCPR+U3z5Q3wMOQ+2aiV+Y= -github.com/filecoin-project/specs-actors v0.6.1 h1:rhHlEzqcuuQU6oKc4csuq+/kQBDZ4EXtSomoN2XApCA= github.com/filecoin-project/specs-actors v0.6.1/go.mod h1:dRdy3cURykh2R8O/DKqy8olScl70rmIS7GrB4hB1IDY= +github.com/filecoin-project/specs-actors v0.8.2 h1:fpAPOPqWqmzJCWHpm6P1XDRSpQrxyY5Pzh5H3doYs7Q= +github.com/filecoin-project/specs-actors v0.8.2/go.mod h1:Q3ACV5kBLvqPaYbthc/J1lGMJ5OwogmD9pzdtPRMdCw= github.com/filecoin-project/specs-storage v0.1.1-0.20200622113353-88a9704877ea h1:iixjULRQFPn7Q9KlIqfwLJnlAXO10bbkI+xy5GKGdLY= github.com/filecoin-project/specs-storage v0.1.1-0.20200622113353-88a9704877ea/go.mod h1:Pr5ntAaxsh+sLG/LYiL4tKzvA83Vk5vLODYhfNwOg7k= github.com/fsnotify/fsnotify 
v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -84,9 +83,11 @@ github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUP github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.5 h1:o0Ix8e/ql7Zb5UVUJEUfjsWCIY8t48++9lR8qi6oiJU= github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= +github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= github.com/ipfs/go-cid v0.0.6 h1:go0y+GcDOGeJIV01FeBsta4FHngoA4Wz7KMeLkXAhMs= github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= github.com/ipfs/go-hamt-ipld v0.0.15-0.20200131012125-dd88a59d3f2e/go.mod h1:9aQJu/i/TaRDW6jqB5U217dLIDopn50wxLdHXM2CTfE= +github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk= github.com/ipfs/go-ipfs-files v0.0.7 h1:s5BRD12ndahqYifeH1S8Z73zqZhR+3IdKYAG9PiETs0= github.com/ipfs/go-ipfs-files v0.0.7/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs= github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50= @@ -102,14 +103,13 @@ github.com/ipfs/go-ipld-format v0.0.2 h1:OVAGlyYT6JPZ0pEfGntFPS40lfrDmaDbQwNHEY2 github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= github.com/ipfs/go-log v1.0.0/go.mod h1:JO7RzlMK6rA+CIxFMLOuB6Wf5b81GDiKElL7UPSIKjA= -github.com/ipfs/go-log v1.0.3 h1:Gg7SUYSZ7BrqaKMwM+hRgcAkKv4QLfzP4XPQt5Sx/OI= -github.com/ipfs/go-log v1.0.3/go.mod h1:OsLySYkwIbiSUR/yBTdv1qPtcE4FW3WPWk/ewz9Ru+A= -github.com/ipfs/go-log/v2 v2.0.3 h1:Q2gXcBoCALyLN/pUQlz1qgu0x3uFV6FzP9oXhpfyJpc= -github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= +github.com/ipfs/go-log v1.0.4 h1:6nLQdX4W8P9yZZFH7mO+X/PzjN8Laozm/lMJ6esdgzY= +github.com/ipfs/go-log v1.0.4/go.mod h1:oDCg2FkjogeFOhqqb+N39l2RpTNPL6F/StPkB3kPgcs= +github.com/ipfs/go-log/v2 v2.0.5 h1:fL4YI+1g5V/b1Yxr1qAiXTMg1H8z9vx/VmJxBuQMHvU= +github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 h1:QG4CGBqCeuBo6aZlGAamSkxWdgWfZGeE49eUOWJPA4c= github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52/go.mod h1:fdg+/X9Gg4AsAIzWpEHwnqd+QY3b7lajxyjE1m4hkq4= github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= -github.com/jbenet/goprocess v0.1.3 h1:YKyIEECS/XvcfHtBzxtjBBbWK+MbvA6dG8ASiqwvr10= github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -132,9 +132,7 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= -github.com/libp2p/go-libp2p-core v0.3.0 h1:F7PqduvrztDtFsAa/bcheQ3azmNo+Nq7m8hQY5GiUW8= github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw= -github.com/libp2p/go-openssl v0.0.4 h1:d27YZvLoTyMhIN4njrkr8zMDOM4lfpHIp6A+TK9fovg= github.com/libp2p/go-openssl 
v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= @@ -166,7 +164,6 @@ github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= -github.com/multiformats/go-multiaddr v0.2.0 h1:lR52sFwcTCuQb6bTfnXF6zA2XfyYvyd+5a9qECv/J90= github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA= github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= @@ -212,7 +209,6 @@ github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2 github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 h1:WN9BUFbdyOsSH/XohnWpXOlq9NBD5sGAB2FciQMUEe8= github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= -github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU= github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= @@ -223,6 +219,8 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436 h1:qOpVTI+BrstcjTZLm2Yz/3sOnqkzj3FQoh0g+E5s3Gc= github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830 h1:8kxMKmKzXXL4Ru1nyhvdms/JjWt+3YLpvRb/bAjO/y0= @@ -231,8 +229,12 @@ github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0/go.mod h1:x github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e h1:JY8o/ebUUrCYetWmjRCNghxC59cOEaili83rxPRQCLw= github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= +github.com/whyrusleeping/cbor-gen v0.0.0-20200504204219-64967432584d/go.mod h1:W5MvapuoHRP8rz4vxjwCK1pDqF1aQcWsV5PZ+AHbqdg= +github.com/whyrusleeping/cbor-gen v0.0.0-20200715143311-227fab5a2377 h1:LHFlP/ktDvOnCap7PsT87cs7Gwd0p+qv6Qm5g2ZPR+I= +github.com/whyrusleeping/cbor-gen v0.0.0-20200715143311-227fab5a2377/go.mod 
h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= +github.com/xorcare/golden v0.6.0/go.mod h1:7T39/ZMvaSEZlBPoYfVFmsBLmUl3uz9IuzWj/U6FtvQ= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= @@ -351,6 +353,8 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= From 3d2084ab931932da4b6dddd32b8b35df0fe3bc19 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 30 Jul 2020 22:03:43 +0200 Subject: [PATCH 45/51] Try to not unseal in ReadPiece when don't need to --- ffiwrapper/partialfile.go | 19 +++++++++++++++++++ ffiwrapper/sealer_cgo.go | 27 +++++++++++++++++++-------- ffiwrapper/types.go | 2 +- localworker.go | 4 ++-- manager.go | 32 +++++++++++++++++++++++++++++--- work_tracker.go | 2 +- 6 files changed, 71 insertions(+), 15 deletions(-) diff --git a/ffiwrapper/partialfile.go b/ffiwrapper/partialfile.go index b1ab8c53c..f6c03f1a3 100644 --- a/ffiwrapper/partialfile.go +++ b/ffiwrapper/partialfile.go @@ -279,6 +279,25 @@ func (pf *partialFile) Allocated() (rlepluslazy.RunIterator, error) { return pf.allocated.RunIterator() } +func (pf *partialFile) HasAllocated(offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { + have, err := pf.Allocated() + if err != nil { + return false, err + } + + u, err := rlepluslazy.Union(have, pieceRun(offset.Padded(), size.Padded())) + if err != nil { + return false, err + } + + uc, err := rlepluslazy.Count(u) + if err != nil { + return false, err + } + + return abi.PaddedPieceSize(uc) == size.Padded(), nil +} + func pieceRun(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) rlepluslazy.RunIterator { var runs []rlepluslazy.Run if offset > 0 { diff --git a/ffiwrapper/sealer_cgo.go b/ffiwrapper/sealer_cgo.go index 416bfa70b..8a4f18bc7 100644 --- a/ffiwrapper/sealer_cgo.go +++ b/ffiwrapper/sealer_cgo.go @@ -361,10 +361,10 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s return nil } -func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error { +func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { path, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTNone, stores.PathStorage) if err != 
nil {
-		return xerrors.Errorf("acquire unsealed sector path: %w", err)
+		return false, xerrors.Errorf("acquire unsealed sector path: %w", err)
 	}
 	defer done()
 
@@ -372,30 +372,41 @@ func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector abi.Se
 
 	pf, err := openPartialFile(maxPieceSize, path.Unsealed)
 	if xerrors.Is(err, os.ErrNotExist) {
-		return xerrors.Errorf("opening partial file: %w", err)
+		return false, xerrors.Errorf("opening partial file: %w", err)
+	}
+
+	ok, err := pf.HasAllocated(offset, size)
+	if err != nil {
+		pf.Close()
+		return false, err
+	}
+
+	if !ok {
+		pf.Close()
+		return false, nil
 	}
 
 	f, err := pf.Reader(offset.Padded(), size.Padded())
 	if err != nil {
 		pf.Close()
-		return xerrors.Errorf("getting partial file reader: %w", err)
+		return false, xerrors.Errorf("getting partial file reader: %w", err)
 	}
 
 	upr, err := fr32.NewUnpadReader(f, size.Padded())
 	if err != nil {
-		return xerrors.Errorf("creating unpadded reader: %w", err)
+		return false, xerrors.Errorf("creating unpadded reader: %w", err)
 	}
 
 	if _, err := io.CopyN(writer, upr, int64(size)); err != nil {
 		pf.Close()
-		return xerrors.Errorf("reading unsealed file: %w", err)
+		return false, xerrors.Errorf("reading unsealed file: %w", err)
 	}
 
 	if err := pf.Close(); err != nil {
-		return xerrors.Errorf("closing partial file: %w", err)
+		return false, xerrors.Errorf("closing partial file: %w", err)
 	}
 
-	return nil
+	return true, nil
 }
 
 func (sb *Sealer) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) {
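For readers following the API change: the boolean result separates "piece
copied to the writer" from "requested range not unsealed yet", so callers can
fall back to unsealing without inspecting error strings. A caller-side sketch
(illustrative only: readOrUnseal and its fallback step are assumptions, not
code from this series; the ffiwrapper package's imports are assumed):

	// readOrUnseal drives the new two-value ReadPiece contract.
	func readOrUnseal(ctx context.Context, sb *Sealer, w io.Writer,
		sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error {
		ok, err := sb.ReadPiece(ctx, w, sector, offset, size)
		if err != nil {
			return err // a real I/O or bitfield failure
		}
		if !ok {
			// the range is not (fully) unsealed: unseal it first, then
			// re-read; the unseal step (randomness, unsealed CID) is
			// elided in this sketch
		}
		return nil
	}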
diff --git a/ffiwrapper/types.go b/ffiwrapper/types.go
index 13c0ee990..bc3c44f54 100644
--- a/ffiwrapper/types.go
+++ b/ffiwrapper/types.go
@@ -29,7 +29,7 @@ type Storage interface {
 	StorageSealer
 
 	UnsealPiece(ctx context.Context, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error
-	ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error
+	ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error)
 }
 
 type Verifier interface {
diff --git a/localworker.go b/localworker.go
index 7b9bbdee1..14ed1cd0b 100644
--- a/localworker.go
+++ b/localworker.go
@@ -237,10 +237,10 @@ func (l *LocalWorker) UnsealPiece(ctx context.Context, sector abi.SectorID, inde
 	return nil
 }
 
-func (l *LocalWorker) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error {
+func (l *LocalWorker) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) {
 	sb, err := l.sb()
 	if err != nil {
-		return err
+		return false, err
 	}
 
 	return sb.ReadPiece(ctx, writer, sector, index, size)
diff --git a/manager.go b/manager.go
index 4791eb5e6..5f2b8e334 100644
--- a/manager.go
+++ b/manager.go
@@ -34,7 +34,7 @@ type Worker interface {
 	Fetch(ctx context.Context, s abi.SectorID, ft stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) error
 
 	UnsealPiece(context.Context, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error
-	ReadPiece(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) error
+	ReadPiece(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) (bool, error)
 
 	TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error)
 
@@ -221,7 +221,28 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect
 		return xerrors.Errorf("creating unsealPiece selector: %w", err)
 	}
 
-	// TODO: Optimization: don't send unseal to a worker if the requested range is already unsealed
+	var readOk bool
+
+	if len(best) > 0 {
+		// There is unsealed sector, see if we can read from it
+
+		selector, err = newExistingSelector(ctx, m.index, sector, stores.FTUnsealed, false)
+		if err != nil {
+			return xerrors.Errorf("creating readPiece selector: %w", err)
+		}
+
+		err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, schedFetch(sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error {
+			readOk, err = w.ReadPiece(ctx, sink, sector, offset, size)
+			return err
+		})
+		if err != nil {
+			return xerrors.Errorf("reading piece from sealed sector: %w", err)
+		}
+
+		if readOk {
+			return nil
+		}
+	}
 
 	unsealFetch := func(ctx context.Context, worker Worker) error {
 		if err := worker.Fetch(ctx, sector, stores.FTSealed|stores.FTCache, stores.PathSealing, stores.AcquireCopy); err != nil {
@@ -249,12 +270,17 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect
 	}
 
 	err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, schedFetch(sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error {
-		return w.ReadPiece(ctx, sink, sector, offset, size)
+		readOk, err = w.ReadPiece(ctx, sink, sector, offset, size)
+		return err
 	})
 	if err != nil {
 		return xerrors.Errorf("reading piece from sealed sector: %w", err)
 	}
 
+	if !readOk {
+		return xerrors.Errorf("failed to read unsealed piece")
+	}
+
 	return nil
 }
 
diff --git a/work_tracker.go b/work_tracker.go
index f1e243ed2..7453752c9 100644
--- a/work_tracker.go
+++ b/work_tracker.go
@@ -120,7 +120,7 @@ func (t *trackedWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index
 	return t.Worker.UnsealPiece(ctx, id, index, size, randomness, cid)
 }
 
-func (t *trackedWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error {
+func (t *trackedWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) {
 	defer t.tracker.track(id, sealtasks.TTReadUnsealed)()
 
 	return t.Worker.ReadPiece(ctx, writer, id, index, size)

From 7153e1dd05b5e7aca17098eb47a65f9855bfdec5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C5=81ukasz=20Magiera?=
Date: Thu, 30 Jul 2020 22:38:05 +0200
Subject: [PATCH 46/51] Fix tests

---
 ffiwrapper/partialfile.go |  2 +-
 ffiwrapper/sealer_test.go | 17 ++++++++++-------
 sched_test.go             |  2 +-
 testworker_test.go        |  2 +-
 4 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/ffiwrapper/partialfile.go b/ffiwrapper/partialfile.go
index f6c03f1a3..3e8b32288 100644
--- a/ffiwrapper/partialfile.go
+++ b/ffiwrapper/partialfile.go
@@ -285,7 +285,7 @@ func (pf *partialFile) HasAllocated(offset storiface.UnpaddedByteIndex, size abi
 		return false, err
 	}
 
-	u, err := rlepluslazy.Union(have, pieceRun(offset.Padded(), size.Padded()))
+	u, err := rlepluslazy.And(have, pieceRun(offset.Padded(), size.Padded()))
 	if err != nil {
 		return false, err
 	}
diff --git a/ffiwrapper/sealer_test.go b/ffiwrapper/sealer_test.go
index 0b5018d84..f795be159 100644
--- 
a/ffiwrapper/sealer_test.go +++ b/ffiwrapper/sealer_test.go @@ -111,7 +111,7 @@ func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si abi.Sec defer done() var b bytes.Buffer - err := sb.ReadPiece(context.TODO(), &b, si, 0, 1016) + _, err := sb.ReadPiece(context.TODO(), &b, si, 0, 1016) if err != nil { t.Fatal(err) } @@ -130,7 +130,7 @@ func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si abi.Sec } sd() - err = sb.ReadPiece(context.TODO(), &b, si, 0, 1016) + _, err = sb.ReadPiece(context.TODO(), &b, si, 0, 1016) if err == nil { t.Fatal("HOW?!") } @@ -141,7 +141,7 @@ func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si abi.Sec } b.Reset() - err = sb.ReadPiece(context.TODO(), &b, si, 0, 1016) + _, err = sb.ReadPiece(context.TODO(), &b, si, 0, 1016) if err != nil { t.Fatal(err) } @@ -150,14 +150,17 @@ func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si abi.Sec require.Equal(t, expect, b.Bytes()) b.Reset() - err = sb.ReadPiece(context.TODO(), &b, si, 0, 2032) + have, err := sb.ReadPiece(context.TODO(), &b, si, 0, 2032) if err != nil { t.Fatal(err) } - expect = append(expect, bytes.Repeat([]byte{0}, 1016)...) - if !bytes.Equal(b.Bytes(), expect) { - t.Fatal("read wrong bytes") + if have { + t.Errorf("didn't expect to read things") + } + + if b.Len() != 0 { + t.Fatal("read bytes") } } diff --git a/sched_test.go b/sched_test.go index caf7f0b4b..c96f7838c 100644 --- a/sched_test.go +++ b/sched_test.go @@ -88,7 +88,7 @@ func (s *schedTestWorker) UnsealPiece(ctx context.Context, id abi.SectorID, inde panic("implement me") } -func (s *schedTestWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error { +func (s *schedTestWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { panic("implement me") } diff --git a/testworker_test.go b/testworker_test.go index bdfff1915..40151a84d 100644 --- a/testworker_test.go +++ b/testworker_test.go @@ -53,7 +53,7 @@ func (t *testWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index sto panic("implement me") } -func (t *testWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error { +func (t *testWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { panic("implement me") } From ed251d9f82123e86cab87adf57a60ea8d3762e26 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 3 Aug 2020 14:18:11 +0200 Subject: [PATCH 47/51] Fix some locking issues --- sched.go | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/sched.go b/sched.go index ed648bf19..e8eda4834 100644 --- a/sched.go +++ b/sched.go @@ -52,7 +52,7 @@ type WorkerSelector interface { type scheduler struct { spt abi.RegisteredSealProof - workersLk sync.Mutex + workersLk sync.RWMutex nextWorker WorkerID workers map[WorkerID]*workerHandle @@ -83,6 +83,8 @@ type workerHandle struct { preparing *activeResources active *activeResources + lk sync.Mutex + // stats / tracking wt *workTracker @@ -283,6 +285,9 @@ func (sh *scheduler) trySched() { log.Debugf("SCHED %d queued; %d open windows", sh.schedQueue.Len(), len(windows)) + sh.workersLk.RLock() + defer sh.workersLk.RUnlock() + // Step 1 for sqi := 0; sqi < 
sh.schedQueue.Len(); sqi++ { task := (*sh.schedQueue)[sqi] @@ -428,9 +433,9 @@ func (sh *scheduler) runWorker(wid WorkerID) { defer ready.Wait() go func() { - sh.workersLk.Lock() + sh.workersLk.RLock() worker, found := sh.workers[wid] - sh.workersLk.Unlock() + sh.workersLk.RUnlock() ready.Done() @@ -498,16 +503,19 @@ func (sh *scheduler) runWorker(wid WorkerID) { todo := activeWindows[0].todo[0] needRes := ResourceTable[todo.taskType][sh.spt] - sh.workersLk.Lock() + sh.workersLk.RLock() + worker.lk.Lock() ok := worker.preparing.canHandleRequest(needRes, wid, worker.info.Resources) + worker.lk.Unlock() + if !ok { - sh.workersLk.Unlock() + sh.workersLk.RUnlock() break assignLoop } log.Debugf("assign worker sector %d", todo.sector.Number) err := sh.assignWorker(taskDone, wid, worker, todo) - sh.workersLk.Unlock() + sh.workersLk.RUnlock() if err != nil { log.Error("assignWorker error: %+v", err) @@ -530,14 +538,18 @@ func (sh *scheduler) runWorker(wid WorkerID) { func (sh *scheduler) assignWorker(taskDone chan struct{}, wid WorkerID, w *workerHandle, req *workerRequest) error { needRes := ResourceTable[req.taskType][sh.spt] + w.lk.Lock() w.preparing.add(w.info.Resources, needRes) + w.lk.Unlock() go func() { err := req.prepare(req.ctx, w.wt.worker(w.w)) sh.workersLk.Lock() if err != nil { + w.lk.Lock() w.preparing.free(w.info.Resources, needRes) + w.lk.Unlock() sh.workersLk.Unlock() select { @@ -557,7 +569,9 @@ func (sh *scheduler) assignWorker(taskDone chan struct{}, wid WorkerID, w *worke } err = w.active.withResources(wid, w.info.Resources, needRes, &sh.workersLk, func() error { + w.lk.Lock() w.preparing.free(w.info.Resources, needRes) + w.lk.Unlock() sh.workersLk.Unlock() defer sh.workersLk.Lock() // we MUST return locked from this function From 3cab915fd225717efd7fc9b099b854e11af2d056 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 3 Aug 2020 20:49:04 +0200 Subject: [PATCH 48/51] mock: Make it possible to unfail sectors --- mock/mock.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mock/mock.go b/mock/mock.go index 55b103ab8..6eb71cd6b 100644 --- a/mock/mock.go +++ b/mock/mock.go @@ -236,7 +236,7 @@ func (mgr *SectorMgr) SealCommit2(ctx context.Context, sid abi.SectorID, phase1O // Test Instrumentation Methods -func (mgr *SectorMgr) FailSector(sid abi.SectorID) error { +func (mgr *SectorMgr) MarkFailed(sid abi.SectorID, failed bool) error { mgr.lk.Lock() defer mgr.lk.Unlock() ss, ok := mgr.sectors[sid] @@ -244,7 +244,7 @@ func (mgr *SectorMgr) FailSector(sid abi.SectorID) error { return fmt.Errorf("no such sector in storage") } - ss.failed = true + ss.failed = failed return nil } From b58eba0d999d33c6596ea84ed390aa5fc34f814d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Tue, 4 Aug 2020 16:20:59 +0200 Subject: [PATCH 49/51] remote: Fetch storage reservation --- fsutil/filesize_unix.go | 6 +++- stores/interface.go | 8 ++--- stores/local.go | 21 +++++++++--- stores/remote.go | 71 ++++++++++++++++++++++++++++------------- 4 files changed, 74 insertions(+), 32 deletions(-) diff --git a/fsutil/filesize_unix.go b/fsutil/filesize_unix.go index 41b62daf6..dacdcd515 100644 --- a/fsutil/filesize_unix.go +++ b/fsutil/filesize_unix.go @@ -1,6 +1,7 @@ package fsutil import ( + "os" "syscall" "golang.org/x/xerrors" @@ -14,12 +15,15 @@ type SizeInfo struct { func FileSize(path string) (SizeInfo, error) { var stat syscall.Stat_t if err := syscall.Stat(path, &stat); err != nil { + if err == syscall.ENOENT { + return SizeInfo{}, 
os.ErrNotExist + } return SizeInfo{}, xerrors.Errorf("stat: %w", err) } // NOTE: stat.Blocks is in 512B blocks, NOT in stat.Blksize // See https://www.gnu.org/software/libc/manual/html_node/Attribute-Meanings.html return SizeInfo{ - int64(stat.Blocks) * 512, + int64(stat.Blocks) * 512, // NOTE: int64 cast is needed on osx }, nil } diff --git a/stores/interface.go b/stores/interface.go index 836705f40..142769b1b 100644 --- a/stores/interface.go +++ b/stores/interface.go @@ -9,15 +9,15 @@ import ( type PathType string const ( - PathStorage = "storage" - PathSealing = "sealing" + PathStorage PathType = "storage" + PathSealing PathType = "sealing" ) type AcquireMode string const ( - AcquireMove = "move" - AcquireCopy = "copy" + AcquireMove AcquireMode = "move" + AcquireCopy AcquireMode = "copy" ) type Store interface { diff --git a/stores/local.go b/stores/local.go index 9a7ec6108..9efab6480 100644 --- a/stores/local.go +++ b/stores/local.go @@ -50,7 +50,10 @@ type LocalStorage interface { SetStorage(func(*StorageConfig)) error Stat(path string) (fsutil.FsStat, error) - DiskUsage(path string) (int64, error) // returns real disk usage for a file/directory + + // returns real disk usage for a file/directory + // os.ErrNotExit when file doesn't exist + DiskUsage(path string) (int64, error) } const MetaFile = "sectorstore.json" @@ -77,7 +80,7 @@ type path struct { func (p *path) stat(ls LocalStorage) (fsutil.FsStat, error) { stat, err := ls.Stat(p.local) if err != nil { - return fsutil.FsStat{}, err + return fsutil.FsStat{}, xerrors.Errorf("stat %s: %w", p.local, err) } stat.Reserved = p.reserved @@ -88,7 +91,17 @@ func (p *path) stat(ls LocalStorage) (fsutil.FsStat, error) { continue } - used, err := ls.DiskUsage(p.sectorPath(id, fileType)) + sp := p.sectorPath(id, fileType) + + used, err := ls.DiskUsage(sp) + if err == os.ErrNotExist { + p, ferr := tempFetchDest(sp, false) + if ferr != nil { + return fsutil.FsStat{}, ferr + } + + used, err = ls.DiskUsage(p) + } if err != nil { log.Errorf("getting disk usage of '%s': %+v", p.sectorPath(id, fileType), err) continue @@ -279,7 +292,7 @@ func (st *Local) Reserve(ctx context.Context, sid abi.SectorID, spt abi.Register stat, err := p.stat(st.localStorage) if err != nil { - return nil, err + return nil, xerrors.Errorf("getting local storage stat: %w", err) } overhead := int64(overheadTab[fileType]) * int64(ssize) / FSOverheadDen diff --git a/stores/remote.go b/stores/remote.go index 42b730b40..93dc2ca58 100644 --- a/stores/remote.go +++ b/stores/remote.go @@ -95,6 +95,33 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, spt abi.Regi return SectorPaths{}, SectorPaths{}, xerrors.Errorf("local acquire error: %w", err) } + var toFetch SectorFileType + for _, fileType := range PathTypes { + if fileType&existing == 0 { + continue + } + + if PathByType(paths, fileType) == "" { + toFetch |= fileType + } + } + + apaths, ids, err := r.local.AcquireSector(ctx, s, spt, FTNone, toFetch, pathType, op) + if err != nil { + return SectorPaths{}, SectorPaths{}, xerrors.Errorf("allocate local sector for fetching: %w", err) + } + + odt := FSOverheadSeal + if pathType == PathStorage { + odt = FsOverheadFinalized + } + + releaseStorage, err := r.local.Reserve(ctx, s, spt, toFetch, ids, odt) + if err != nil { + return SectorPaths{}, SectorPaths{}, xerrors.Errorf("reserving storage space: %w", err) + } + defer releaseStorage() + for _, fileType := range PathTypes { if fileType&existing == 0 { continue @@ -104,15 +131,18 @@ func (r *Remote) 
AcquireSector(ctx context.Context, s abi.SectorID, spt abi.Regi continue } - ap, storageID, url, err := r.acquireFromRemote(ctx, s, spt, fileType, pathType, op) + dest := PathByType(apaths, fileType) + storageID := PathByType(ids, fileType) + + url, err := r.acquireFromRemote(ctx, s, fileType, dest) if err != nil { return SectorPaths{}, SectorPaths{}, err } - SetPathByType(&paths, fileType, ap) - SetPathByType(&stores, fileType, string(storageID)) + SetPathByType(&paths, fileType, dest) + SetPathByType(&stores, fileType, storageID) - if err := r.index.StorageDeclareSector(ctx, storageID, s, fileType, op == AcquireMove); err != nil { + if err := r.index.StorageDeclareSector(ctx, ID(storageID), s, fileType, op == AcquireMove); err != nil { log.Warnf("declaring sector %v in %s failed: %+v", s, storageID, err) continue } @@ -127,49 +157,44 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, spt abi.Regi return paths, stores, nil } -func tempDest(spath string) (string, error) { +func tempFetchDest(spath string, create bool) (string, error) { st, b := filepath.Split(spath) tempdir := filepath.Join(st, FetchTempSubdir) - if err := os.MkdirAll(tempdir, 755); err != nil { - return "", xerrors.Errorf("creating temp fetch dir: %w", err) + if create { + if err := os.MkdirAll(tempdir, 0755); err != nil { + return "", xerrors.Errorf("creating temp fetch dir: %w", err) + } } return filepath.Join(tempdir, b), nil } -func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, spt abi.RegisteredSealProof, fileType SectorFileType, pathType PathType, op AcquireMode) (string, ID, string, error) { +func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, fileType SectorFileType, dest string) (string, error) { si, err := r.index.StorageFindSector(ctx, s, fileType, false) if err != nil { - return "", "", "", err + return "", err } if len(si) == 0 { - return "", "", "", xerrors.Errorf("failed to acquire sector %v from remote(%d): %w", s, fileType, storiface.ErrSectorNotFound) + return "", xerrors.Errorf("failed to acquire sector %v from remote(%d): %w", s, fileType, storiface.ErrSectorNotFound) } sort.Slice(si, func(i, j int) bool { return si[i].Weight < si[j].Weight }) - apaths, ids, err := r.local.AcquireSector(ctx, s, spt, FTNone, fileType, pathType, op) - if err != nil { - return "", "", "", xerrors.Errorf("allocate local sector for fetching: %w", err) - } - dest := PathByType(apaths, fileType) - storageID := PathByType(ids, fileType) - var merr error for _, info := range si { // TODO: see what we have local, prefer that for _, url := range info.URLs { - tempDest, err := tempDest(dest) + tempDest, err := tempFetchDest(dest, true) if err != nil { - return "", "", "", err + return "", err } if err := os.RemoveAll(dest); err != nil { - return "", "", "", xerrors.Errorf("removing dest: %w", err) + return "", xerrors.Errorf("removing dest: %w", err) } err = r.fetch(ctx, url, tempDest) @@ -179,17 +204,17 @@ func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, spt abi. 
} if err := move(tempDest, dest); err != nil { - return "", "", "", xerrors.Errorf("fetch move error (storage %s) %s -> %s: %w", info.ID, tempDest, dest, err) + return "", xerrors.Errorf("fetch move error (storage %s) %s -> %s: %w", info.ID, tempDest, dest, err) } if merr != nil { log.Warnw("acquireFromRemote encountered errors when fetching sector from remote", "errors", merr) } - return dest, ID(storageID), url, nil + return url, nil } } - return "", "", "", xerrors.Errorf("failed to acquire sector %v from remote (tried %v): %w", s, si, merr) + return "", xerrors.Errorf("failed to acquire sector %v from remote (tried %v): %w", s, si, merr) } func (r *Remote) fetch(ctx context.Context, url, outname string) error { From de3d3b48f71bbcb8e2b7bb9732d4ed7cc4ca1c7a Mon Sep 17 00:00:00 2001 From: yaohcn Date: Wed, 5 Aug 2020 20:36:49 +0800 Subject: [PATCH 50/51] fix 2806 --- manager.go | 50 ++++++++++++-------------------------------- sched_test.go | 2 +- selector_alloc.go | 2 +- selector_existing.go | 26 ++++++++++++++--------- 4 files changed, 31 insertions(+), 49 deletions(-) diff --git a/manager.go b/manager.go index 5f2b8e334..303df2169 100644 --- a/manager.go +++ b/manager.go @@ -213,12 +213,9 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect var selector WorkerSelector if len(best) == 0 { // new - selector = newAllocSelector(ctx, m.index, stores.FTUnsealed, stores.PathSealing) + selector = newAllocSelector(m.index, stores.FTUnsealed, stores.PathSealing) } else { // append to existing - selector, err = newExistingSelector(ctx, m.index, sector, stores.FTUnsealed, false) - } - if err != nil { - return xerrors.Errorf("creating unsealPiece selector: %w", err) + selector = newExistingSelector(m.index, sector, stores.FTUnsealed, false) } var readOk bool @@ -226,10 +223,7 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect if len(best) > 0 { // There is unsealed sector, see if we can read from it - selector, err = newExistingSelector(ctx, m.index, sector, stores.FTUnsealed, false) - if err != nil { - return xerrors.Errorf("creating readPiece selector: %w", err) - } + selector = newExistingSelector(m.index, sector, stores.FTUnsealed, false) err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, schedFetch(sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error { readOk, err = w.ReadPiece(ctx, sink, sector, offset, size) @@ -264,10 +258,7 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect return err } - selector, err = newExistingSelector(ctx, m.index, sector, stores.FTUnsealed, false) - if err != nil { - return xerrors.Errorf("creating readPiece selector: %w", err) - } + selector = newExistingSelector(m.index, sector, stores.FTUnsealed, false) err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, schedFetch(sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error { readOk, err = w.ReadPiece(ctx, sink, sector, offset, size) @@ -300,12 +291,9 @@ func (m *Manager) AddPiece(ctx context.Context, sector abi.SectorID, existingPie var selector WorkerSelector var err error if len(existingPieces) == 0 { // new - selector = newAllocSelector(ctx, m.index, stores.FTUnsealed, stores.PathSealing) + selector = newAllocSelector(m.index, stores.FTUnsealed, stores.PathSealing) } else { // use existing - selector, err = newExistingSelector(ctx, m.index, sector, stores.FTUnsealed, 
From de3d3b48f71bbcb8e2b7bb9732d4ed7cc4ca1c7a Mon Sep 17 00:00:00 2001
From: yaohcn
Date: Wed, 5 Aug 2020 20:36:49 +0800
Subject: [PATCH 50/51] fix 2806

Query the storage index in existingSelector.Ok, at scheduling time,
instead of snapshotting it at selector construction: every scheduling
decision then sees the index's current view of sector locations, and
the selector constructors can no longer fail.

---
 manager.go           | 50 ++++++++++++--------------------------------
 sched_test.go        |  2 +-
 selector_alloc.go    |  2 +-
 selector_existing.go | 26 ++++++++++++++---------
 4 files changed, 31 insertions(+), 49 deletions(-)

diff --git a/manager.go b/manager.go
index 5f2b8e334..303df2169 100644
--- a/manager.go
+++ b/manager.go
@@ -213,12 +213,9 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect

 	var selector WorkerSelector
 	if len(best) == 0 { // new
-		selector = newAllocSelector(ctx, m.index, stores.FTUnsealed, stores.PathSealing)
+		selector = newAllocSelector(m.index, stores.FTUnsealed, stores.PathSealing)
 	} else { // append to existing
-		selector, err = newExistingSelector(ctx, m.index, sector, stores.FTUnsealed, false)
-	}
-	if err != nil {
-		return xerrors.Errorf("creating unsealPiece selector: %w", err)
+		selector = newExistingSelector(m.index, sector, stores.FTUnsealed, false)
 	}

 	var readOk bool
@@ -226,10 +223,7 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect

 	if len(best) > 0 {
 		// There is unsealed sector, see if we can read from it
-		selector, err = newExistingSelector(ctx, m.index, sector, stores.FTUnsealed, false)
-		if err != nil {
-			return xerrors.Errorf("creating readPiece selector: %w", err)
-		}
+		selector = newExistingSelector(m.index, sector, stores.FTUnsealed, false)

 		err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, schedFetch(sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error {
 			readOk, err = w.ReadPiece(ctx, sink, sector, offset, size)
@@ -264,10 +258,7 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect
 		return err
 	}

-	selector, err = newExistingSelector(ctx, m.index, sector, stores.FTUnsealed, false)
-	if err != nil {
-		return xerrors.Errorf("creating readPiece selector: %w", err)
-	}
+	selector = newExistingSelector(m.index, sector, stores.FTUnsealed, false)

 	err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, schedFetch(sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error {
 		readOk, err = w.ReadPiece(ctx, sink, sector, offset, size)
@@ -300,12 +291,9 @@ func (m *Manager) AddPiece(ctx context.Context, sector abi.SectorID, existingPie
 	var selector WorkerSelector
 	var err error
 	if len(existingPieces) == 0 { // new
-		selector = newAllocSelector(ctx, m.index, stores.FTUnsealed, stores.PathSealing)
+		selector = newAllocSelector(m.index, stores.FTUnsealed, stores.PathSealing)
 	} else { // use existing
-		selector, err = newExistingSelector(ctx, m.index, sector, stores.FTUnsealed, false)
-	}
-	if err != nil {
-		return abi.PieceInfo{}, xerrors.Errorf("creating path selector: %w", err)
+		selector = newExistingSelector(m.index, sector, stores.FTUnsealed, false)
 	}

 	var out abi.PieceInfo
@@ -331,7 +319,7 @@ func (m *Manager) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke

 	// TODO: also consider where the unsealed data sits

-	selector := newAllocSelector(ctx, m.index, stores.FTCache|stores.FTSealed, stores.PathSealing)
+	selector := newAllocSelector(m.index, stores.FTCache|stores.FTSealed, stores.PathSealing)

 	err = m.sched.Schedule(ctx, sector, sealtasks.TTPreCommit1, selector, schedFetch(sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error {
 		p, err := w.SealPreCommit1(ctx, sector, ticket, pieces)
@@ -353,10 +341,7 @@ func (m *Manager) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase
 		return storage.SectorCids{}, xerrors.Errorf("acquiring sector lock: %w", err)
 	}

-	selector, err := newExistingSelector(ctx, m.index, sector, stores.FTCache|stores.FTSealed, true)
-	if err != nil {
-		return storage.SectorCids{}, xerrors.Errorf("creating path selector: %w", err)
-	}
+	selector := newExistingSelector(m.index, sector, stores.FTCache|stores.FTSealed, true)

 	err = m.sched.Schedule(ctx, sector, sealtasks.TTPreCommit2, selector, schedFetch(sector, stores.FTCache|stores.FTSealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error {
 		p, err := w.SealPreCommit2(ctx, sector, phase1Out)
@@ -380,10 +365,7 @@ func (m *Manager) SealCommit1(ctx context.Context, sector abi.SectorID, ticket a
 	// NOTE: We set allowFetch to false in so that we always execute on a worker
 	// with direct access to the data. We want to do that because this step is
 	// generally very cheap / fast, and transferring data is not worth the effort
-	selector, err := newExistingSelector(ctx, m.index, sector, stores.FTCache|stores.FTSealed, false)
-	if err != nil {
-		return storage.Commit1Out{}, xerrors.Errorf("creating path selector: %w", err)
-	}
+	selector := newExistingSelector(m.index, sector, stores.FTCache|stores.FTSealed, false)

 	err = m.sched.Schedule(ctx, sector, sealtasks.TTCommit1, selector, schedFetch(sector, stores.FTCache|stores.FTSealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error {
 		p, err := w.SealCommit1(ctx, sector, ticket, seed, pieces, cids)
@@ -431,12 +413,9 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID, keepU
 		}
 	}

-	selector, err := newExistingSelector(ctx, m.index, sector, stores.FTCache|stores.FTSealed, false)
-	if err != nil {
-		return xerrors.Errorf("creating path selector: %w", err)
-	}
+	selector := newExistingSelector(m.index, sector, stores.FTCache|stores.FTSealed, false)

-	err = m.sched.Schedule(ctx, sector, sealtasks.TTFinalize, selector,
+	err := m.sched.Schedule(ctx, sector, sealtasks.TTFinalize, selector,
 		schedFetch(sector, stores.FTCache|stores.FTSealed|unsealed, stores.PathSealing, stores.AcquireMove),
 		func(ctx context.Context, w Worker) error {
 			return w.FinalizeSector(ctx, sector, keepUnsealed)
@@ -445,7 +424,7 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID, keepU
 		return err
 	}

-	fetchSel := newAllocSelector(ctx, m.index, stores.FTCache|stores.FTSealed, stores.PathStorage)
+	fetchSel := newAllocSelector(m.index, stores.FTCache|stores.FTSealed, stores.PathStorage)
 	moveUnsealed := unsealed
 	{
 		if len(keepUnsealed) == 0 {
@@ -490,10 +469,7 @@ func (m *Manager) Remove(ctx context.Context, sector abi.SectorID) error {
 		}
 	}

-	selector, err := newExistingSelector(ctx, m.index, sector, stores.FTCache|stores.FTSealed, false)
-	if err != nil {
-		return xerrors.Errorf("creating selector: %w", err)
-	}
+	selector := newExistingSelector(m.index, sector, stores.FTCache|stores.FTSealed, false)

 	return m.sched.Schedule(ctx, sector, sealtasks.TTFinalize, selector,
 		schedFetch(sector, stores.FTCache|stores.FTSealed|unsealed, stores.PathStorage, stores.AcquireMove),
diff --git a/sched_test.go b/sched_test.go
index c96f7838c..6490e738e 100644
--- a/sched_test.go
+++ b/sched_test.go
@@ -207,7 +207,7 @@ func TestSched(t *testing.T) {
 			done := make(chan struct{})
 			rm.done[taskName] = done

-			sel := newAllocSelector(ctx, index, stores.FTCache, stores.PathSealing)
+			sel := newAllocSelector(index, stores.FTCache, stores.PathSealing)

 			rm.wg.Add(1)
 			go func() {
diff --git a/selector_alloc.go b/selector_alloc.go
index 35221921f..cf7937587 100644
--- a/selector_alloc.go
+++ b/selector_alloc.go
@@ -17,7 +17,7 @@ type allocSelector struct {
 	ptype stores.PathType
 }

-func newAllocSelector(ctx context.Context, index stores.SectorIndex, alloc stores.SectorFileType, ptype stores.PathType) *allocSelector {
+func newAllocSelector(index stores.SectorIndex, alloc stores.SectorFileType, ptype stores.PathType) *allocSelector {
 	return &allocSelector{
 		index: index,
 		alloc: alloc,
diff --git a/selector_existing.go b/selector_existing.go
index 3f99010cb..20cb1b209 100644
--- a/selector_existing.go
+++ b/selector_existing.go
@@ -12,18 +12,19 @@ import (
 )

 type existingSelector struct {
-	best []stores.SectorStorageInfo
+	index stores.SectorIndex
+	sector abi.SectorID
+	alloc stores.SectorFileType
+	allowFetch bool
 }

-func newExistingSelector(ctx context.Context, index stores.SectorIndex, sector abi.SectorID, alloc stores.SectorFileType, allowFetch bool) (*existingSelector, error) {
-	best, err := index.StorageFindSector(ctx, sector, alloc, allowFetch)
-	if err != nil {
-		return nil, err
-	}
-
+func newExistingSelector(index stores.SectorIndex, sector abi.SectorID, alloc stores.SectorFileType, allowFetch bool) *existingSelector {
 	return &existingSelector{
-		best: best,
-	}, nil
+		index: index,
+		sector: sector,
+		alloc: alloc,
+		allowFetch: allowFetch,
+	}
 }

 func (s *existingSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredSealProof, whnd *workerHandle) (bool, error) {
@@ -45,7 +46,12 @@ func (s *existingSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt
 		have[path.ID] = struct{}{}
 	}

-	for _, info := range s.best {
+	best, err := s.index.StorageFindSector(ctx, s.sector, s.alloc, s.allowFetch)
+	if err != nil {
+		return false, xerrors.Errorf("finding best storage: %w", err)
+	}
+
+	for _, info := range best {
 		if _, ok := have[info.ID]; ok {
 			return true, nil
 		}
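The fix above changes when the storage index is consulted: existingSelector used to snapshot StorageFindSector results at construction, and now queries inside Ok, so a sector that moves or finishes fetching between selector creation and scheduling is still found, and the constructors become infallible. A stripped-down runnable sketch of that shape, with toy stand-ins for the stores and abi types (the real interfaces carry more parameters):

package main

import (
	"context"
	"fmt"

	"golang.org/x/xerrors"
)

// Stand-ins for stores.SectorIndex, abi.SectorID, and
// stores.SectorStorageInfo; illustration only.
type SectorID struct{ Number uint64 }
type StorageInfo struct{ ID string }

type SectorIndex interface {
	StorageFindSector(ctx context.Context, s SectorID, allowFetch bool) ([]StorageInfo, error)
}

// existingSelector keeps only the query inputs; the index is consulted
// each time Ok runs, never at construction time.
type existingSelector struct {
	index      SectorIndex
	sector     SectorID
	allowFetch bool
}

func newExistingSelector(index SectorIndex, sector SectorID, allowFetch bool) *existingSelector {
	return &existingSelector{index: index, sector: sector, allowFetch: allowFetch}
}

// Ok reports whether the worker, represented here by the set of storage
// IDs it can reach, currently holds the sector's files.
func (s *existingSelector) Ok(ctx context.Context, have map[string]struct{}) (bool, error) {
	best, err := s.index.StorageFindSector(ctx, s.sector, s.allowFetch)
	if err != nil {
		return false, xerrors.Errorf("finding best storage: %w", err)
	}
	for _, info := range best {
		if _, ok := have[info.ID]; ok {
			return true, nil
		}
	}
	return false, nil
}

// mapIndex is a toy in-memory index; updates made between Ok calls are
// observed immediately, which is the point of the refactor.
type mapIndex map[uint64][]StorageInfo

func (m mapIndex) StorageFindSector(ctx context.Context, s SectorID, _ bool) ([]StorageInfo, error) {
	return m[s.Number], nil
}

func main() {
	idx := mapIndex{}
	sel := newExistingSelector(idx, SectorID{Number: 1}, false)
	have := map[string]struct{}{"store-a": {}}

	ok, _ := sel.Ok(context.Background(), have)
	fmt.Println(ok) // false: the index has no copy of the sector yet

	idx[1] = []StorageInfo{{ID: "store-a"}} // sector lands in storage later
	ok, _ = sel.Ok(context.Background(), have)
	fmt.Println(ok) // true: Ok re-queries and sees the new location
}

The second Ok call succeeds only because the lookup is deferred; with the old construction-time snapshot it would still report false.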
From 77e4adb5567e31b8ad164a1df6de36bc2eb96466 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C5=81ukasz=20Magiera?=
Date: Wed, 5 Aug 2020 22:11:53 +0200
Subject: [PATCH 51/51] gofmt

---
 selector_existing.go | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/selector_existing.go b/selector_existing.go
index 20cb1b209..a11c39007 100644
--- a/selector_existing.go
+++ b/selector_existing.go
@@ -12,17 +12,17 @@ import (
 )

 type existingSelector struct {
-	index stores.SectorIndex
-	sector abi.SectorID
-	alloc stores.SectorFileType
+	index      stores.SectorIndex
+	sector     abi.SectorID
+	alloc      stores.SectorFileType
 	allowFetch bool
 }

 func newExistingSelector(index stores.SectorIndex, sector abi.SectorID, alloc stores.SectorFileType, allowFetch bool) *existingSelector {
 	return &existingSelector{
-		index: index,
-		sector: sector,
-		alloc: alloc,
+		index:      index,
+		sector:     sector,
+		alloc:      alloc,
 		allowFetch: allowFetch,
 	}
 }
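This follow-up exists because patch 50 added the struct fields by hand with single spaces; gofmt aligns the values of adjacent struct fields, and of adjacent keys in composite literals, on a common column, and running gofmt -l or gofmt -d in CI would have flagged the file before a separate commit was needed. A before/after illustration with stand-in field types (the real ones come from the stores and abi packages):

package main

// As committed in patch 50 (hand-edited, single-space gaps):
type selectorBefore struct {
	index int
	sector int64
	alloc uint32
	allowFetch bool
}

// As gofmt rewrites it: values in a contiguous run of fields share one
// column, sized to the longest name in the run (allowFetch):
type selectorAfter struct {
	index      int
	sector     int64
	alloc      uint32
	allowFetch bool
}

func main() {} // nothing to run; the point is the formatting above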