Merge pull request #4871 from filecoin-project/release/v1.2.0
v1.2.0 staging branch
commit dc3d17c9ea
@@ -5,7 +5,7 @@ orbs:
 executors:
   golang:
     docker:
-      - image: circleci/golang:1.14.6
+      - image: circleci/golang:1.15.5
     resource_class: 2xlarge
   ubuntu:
     docker:
@@ -294,8 +294,8 @@ jobs:
       - run:
           name: Install go
           command: |
-            curl -O https://dl.google.com/go/go1.14.2.darwin-amd64.pkg && \
-            sudo installer -pkg go1.14.2.darwin-amd64.pkg -target /
+            curl -O https://dl.google.com/go/go1.15.5.darwin-amd64.pkg && \
+            sudo installer -pkg go1.15.5.darwin-amd64.pkg -target /
       - run:
           name: Install pkg-config
           command: HOMEBREW_NO_AUTO_UPDATE=1 brew install pkg-config
CHANGELOG.md (39 changed lines)
@@ -1,5 +1,44 @@
 # Lotus changelog

+# 1.2.0 / 2020-11-18
+
+This is a mandatory release of Lotus that introduces the second post-liftoff upgrade to the Filecoin network. The network upgrade occurs at height 265200, before which time all nodes must have updated to this release (or later). This release also bumps the required version of Go to 1.15.
+
+The changes that break consensus are:
+
+- Upgrading to specs-actors 2.3.2 (https://github.com/filecoin-project/specs-actors/releases/tag/v2.3.2)
+- Introducing proofs v5.4.0 (https://github.com/filecoin-project/rust-fil-proofs/releases/tag/storage-proofs-v5.4.0), and switching between the proof types (https://github.com/filecoin-project/lotus/pull/4873)
+- Don't use terminated sectors for winning PoSt (https://github.com/filecoin-project/lotus/pull/4770)
+- Various small VM-level edge-case handling (https://github.com/filecoin-project/lotus/pull/4783)
+- Correction of the VM circulating supply calculation (https://github.com/filecoin-project/lotus/pull/4862)
+- Retuning gas costs (https://github.com/filecoin-project/lotus/pull/4830)
+- Avoid sending messages to the zero BLS address (https://github.com/filecoin-project/lotus/pull/4888)
+
+## Other Changes
+
+- delayed pubsub subscribe for messages topic (https://github.com/filecoin-project/lotus/pull/3646)
+- add chain base64 decode params (https://github.com/filecoin-project/lotus/pull/4748)
+- chore(dep): update bitswap to fix an initialization race that could panic (https://github.com/filecoin-project/lotus/pull/4855)
+- Chore/blockstore nits (https://github.com/filecoin-project/lotus/pull/4813)
+- Print Consensus Faults in miner info (https://github.com/filecoin-project/lotus/pull/4853)
+- Truncate genesis file before generating (https://github.com/filecoin-project/lotus/pull/4851)
+- miner: Winning PoSt Warmup (https://github.com/filecoin-project/lotus/pull/4824)
+- Fix init actor address map diffing (https://github.com/filecoin-project/lotus/pull/4875)
+- Bump API versions to 1.0.0 (https://github.com/filecoin-project/lotus/pull/4884)
+- Fix cid recording issue (https://github.com/filecoin-project/lotus/pull/4874)
+- Speed up worker key retrieval (https://github.com/filecoin-project/lotus/pull/4885)
+- Add error codes to worker return (https://github.com/filecoin-project/lotus/pull/4890)
+- Update go to 1.15.5 (https://github.com/filecoin-project/lotus/pull/4896)
+- Fix MaxSealingSectorsForDeals getting reset to 0 (https://github.com/filecoin-project/lotus/pull/4879)
+- add sanity check for maximum block size (https://github.com/filecoin-project/lotus/pull/3171)
+- Check (pre)commit receipt before other checks in failed states (https://github.com/filecoin-project/lotus/pull/4712)
+- fix badger double open on daemon --import-snapshot; chainstore lifecycle (https://github.com/filecoin-project/lotus/pull/4872)
+- Update to ipfs-blockstore 1.0.3 (https://github.com/filecoin-project/lotus/pull/4897)
+- break loop when found warm up sector (https://github.com/filecoin-project/lotus/pull/4869)
+- Tweak handling of bad beneficiaries in DeleteActor (https://github.com/filecoin-project/lotus/pull/4903)
+- cap maximum number of messages per block in selection (https://github.com/filecoin-project/lotus/pull/4905)
+- Set Calico epoch (https://github.com/filecoin-project/lotus/pull/4889)
+
 # 1.1.3 / 2020-11-13

 This is an optional release of Lotus that upgrades Lotus dependencies, and includes many performance enhancements, bugfixes, and UX improvements.
Makefile (8 changed lines)
@@ -5,10 +5,10 @@ all: build

 unexport GOFLAGS

-GOVERSION:=$(shell go version | cut -d' ' -f 3 | cut -d. -f 2)
-ifeq ($(shell expr $(GOVERSION) \< 14), 1)
-$(warning Your Golang version is go 1.$(GOVERSION))
-$(error Update Golang to version $(shell grep '^go' go.mod))
+GOVERSION:=$(shell go version | cut -d' ' -f 3 | awk -F. '{printf "%d%03d", $$2, $$3}')
+ifeq ($(shell expr $(GOVERSION) \< 15005), 1)
+$(warning Your Golang version is go 1.$(shell expr $(GOVERSION) / 1000).$(shell expr $(GOVERSION) % 1000))
+$(error Update Golang to version to at least 1.15.5)
 endif

 # git modules that need to be loaded
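The new check packs Go's minor and patch numbers into one comparable integer (go1.15.5 becomes 15005), so a single numeric test can require at least 1.15.5 instead of only matching the minor version. A minimal Go sketch of the same encoding, for illustration only:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// encode packs "go1.MINOR.PATCH" into MINOR*1000 + PATCH, mirroring the
// Makefile's awk -F. '{printf "%d%03d", $2, $3}' expression.
func encode(version string) (int, error) {
	parts := strings.Split(strings.TrimPrefix(version, "go"), ".")
	if len(parts) < 3 {
		return 0, fmt.Errorf("unexpected version string: %q", version)
	}
	minor, err := strconv.Atoi(parts[1])
	if err != nil {
		return 0, err
	}
	patch, err := strconv.Atoi(parts[2])
	if err != nil {
		return 0, err
	}
	return minor*1000 + patch, nil
}

func main() {
	v, _ := encode("go1.15.5")
	fmt.Println(v, v >= 15005) // 15005 true: new enough to build
}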
@@ -311,17 +311,17 @@ type StorageMinerStruct struct {
 	WorkerStats func(context.Context) (map[uuid.UUID]storiface.WorkerStats, error) `perm:"admin"`
 	WorkerJobs  func(context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) `perm:"admin"`

-	ReturnAddPiece        func(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err string) error `perm:"admin" retry:"true"`
-	ReturnSealPreCommit1  func(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err string) error `perm:"admin" retry:"true"`
-	ReturnSealPreCommit2  func(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err string) error `perm:"admin" retry:"true"`
-	ReturnSealCommit1     func(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err string) error `perm:"admin" retry:"true"`
-	ReturnSealCommit2     func(ctx context.Context, callID storiface.CallID, proof storage.Proof, err string) error `perm:"admin" retry:"true"`
-	ReturnFinalizeSector  func(ctx context.Context, callID storiface.CallID, err string) error `perm:"admin" retry:"true"`
-	ReturnReleaseUnsealed func(ctx context.Context, callID storiface.CallID, err string) error `perm:"admin" retry:"true"`
-	ReturnMoveStorage     func(ctx context.Context, callID storiface.CallID, err string) error `perm:"admin" retry:"true"`
-	ReturnUnsealPiece     func(ctx context.Context, callID storiface.CallID, err string) error `perm:"admin" retry:"true"`
-	ReturnReadPiece       func(ctx context.Context, callID storiface.CallID, ok bool, err string) error `perm:"admin" retry:"true"`
-	ReturnFetch           func(ctx context.Context, callID storiface.CallID, err string) error `perm:"admin" retry:"true"`
+	ReturnAddPiece        func(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error `perm:"admin" retry:"true"`
+	ReturnSealPreCommit1  func(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err *storiface.CallError) error `perm:"admin" retry:"true"`
+	ReturnSealPreCommit2  func(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err *storiface.CallError) error `perm:"admin" retry:"true"`
+	ReturnSealCommit1     func(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err *storiface.CallError) error `perm:"admin" retry:"true"`
+	ReturnSealCommit2     func(ctx context.Context, callID storiface.CallID, proof storage.Proof, err *storiface.CallError) error `perm:"admin" retry:"true"`
+	ReturnFinalizeSector  func(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error `perm:"admin" retry:"true"`
+	ReturnReleaseUnsealed func(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error `perm:"admin" retry:"true"`
+	ReturnMoveStorage     func(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error `perm:"admin" retry:"true"`
+	ReturnUnsealPiece     func(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error `perm:"admin" retry:"true"`
+	ReturnReadPiece       func(ctx context.Context, callID storiface.CallID, ok bool, err *storiface.CallError) error `perm:"admin" retry:"true"`
+	ReturnFetch           func(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error `perm:"admin" retry:"true"`

 	SealingSchedDiag func(context.Context, bool) (interface{}, error) `perm:"admin"`
 	SealingAbort     func(ctx context.Context, call storiface.CallID) error `perm:"admin"`
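These signatures replace the bare error string in worker returns with a structured *storiface.CallError, which is what lets PR #4890 attach machine-readable error codes that survive the RPC boundary. A minimal sketch of the shape of such a type — the field and constant names below are illustrative assumptions, not the exact lotus definitions:

package main

import "fmt"

// ErrorCode classifies a failed remote call; values are illustrative.
type ErrorCode int

const (
	ErrUnknown ErrorCode = iota
	ErrTempUnknown
	ErrTempWorkerRestart
)

// CallError carries a code alongside the message, so callers can
// distinguish retryable failures from permanent ones.
type CallError struct {
	Code    ErrorCode
	Message string
}

func (c *CallError) Error() string {
	return fmt.Sprintf("storage call error %d: %s", c.Code, c.Message)
}

func main() {
	err := &CallError{Code: ErrTempWorkerRestart, Message: "worker restarted during seal"}
	// A scheduler can retry on temporary codes and fail fast otherwise.
	retryable := err.Code == ErrTempUnknown || err.Code == ErrTempWorkerRestart
	fmt.Println(err, "retryable:", retryable)
}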
@@ -373,17 +373,17 @@ type WorkerStruct struct {
 	Paths func(context.Context) ([]stores.StoragePath, error) `perm:"admin"`
 	Info  func(context.Context) (storiface.WorkerInfo, error) `perm:"admin"`

-	AddPiece        func(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) `perm:"admin"`
-	SealPreCommit1  func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) `perm:"admin"`
-	SealPreCommit2  func(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storiface.CallID, error) `perm:"admin"`
-	SealCommit1     func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) `perm:"admin"`
-	SealCommit2     func(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storiface.CallID, error) `perm:"admin"`
-	FinalizeSector  func(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) (storiface.CallID, error) `perm:"admin"`
-	ReleaseUnsealed func(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) (storiface.CallID, error) `perm:"admin"`
-	MoveStorage     func(ctx context.Context, sector abi.SectorID, types storiface.SectorFileType) (storiface.CallID, error) `perm:"admin"`
-	UnsealPiece     func(context.Context, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (storiface.CallID, error) `perm:"admin"`
-	ReadPiece       func(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) (storiface.CallID, error) `perm:"admin"`
-	Fetch           func(context.Context, abi.SectorID, storiface.SectorFileType, storiface.PathType, storiface.AcquireMode) (storiface.CallID, error) `perm:"admin"`
+	AddPiece        func(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) `perm:"admin"`
+	SealPreCommit1  func(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) `perm:"admin"`
+	SealPreCommit2  func(ctx context.Context, sector storage.SectorRef, pc1o storage.PreCommit1Out) (storiface.CallID, error) `perm:"admin"`
+	SealCommit1     func(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) `perm:"admin"`
+	SealCommit2     func(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (storiface.CallID, error) `perm:"admin"`
+	FinalizeSector  func(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) `perm:"admin"`
+	ReleaseUnsealed func(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (storiface.CallID, error) `perm:"admin"`
+	MoveStorage     func(ctx context.Context, sector storage.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) `perm:"admin"`
+	UnsealPiece     func(context.Context, storage.SectorRef, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (storiface.CallID, error) `perm:"admin"`
+	ReadPiece       func(context.Context, io.Writer, storage.SectorRef, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) (storiface.CallID, error) `perm:"admin"`
+	Fetch           func(context.Context, storage.SectorRef, storiface.SectorFileType, storiface.PathType, storiface.AcquireMode) (storiface.CallID, error) `perm:"admin"`

 	Remove          func(ctx context.Context, sector abi.SectorID) error `perm:"admin"`
 	StorageAddLocal func(ctx context.Context, path string) error `perm:"admin"`
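Every worker entry point now takes a storage.SectorRef instead of a bare abi.SectorID. In specs-storage a SectorRef bundles the sector identity with its registered seal proof type, so the proof-type switch at Calico can flow through the sealing pipeline without out-of-band lookups. A small sketch of constructing one (the chosen proof type here is an arbitrary example):

package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/specs-storage/storage"
)

func main() {
	// SectorRef carries the proof type alongside the miner/sector IDs,
	// so workers no longer have to resolve it separately.
	ref := storage.SectorRef{
		ID: abi.SectorID{
			Miner:  abi.ActorID(1000),
			Number: abi.SectorNumber(42),
		},
		ProofType: abi.RegisteredSealProof_StackedDrg32GiBV1_1,
	}
	fmt.Printf("sector %d of miner %d, proof %d\n", ref.ID.Number, ref.ID.Miner, ref.ProofType)
}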
@@ -1271,47 +1271,47 @@ func (c *StorageMinerStruct) WorkerJobs(ctx context.Context) (map[uuid.UUID][]st
 	return c.Internal.WorkerJobs(ctx)
 }

-func (c *StorageMinerStruct) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err string) error {
+func (c *StorageMinerStruct) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error {
 	return c.Internal.ReturnAddPiece(ctx, callID, pi, err)
 }

-func (c *StorageMinerStruct) ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err string) error {
+func (c *StorageMinerStruct) ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err *storiface.CallError) error {
 	return c.Internal.ReturnSealPreCommit1(ctx, callID, p1o, err)
 }

-func (c *StorageMinerStruct) ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err string) error {
+func (c *StorageMinerStruct) ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err *storiface.CallError) error {
 	return c.Internal.ReturnSealPreCommit2(ctx, callID, sealed, err)
 }

-func (c *StorageMinerStruct) ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err string) error {
+func (c *StorageMinerStruct) ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err *storiface.CallError) error {
 	return c.Internal.ReturnSealCommit1(ctx, callID, out, err)
 }

-func (c *StorageMinerStruct) ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err string) error {
+func (c *StorageMinerStruct) ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err *storiface.CallError) error {
 	return c.Internal.ReturnSealCommit2(ctx, callID, proof, err)
 }

-func (c *StorageMinerStruct) ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err string) error {
+func (c *StorageMinerStruct) ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error {
 	return c.Internal.ReturnFinalizeSector(ctx, callID, err)
 }

-func (c *StorageMinerStruct) ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err string) error {
+func (c *StorageMinerStruct) ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error {
 	return c.Internal.ReturnReleaseUnsealed(ctx, callID, err)
 }

-func (c *StorageMinerStruct) ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err string) error {
+func (c *StorageMinerStruct) ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error {
 	return c.Internal.ReturnMoveStorage(ctx, callID, err)
 }

-func (c *StorageMinerStruct) ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err string) error {
+func (c *StorageMinerStruct) ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error {
 	return c.Internal.ReturnUnsealPiece(ctx, callID, err)
 }

-func (c *StorageMinerStruct) ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err string) error {
+func (c *StorageMinerStruct) ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err *storiface.CallError) error {
 	return c.Internal.ReturnReadPiece(ctx, callID, ok, err)
 }

-func (c *StorageMinerStruct) ReturnFetch(ctx context.Context, callID storiface.CallID, err string) error {
+func (c *StorageMinerStruct) ReturnFetch(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error {
 	return c.Internal.ReturnFetch(ctx, callID, err)
 }
@@ -1513,47 +1513,47 @@ func (w *WorkerStruct) Info(ctx context.Context) (storiface.WorkerInfo, error) {
 	return w.Internal.Info(ctx)
 }

-func (w *WorkerStruct) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) {
+func (w *WorkerStruct) AddPiece(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) {
 	return w.Internal.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData)
 }

-func (w *WorkerStruct) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) {
+func (w *WorkerStruct) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) {
 	return w.Internal.SealPreCommit1(ctx, sector, ticket, pieces)
 }

-func (w *WorkerStruct) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storiface.CallID, error) {
+func (w *WorkerStruct) SealPreCommit2(ctx context.Context, sector storage.SectorRef, pc1o storage.PreCommit1Out) (storiface.CallID, error) {
 	return w.Internal.SealPreCommit2(ctx, sector, pc1o)
 }

-func (w *WorkerStruct) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) {
+func (w *WorkerStruct) SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) {
 	return w.Internal.SealCommit1(ctx, sector, ticket, seed, pieces, cids)
 }

-func (w *WorkerStruct) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storiface.CallID, error) {
+func (w *WorkerStruct) SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (storiface.CallID, error) {
 	return w.Internal.SealCommit2(ctx, sector, c1o)
 }

-func (w *WorkerStruct) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) (storiface.CallID, error) {
+func (w *WorkerStruct) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) {
 	return w.Internal.FinalizeSector(ctx, sector, keepUnsealed)
 }

-func (w *WorkerStruct) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) (storiface.CallID, error) {
+func (w *WorkerStruct) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (storiface.CallID, error) {
 	return w.Internal.ReleaseUnsealed(ctx, sector, safeToFree)
 }

-func (w *WorkerStruct) MoveStorage(ctx context.Context, sector abi.SectorID, types storiface.SectorFileType) (storiface.CallID, error) {
+func (w *WorkerStruct) MoveStorage(ctx context.Context, sector storage.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) {
 	return w.Internal.MoveStorage(ctx, sector, types)
 }

-func (w *WorkerStruct) UnsealPiece(ctx context.Context, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, c cid.Cid) (storiface.CallID, error) {
+func (w *WorkerStruct) UnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, c cid.Cid) (storiface.CallID, error) {
 	return w.Internal.UnsealPiece(ctx, sector, offset, size, ticket, c)
 }

-func (w *WorkerStruct) ReadPiece(ctx context.Context, sink io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) {
+func (w *WorkerStruct) ReadPiece(ctx context.Context, sink io.Writer, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) {
 	return w.Internal.ReadPiece(ctx, sink, sector, offset, size)
 }

-func (w *WorkerStruct) Fetch(ctx context.Context, id abi.SectorID, fileType storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) {
+func (w *WorkerStruct) Fetch(ctx context.Context, id storage.SectorRef, fileType storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) {
 	return w.Internal.Fetch(ctx, id, fileType, ptype, am)
 }
@@ -90,7 +90,7 @@ func init() {
 	addExample(&pid)

 	addExample(bitfield.NewFromSet([]uint64{5}))
-	addExample(abi.RegisteredSealProof_StackedDrg32GiBV1)
+	addExample(abi.RegisteredSealProof_StackedDrg32GiBV1_1)
 	addExample(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1)
 	addExample(abi.ChainEpoch(10101))
 	addExample(crypto.SigTypeBLS)
@@ -233,6 +233,7 @@ func init() {
 			CpuUse: 0,
 		},
 	})
+	addExample(storiface.ErrorCode(0))

 	// worker specific
 	addExample(storiface.AcquireMove)
@@ -31,7 +31,7 @@ func TestCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) {

 func testCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration, upgradeHeight abi.ChainEpoch) {
 	ctx := context.Background()
-	n, sn := b(t, []FullNodeOpts{FullNodeWithUpgradeAt(upgradeHeight)}, OneMiner)
+	n, sn := b(t, []FullNodeOpts{FullNodeWithActorsV2At(upgradeHeight)}, OneMiner)
 	client := n[0].FullNode.(*impl.FullNodeAPI)
 	miner := sn[0]
@@ -109,7 +109,7 @@ var OneMiner = []StorageMiner{{Full: 0, Preseal: PresealGenesis}}
 var OneFull = DefaultFullOpts(1)
 var TwoFull = DefaultFullOpts(2)

-var FullNodeWithUpgradeAt = func(upgradeHeight abi.ChainEpoch) FullNodeOpts {
+var FullNodeWithActorsV2At = func(upgradeHeight abi.ChainEpoch) FullNodeOpts {
 	return FullNodeOpts{
 		Opts: func(nodes []TestNode) node.Option {
 			return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
@@ -122,6 +122,25 @@ var FullNodeWithUpgradeAt = func(upgradeHeight abi.ChainEpoch) FullNodeOpts {
 	}
 }

+var FullNodeWithSDRAt = func(calico, persian abi.ChainEpoch) FullNodeOpts {
+	return FullNodeOpts{
+		Opts: func(nodes []TestNode) node.Option {
+			return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
+				Network:   network.Version6,
+				Height:    1,
+				Migration: stmgr.UpgradeActorsV2,
+			}, {
+				Network:   network.Version7,
+				Height:    calico,
+				Migration: stmgr.UpgradeCalico,
+			}, {
+				Network: network.Version8,
+				Height:  persian,
+			}})
+		},
+	}
+}
+
 var MineNext = miner.MineReq{
 	InjectNulls: 0,
 	Done:        func(bool, abi.ChainEpoch, error) {},
@@ -3,18 +3,22 @@ package test
 import (
 	"context"
 	"fmt"
+	"sort"
+	"sync/atomic"

 	"strings"
 	"testing"
 	"time"

 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"

 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/go-state-types/network"
 	"github.com/filecoin-project/lotus/extern/sector-storage/mock"
 	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
+	"github.com/filecoin-project/specs-storage/storage"

 	"github.com/filecoin-project/lotus/api"
 	"github.com/filecoin-project/lotus/build"
@@ -23,6 +27,90 @@ import (
 	"github.com/filecoin-project/lotus/node/impl"
 )

+func TestSDRUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	n, sn := b(t, []FullNodeOpts{FullNodeWithSDRAt(500, 1000)}, OneMiner)
+	client := n[0].FullNode.(*impl.FullNodeAPI)
+	miner := sn[0]
+
+	addrinfo, err := client.NetAddrsListen(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := miner.NetConnect(ctx, addrinfo); err != nil {
+		t.Fatal(err)
+	}
+	build.Clock.Sleep(time.Second)
+
+	pledge := make(chan struct{})
+	mine := int64(1)
+	done := make(chan struct{})
+	go func() {
+		defer close(done)
+		round := 0
+		for atomic.LoadInt64(&mine) != 0 {
+			build.Clock.Sleep(blocktime)
+			if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
+
+			}}); err != nil {
+				t.Error(err)
+			}
+
+			// 3 sealing rounds: before, during, after.
+			if round >= 3 {
+				continue
+			}
+
+			head, err := client.ChainHead(ctx)
+			assert.NoError(t, err)
+
+			// rounds happen every 100 blocks, with a 50 block offset.
+			if head.Height() >= abi.ChainEpoch(round*500+50) {
+				round++
+				pledge <- struct{}{}
+
+				ver, err := client.StateNetworkVersion(ctx, head.Key())
+				assert.NoError(t, err)
+				switch round {
+				case 1:
+					assert.Equal(t, network.Version6, ver)
+				case 2:
+					assert.Equal(t, network.Version7, ver)
+				case 3:
+					assert.Equal(t, network.Version8, ver)
+				}
+			}
+		}
+	}()
+
+	// before.
+	pledgeSectors(t, ctx, miner, 9, 0, pledge)
+
+	s, err := miner.SectorsList(ctx)
+	require.NoError(t, err)
+	sort.Slice(s, func(i, j int) bool {
+		return s[i] < s[j]
+	})
+
+	for i, id := range s {
+		info, err := miner.SectorsStatus(ctx, id, true)
+		require.NoError(t, err)
+		expectProof := abi.RegisteredSealProof_StackedDrg2KiBV1
+		if i >= 3 {
+			// after
+			expectProof = abi.RegisteredSealProof_StackedDrg2KiBV1_1
+		}
+		assert.Equal(t, expectProof, info.SealProof, "sector %d, id %d", i, id)
+	}
+
+	atomic.StoreInt64(&mine, 0)
+	<-done
+}
+
 func TestPledgeSector(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
@@ -63,11 +151,13 @@ func TestPledgeSector(t *testing.T, b APIBuilder, blocktime time.Duration, nSect

 func pledgeSectors(t *testing.T, ctx context.Context, miner TestStorageNode, n, existing int, blockNotif <-chan struct{}) {
 	for i := 0; i < n; i++ {
-		err := miner.PledgeSector(ctx)
-		require.NoError(t, err)
 		if i%3 == 0 && blockNotif != nil {
 			<-blockNotif
+			log.Errorf("WAIT")
 		}
+		log.Errorf("PLEDGING %d", i)
+		err := miner.PledgeSector(ctx)
+		require.NoError(t, err)
 	}

 	for {
@@ -126,7 +216,7 @@ func testWindowPostUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration,
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()

-	n, sn := b(t, []FullNodeOpts{FullNodeWithUpgradeAt(upgradeHeight)}, OneMiner)
+	n, sn := b(t, []FullNodeOpts{FullNodeWithActorsV2At(upgradeHeight)}, OneMiner)

 	client := n[0].FullNode.(*impl.FullNodeAPI)
 	miner := sn[0]
@@ -209,15 +299,17 @@ func testWindowPostUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration,

 		// Drop the partition
 		err = secs.ForEach(func(sid uint64) error {
-			return miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkCorrupted(abi.SectorID{
-				Miner:  abi.ActorID(mid),
-				Number: abi.SectorNumber(sid),
+			return miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkCorrupted(storage.SectorRef{
+				ID: abi.SectorID{
+					Miner:  abi.ActorID(mid),
+					Number: abi.SectorNumber(sid),
+				},
 			}, true)
 		})
 		require.NoError(t, err)
 	}

-	var s abi.SectorID
+	var s storage.SectorRef

 	// Drop 1 sectors from deadline 3 partition 0
 	{
@@ -238,9 +330,11 @@ func testWindowPostUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration,
 		require.NoError(t, err)
 		fmt.Println("the sectors", all)

-		s = abi.SectorID{
-			Miner:  abi.ActorID(mid),
-			Number: abi.SectorNumber(sn),
+		s = storage.SectorRef{
+			ID: abi.SectorID{
+				Miner:  abi.ActorID(mid),
+				Number: abi.SectorNumber(sn),
+			},
 		}

 		err = miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkFailed(s, true)
@@ -22,6 +22,8 @@ var UpgradeActorsV2Height = abi.ChainEpoch(10)
 var UpgradeLiftoffHeight = abi.ChainEpoch(-5)

 const UpgradeKumquatHeight = 15
+const UpgradeCalicoHeight = 20
+const UpgradePersianHeight = 25

 var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
 	0: DrandMainnet,
@@ -38,12 +38,11 @@ const UpgradeLiftoffHeight = 148888

 const UpgradeKumquatHeight = 170000

+const UpgradeCalicoHeight = 265200
+const UpgradePersianHeight = UpgradeCalicoHeight + (builtin2.EpochsInHour * 60)
+
 func init() {
 	policy.SetConsensusMinerMinPower(abi.NewStoragePower(10 << 40))
-	policy.SetSupportedProofTypes(
-		abi.RegisteredSealProof_StackedDrg32GiBV1,
-		abi.RegisteredSealProof_StackedDrg64GiBV1,
-	)

 	if os.Getenv("LOTUS_USE_TEST_ADDRESSES") != "1" {
 		SetAddressNetwork(address.Mainnet)
@@ -19,3 +19,12 @@ func DhtProtocolName(netName dtypes.NetworkName) protocol.ID {
 func SetAddressNetwork(n address.Network) {
 	address.CurrentNetwork = n
 }
+
+func MustParseAddress(addr string) address.Address {
+	ret, err := address.NewFromString(addr)
+	if err != nil {
+		panic(err)
+	}
+
+	return ret
+}
@@ -25,7 +25,7 @@ const UnixfsLinksPerLevel = 1024
 // Consensus / Network

 const AllowableClockDriftSecs = uint64(1)
-const NewestNetworkVersion = network.Version6
+const NewestNetworkVersion = network.Version8
 const ActorUpgradeNetworkVersion = network.Version4

 // Epochs
@@ -61,6 +61,9 @@ const TicketRandomnessLookback = abi.ChainEpoch(1)

 const AddressMainnetEnvVar = "_mainnet_"

+// the 'f' prefix doesn't matter
+var ZeroAddress = MustParseAddress("f3yaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaby2smx7a")
+
 // /////
 // Devnet settings
@@ -88,15 +88,18 @@ var (
 	UpgradeActorsV2Height abi.ChainEpoch = 10
 	UpgradeLiftoffHeight  abi.ChainEpoch = -5
 	UpgradeKumquatHeight  abi.ChainEpoch = -6
+	UpgradeCalicoHeight   abi.ChainEpoch = -7
+	UpgradePersianHeight  abi.ChainEpoch = -8

 	DrandSchedule = map[abi.ChainEpoch]DrandEnum{
 		0: DrandMainnet,
 	}

-	NewestNetworkVersion       = network.Version5
+	NewestNetworkVersion       = network.Version8
 	ActorUpgradeNetworkVersion = network.Version4

-	Devnet = true
+	Devnet      = true
+	ZeroAddress = MustParseAddress("f3yaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaby2smx7a")
 )

 const BootstrapPeerThreshold = 1
@@ -29,7 +29,7 @@ func buildType() string {
 }

 // BuildVersion is the local build version, set by build system
-const BuildVersion = "1.1.3"
+const BuildVersion = "1.2.0"

 func UserVersion() string {
 	return BuildVersion + buildType() + CurrentCommit
@@ -83,9 +83,9 @@ func VersionForType(nodeType NodeType) (Version, error) {

 // semver versions of the rpc api exposed
 var (
-	FullAPIVersion   = newVer(0, 17, 0)
-	MinerAPIVersion  = newVer(0, 17, 0)
-	WorkerAPIVersion = newVer(0, 16, 0)
+	FullAPIVersion   = newVer(1, 0, 0)
+	MinerAPIVersion  = newVer(1, 0, 0)
+	WorkerAPIVersion = newVer(1, 0, 0)
 )

 //nolint:varcheck,deadcode
@@ -4,6 +4,8 @@ import (
 	"golang.org/x/xerrors"

 	"github.com/filecoin-project/go-bitfield"
+	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/go-state-types/network"
 )

 func AllPartSectors(mas State, sget func(Partition) (bitfield.BitField, error)) (bitfield.BitField, error) {
@@ -26,3 +28,42 @@ func AllPartSectors(mas State, sget func(Partition) (bitfield.BitField, error))

 	return bitfield.MultiMerge(parts...)
 }
+
+// SealProofTypeFromSectorSize returns preferred seal proof type for creating
+// new miner actors and new sectors
+func SealProofTypeFromSectorSize(ssize abi.SectorSize, nv network.Version) (abi.RegisteredSealProof, error) {
+	switch {
+	case nv < network.Version7:
+		switch ssize {
+		case 2 << 10:
+			return abi.RegisteredSealProof_StackedDrg2KiBV1, nil
+		case 8 << 20:
+			return abi.RegisteredSealProof_StackedDrg8MiBV1, nil
+		case 512 << 20:
+			return abi.RegisteredSealProof_StackedDrg512MiBV1, nil
+		case 32 << 30:
+			return abi.RegisteredSealProof_StackedDrg32GiBV1, nil
+		case 64 << 30:
+			return abi.RegisteredSealProof_StackedDrg64GiBV1, nil
+		default:
+			return 0, xerrors.Errorf("unsupported sector size for miner: %v", ssize)
+		}
+	case nv >= network.Version7:
+		switch ssize {
+		case 2 << 10:
+			return abi.RegisteredSealProof_StackedDrg2KiBV1_1, nil
+		case 8 << 20:
+			return abi.RegisteredSealProof_StackedDrg8MiBV1_1, nil
+		case 512 << 20:
+			return abi.RegisteredSealProof_StackedDrg512MiBV1_1, nil
+		case 32 << 30:
+			return abi.RegisteredSealProof_StackedDrg32GiBV1_1, nil
+		case 64 << 30:
+			return abi.RegisteredSealProof_StackedDrg64GiBV1_1, nil
+		default:
+			return 0, xerrors.Errorf("unsupported sector size for miner: %v", ssize)
+		}
+	}
+
+	return 0, xerrors.Errorf("unsupported network version")
+}
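This helper is the pivot of the proof-type switch: the same sector size yields a V1 proof before network version 7 and a V1_1 proof from Calico onwards. A short usage sketch (the import path is assumed from the genesis change later in this diff):

package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/network"

	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
)

func main() {
	// Before Calico (nv6), a 32GiB sector maps to the V1 proof type...
	pre, _ := miner.SealProofTypeFromSectorSize(abi.SectorSize(32<<30), network.Version6)
	// ...and from Calico (nv7) onwards the same size maps to V1_1.
	post, _ := miner.SealProofTypeFromSectorSize(abi.SectorSize(32<<30), network.Version7)
	fmt.Println(pre == abi.RegisteredSealProof_StackedDrg32GiBV1)    // true
	fmt.Println(post == abi.RegisteredSealProof_StackedDrg32GiBV1_1) // true
}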
@@ -3,6 +3,8 @@ package actors
 import (
 	"bytes"

+	"github.com/filecoin-project/go-state-types/exitcode"
+
 	"github.com/filecoin-project/lotus/chain/actors/aerrors"
 	cbg "github.com/whyrusleeping/cbor-gen"
 )
@@ -11,7 +13,7 @@ func SerializeParams(i cbg.CBORMarshaler) ([]byte, aerrors.ActorError) {
 	buf := new(bytes.Buffer)
 	if err := i.MarshalCBOR(buf); err != nil {
 		// TODO: shouldnt this be a fatal error?
-		return nil, aerrors.Absorb(err, 1, "failed to encode parameter")
+		return nil, aerrors.Absorb(err, exitcode.ErrSerialization, "failed to encode parameter")
 	}
 	return buf.Bytes(), nil
 }
@@ -26,22 +26,29 @@ const (
 // SetSupportedProofTypes sets supported proof types, across all actor versions.
 // This should only be used for testing.
 func SetSupportedProofTypes(types ...abi.RegisteredSealProof) {
-	newTypes := make(map[abi.RegisteredSealProof]struct{}, len(types))
-	for _, t := range types {
-		newTypes[t] = struct{}{}
-	}
-	// Set for all miner versions.
-	miner0.SupportedProofTypes = newTypes
-	miner2.SupportedProofTypes = newTypes
+	miner0.SupportedProofTypes = make(map[abi.RegisteredSealProof]struct{}, len(types))
+	miner2.PreCommitSealProofTypesV0 = make(map[abi.RegisteredSealProof]struct{}, len(types))
+	miner2.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types))
+	miner2.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types))
+
+	AddSupportedProofTypes(types...)
 }

 // AddSupportedProofTypes sets supported proof types, across all actor versions.
 // This should only be used for testing.
 func AddSupportedProofTypes(types ...abi.RegisteredSealProof) {
 	for _, t := range types {
+		if t >= abi.RegisteredSealProof_StackedDrg2KiBV1_1 {
+			panic("must specify v1 proof types only")
+		}
 		// Set for all miner versions.
 		miner0.SupportedProofTypes[t] = struct{}{}
-		miner2.SupportedProofTypes[t] = struct{}{}
+		miner2.PreCommitSealProofTypesV0[t] = struct{}{}
+
+		miner2.PreCommitSealProofTypesV7[t] = struct{}{}
+		miner2.PreCommitSealProofTypesV7[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
+
+		miner2.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
 	}
 }
@@ -133,9 +140,9 @@ func GetMaxPoStPartitions(p abi.RegisteredPoStProof) (int, error) {
 }

 func GetDefaultSectorSize() abi.SectorSize {
-	// supported proof types are the same across versions.
-	szs := make([]abi.SectorSize, 0, len(miner2.SupportedProofTypes))
-	for spt := range miner2.SupportedProofTypes {
+	// supported sector sizes are the same across versions.
+	szs := make([]abi.SectorSize, 0, len(miner2.PreCommitSealProofTypesV8))
+	for spt := range miner2.PreCommitSealProofTypesV8 {
 		ss, err := spt.SectorSize()
 		if err != nil {
 			panic(err)
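Adding abi.RegisteredSealProof_StackedDrg2KiBV1_1 to a V1 proof type works because the registered seal proof enums place each V1_1 variant at a fixed offset from its V1 counterpart. A quick sketch of that assumption:

package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
)

func main() {
	v1 := []abi.RegisteredSealProof{
		abi.RegisteredSealProof_StackedDrg2KiBV1,
		abi.RegisteredSealProof_StackedDrg8MiBV1,
		abi.RegisteredSealProof_StackedDrg512MiBV1,
		abi.RegisteredSealProof_StackedDrg32GiBV1,
		abi.RegisteredSealProof_StackedDrg64GiBV1,
	}
	// Each V1 proof plus the 2KiBV1_1 constant lands on its matching V1_1 proof.
	for _, t := range v1 {
		fmt.Println(t, "->", t+abi.RegisteredSealProof_StackedDrg2KiBV1_1)
	}
}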
@@ -44,7 +44,7 @@ func TestSupportedProofTypes(t *testing.T) {

 // Tests assumptions about policies being the same between actor versions.
 func TestAssumptions(t *testing.T) {
-	require.EqualValues(t, miner0.SupportedProofTypes, miner2.SupportedProofTypes)
+	require.EqualValues(t, miner0.SupportedProofTypes, miner2.PreCommitSealProofTypesV0)
 	require.Equal(t, miner0.PreCommitChallengeDelay, miner2.PreCommitChallengeDelay)
 	require.Equal(t, miner0.MaxSectorExpirationExtension, miner2.MaxSectorExpirationExtension)
 	require.Equal(t, miner0.ChainFinality, miner2.ChainFinality)
@@ -57,10 +57,10 @@ func TestAssumptions(t *testing.T) {
 }

 func TestPartitionSizes(t *testing.T) {
-	for p := range abi.PoStSealProofTypes {
-		sizeNew, err := builtin2.PoStProofWindowPoStPartitionSectors(p)
+	for _, p := range abi.SealProofInfos {
+		sizeNew, err := builtin2.PoStProofWindowPoStPartitionSectors(p.WindowPoStProof)
 		require.NoError(t, err)
-		sizeOld, err := builtin0.PoStProofWindowPoStPartitionSectors(p)
+		sizeOld, err := builtin0.PoStProofWindowPoStPartitionSectors(p.WindowPoStProof)
 		if err != nil {
 			// new proof type.
 			continue
@@ -18,7 +18,7 @@ func VersionForNetwork(version network.Version) Version {
 	switch version {
 	case network.Version0, network.Version1, network.Version2, network.Version3:
 		return Version0
-	case network.Version4, network.Version5, network.Version6:
+	case network.Version4, network.Version5, network.Version6, network.Version7, network.Version8:
 		return Version2
 	default:
 		panic(fmt.Sprintf("unsupported network version %d", version))
@@ -23,8 +23,6 @@ import (
 	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/go-state-types/big"
 	"github.com/filecoin-project/go-state-types/crypto"
-	"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
-
 	builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
 	miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
 	power0 "github.com/filecoin-project/specs-actors/actors/builtin/power"
@@ -101,7 +99,7 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
 		i := i
 		m := m

-		spt, err := ffiwrapper.SealProofTypeFromSectorSize(m.SectorSize)
+		spt, err := miner.SealProofTypeFromSectorSize(m.SectorSize, GenesisNetworkVersion)
 		if err != nil {
 			return cid.Undef, err
 		}
@@ -465,7 +465,7 @@ func (mp *MessagePool) verifyMsgBeforeAdd(m *types.SignedMessage, curTs *types.T
 	epoch := curTs.Height()
 	minGas := vm.PricelistByEpoch(epoch).OnChainMessage(m.ChainLength())

-	if err := m.VMMessage().ValidForBlockInclusion(minGas.Total()); err != nil {
+	if err := m.VMMessage().ValidForBlockInclusion(minGas.Total(), build.NewestNetworkVersion); err != nil {
 		return false, xerrors.Errorf("message will not be included in a block: %w", err)
 	}
@@ -546,7 +546,7 @@ func (mp *MessagePool) checkMessage(m *types.SignedMessage) error {
 	}

 	// Perform syntactic validation, minGas=0 as we check the actual mingas before we add it
-	if err := m.Message.ValidForBlockInclusion(0); err != nil {
+	if err := m.Message.ValidForBlockInclusion(0, build.NewestNetworkVersion); err != nil {
 		return xerrors.Errorf("message not valid for block inclusion: %w", err)
 	}
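Threading build.NewestNetworkVersion into ValidForBlockInclusion makes syntactic message validation version-aware; per the changelog entry for PR #4888, one such rule is refusing messages addressed to the zero BLS address once Version7 activates. A hedged sketch of what a version-gated check can look like — this is not the exact lotus implementation:

package main

import (
	"errors"
	"fmt"

	"github.com/filecoin-project/go-state-types/network"
)

// zeroAddress stands in for build.ZeroAddress; the real check compares the
// message recipient against that known-unspendable BLS address.
const zeroAddress = "f3yaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaby2smx7a"

func validForBlockInclusion(to string, nv network.Version) error {
	// From Version7 (Calico) onwards, refuse messages sent to the zero address.
	if nv >= network.Version7 && to == zeroAddress {
		return errors.New("invalid 'To' address")
	}
	return nil
}

func main() {
	fmt.Println(validForBlockInclusion(zeroAddress, network.Version6)) // <nil>
	fmt.Println(validForBlockInclusion(zeroAddress, network.Version7)) // invalid 'To' address
}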
@@ -21,6 +21,8 @@ import (

 var bigBlockGasLimit = big.NewInt(build.BlockGasLimit)

+var MaxBlockMessages = 16000
+
 // this is *temporary* mutilation until we have implemented uncapped miner penalties -- it will go
 // away in the next fork.
 func allowNegativeChains(epoch abi.ChainEpoch) bool {
@@ -43,7 +45,7 @@ type msgChain struct {
 	prev      *msgChain
 }

-func (mp *MessagePool) SelectMessages(ts *types.TipSet, tq float64) ([]*types.SignedMessage, error) {
+func (mp *MessagePool) SelectMessages(ts *types.TipSet, tq float64) (msgs []*types.SignedMessage, err error) {
 	mp.curTsLk.Lock()
 	defer mp.curTsLk.Unlock()

@@ -54,10 +56,20 @@ func (mp *MessagePool) SelectMessages(ts *types.TipSet, tq float64) ([]*types.Si
 	// than any other block, then we don't bother with optimal selection because the
 	// first block will always have higher effective performance
 	if tq > 0.84 {
-		return mp.selectMessagesGreedy(mp.curTs, ts)
+		msgs, err = mp.selectMessagesGreedy(mp.curTs, ts)
+	} else {
+		msgs, err = mp.selectMessagesOptimal(mp.curTs, ts, tq)
 	}

-	return mp.selectMessagesOptimal(mp.curTs, ts, tq)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(msgs) > MaxBlockMessages {
+		msgs = msgs[:MaxBlockMessages]
+	}
+
+	return msgs, nil
 }

 func (mp *MessagePool) selectMessagesOptimal(curTs, ts *types.TipSet, tq float64) ([]*types.SignedMessage, error) {
@@ -6,6 +6,10 @@ import (
 	"encoding/binary"
 	"math"

+	"github.com/filecoin-project/specs-actors/v2/actors/migration/nv7"
+
+	"github.com/filecoin-project/specs-actors/v2/actors/migration/nv4"
+
 	"github.com/filecoin-project/lotus/chain/actors/builtin"

 	"github.com/filecoin-project/go-address"
@@ -23,7 +27,6 @@ import (
 	adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"

 	"github.com/filecoin-project/specs-actors/actors/migration/nv3"
-	m2 "github.com/filecoin-project/specs-actors/v2/actors/migration"

 	"github.com/filecoin-project/lotus/build"
 	"github.com/filecoin-project/lotus/chain/actors/adt"
@@ -90,6 +93,14 @@ func DefaultUpgradeSchedule() UpgradeSchedule {
 		Height:    build.UpgradeKumquatHeight,
 		Network:   network.Version6,
 		Migration: nil,
+	}, {
+		Height:    build.UpgradeCalicoHeight,
+		Network:   network.Version7,
+		Migration: UpgradeCalico,
+	}, {
+		Height:    build.UpgradePersianHeight,
+		Network:   network.Version8,
+		Migration: nil,
 	}}

 	if build.UpgradeActorsV2Height == math.MaxInt64 { // disable actors upgrade
@@ -601,7 +612,7 @@ func UpgradeActorsV2(ctx context.Context, sm *StateManager, cb ExecCallback, roo
 		return cid.Undef, xerrors.Errorf("failed to create new state info for actors v2: %w", err)
 	}

-	newHamtRoot, err := m2.MigrateStateTree(ctx, store, root, epoch, m2.DefaultConfig())
+	newHamtRoot, err := nv4.MigrateStateTree(ctx, store, root, epoch, nv4.DefaultConfig())
 	if err != nil {
 		return cid.Undef, xerrors.Errorf("upgrading to actors v2: %w", err)
 	}
@@ -652,6 +663,48 @@ func UpgradeLiftoff(ctx context.Context, sm *StateManager, cb ExecCallback, root
 	return tree.Flush(ctx)
 }

+func UpgradeCalico(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+	store := sm.cs.Store(ctx)
+	var stateRoot types.StateRoot
+	if err := store.Get(ctx, root, &stateRoot); err != nil {
+		return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
+	}
+
+	if stateRoot.Version != types.StateTreeVersion1 {
+		return cid.Undef, xerrors.Errorf(
+			"expected state root version 1 for calico upgrade, got %d",
+			stateRoot.Version,
+		)
+	}
+
+	newHamtRoot, err := nv7.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, nv7.DefaultConfig())
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("running nv7 migration: %w", err)
+	}
+
+	newRoot, err := store.Put(ctx, &types.StateRoot{
+		Version: stateRoot.Version,
+		Actors:  newHamtRoot,
+		Info:    stateRoot.Info,
+	})
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
+	}
+
+	// perform some basic sanity checks to make sure everything still works.
+	if newSm, err := state.LoadStateTree(store, newRoot); err != nil {
+		return cid.Undef, xerrors.Errorf("state tree sanity load failed: %w", err)
+	} else if newRoot2, err := newSm.Flush(ctx); err != nil {
+		return cid.Undef, xerrors.Errorf("state tree sanity flush failed: %w", err)
+	} else if newRoot2 != newRoot {
+		return cid.Undef, xerrors.Errorf("state-root mismatch: %s != %s", newRoot, newRoot2)
+	} else if _, err := newSm.GetActor(builtin0.InitActorAddr); err != nil {
+		return cid.Undef, xerrors.Errorf("failed to load init actor after upgrade: %w", err)
+	}
+
+	return newRoot, nil
+}
+
 func setNetworkName(ctx context.Context, store adt.Store, tree *state.StateTree, name string) error {
 	ia, err := tree.GetActor(builtin0.InitActorAddr)
 	if err != nil {
@@ -72,13 +72,17 @@ type StateManager struct {
 	// ErrExpensiveFork.
 	expensiveUpgrades map[abi.ChainEpoch]struct{}

-	stCache              map[string][]cid.Cid
-	compWait             map[string]chan struct{}
-	stlk                 sync.Mutex
-	genesisMsigLk        sync.Mutex
-	newVM                func(context.Context, *vm.VMOpts) (*vm.VM, error)
-	preIgnitionGenInfos  *genesisInfo
-	postIgnitionGenInfos *genesisInfo
+	stCache             map[string][]cid.Cid
+	compWait            map[string]chan struct{}
+	stlk                sync.Mutex
+	genesisMsigLk       sync.Mutex
+	newVM               func(context.Context, *vm.VMOpts) (*vm.VM, error)
+	preIgnitionVesting  []msig0.State
+	postIgnitionVesting []msig0.State
+	postCalicoVesting   []msig0.State
+
+	genesisPledge      abi.TokenAmount
+	genesisMarketFunds abi.TokenAmount
 }

 func NewStateManager(cs *store.ChainStore) *StateManager {
|
||||
sm.newVM = nvm
|
||||
}
|
||||
|
||||
type genesisInfo struct {
|
||||
genesisMsigs []msig0.State
|
||||
// info about the Accounts in the genesis state
|
||||
genesisActors []genesisActor
|
||||
genesisPledge abi.TokenAmount
|
||||
genesisMarketFunds abi.TokenAmount
|
||||
}
|
||||
|
||||
type genesisActor struct {
|
||||
addr address.Address
|
||||
initBal abi.TokenAmount
|
||||
}
|
||||
|
||||
// sets up information about the actors in the genesis state
|
||||
func (sm *StateManager) setupGenesisActors(ctx context.Context) error {
|
||||
|
||||
gi := genesisInfo{}
|
||||
// sets up information about the vesting schedule
|
||||
func (sm *StateManager) setupGenesisVestingSchedule(ctx context.Context) error {
|
||||
|
||||
gb, err := sm.cs.GetGenesis()
|
||||
if err != nil {
|
||||
@ -928,127 +917,18 @@ func (sm *StateManager) setupGenesisActors(ctx context.Context) error {
|
||||
return xerrors.Errorf("loading state tree: %w", err)
|
||||
}
|
||||
|
||||
gi.genesisMarketFunds, err = getFilMarketLocked(ctx, sTree)
|
||||
gmf, err := getFilMarketLocked(ctx, sTree)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("setting up genesis market funds: %w", err)
|
||||
}
|
||||
|
||||
gi.genesisPledge, err = getFilPowerLocked(ctx, sTree)
|
||||
gp, err := getFilPowerLocked(ctx, sTree)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("setting up genesis pledge: %w", err)
|
||||
}
|
||||
|
||||
totalsByEpoch := make(map[abi.ChainEpoch]abi.TokenAmount)
|
||||
err = sTree.ForEach(func(kaddr address.Address, act *types.Actor) error {
|
||||
if builtin.IsMultisigActor(act.Code) {
|
||||
s, err := multisig.Load(sm.cs.Store(ctx), act)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
se, err := s.StartEpoch()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if se != 0 {
|
||||
return xerrors.New("genesis multisig doesn't start vesting at epoch 0!")
|
||||
}
|
||||
|
||||
ud, err := s.UnlockDuration()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ib, err := s.InitialBalance()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ot, f := totalsByEpoch[ud]
|
||||
if f {
|
||||
totalsByEpoch[ud] = big.Add(ot, ib)
|
||||
} else {
|
||||
totalsByEpoch[ud] = ib
|
||||
}
|
||||
|
||||
} else if builtin.IsAccountActor(act.Code) {
|
||||
// should exclude burnt funds actor and "remainder account actor"
|
||||
// should only ever be "faucet" accounts in testnets
|
||||
if kaddr == builtin.BurntFundsActorAddr {
|
||||
return nil
|
||||
}
|
||||
|
||||
kid, err := sTree.LookupID(kaddr)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("resolving address: %w", err)
|
||||
}
|
||||
|
||||
gi.genesisActors = append(gi.genesisActors, genesisActor{
|
||||
addr: kid,
|
||||
initBal: act.Balance,
|
||||
})
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error setting up genesis infos: %w", err)
|
||||
}
|
||||
|
||||
// TODO: use network upgrade abstractions or always start at actors v0?
|
||||
gi.genesisMsigs = make([]msig0.State, 0, len(totalsByEpoch))
|
||||
for k, v := range totalsByEpoch {
|
||||
ns := msig0.State{
|
||||
InitialBalance: v,
|
||||
UnlockDuration: k,
|
||||
PendingTxns: cid.Undef,
|
||||
}
|
||||
gi.genesisMsigs = append(gi.genesisMsigs, ns)
|
||||
}
|
||||
|
||||
sm.preIgnitionGenInfos = &gi
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// sets up information about the actors in the genesis state
|
||||
// For testnet we use a hardcoded set of multisig states, instead of what's actually in the genesis multisigs
|
||||
// We also do not consider ANY account actors (including the faucet)
|
||||
func (sm *StateManager) setupPreIgnitionGenesisActorsTestnet(ctx context.Context) error {
|
||||
|
||||
gi := genesisInfo{}
|
||||
|
||||
gb, err := sm.cs.GetGenesis()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting genesis block: %w", err)
|
||||
}
|
||||
|
||||
gts, err := types.NewTipSet([]*types.BlockHeader{gb})
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting genesis tipset: %w", err)
|
||||
}
|
||||
|
||||
st, _, err := sm.TipSetState(ctx, gts)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting genesis tipset state: %w", err)
|
||||
}
|
||||
|
||||
cst := cbor.NewCborStore(sm.cs.Blockstore())
|
||||
sTree, err := state.LoadStateTree(cst, st)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("loading state tree: %w", err)
|
||||
}
|
||||
|
||||
gi.genesisMarketFunds, err = getFilMarketLocked(ctx, sTree)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("setting up genesis market funds: %w", err)
|
||||
}
|
||||
|
||||
gi.genesisPledge, err = getFilPowerLocked(ctx, sTree)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("setting up genesis pledge: %w", err)
|
||||
}
|
||||
sm.genesisMarketFunds = gmf
|
||||
sm.genesisPledge = gp
|
||||
|
||||
totalsByEpoch := make(map[abi.ChainEpoch]abi.TokenAmount)
|
||||
|
||||
@@ -1074,58 +954,21 @@ func (sm *StateManager) setupPreIgnitionGenesisActorsTestnet(ctx context.Context
 	totalsByEpoch[sixYears] = big.NewInt(100_000_000)
 	totalsByEpoch[sixYears] = big.Add(totalsByEpoch[sixYears], big.NewInt(300_000_000))

-	gi.genesisMsigs = make([]msig0.State, 0, len(totalsByEpoch))
+	sm.preIgnitionVesting = make([]msig0.State, 0, len(totalsByEpoch))
 	for k, v := range totalsByEpoch {
 		ns := msig0.State{
 			InitialBalance: v,
 			UnlockDuration: k,
 			PendingTxns:    cid.Undef,
 		}
-		gi.genesisMsigs = append(gi.genesisMsigs, ns)
+		sm.preIgnitionVesting = append(sm.preIgnitionVesting, ns)
 	}

-	sm.preIgnitionGenInfos = &gi
-
 	return nil
 }

-// sets up information about the actors in the genesis state, post the ignition fork
-func (sm *StateManager) setupPostIgnitionGenesisActors(ctx context.Context) error {
-
-	gi := genesisInfo{}
-
-	gb, err := sm.cs.GetGenesis()
-	if err != nil {
-		return xerrors.Errorf("getting genesis block: %w", err)
-	}
-
-	gts, err := types.NewTipSet([]*types.BlockHeader{gb})
-	if err != nil {
-		return xerrors.Errorf("getting genesis tipset: %w", err)
-	}
-
-	st, _, err := sm.TipSetState(ctx, gts)
-	if err != nil {
-		return xerrors.Errorf("getting genesis tipset state: %w", err)
-	}
-
-	cst := cbor.NewCborStore(sm.cs.Blockstore())
-	sTree, err := state.LoadStateTree(cst, st)
-	if err != nil {
-		return xerrors.Errorf("loading state tree: %w", err)
-	}
-
-	// Unnecessary, should be removed
-	gi.genesisMarketFunds, err = getFilMarketLocked(ctx, sTree)
-	if err != nil {
-		return xerrors.Errorf("setting up genesis market funds: %w", err)
-	}
-
-	// Unnecessary, should be removed
-	gi.genesisPledge, err = getFilPowerLocked(ctx, sTree)
-	if err != nil {
-		return xerrors.Errorf("setting up genesis pledge: %w", err)
-	}
+// sets up information about the vesting schedule post the ignition upgrade
+func (sm *StateManager) setupPostIgnitionVesting(ctx context.Context) error {

 	totalsByEpoch := make(map[abi.ChainEpoch]abi.TokenAmount)
@@ -1151,7 +994,7 @@ func (sm *StateManager) setupPostIgnitionGenesisActors(ctx context.Context) erro
 	totalsByEpoch[sixYears] = big.NewInt(100_000_000)
 	totalsByEpoch[sixYears] = big.Add(totalsByEpoch[sixYears], big.NewInt(300_000_000))

-	gi.genesisMsigs = make([]msig0.State, 0, len(totalsByEpoch))
+	sm.postIgnitionVesting = make([]msig0.State, 0, len(totalsByEpoch))
 	for k, v := range totalsByEpoch {
 		ns := msig0.State{
 			// In the pre-ignition logic, we incorrectly set this value in Fil, not attoFil, an off-by-10^18 error
@ -1161,10 +1004,56 @@ func (sm *StateManager) setupPostIgnitionGenesisActors(ctx context.Context) erro
|
||||
// In the pre-ignition logic, the start epoch was 0. This changes in the fork logic of the Ignition upgrade itself.
|
||||
StartEpoch: build.UpgradeLiftoffHeight,
|
||||
}
|
||||
gi.genesisMsigs = append(gi.genesisMsigs, ns)
|
||||
sm.postIgnitionVesting = append(sm.postIgnitionVesting, ns)
|
||||
}
|
||||
|
||||
sm.postIgnitionGenInfos = &gi
|
||||
return nil
|
||||
}
|
||||
|
||||
// sets up information about the vesting schedule post the calico upgrade
|
||||
func (sm *StateManager) setupPostCalicoVesting(ctx context.Context) error {
|
||||
|
||||
totalsByEpoch := make(map[abi.ChainEpoch]abi.TokenAmount)
|
||||
|
||||
// 0 days
|
||||
zeroDays := abi.ChainEpoch(0)
|
||||
totalsByEpoch[zeroDays] = big.NewInt(10_632_000)
|
||||
|
||||
// 6 months
|
||||
sixMonths := abi.ChainEpoch(183 * builtin.EpochsInDay)
|
||||
totalsByEpoch[sixMonths] = big.NewInt(19_015_887)
|
||||
totalsByEpoch[sixMonths] = big.Add(totalsByEpoch[sixMonths], big.NewInt(32_787_700))
|
||||
|
||||
// 1 year
|
||||
oneYear := abi.ChainEpoch(365 * builtin.EpochsInDay)
|
||||
totalsByEpoch[oneYear] = big.NewInt(22_421_712)
|
||||
totalsByEpoch[oneYear] = big.Add(totalsByEpoch[oneYear], big.NewInt(9_400_000))
|
||||
|
||||
// 2 years
|
||||
twoYears := abi.ChainEpoch(2 * 365 * builtin.EpochsInDay)
|
||||
totalsByEpoch[twoYears] = big.NewInt(7_223_364)
|
||||
|
||||
// 3 years
|
||||
threeYears := abi.ChainEpoch(3 * 365 * builtin.EpochsInDay)
|
||||
totalsByEpoch[threeYears] = big.NewInt(87_637_883)
|
||||
totalsByEpoch[threeYears] = big.Add(totalsByEpoch[threeYears], big.NewInt(898_958))
|
||||
|
||||
// 6 years
|
||||
sixYears := abi.ChainEpoch(6 * 365 * builtin.EpochsInDay)
|
||||
totalsByEpoch[sixYears] = big.NewInt(100_000_000)
|
||||
totalsByEpoch[sixYears] = big.Add(totalsByEpoch[sixYears], big.NewInt(300_000_000))
|
||||
totalsByEpoch[sixYears] = big.Add(totalsByEpoch[sixYears], big.NewInt(9_805_053))
|
||||
|
||||
sm.postCalicoVesting = make([]msig0.State, 0, len(totalsByEpoch))
|
||||
for k, v := range totalsByEpoch {
|
||||
ns := msig0.State{
|
||||
InitialBalance: big.Mul(v, big.NewInt(int64(build.FilecoinPrecision))),
|
||||
UnlockDuration: k,
|
||||
PendingTxns: cid.Undef,
|
||||
StartEpoch: build.UpgradeLiftoffHeight,
|
||||
}
|
||||
sm.postCalicoVesting = append(sm.postCalicoVesting, ns)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
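The `big.Mul(v, big.NewInt(int64(build.FilecoinPrecision)))` line above is what resolves the "off-by-10^18" comment in the ignition schedule: the vesting totals are written in whole FIL, while multisig balances are denominated in attoFIL. A minimal standalone sketch of that conversion (the precision constant here is copied in for illustration; lotus takes it from the `build` package):

```go
package main

import (
	"fmt"

	big "github.com/filecoin-project/go-state-types/big"
)

// filecoinPrecision mirrors build.FilecoinPrecision: 10^18 attoFIL per FIL.
const filecoinPrecision = 1_000_000_000_000_000_000

// toAttoFil converts a whole-FIL amount to attoFIL, as the post-calico
// vesting setup does for each schedule entry.
func toAttoFil(wholeFil int64) big.Int {
	return big.Mul(big.NewInt(wholeFil), big.NewInt(filecoinPrecision))
}

func main() {
	// The 100M FIL six-year tranche, in attoFIL: 10^8 * 10^18 = 10^26.
	fmt.Println(toAttoFil(100_000_000))
}
```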
@ -1175,12 +1064,19 @@ func (sm *StateManager) setupPostIgnitionGenesisActors(ctx context.Context) erro
func (sm *StateManager) GetFilVested(ctx context.Context, height abi.ChainEpoch, st *state.StateTree) (abi.TokenAmount, error) {
vf := big.Zero()
if height <= build.UpgradeIgnitionHeight {
for _, v := range sm.preIgnitionGenInfos.genesisMsigs {
for _, v := range sm.preIgnitionVesting {
au := big.Sub(v.InitialBalance, v.AmountLocked(height))
vf = big.Add(vf, au)
}
} else if height <= build.UpgradeCalicoHeight {
for _, v := range sm.postIgnitionVesting {
// In the pre-ignition logic, we simply called AmountLocked(height), assuming startEpoch was 0.
// The start epoch changed in the Ignition upgrade.
au := big.Sub(v.InitialBalance, v.AmountLocked(height-v.StartEpoch))
vf = big.Add(vf, au)
}
} else {
for _, v := range sm.postIgnitionGenInfos.genesisMsigs {
for _, v := range sm.postCalicoVesting {
// In the pre-ignition logic, we simply called AmountLocked(height), assuming startEpoch was 0.
// The start epoch changed in the Ignition upgrade.
au := big.Sub(v.InitialBalance, v.AmountLocked(height-v.StartEpoch))
@ -1188,26 +1084,12 @@ func (sm *StateManager) GetFilVested(ctx context.Context, height abi.ChainEpoch,
}
}

// there should not be any such accounts in testnet (and also none in mainnet?)
// continue to use preIgnitionGenInfos, nothing changed at the Ignition epoch
for _, v := range sm.preIgnitionGenInfos.genesisActors {
act, err := st.GetActor(v.addr)
if err != nil {
return big.Zero(), xerrors.Errorf("failed to get actor: %w", err)
}

diff := big.Sub(v.initBal, act.Balance)
if diff.GreaterThan(big.Zero()) {
vf = big.Add(vf, diff)
}
}

// After UpgradeActorsV2Height these funds are accounted for in GetFilReserveDisbursed
if height <= build.UpgradeActorsV2Height {
// continue to use preIgnitionGenInfos, nothing changed at the Ignition epoch
vf = big.Add(vf, sm.preIgnitionGenInfos.genesisPledge)
vf = big.Add(vf, sm.genesisPledge)
// continue to use preIgnitionGenInfos, nothing changed at the Ignition epoch
vf = big.Add(vf, sm.preIgnitionGenInfos.genesisMarketFunds)
vf = big.Add(vf, sm.genesisMarketFunds)
}

return vf, nil
@ -1301,16 +1183,22 @@ func (sm *StateManager) GetVMCirculatingSupply(ctx context.Context, height abi.C
func (sm *StateManager) GetVMCirculatingSupplyDetailed(ctx context.Context, height abi.ChainEpoch, st *state.StateTree) (api.CirculatingSupply, error) {
sm.genesisMsigLk.Lock()
defer sm.genesisMsigLk.Unlock()
if sm.preIgnitionGenInfos == nil {
err := sm.setupPreIgnitionGenesisActorsTestnet(ctx)
if sm.preIgnitionVesting == nil || sm.genesisPledge.IsZero() || sm.genesisMarketFunds.IsZero() {
err := sm.setupGenesisVestingSchedule(ctx)
if err != nil {
return api.CirculatingSupply{}, xerrors.Errorf("failed to setup pre-ignition genesis information: %w", err)
return api.CirculatingSupply{}, xerrors.Errorf("failed to setup pre-ignition vesting schedule: %w", err)
}
}
if sm.postIgnitionGenInfos == nil {
err := sm.setupPostIgnitionGenesisActors(ctx)
if sm.postIgnitionVesting == nil {
err := sm.setupPostIgnitionVesting(ctx)
if err != nil {
return api.CirculatingSupply{}, xerrors.Errorf("failed to setup post-ignition genesis information: %w", err)
return api.CirculatingSupply{}, xerrors.Errorf("failed to setup post-ignition vesting schedule: %w", err)
}
}
if sm.postCalicoVesting == nil {
err := sm.setupPostCalicoVesting(ctx)
if err != nil {
return api.CirculatingSupply{}, xerrors.Errorf("failed to setup post-calico vesting schedule: %w", err)
}
}
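`GetFilVested` above now selects one of three precomputed schedules by upgrade height and, for post-ignition epochs, offsets the lock clock by `StartEpoch`. A simplified sketch of that arithmetic, assuming a purely linear unlock (the real `msig0.State.AmountLocked` implements the actual vesting curve):

```go
package main

import "fmt"

// vestedAt returns the vested part of a tranche at the given height, assuming
// a linear unlock over unlockDuration epochs starting at startEpoch. It
// mirrors the shape of big.Sub(v.InitialBalance, v.AmountLocked(height-v.StartEpoch)).
func vestedAt(initial, height, startEpoch, unlockDuration int64) int64 {
	elapsed := height - startEpoch
	switch {
	case elapsed <= 0:
		return 0 // nothing unlocks before the start epoch
	case elapsed >= unlockDuration:
		return initial // fully vested
	default:
		return initial * elapsed / unlockDuration
	}
}

func main() {
	// Halfway through a 100-unit tranche that starts unlocking at epoch 1000.
	fmt.Println(vestedAt(100, 1500, 1000, 1000)) // 50
}
```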
@ -158,7 +158,7 @@ func GetMinerSectorSet(ctx context.Context, sm *StateManager, ts *types.TipSet,
return mas.LoadSectors(snos)
}

func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *StateManager, st cid.Cid, maddr address.Address, rand abi.PoStRandomness) ([]builtin.SectorInfo, error) {
func GetSectorsForWinningPoSt(ctx context.Context, nv network.Version, pv ffiwrapper.Verifier, sm *StateManager, st cid.Cid, maddr address.Address, rand abi.PoStRandomness) ([]builtin.SectorInfo, error) {
act, err := sm.LoadActorRaw(ctx, maddr, st)
if err != nil {
return nil, xerrors.Errorf("failed to load miner actor: %w", err)
@ -169,21 +169,27 @@ func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *S
return nil, xerrors.Errorf("failed to load miner actor state: %w", err)
}

// TODO (!!): Actor Update: Make this active sectors
var provingSectors bitfield.BitField
if nv < network.Version7 {
allSectors, err := miner.AllPartSectors(mas, miner.Partition.AllSectors)
if err != nil {
return nil, xerrors.Errorf("get all sectors: %w", err)
}

allSectors, err := miner.AllPartSectors(mas, miner.Partition.AllSectors)
if err != nil {
return nil, xerrors.Errorf("get all sectors: %w", err)
}
faultySectors, err := miner.AllPartSectors(mas, miner.Partition.FaultySectors)
if err != nil {
return nil, xerrors.Errorf("get faulty sectors: %w", err)
}

faultySectors, err := miner.AllPartSectors(mas, miner.Partition.FaultySectors)
if err != nil {
return nil, xerrors.Errorf("get faulty sectors: %w", err)
}

provingSectors, err := bitfield.SubtractBitField(allSectors, faultySectors) // TODO: This is wrong, as it can contain faults, change to just ActiveSectors in an upgrade
if err != nil {
return nil, xerrors.Errorf("calc proving sectors: %w", err)
provingSectors, err = bitfield.SubtractBitField(allSectors, faultySectors)
if err != nil {
return nil, xerrors.Errorf("calc proving sectors: %w", err)
}
} else {
provingSectors, err = miner.AllPartSectors(mas, miner.Partition.ActiveSectors)
if err != nil {
return nil, xerrors.Errorf("get active sectors: %w", err)
}
}

numProvSect, err := provingSectors.Count()
@ -201,12 +207,7 @@ func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *S
return nil, xerrors.Errorf("getting miner info: %w", err)
}

spt, err := ffiwrapper.SealProofTypeFromSectorSize(info.SectorSize)
if err != nil {
return nil, xerrors.Errorf("getting seal proof type: %w", err)
}

wpt, err := spt.RegisteredWinningPoStProof()
wpt, err := info.SealProofType.RegisteredWinningPoStProof()
if err != nil {
return nil, xerrors.Errorf("getting window proof type: %w", err)
}
@ -246,7 +247,7 @@ func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *S
out := make([]builtin.SectorInfo, len(sectors))
for i, sinfo := range sectors {
out[i] = builtin.SectorInfo{
SealProof: spt,
SealProof: sinfo.SealProof,
SectorNumber: sinfo.SectorNumber,
SealedCID: sinfo.SealedCID,
}
@ -497,7 +498,9 @@ func MinerGetBaseInfo(ctx context.Context, sm *StateManager, bcs beacon.Schedule
return nil, xerrors.Errorf("failed to get randomness for winning post: %w", err)
}

sectors, err := GetSectorsForWinningPoSt(ctx, pv, sm, lbst, maddr, prand)
nv := sm.GetNtwkVersion(ctx, ts.Height())

sectors, err := GetSectorsForWinningPoSt(ctx, nv, pv, sm, lbst, maddr, prand)
if err != nil {
return nil, xerrors.Errorf("getting winning post proving set: %w", err)
}
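The pre-Version7 branch above builds the winning-PoSt proving set as all sectors minus faulty sectors (which, per the old TODO, could still contain faults); from Version7 on it uses `ActiveSectors` directly. A minimal standalone sketch of the bitfield arithmetic in the old branch, with made-up sector numbers:

```go
package main

import (
	"fmt"

	bitfield "github.com/filecoin-project/go-bitfield"
)

func main() {
	// Hypothetical sector numbers for illustration.
	all := bitfield.NewFromSet([]uint64{1, 2, 3, 4})
	faulty := bitfield.NewFromSet([]uint64{2, 4})

	// proving = all \ faulty, as in the pre-Version7 branch.
	proving, err := bitfield.SubtractBitField(all, faulty)
	if err != nil {
		panic(err)
	}

	n, err := proving.Count()
	if err != nil {
		panic(err)
	}
	fmt.Println(n) // 2: sectors 1 and 3 remain
}
```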
@ -340,6 +340,13 @@ func (bv *BlockValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub
func (bv *BlockValidator) validateLocalBlock(ctx context.Context, msg *pubsub.Message) pubsub.ValidationResult {
stats.Record(ctx, metrics.BlockPublished.M(1))

if size := msg.Size(); size > 1<<20-1<<15 {
log.Errorf("ignoring oversize block (%dB)", size)
ctx, _ = tag.New(ctx, tag.Insert(metrics.FailureType, "oversize_block"))
stats.Record(ctx, metrics.BlockValidationFailure.M(1))
return pubsub.ValidationIgnore
}

blk, what, err := bv.decodeAndCheckBlock(msg)
if err != nil {
log.Errorf("got invalid local block: %s", err)

@ -32,6 +32,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
blst "github.com/supranational/blst/bindings/go"

@ -731,6 +732,8 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock, use
return xerrors.Errorf("load parent tipset failed (%s): %w", h.Parents, err)
}

winPoStNv := syncer.sm.GetNtwkVersion(ctx, baseTs.Height())

lbts, lbst, err := stmgr.GetLookbackTipSetForRound(ctx, syncer.sm, baseTs, h.Height)
if err != nil {
return xerrors.Errorf("failed to get lookback tipset for block: %w", err)
@ -924,7 +927,7 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock, use
})

wproofCheck := async.Err(func() error {
if err := syncer.VerifyWinningPoStProof(ctx, h, *prevBeacon, lbst, waddr); err != nil {
if err := syncer.VerifyWinningPoStProof(ctx, winPoStNv, h, *prevBeacon, lbst, waddr); err != nil {
return xerrors.Errorf("invalid election post: %w", err)
}
return nil
@ -976,7 +979,7 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock, use
return nil
}

func (syncer *Syncer) VerifyWinningPoStProof(ctx context.Context, h *types.BlockHeader, prevBeacon types.BeaconEntry, lbst cid.Cid, waddr address.Address) error {
func (syncer *Syncer) VerifyWinningPoStProof(ctx context.Context, nv network.Version, h *types.BlockHeader, prevBeacon types.BeaconEntry, lbst cid.Cid, waddr address.Address) error {
if build.InsecurePoStValidation {
if len(h.WinPoStProof) == 0 {
return xerrors.Errorf("[INSECURE-POST-VALIDATION] No winning post proof given")
@ -1008,7 +1011,7 @@ func (syncer *Syncer) VerifyWinningPoStProof(ctx context.Context, h *types.Block
return xerrors.Errorf("failed to get ID from miner address %s: %w", h.Miner, err)
}

sectors, err := stmgr.GetSectorsForWinningPoSt(ctx, syncer.verifier, syncer.sm, lbst, h.Miner, rand)
sectors, err := stmgr.GetSectorsForWinningPoSt(ctx, nv, syncer.verifier, syncer.sm, lbst, h.Miner, rand)
if err != nil {
return xerrors.Errorf("getting winning post sector set: %w", err)
}
@ -1072,7 +1075,7 @@ func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock

// Phase 1: syntactic validation, as defined in the spec
minGas := pl.OnChainMessage(msg.ChainLength())
if err := m.ValidForBlockInclusion(minGas.Total()); err != nil {
if err := m.ValidForBlockInclusion(minGas.Total(), syncer.sm.GetNtwkVersion(ctx, b.Header.Height)); err != nil {
return err
}

@ -5,6 +5,8 @@ import (
"encoding/json"
"fmt"

"github.com/filecoin-project/go-state-types/network"

"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/build"
@ -144,7 +146,7 @@ func (m *Message) EqualCall(o *Message) bool {
return (&m1).Equals(&m2)
}

func (m *Message) ValidForBlockInclusion(minGas int64) error {
func (m *Message) ValidForBlockInclusion(minGas int64, version network.Version) error {
if m.Version != 0 {
return xerrors.New("'Version' unsupported")
}
@ -153,6 +155,10 @@ func (m *Message) ValidForBlockInclusion(minGas int64) error {
return xerrors.New("'To' address cannot be empty")
}

if m.To == build.ZeroAddress && version >= network.Version7 {
return xerrors.New("invalid 'To' address")
}

if m.From == address.Undef {
return xerrors.New("'From' address cannot be empty")
}

@ -3,21 +3,17 @@ package vm
import (
"fmt"

vmr2 "github.com/filecoin-project/specs-actors/v2/actors/runtime"
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
"github.com/filecoin-project/lotus/build"

"github.com/filecoin-project/go-address"
addr "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto"
vmr2 "github.com/filecoin-project/specs-actors/v2/actors/runtime"
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
"github.com/ipfs/go-cid"
)

const (
GasStorageMulti = 1000
GasComputeMulti = 1
)

type GasCharge struct {
Name string
Extra interface{}
@ -132,6 +128,54 @@ var prices = map[abi.ChainEpoch]Pricelist{
verifyPostDiscount: true,
verifyConsensusFault: 495422,
},
abi.ChainEpoch(build.UpgradeCalicoHeight): &pricelistV0{
computeGasMulti: 1,
storageGasMulti: 1300,

onChainMessageComputeBase: 38863,
onChainMessageStorageBase: 36,
onChainMessageStoragePerByte: 1,

onChainReturnValuePerByte: 1,

sendBase: 29233,
sendTransferFunds: 27500,
sendTransferOnlyPremium: 159672,
sendInvokeMethod: -5377,

ipldGetBase: 114617,
ipldPutBase: 353640,
ipldPutPerByte: 1,

createActorCompute: 1108454,
createActorStorage: 36 + 40,
deleteActor: -(36 + 40), // -createActorStorage

verifySignature: map[crypto.SigType]int64{
crypto.SigTypeBLS: 16598605,
crypto.SigTypeSecp256k1: 1637292,
},

hashingBase: 31355,
computeUnsealedSectorCidBase: 98647,
verifySealBase: 2000, // TODO gas: the VerifySeal syscall is not used
verifyPostLookup: map[abi.RegisteredPoStProof]scalingCost{
abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: {
flat: 117680921,
scale: 43780,
},
abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: {
flat: 117680921,
scale: 43780,
},
abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: {
flat: 117680921,
scale: 43780,
},
},
verifyPostDiscount: false,
verifyConsensusFault: 495422,
},
}
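To get a feel for what the Calico table above does to a concrete charge: on-chain message costs combine a flat compute base with a per-byte storage term scaled by `storageGasMulti` (now 1300). A rough standalone sketch with the constants copied from the table; `pricelistV0.OnChainMessage` is the authoritative implementation:

```go
package main

import "fmt"

// onChainMessageGas sketches how the Calico pricelist turns a serialized
// message length into compute and storage gas, per the table above.
func onChainMessageGas(msgBytes int64) (compute, storage int64) {
	const (
		computeBase    = 38863
		storageBase    = 36
		storagePerByte = 1
		storageMulti   = 1300
	)
	compute = computeBase
	storage = (storageBase + storagePerByte*msgBytes) * storageMulti
	return compute, storage
}

func main() {
	c, s := onChainMessageGas(200) // a ~200-byte message
	fmt.Println(c, s, c+s)         // 38863 306800 345663
}
```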

// PricelistByEpoch finds the latest prices for the given epoch

@ -133,13 +133,13 @@ func (pl *pricelistV0) OnMethodInvocation(value abi.TokenAmount, methodNum abi.M

// OnIpldGet returns the gas used for storing an object
func (pl *pricelistV0) OnIpldGet() GasCharge {
return newGasCharge("OnIpldGet", pl.ipldGetBase, 0)
return newGasCharge("OnIpldGet", pl.ipldGetBase, 0).WithVirtual(114617, 0)
}

// OnIpldPut returns the gas used for storing an object
func (pl *pricelistV0) OnIpldPut(dataSize int) GasCharge {
return newGasCharge("OnIpldPut", pl.ipldPutBase, int64(dataSize)*pl.ipldPutPerByte*pl.storageGasMulti).
WithExtra(dataSize)
WithExtra(dataSize).WithVirtual(400000, int64(dataSize)*1300)
}

// OnCreateActor returns the gas used for creating an actor
@ -209,6 +209,7 @@ func (pl *pricelistV0) OnVerifyPost(info proof2.WindowPoStVerifyInfo) GasCharge
}

return newGasCharge("OnVerifyPost", gasUsed, 0).
WithVirtual(117680921+43780*int64(len(info.ChallengedSectors)), 0).
WithExtra(map[string]interface{}{
"type": sectorSize,
"size": len(info.ChallengedSectors),

@ -6,6 +6,8 @@ import (
"fmt"
"reflect"

"github.com/filecoin-project/go-state-types/network"

"github.com/filecoin-project/lotus/chain/actors/builtin"

"github.com/ipfs/go-cid"
@ -173,9 +175,14 @@ func (*ActorRegistry) transform(instance invokee) (nativeCode, error) {
paramT := meth.Type().In(1).Elem()
param := reflect.New(paramT)

rt := in[0].Interface().(*Runtime)
inBytes := in[1].Interface().([]byte)
if err := DecodeParams(inBytes, param.Interface()); err != nil {
aerr := aerrors.Absorb(err, 1, "failed to decode parameters")
ec := exitcode.ErrSerialization
if rt.NetworkVersion() < network.Version7 {
ec = 1
}
aerr := aerrors.Absorb(err, ec, "failed to decode parameters")
return []reflect.Value{
reflect.ValueOf([]byte{}),
// Below is a hack, fixed in Go 1.13
@ -183,7 +190,6 @@ func (*ActorRegistry) transform(instance invokee) (nativeCode, error) {
reflect.ValueOf(&aerr).Elem(),
}
}
rt := in[0].Interface().(*Runtime)
rval, aerror := rt.shimCall(func() interface{} {
ret := meth.Call([]reflect.Value{
reflect.ValueOf(rt),

@ -1,10 +1,13 @@
package vm

import (
"context"
"fmt"
"io"
"testing"

"github.com/filecoin-project/go-state-types/network"

cbor "github.com/ipfs/go-ipld-cbor"
"github.com/stretchr/testify/assert"
cbg "github.com/whyrusleeping/cbor-gen"
@ -105,10 +108,27 @@ func TestInvokerBasic(t *testing.T) {
}
}

_, aerr := code[1](&Runtime{}, []byte{99})
if aerrors.IsFatal(aerr) {
t.Fatal("err should not be fatal")
{
_, aerr := code[1](&Runtime{
vm: &VM{ntwkVersion: func(ctx context.Context, epoch abi.ChainEpoch) network.Version {
return network.Version0
}},
}, []byte{99})
if aerrors.IsFatal(aerr) {
t.Fatal("err should not be fatal")
}
assert.Equal(t, exitcode.ExitCode(1), aerrors.RetCode(aerr), "return code should be 1")
}
assert.Equal(t, exitcode.ExitCode(1), aerrors.RetCode(aerr), "return code should be 1")

{
_, aerr := code[1](&Runtime{
vm: &VM{ntwkVersion: func(ctx context.Context, epoch abi.ChainEpoch) network.Version {
return network.Version7
}},
}, []byte{99})
if aerrors.IsFatal(aerr) {
t.Fatal("err should not be fatal")
}
assert.Equal(t, exitcode.ErrSerialization, aerrors.RetCode(aerr), "return code should be %s", 1)
}
}

@ -244,20 +244,23 @@ func (rt *Runtime) NewActorAddress() address.Address {
return addr
}

func (rt *Runtime) CreateActor(codeID cid.Cid, address address.Address) {
func (rt *Runtime) CreateActor(codeID cid.Cid, addr address.Address) {
if addr == address.Undef && rt.NetworkVersion() >= network.Version7 {
rt.Abortf(exitcode.SysErrorIllegalArgument, "CreateActor with Undef address")
}
act, aerr := rt.vm.areg.Create(codeID, rt)
if aerr != nil {
rt.Abortf(aerr.RetCode(), aerr.Error())
}

_, err := rt.state.GetActor(address)
_, err := rt.state.GetActor(addr)
if err == nil {
rt.Abortf(exitcode.SysErrorIllegalArgument, "Actor address already exists")
}

rt.chargeGas(rt.Pricelist().OnCreateActor())

err = rt.state.SetActor(address, act)
err = rt.state.SetActor(addr, act)
if err != nil {
panic(aerrors.Fatalf("creating actor entry: %v", err))
}
@ -266,7 +269,7 @@ func (rt *Runtime) CreateActor(codeID cid.Cid, address address.Address) {

// DeleteActor deletes the executing actor from the state tree, transferring
// any balance to beneficiary.
// Aborts if the beneficiary does not exist.
// Aborts if the beneficiary does not exist or is the calling actor.
// May only be called by the actor itself.
func (rt *Runtime) DeleteActor(beneficiary address.Address) {
rt.chargeGas(rt.Pricelist().OnDeleteActor())
@ -278,6 +281,19 @@ func (rt *Runtime) DeleteActor(beneficiary address.Address) {
panic(aerrors.Fatalf("failed to get actor: %s", err))
}
if !act.Balance.IsZero() {
// TODO: Should be safe to drop the version-check,
// since only the paych actor called this pre-version 7, but let's leave it for now
if rt.NetworkVersion() >= network.Version7 {
beneficiaryId, found := rt.ResolveAddress(beneficiary)
if !found {
rt.Abortf(exitcode.SysErrorIllegalArgument, "beneficiary doesn't exist")
}

if beneficiaryId == rt.Receiver() {
rt.Abortf(exitcode.SysErrorIllegalArgument, "benefactor cannot be beneficiary")
}
}

// Transfer the executing actor's balance to the beneficiary
if err := rt.vm.transfer(rt.Receiver(), beneficiary, act.Balance); err != nil {
panic(aerrors.Fatalf("failed to transfer balance to beneficiary actor: %s", err))
@ -533,12 +549,19 @@ func (rt *Runtime) chargeGasInternal(gas GasCharge, skip int) aerrors.ActorError
ComputeGas: gas.ComputeGas,
StorageGas: gas.StorageGas,

TotalVirtualGas: gas.VirtualCompute*GasComputeMulti + gas.VirtualStorage*GasStorageMulti,
VirtualComputeGas: gas.VirtualCompute,
VirtualStorageGas: gas.VirtualStorage,

Callers: callers[:cout],
}
if gasTrace.VirtualStorageGas == 0 {
gasTrace.VirtualStorageGas = gasTrace.StorageGas
}
if gasTrace.VirtualComputeGas == 0 {
gasTrace.VirtualComputeGas = gasTrace.ComputeGas
}
gasTrace.TotalVirtualGas = gasTrace.VirtualComputeGas + gasTrace.VirtualStorageGas

rt.executionTrace.GasCharges = append(rt.executionTrace.GasCharges, &gasTrace)
rt.lastGasChargeTime = now
rt.lastGasCharge = &gasTrace
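The trace bookkeeping above falls back to the real gas figures whenever a price entry sets no explicit virtual override, so `TotalVirtualGas` stays comparable across repricings. The same logic, isolated as a sketch:

```go
// Sketch of the fallback above: virtual gas figures default to the real
// charge when no explicit override is present on the gas charge.
type gasTrace struct{ compute, storage, vCompute, vStorage int64 }

func totalVirtual(t *gasTrace) int64 {
	if t.vStorage == 0 {
		t.vStorage = t.storage
	}
	if t.vCompute == 0 {
		t.vCompute = t.compute
	}
	return t.vCompute + t.vStorage // TotalVirtualGas
}
```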

@ -7,6 +7,10 @@ import (
goruntime "runtime"
"sync"

"github.com/filecoin-project/go-state-types/network"

"github.com/filecoin-project/lotus/chain/actors/policy"

"github.com/filecoin-project/go-address"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
@ -40,7 +44,9 @@ func Syscalls(verifier ffiwrapper.Verifier) SyscallBuilder {
return func(ctx context.Context, rt *Runtime) runtime2.Syscalls {

return &syscallShim{
ctx: ctx,
ctx: ctx,
epoch: rt.CurrEpoch(),
networkVersion: rt.NetworkVersion(),

actor: rt.Receiver(),
cstate: rt.state,
@ -55,11 +61,13 @@ func Syscalls(verifier ffiwrapper.Verifier) SyscallBuilder {
type syscallShim struct {
ctx context.Context

lbState LookbackStateGetter
actor address.Address
cstate *state.StateTree
cst cbor.IpldStore
verifier ffiwrapper.Verifier
epoch abi.ChainEpoch
networkVersion network.Version
lbState LookbackStateGetter
actor address.Address
cstate *state.StateTree
cst cbor.IpldStore
verifier ffiwrapper.Verifier
}

func (ss *syscallShim) ComputeUnsealedSectorCID(st abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) {
@ -202,6 +210,10 @@ func (ss *syscallShim) VerifyBlockSig(blk *types.BlockHeader) error {
}

func (ss *syscallShim) workerKeyAtLookback(height abi.ChainEpoch) (address.Address, error) {
if ss.networkVersion >= network.Version7 && height < ss.epoch-policy.ChainFinality {
return address.Undef, xerrors.Errorf("cannot get worker key (currEpoch %d, height %d)", ss.epoch, height)
}

lbState, err := ss.lbState(ss.ctx, height)
if err != nil {
return address.Undef, err

@ -9,6 +9,7 @@ import (
"time"

"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/metrics"

block "github.com/ipfs/go-block-format"
cid "github.com/ipfs/go-cid"
@ -16,6 +17,7 @@ import (
logging "github.com/ipfs/go-log/v2"
mh "github.com/multiformats/go-multihash"
cbg "github.com/whyrusleeping/cbor-gen"
"go.opencensus.io/stats"
"go.opencensus.io/trace"
"golang.org/x/xerrors"

@ -138,6 +140,10 @@ func (vm *VM) makeRuntime(ctx context.Context, msg *types.Message, parent *Runti
}

if parent != nil {
// TODO: The version check here should be unnecessary, but we can wait to take it out
if !parent.allowInternal && rt.NetworkVersion() >= network.Version7 {
rt.Abortf(exitcode.SysErrForbidden, "internal calls currently disabled")
}
rt.gasUsed = parent.gasUsed
rt.origin = parent.origin
rt.originNonce = parent.originNonce
@ -602,6 +608,8 @@ func (vm *VM) ActorBalance(addr address.Address) (types.BigInt, aerrors.ActorErr
return act.Balance, nil
}

type vmFlushKey struct{}

func (vm *VM) Flush(ctx context.Context) (cid.Cid, error) {
_, span := trace.StartSpan(ctx, "vm.Flush")
defer span.End()
@ -614,7 +622,7 @@ func (vm *VM) Flush(ctx context.Context) (cid.Cid, error) {
return cid.Undef, xerrors.Errorf("flushing vm: %w", err)
}

if err := Copy(ctx, from, to, root); err != nil {
if err := Copy(context.WithValue(ctx, vmFlushKey{}, true), from, to, root); err != nil {
return cid.Undef, xerrors.Errorf("copying tree: %w", err)
}

@ -671,21 +679,48 @@ func linksForObj(blk block.Block, cb func(cid.Cid)) error {
func Copy(ctx context.Context, from, to blockstore.Blockstore, root cid.Cid) error {
ctx, span := trace.StartSpan(ctx, "vm.Copy") // nolint
defer span.End()
start := time.Now()

var numBlocks int
var totalCopySize int

var batch []block.Block
const batchSize = 128
const bufCount = 3
freeBufs := make(chan []block.Block, bufCount)
toFlush := make(chan []block.Block, bufCount)
for i := 0; i < bufCount; i++ {
freeBufs <- make([]block.Block, 0, batchSize)
}

errFlushChan := make(chan error)

go func() {
for b := range toFlush {
if err := to.PutMany(b); err != nil {
close(freeBufs)
errFlushChan <- xerrors.Errorf("batch put in copy: %w", err)
return
}
freeBufs <- b[:0]
}
close(errFlushChan)
close(freeBufs)
}()

var batch = <-freeBufs
batchCp := func(blk block.Block) error {
numBlocks++
totalCopySize += len(blk.RawData())

batch = append(batch, blk)
if len(batch) > 100 {
if err := to.PutMany(batch); err != nil {
return xerrors.Errorf("batch put in copy: %w", err)

if len(batch) >= batchSize {
toFlush <- batch
var ok bool
batch, ok = <-freeBufs
if !ok {
return <-errFlushChan
}
batch = batch[:0]
}
return nil
}
@ -695,15 +730,22 @@ func Copy(ctx context.Context, from, to blockstore.Blockstore, root cid.Cid) err
}

if len(batch) > 0 {
if err := to.PutMany(batch); err != nil {
return xerrors.Errorf("batch put in copy: %w", err)
}
toFlush <- batch
}
close(toFlush) // close toFlush, triggering the flush loop to end
err := <-errFlushChan // get the error, or nil if the channel closed cleanly
if err != nil {
return err
}

span.AddAttributes(
trace.Int64Attribute("numBlocks", int64(numBlocks)),
trace.Int64Attribute("copySize", int64(totalCopySize)),
)
if yes, ok := ctx.Value(vmFlushKey{}).(bool); yes && ok {
took := metrics.SinceInMilliseconds(start)
stats.Record(ctx, metrics.VMFlushCopyCount.M(int64(numBlocks)), metrics.VMFlushCopyDuration.M(took))
}

return nil
}
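The rewritten `Copy` above overlaps tree traversal with blockstore writes: a fixed pool of three 128-block buffers cycles between the producer and a single flushing goroutine, so neither side allocates per batch. The same shape, reduced to a generic sketch (a hypothetical `process` callback stands in for `to.PutMany`):

```go
package main

import "fmt"

// pipeline drains items through fixed, recycled batches; process runs in a
// separate goroutine so producing and flushing overlap, mirroring Copy above.
func pipeline(items []int, process func([]int) error) error {
	const batchSize, bufCount = 128, 3
	free := make(chan []int, bufCount)
	work := make(chan []int, bufCount)
	errc := make(chan error, 1)
	for i := 0; i < bufCount; i++ {
		free <- make([]int, 0, batchSize)
	}

	go func() {
		for b := range work {
			if err := process(b); err != nil {
				close(free) // unblock the producer
				errc <- err
				return
			}
			free <- b[:0] // recycle the buffer
		}
		close(errc)
	}()

	batch := <-free
	for _, it := range items {
		batch = append(batch, it)
		if len(batch) >= batchSize {
			work <- batch
			var ok bool
			if batch, ok = <-free; !ok {
				return <-errc // the flusher died; surface its error
			}
		}
	}
	if len(batch) > 0 {
		work <- batch
	}
	close(work)
	return <-errc // nil on clean shutdown
}

func main() {
	items := make([]int, 1000)
	err := pipeline(items, func(b []int) error { return nil })
	fmt.Println(err) // <nil>
}
```

Capping both channels at the buffer count guarantees the producer can never block on a dead flusher, which is why the error path closes `free` rather than `work`.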
|
||||
|
@ -31,6 +31,7 @@ import (
|
||||
|
||||
lapi "github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
||||
"github.com/filecoin-project/lotus/chain/actors/policy"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/genesis"
|
||||
@ -211,15 +212,6 @@ var sealBenchCmd = &cli.Command{
|
||||
}
|
||||
sectorSize := abi.SectorSize(sectorSizeInt)
|
||||
|
||||
spt, err := ffiwrapper.SealProofTypeFromSectorSize(sectorSize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cfg := &ffiwrapper.Config{
|
||||
SealProofType: spt,
|
||||
}
|
||||
|
||||
// Only fetch parameters if actually needed
|
||||
if !c.Bool("skip-commit2") {
|
||||
if err := paramfetch.GetParams(lcli.ReqContext(c), build.ParametersJSON(), uint64(sectorSize)); err != nil {
|
||||
@ -231,7 +223,7 @@ var sealBenchCmd = &cli.Command{
|
||||
Root: sbdir,
|
||||
}
|
||||
|
||||
sb, err := ffiwrapper.New(sbfs, cfg)
|
||||
sb, err := ffiwrapper.New(sbfs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -295,7 +287,7 @@ var sealBenchCmd = &cli.Command{
|
||||
|
||||
if !c.Bool("skip-commit2") {
|
||||
log.Info("generating winning post candidates")
|
||||
wipt, err := spt.RegisteredWinningPoStProof()
|
||||
wipt, err := spt(sectorSize).RegisteredWinningPoStProof()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -428,7 +420,7 @@ var sealBenchCmd = &cli.Command{
|
||||
|
||||
fmt.Println(string(data))
|
||||
} else {
|
||||
fmt.Printf("----\nresults (v27) (%d)\n", sectorSize)
|
||||
fmt.Printf("----\nresults (v28) (%d)\n", sectorSize)
|
||||
if robench == "" {
|
||||
fmt.Printf("seal: addPiece: %s (%s)\n", bo.SealingResults[0].AddPiece, bps(bo.SectorSize, bo.SealingResults[0].AddPiece)) // TODO: average across multiple sealings
|
||||
fmt.Printf("seal: preCommit phase 1: %s (%s)\n", bo.SealingResults[0].PreCommit1, bps(bo.SectorSize, bo.SealingResults[0].PreCommit1))
|
||||
@ -477,9 +469,12 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
|
||||
}
|
||||
|
||||
for i := abi.SectorNumber(1); i <= abi.SectorNumber(numSectors); i++ {
|
||||
sid := abi.SectorID{
|
||||
Miner: mid,
|
||||
Number: i,
|
||||
sid := storage.SectorRef{
|
||||
ID: abi.SectorID{
|
||||
Miner: mid,
|
||||
Number: i,
|
||||
},
|
||||
ProofType: spt(sectorSize),
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
@ -507,9 +502,12 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
|
||||
end := start + sectorsPerWorker
|
||||
for i := abi.SectorNumber(start); i < abi.SectorNumber(end); i++ {
|
||||
ix := int(i - 1)
|
||||
sid := abi.SectorID{
|
||||
Miner: mid,
|
||||
Number: i,
|
||||
sid := storage.SectorRef{
|
||||
ID: abi.SectorID{
|
||||
Miner: mid,
|
||||
Number: i,
|
||||
},
|
||||
ProofType: spt(sectorSize),
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
@ -538,7 +536,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
|
||||
<-preCommit2Sema
|
||||
|
||||
sealedSectors[ix] = saproof2.SectorInfo{
|
||||
SealProof: sb.SealProofType(),
|
||||
SealProof: sid.ProofType,
|
||||
SectorNumber: i,
|
||||
SealedCID: cids.Sealed,
|
||||
}
|
||||
@ -592,7 +590,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
|
||||
svi := saproof2.SealVerifyInfo{
|
||||
SectorID: abi.SectorID{Miner: mid, Number: i},
|
||||
SealedCID: cids.Sealed,
|
||||
SealProof: sb.SealProofType(),
|
||||
SealProof: sid.ProofType,
|
||||
Proof: proof,
|
||||
DealIDs: nil,
|
||||
Randomness: ticket,
|
||||
@ -614,7 +612,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
|
||||
if !skipunseal {
|
||||
log.Infof("[%d] Unsealing sector", i)
|
||||
{
|
||||
p, done, err := sbfs.AcquireSector(context.TODO(), abi.SectorID{Miner: mid, Number: 1}, storiface.FTUnsealed, storiface.FTNone, storiface.PathSealing)
|
||||
p, done, err := sbfs.AcquireSector(context.TODO(), sid, storiface.FTUnsealed, storiface.FTNone, storiface.PathSealing)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("acquire unsealed sector for removing: %w", err)
|
||||
}
|
||||
@ -625,7 +623,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
|
||||
}
|
||||
}
|
||||
|
||||
err := sb.UnsealPiece(context.TODO(), abi.SectorID{Miner: mid, Number: 1}, 0, abi.PaddedPieceSize(sectorSize).Unpadded(), ticket, cids.Unsealed)
|
||||
err := sb.UnsealPiece(context.TODO(), sid, 0, abi.PaddedPieceSize(sectorSize).Unpadded(), ticket, cids.Unsealed)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -708,23 +706,22 @@ var proveCmd = &cli.Command{
|
||||
return err
|
||||
}
|
||||
|
||||
spt, err := ffiwrapper.SealProofTypeFromSectorSize(abi.SectorSize(c2in.SectorSize))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cfg := &ffiwrapper.Config{
|
||||
SealProofType: spt,
|
||||
}
|
||||
|
||||
sb, err := ffiwrapper.New(nil, cfg)
|
||||
sb, err := ffiwrapper.New(nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
|
||||
proof, err := sb.SealCommit2(context.TODO(), abi.SectorID{Miner: abi.ActorID(mid), Number: abi.SectorNumber(c2in.SectorNum)}, c2in.Phase1Out)
|
||||
ref := storage.SectorRef{
|
||||
ID: abi.SectorID{
|
||||
Miner: abi.ActorID(mid),
|
||||
Number: abi.SectorNumber(c2in.SectorNum),
|
||||
},
|
||||
ProofType: spt(abi.SectorSize(c2in.SectorSize)),
|
||||
}
|
||||
|
||||
proof, err := sb.SealCommit2(context.TODO(), ref, c2in.Phase1Out)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -733,7 +730,7 @@ var proveCmd = &cli.Command{
|
||||
|
||||
fmt.Printf("proof: %x\n", proof)
|
||||
|
||||
fmt.Printf("----\nresults (v27) (%d)\n", c2in.SectorSize)
|
||||
fmt.Printf("----\nresults (v28) (%d)\n", c2in.SectorSize)
|
||||
dur := sealCommit2.Sub(start)
|
||||
|
||||
fmt.Printf("seal: commit phase 2: %s (%s)\n", dur, bps(abi.SectorSize(c2in.SectorSize), dur))
|
||||
@ -747,3 +744,12 @@ func bps(data abi.SectorSize, d time.Duration) string {
|
||||
bps := bdata.Div(bdata, big.NewInt(d.Nanoseconds()))
|
||||
return types.SizeStr(types.BigInt{Int: bps}) + "/s"
|
||||
}
|
||||
|
||||
func spt(ssize abi.SectorSize) abi.RegisteredSealProof {
|
||||
spt, err := miner.SealProofTypeFromSectorSize(ssize, build.NewestNetworkVersion)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return spt
|
||||
}
|
||||
|
@ -32,7 +32,6 @@ import (
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
lcli "github.com/filecoin-project/lotus/cli"
|
||||
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
|
||||
"github.com/filecoin-project/lotus/lib/lotuslog"
|
||||
@ -356,11 +355,6 @@ var runCmd = &cli.Command{
|
||||
}
|
||||
|
||||
// Setup remote sector store
|
||||
spt, err := ffiwrapper.SealProofTypeFromSectorSize(ssize)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting proof type: %w", err)
|
||||
}
|
||||
|
||||
sminfo, err := lcli.GetAPIInfo(cctx, repo.StorageMiner)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("could not get api info: %w", err)
|
||||
@ -374,7 +368,6 @@ var runCmd = &cli.Command{
|
||||
|
||||
workerApi := &worker{
|
||||
LocalWorker: sectorstorage.NewLocalWorker(sectorstorage.WorkerConfig{
|
||||
SealProof: spt,
|
||||
TaskTypes: taskTypes,
|
||||
NoSwap: cctx.Bool("no-swap"),
|
||||
}, remote, localStore, nodeApi, nodeApi, wsts),
|
||||
|
@ -8,8 +8,6 @@ import (
|
||||
"os"
|
||||
|
||||
"github.com/docker/go-units"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
|
||||
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
"github.com/mitchellh/go-homedir"
|
||||
"github.com/urfave/cli/v2"
|
||||
@ -19,6 +17,7 @@ import (
|
||||
"github.com/filecoin-project/go-state-types/big"
|
||||
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/cmd/lotus-seed/seed"
|
||||
"github.com/filecoin-project/lotus/genesis"
|
||||
@ -128,12 +127,12 @@ var preSealCmd = &cli.Command{
|
||||
}
|
||||
sectorSize := abi.SectorSize(sectorSizeInt)
|
||||
|
||||
rp, err := ffiwrapper.SealProofTypeFromSectorSize(sectorSize)
|
||||
spt, err := miner.SealProofTypeFromSectorSize(sectorSize, build.NewestNetworkVersion)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
gm, key, err := seed.PreSeal(maddr, rp, abi.SectorNumber(c.Uint64("sector-offset")), c.Int("num-sectors"), sbroot, []byte(c.String("ticket-preimage")), k, c.Bool("fake-sectors"))
|
||||
gm, key, err := seed.PreSeal(maddr, spt, abi.SectorNumber(c.Uint64("sector-offset")), c.Int("num-sectors"), sbroot, []byte(c.String("ticket-preimage")), k, c.Bool("fake-sectors"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -22,6 +22,7 @@ import (
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/big"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/zerocomm"
|
||||
"github.com/filecoin-project/specs-storage/storage"
|
||||
|
||||
market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
|
||||
|
||||
@ -42,10 +43,6 @@ func PreSeal(maddr address.Address, spt abi.RegisteredSealProof, offset abi.Sect
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
cfg := &ffiwrapper.Config{
|
||||
SealProofType: spt,
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(sbroot, 0775); err != nil { //nolint:gosec
|
||||
return nil, nil, err
|
||||
}
|
||||
@ -56,7 +53,7 @@ func PreSeal(maddr address.Address, spt abi.RegisteredSealProof, offset abi.Sect
|
||||
Root: sbroot,
|
||||
}
|
||||
|
||||
sb, err := ffiwrapper.New(sbfs, cfg)
|
||||
sb, err := ffiwrapper.New(sbfs)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@ -69,16 +66,17 @@ func PreSeal(maddr address.Address, spt abi.RegisteredSealProof, offset abi.Sect
|
||||
var sealedSectors []*genesis.PreSeal
|
||||
for i := 0; i < sectors; i++ {
|
||||
sid := abi.SectorID{Miner: abi.ActorID(mid), Number: next}
|
||||
ref := storage.SectorRef{ID: sid, ProofType: spt}
|
||||
next++
|
||||
|
||||
var preseal *genesis.PreSeal
|
||||
if !fakeSectors {
|
||||
preseal, err = presealSector(sb, sbfs, sid, spt, ssize, preimage)
|
||||
preseal, err = presealSector(sb, sbfs, ref, ssize, preimage)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
} else {
|
||||
preseal, err = presealSectorFake(sbfs, sid, spt, ssize)
|
||||
preseal, err = presealSectorFake(sbfs, ref, ssize)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@ -148,7 +146,7 @@ func PreSeal(maddr address.Address, spt abi.RegisteredSealProof, offset abi.Sect
|
||||
return miner, &minerAddr.KeyInfo, nil
|
||||
}
|
||||
|
||||
func presealSector(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, sid abi.SectorID, spt abi.RegisteredSealProof, ssize abi.SectorSize, preimage []byte) (*genesis.PreSeal, error) {
|
||||
func presealSector(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, sid storage.SectorRef, ssize abi.SectorSize, preimage []byte) (*genesis.PreSeal, error) {
|
||||
pi, err := sb.AddPiece(context.TODO(), sid, nil, abi.PaddedPieceSize(ssize).Unpadded(), rand.Reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -182,12 +180,12 @@ func presealSector(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, sid abi.Sector
|
||||
return &genesis.PreSeal{
|
||||
CommR: cids.Sealed,
|
||||
CommD: cids.Unsealed,
|
||||
SectorID: sid.Number,
|
||||
ProofType: spt,
|
||||
SectorID: sid.ID.Number,
|
||||
ProofType: sid.ProofType,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func presealSectorFake(sbfs *basicfs.Provider, sid abi.SectorID, spt abi.RegisteredSealProof, ssize abi.SectorSize) (*genesis.PreSeal, error) {
|
||||
func presealSectorFake(sbfs *basicfs.Provider, sid storage.SectorRef, ssize abi.SectorSize) (*genesis.PreSeal, error) {
|
||||
paths, done, err := sbfs.AcquireSector(context.TODO(), sid, 0, storiface.FTSealed|storiface.FTCache, storiface.PathSealing)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("acquire unsealed sector: %w", err)
|
||||
@ -198,7 +196,7 @@ func presealSectorFake(sbfs *basicfs.Provider, sid abi.SectorID, spt abi.Registe
|
||||
return nil, xerrors.Errorf("mkdir cache: %w", err)
|
||||
}
|
||||
|
||||
commr, err := ffi.FauxRep(spt, paths.Cache, paths.Sealed)
|
||||
commr, err := ffi.FauxRep(sid.ProofType, paths.Cache, paths.Sealed)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("fauxrep: %w", err)
|
||||
}
|
||||
@ -206,13 +204,13 @@ func presealSectorFake(sbfs *basicfs.Provider, sid abi.SectorID, spt abi.Registe
|
||||
return &genesis.PreSeal{
|
||||
CommR: commr,
|
||||
CommD: zerocomm.ZeroPieceCommitment(abi.PaddedPieceSize(ssize).Unpadded()),
|
||||
SectorID: sid.Number,
|
||||
ProofType: spt,
|
||||
SectorID: sid.ID.Number,
|
||||
ProofType: sid.ProofType,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func cleanupUnsealed(sbfs *basicfs.Provider, sid abi.SectorID) error {
|
||||
paths, done, err := sbfs.AcquireSector(context.TODO(), sid, storiface.FTUnsealed, storiface.FTNone, storiface.PathSealing)
|
||||
func cleanupUnsealed(sbfs *basicfs.Provider, ref storage.SectorRef) error {
|
||||
paths, done, err := sbfs.AcquireSector(context.TODO(), ref, storiface.FTUnsealed, storiface.FTNone, storiface.PathSealing)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -50,7 +50,7 @@ func TestWorkerKeyChange(t *testing.T) {
|
||||
|
||||
blocktime := 1 * time.Millisecond
|
||||
|
||||
n, sn := builder.MockSbBuilder(t, []test.FullNodeOpts{test.FullNodeWithUpgradeAt(1), test.FullNodeWithUpgradeAt(1)}, test.OneMiner)
|
||||
n, sn := builder.MockSbBuilder(t, []test.FullNodeOpts{test.FullNodeWithActorsV2At(1), test.FullNodeWithActorsV2At(1)}, test.OneMiner)
|
||||
|
||||
client1 := n[0]
|
||||
client2 := n[1]
|
||||
|
@ -433,11 +433,6 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api lapi.FullNode,
|
||||
return err
|
||||
}
|
||||
|
||||
spt, err := ffiwrapper.SealProofTypeFromSectorSize(ssize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
mid, err := address.IDFromAddress(a)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting id address: %w", err)
|
||||
@ -451,9 +446,7 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api lapi.FullNode,
|
||||
wsts := statestore.New(namespace.Wrap(mds, modules.WorkerCallsPrefix))
|
||||
smsts := statestore.New(namespace.Wrap(mds, modules.ManagerWorkPrefix))
|
||||
|
||||
smgr, err := sectorstorage.New(ctx, lr, stores.NewIndex(), &ffiwrapper.Config{
|
||||
SealProofType: spt,
|
||||
}, sectorstorage.SealerConfig{
|
||||
smgr, err := sectorstorage.New(ctx, lr, stores.NewIndex(), sectorstorage.SealerConfig{
|
||||
ParallelFetchLimit: 10,
|
||||
AllowAddPiece: true,
|
||||
AllowPreCommit1: true,
|
||||
@ -657,9 +650,14 @@ func createStorageMiner(ctx context.Context, api lapi.FullNode, peerid peer.ID,
|
||||
}
|
||||
}
|
||||
|
||||
spt, err := ffiwrapper.SealProofTypeFromSectorSize(abi.SectorSize(ssize))
|
||||
nv, err := api.StateNetworkVersion(ctx, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return address.Undef, err
|
||||
return address.Undef, xerrors.Errorf("getting network version: %w", err)
|
||||
}
|
||||
|
||||
spt, err := miner.SealProofTypeFromSectorSize(abi.SectorSize(ssize), nv)
|
||||
if err != nil {
|
||||
return address.Undef, xerrors.Errorf("getting seal proof type: %w", err)
|
||||
}
|
||||
|
||||
params, err := actors.SerializeParams(&power2.CreateMinerParams{
|
||||
|
@ -73,6 +73,8 @@ const (
|
||||
// MethodInspectRuntime is the identifier for the method that returns the
|
||||
// current runtime values.
|
||||
MethodInspectRuntime
|
||||
// MethodCreateState is the identifier for the method that creates the chaos actor's state.
|
||||
MethodCreateState
|
||||
)
|
||||
|
||||
// Exports defines the methods this actor exposes publicly.
|
||||
@ -87,6 +89,7 @@ func (a Actor) Exports() []interface{} {
|
||||
MethodMutateState: a.MutateState,
|
||||
MethodAbortWith: a.AbortWith,
|
||||
MethodInspectRuntime: a.InspectRuntime,
|
||||
MethodCreateState: a.CreateState,
|
||||
}
|
||||
}
|
||||
|
||||
@ -227,6 +230,14 @@ type MutateStateArgs struct {
|
||||
Branch MutateStateBranch
|
||||
}
|
||||
|
||||
// CreateState creates the chaos actor's state
|
||||
func (a Actor) CreateState(rt runtime2.Runtime, _ *abi.EmptyValue) *abi.EmptyValue {
|
||||
rt.ValidateImmediateCallerAcceptAny()
|
||||
rt.StateCreate(&State{})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MutateState attempts to mutate a state value in the actor.
|
||||
func (a Actor) MutateState(rt runtime2.Runtime, args *MutateStateArgs) *abi.EmptyValue {
|
||||
rt.ValidateImmediateCallerAcceptAny()
|
||||
|
@ -129,8 +129,9 @@ func TestMutateStateInTransaction(t *testing.T) {
|
||||
var a Actor
|
||||
|
||||
rt.ExpectValidateCallerAny()
|
||||
rt.StateCreate(&State{})
|
||||
rt.Call(a.CreateState, nil)
|
||||
|
||||
rt.ExpectValidateCallerAny()
|
||||
val := "__mutstat test"
|
||||
rt.Call(a.MutateState, &MutateStateArgs{
|
||||
Value: val,
|
||||
@ -155,23 +156,30 @@ func TestMutateStateAfterTransaction(t *testing.T) {
|
||||
var a Actor
|
||||
|
||||
rt.ExpectValidateCallerAny()
|
||||
rt.StateCreate(&State{})
|
||||
rt.Call(a.CreateState, nil)
|
||||
|
||||
rt.ExpectValidateCallerAny()
|
||||
val := "__mutstat test"
|
||||
defer func() {
|
||||
if r := recover(); r == nil {
|
||||
t.Fatal("The code did not panic")
|
||||
} else {
|
||||
var st State
|
||||
rt.GetState(&st)
|
||||
|
||||
// state should be updated successfully _in_ the transaction but not outside
|
||||
if st.Value != val+"-in" {
|
||||
t.Fatal("state was not updated")
|
||||
}
|
||||
|
||||
rt.Verify()
|
||||
}
|
||||
}()
|
||||
rt.Call(a.MutateState, &MutateStateArgs{
|
||||
Value: val,
|
||||
Branch: MutateAfterTransaction,
|
||||
})
|
||||
|
||||
var st State
|
||||
rt.GetState(&st)
|
||||
|
||||
// state should be updated successfully _in_ the transaction but not outside
|
||||
if st.Value != val+"-in" {
|
||||
t.Fatal("state was not updated")
|
||||
}
|
||||
|
||||
rt.Verify()
|
||||
}
|
||||
|
||||
func TestMutateStateReadonly(t *testing.T) {
|
||||
@ -182,22 +190,30 @@ func TestMutateStateReadonly(t *testing.T) {
|
||||
var a Actor
|
||||
|
||||
rt.ExpectValidateCallerAny()
|
||||
rt.StateCreate(&State{})
|
||||
rt.Call(a.CreateState, nil)
|
||||
|
||||
rt.ExpectValidateCallerAny()
|
||||
val := "__mutstat test"
|
||||
defer func() {
|
||||
if r := recover(); r == nil {
|
||||
t.Fatal("The code did not panic")
|
||||
} else {
|
||||
var st State
|
||||
rt.GetState(&st)
|
||||
|
||||
if st.Value != "" {
|
||||
t.Fatal("state was not expected to be updated")
|
||||
}
|
||||
|
||||
rt.Verify()
|
||||
}
|
||||
}()
|
||||
|
||||
rt.Call(a.MutateState, &MutateStateArgs{
|
||||
Value: val,
|
||||
Branch: MutateReadonly,
|
||||
})
|
||||
|
||||
var st State
|
||||
rt.GetState(&st)
|
||||
|
||||
if st.Value != "" {
|
||||
t.Fatal("state was not expected to be updated")
|
||||
}
|
||||
|
||||
rt.Verify()
|
||||
}
|
||||
|
||||
func TestMutateStateInvalidBranch(t *testing.T) {
|
||||
@ -254,11 +270,13 @@ func TestInspectRuntime(t *testing.T) {
|
||||
receiver := atesting2.NewIDAddr(t, 101)
|
||||
builder := mock2.NewBuilder(context.Background(), receiver)
|
||||
|
||||
rt := builder.Build(t)
|
||||
rt.SetCaller(caller, builtin2.AccountActorCodeID)
|
||||
rt.StateCreate(&State{})
|
||||
var a Actor
|
||||
|
||||
rt := builder.Build(t)
|
||||
rt.ExpectValidateCallerAny()
|
||||
rt.Call(a.CreateState, nil)
|
||||
|
||||
rt.SetCaller(caller, builtin2.AccountActorCodeID)
|
||||
rt.ExpectValidateCallerAny()
|
||||
ret := rt.Call(a.InspectRuntime, abi.Empty)
|
||||
rtr, ok := ret.(*InspectRuntimeReturn)
|
||||
|
@ -153,7 +153,7 @@ Response:
|
||||
```json
|
||||
{
|
||||
"Version": "string value",
|
||||
"APIVersion": 4352,
|
||||
"APIVersion": 65536,
|
||||
"BlockDelay": 42
|
||||
}
|
||||
```
|
||||
@ -987,7 +987,10 @@ Inputs:
|
||||
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
|
||||
}
|
||||
},
|
||||
"string value"
|
||||
{
|
||||
"Code": 0,
|
||||
"Message": "string value"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
@ -1008,7 +1011,10 @@ Inputs:
|
||||
},
|
||||
"ID": "07070707-0707-0707-0707-070707070707"
|
||||
},
|
||||
"string value"
|
||||
{
|
||||
"Code": 0,
|
||||
"Message": "string value"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
@ -1029,7 +1035,10 @@ Inputs:
|
||||
},
|
||||
"ID": "07070707-0707-0707-0707-070707070707"
|
||||
},
|
||||
"string value"
|
||||
{
|
||||
"Code": 0,
|
||||
"Message": "string value"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
@ -1050,7 +1059,10 @@ Inputs:
|
||||
},
|
||||
"ID": "07070707-0707-0707-0707-070707070707"
|
||||
},
|
||||
"string value"
|
||||
{
|
||||
"Code": 0,
|
||||
"Message": "string value"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
@ -1072,7 +1084,10 @@ Inputs:
|
||||
"ID": "07070707-0707-0707-0707-070707070707"
|
||||
},
|
||||
true,
|
||||
"string value"
|
||||
{
|
||||
"Code": 0,
|
||||
"Message": "string value"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
@ -1093,7 +1108,10 @@ Inputs:
|
||||
},
|
||||
"ID": "07070707-0707-0707-0707-070707070707"
|
||||
},
|
||||
"string value"
|
||||
{
|
||||
"Code": 0,
|
||||
"Message": "string value"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
@ -1115,7 +1133,10 @@ Inputs:
|
||||
"ID": "07070707-0707-0707-0707-070707070707"
|
||||
},
|
||||
null,
|
||||
"string value"
|
||||
{
|
||||
"Code": 0,
|
||||
"Message": "string value"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
@ -1137,7 +1158,10 @@ Inputs:
|
||||
"ID": "07070707-0707-0707-0707-070707070707"
|
||||
},
|
||||
null,
|
||||
"string value"
|
||||
{
|
||||
"Code": 0,
|
||||
"Message": "string value"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
@ -1159,7 +1183,10 @@ Inputs:
|
||||
"ID": "07070707-0707-0707-0707-070707070707"
|
||||
},
|
||||
null,
|
||||
"string value"
|
||||
{
|
||||
"Code": 0,
|
||||
"Message": "string value"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
@ -1188,7 +1215,10 @@ Inputs:
|
||||
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
|
||||
}
|
||||
},
|
||||
"string value"
|
||||
{
|
||||
"Code": 0,
|
||||
"Message": "string value"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
@ -1209,7 +1239,10 @@ Inputs:
|
||||
},
|
||||
"ID": "07070707-0707-0707-0707-070707070707"
|
||||
},
|
||||
"string value"
|
||||
{
|
||||
"Code": 0,
|
||||
"Message": "string value"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
@ -1422,7 +1455,7 @@ Response:
|
||||
"ToUpgrade": true,
|
||||
"LastErr": "string value",
|
||||
"Log": null,
|
||||
"SealProof": 3,
|
||||
"SealProof": 8,
|
||||
"Activation": 10101,
|
||||
"Expiration": 10101,
|
||||
"DealWeight": "0",
|
||||
|
@ -55,8 +55,11 @@ Inputs:
|
||||
```json
|
||||
[
|
||||
{
|
||||
"Miner": 1000,
|
||||
"Number": 9
|
||||
"ID": {
|
||||
"Miner": 1000,
|
||||
"Number": 9
|
||||
},
|
||||
"ProofType": 8
|
||||
},
|
||||
1,
|
||||
"sealing",
|
||||
@ -141,7 +144,7 @@ Perms: admin
|
||||
|
||||
Inputs: `null`
|
||||
|
||||
Response: `4352`
|
||||
Response: `65536`
|
||||
|
||||
## Add
|
||||
|
||||
@ -155,8 +158,11 @@ Inputs:
```json
[
{
"Miner": 1000,
"Number": 9
"ID": {
"Miner": 1000,
"Number": 9
},
"ProofType": 8
},
null,
1024,

@ -187,8 +193,11 @@ Inputs:
```json
[
{
"Miner": 1000,
"Number": 9
"ID": {
"Miner": 1000,
"Number": 9
},
"ProofType": 8
},
null
]

@ -217,8 +226,11 @@ Inputs:
```json
[
{
"Miner": 1000,
"Number": 9
"ID": {
"Miner": 1000,
"Number": 9
},
"ProofType": 8
},
1
]

@ -262,8 +274,11 @@ Inputs:
[
{},
{
"Miner": 1000,
"Number": 9
"ID": {
"Miner": 1000,
"Number": 9
},
"ProofType": 8
},
1040384,
1024

@ -293,8 +308,11 @@ Inputs:
```json
[
{
"Miner": 1000,
"Number": 9
"ID": {
"Miner": 1000,
"Number": 9
},
"ProofType": 8
},
null
]

@ -323,8 +341,11 @@ Inputs:
```json
[
{
"Miner": 1000,
"Number": 9
"ID": {
"Miner": 1000,
"Number": 9
},
"ProofType": 8
},
null,
null,

@ -360,8 +381,11 @@ Inputs:
```json
[
{
"Miner": 1000,
"Number": 9
"ID": {
"Miner": 1000,
"Number": 9
},
"ProofType": 8
},
null
]

@ -387,8 +411,11 @@ Inputs:
```json
[
{
"Miner": 1000,
"Number": 9
"ID": {
"Miner": 1000,
"Number": 9
},
"ProofType": 8
},
null,
null

@ -415,8 +442,11 @@ Inputs:
```json
[
{
"Miner": 1000,
"Number": 9
"ID": {
"Miner": 1000,
"Number": 9
},
"ProofType": 8
},
null
]

@ -499,8 +529,11 @@ Inputs:
```json
[
{
"Miner": 1000,
"Number": 9
"ID": {
"Miner": 1000,
"Number": 9
},
"ProofType": 8
},
1040384,
1024,
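The repeated change in these Inputs examples is that the bare `{"Miner": 1000, "Number": 9}` sector ID is now wrapped in an object that also carries the proof type. A minimal sketch of that shape (Go type names here are hypothetical; the JSON matches the examples above):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// sectorID and sectorRef mirror the old and new wire shapes shown above.
type sectorID struct {
	Miner  uint64 `json:"Miner"`
	Number uint64 `json:"Number"`
}

type sectorRef struct {
	ID        sectorID `json:"ID"`
	ProofType int      `json:"ProofType"`
}

func main() {
	b, _ := json.Marshal(sectorRef{ID: sectorID{Miner: 1000, Number: 9}, ProofType: 8})
	fmt.Println(string(b)) // {"ID":{"Miner":1000,"Number":9},"ProofType":8}
}
```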
@ -246,7 +246,7 @@ Response:
```json
{
"Version": "string value",
"APIVersion": 4352,
"APIVersion": 65536,
"BlockDelay": 42
}
```
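The jump from `4352` to `65536` is consistent with packing a semantic version into one integer as `major<<16 | minor<<8 | patch`: 4352 is 0x1100 (0.17.0) and 65536 is 0x10000 (1.0.0). A sketch of that packing, assuming this encoding (the helper name is hypothetical):

```go
package main

import "fmt"

// packVersion assumes the major<<16 | minor<<8 | patch encoding inferred above.
func packVersion(major, minor, patch uint32) uint32 {
	return major<<16 | minor<<8 | patch
}

func main() {
	fmt.Println(packVersion(0, 17, 0)) // 4352, the old APIVersion
	fmt.Println(packVersion(1, 0, 0))  // 65536, the new APIVersion
}
```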
@ -3874,7 +3874,7 @@ Response:
"WorkerChangeEpoch": 10101,
"PeerId": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
"Multiaddrs": null,
"SealProofType": 3,
"SealProofType": 8,
"SectorSize": 34359738368,
"WindowPoStPartitionSectors": 42,
"ConsensusFaultElapsed": 10101

@ -3892,7 +3892,7 @@ Inputs:
[
"f01234",
{
"SealProof": 3,
"SealProof": 8,
"SectorNumber": 9,
"SealedCID": {
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"

@ -3989,7 +3989,7 @@ Inputs:
[
"f01234",
{
"SealProof": 3,
"SealProof": 8,
"SectorNumber": 9,
"SealedCID": {
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"

@ -4194,7 +4194,7 @@ Inputs:
]
```

Response: `6`
Response: `8`

### StateReadState
StateReadState returns the indicated actor's state.

@ -4415,7 +4415,7 @@ Response:
```json
{
"SectorNumber": 9,
"SealProof": 3,
"SealProof": 8,
"SealedCID": {
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
},

@ -4486,7 +4486,7 @@ Response:
```json
{
"Info": {
"SealProof": 3,
"SealProof": 8,
"SectorNumber": 9,
"SealedCID": {
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
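The recurring `3` → `8` change in these examples tracks the proofs switch in this release: in go-state-types' enumeration of registered seal proofs, `3` appears to be the 32 GiB v1 proof and `8` the 32 GiB v1_1 proof (same sector size, matching the `"SectorSize": 34359738368` above, newer circuit). A sketch assuming that enumeration:

```go
package main

import "fmt"

// Assumed ordering of abi.RegisteredSealProof in go-state-types:
// values 0-4 are the V1 proofs, values 5-9 the corresponding V1_1 proofs.
const (
	stackedDrg2KiBV1 = iota // 0
	stackedDrg8MiBV1
	stackedDrg512MiBV1
	stackedDrg32GiBV1 // 3: the old value in the examples
	stackedDrg64GiBV1
	stackedDrg2KiBV1_1
	stackedDrg8MiBV1_1
	stackedDrg512MiBV1_1
	stackedDrg32GiBV1_1 // 8: the new value in the examples
	stackedDrg64GiBV1_1
)

func main() {
	fmt.Println(stackedDrg32GiBV1, stackedDrg32GiBV1_1) // 3 8
}
```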
2 extern/filecoin-ffi vendored
@ -1 +1 @@
Subproject commit 1985275547f222e8c97a8ab70b5cc26bc1fa50b1
Subproject commit 1d9cb3e8ff53f51f9318fc57e5d00bc79bdc0128
19 extern/sector-storage/faults.go vendored
@ -9,17 +9,18 @@ import (
"golang.org/x/xerrors"

"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/specs-storage/storage"

"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)

// FaultTracker TODO: Track things more actively
type FaultTracker interface {
CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []abi.SectorID) ([]abi.SectorID, error)
CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef) ([]abi.SectorID, error)
}

// CheckProvable returns unprovable sectors
func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []abi.SectorID) ([]abi.SectorID, error) {
func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef) ([]abi.SectorID, error) {
var bad []abi.SectorID

ssize, err := pp.SectorSize()
@ -33,27 +34,27 @@ func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof,
ctx, cancel := context.WithCancel(ctx)
defer cancel()

locked, err := m.index.StorageTryLock(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.FTNone)
locked, err := m.index.StorageTryLock(ctx, sector.ID, storiface.FTSealed|storiface.FTCache, storiface.FTNone)
if err != nil {
return xerrors.Errorf("acquiring sector lock: %w", err)
}

if !locked {
log.Warnw("CheckProvable Sector FAULT: can't acquire read lock", "sector", sector, "sealed")
bad = append(bad, sector)
bad = append(bad, sector.ID)
return nil
}

lp, _, err := m.localStore.AcquireSector(ctx, sector, ssize, storiface.FTSealed|storiface.FTCache, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
lp, _, err := m.localStore.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
if err != nil {
log.Warnw("CheckProvable Sector FAULT: acquire sector in checkProvable", "sector", sector, "error", err)
bad = append(bad, sector)
bad = append(bad, sector.ID)
return nil
}

if lp.Sealed == "" || lp.Cache == "" {
log.Warnw("CheckProvable Sector FAULT: cache and/or sealed paths not found", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache)
bad = append(bad, sector)
bad = append(bad, sector.ID)
return nil
}

@ -69,14 +70,14 @@ func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof,
st, err := os.Stat(p)
if err != nil {
log.Warnw("CheckProvable Sector FAULT: sector file stat error", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "file", p, "err", err)
bad = append(bad, sector)
bad = append(bad, sector.ID)
return nil
}

if sz != 0 {
if st.Size() != int64(ssize)*sz {
log.Warnw("CheckProvable Sector FAULT: sector file is wrong size", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "file", p, "size", st.Size(), "expectSize", int64(ssize)*sz)
bad = append(bad, sector)
bad = append(bad, sector.ID)
return nil
}
}
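For callers, the visible change is that CheckProvable now takes storage.SectorRef values (ID plus proof type) while still reporting bad sectors as plain abi.SectorID. A hedged usage sketch (mgr is assumed to be the *Manager from this package; the proof constants are placeholders):

```go
// Sketch only: how a caller might use the updated CheckProvable signature.
func reportUnprovable(ctx context.Context, mgr *Manager) error {
	refs := []storage.SectorRef{{
		ID:        abi.SectorID{Miner: 1000, Number: 9},
		ProofType: abi.RegisteredSealProof_StackedDrg32GiBV1_1,
	}}

	bad, err := mgr.CheckProvable(ctx, abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, refs)
	if err != nil {
		return err
	}
	for _, sid := range bad {
		log.Warnw("unprovable sector", "miner", sid.Miner, "number", sid.Number)
	}
	return nil
}
```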
11 extern/sector-storage/ffiwrapper/basicfs/fs.go vendored
@ -7,6 +7,7 @@ import (
"sync"

"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/specs-storage/storage"

"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)
@ -23,7 +24,7 @@ type Provider struct {
waitSector map[sectorFile]chan struct{}
}

func (b *Provider) AcquireSector(ctx context.Context, id abi.SectorID, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) {
func (b *Provider) AcquireSector(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) {
if err := os.Mkdir(filepath.Join(b.Root, storiface.FTUnsealed.String()), 0755); err != nil && !os.IsExist(err) { // nolint
return storiface.SectorPaths{}, nil, err
}
@ -37,7 +38,7 @@ func (b *Provider) AcquireSector(ctx context.Context, id abi.SectorID, existing
done := func() {}

out := storiface.SectorPaths{
ID: id,
ID: id.ID,
}

for _, fileType := range storiface.PathTypes {
@ -49,10 +50,10 @@ func (b *Provider) AcquireSector(ctx context.Context, id abi.SectorID, existing
if b.waitSector == nil {
b.waitSector = map[sectorFile]chan struct{}{}
}
ch, found := b.waitSector[sectorFile{id, fileType}]
ch, found := b.waitSector[sectorFile{id.ID, fileType}]
if !found {
ch = make(chan struct{}, 1)
b.waitSector[sectorFile{id, fileType}] = ch
b.waitSector[sectorFile{id.ID, fileType}] = ch
}
b.lk.Unlock()

@ -63,7 +64,7 @@ func (b *Provider) AcquireSector(ctx context.Context, id abi.SectorID, existing
return storiface.SectorPaths{}, nil, ctx.Err()
}

path := filepath.Join(b.Root, fileType.String(), storiface.SectorName(id))
path := filepath.Join(b.Root, fileType.String(), storiface.SectorName(id.ID))

prevDone := done
done = func() {
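Note that the on-disk layout is unchanged by this refactor: paths and wait-channels are still keyed by the bare sector ID (`id.ID`), with the proof type riding along only in memory. For reference, storiface.SectorName appears to render that ID in the familiar `s-t0<miner>-<number>` form; a hedged mirror of it:

```go
// Sketch of the path naming assumed above; the real helper lives in storiface.
func sectorName(sid abi.SectorID) string {
	return fmt.Sprintf("s-t0%d-%d", sid.Miner, sid.Number)
}

// sectorName(abi.SectorID{Miner: 1000, Number: 9}) == "s-t01000-9"
```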
34 extern/sector-storage/ffiwrapper/config.go vendored
@ -1,34 +0,0 @@
package ffiwrapper

import (
"golang.org/x/xerrors"

"github.com/filecoin-project/go-state-types/abi"
)

type Config struct {
SealProofType abi.RegisteredSealProof

_ struct{} // guard against nameless init
}

func sizeFromConfig(cfg Config) (abi.SectorSize, error) {
return cfg.SealProofType.SectorSize()
}

func SealProofTypeFromSectorSize(ssize abi.SectorSize) (abi.RegisteredSealProof, error) {
switch ssize {
case 2 << 10:
return abi.RegisteredSealProof_StackedDrg2KiBV1, nil
case 8 << 20:
return abi.RegisteredSealProof_StackedDrg8MiBV1, nil
case 512 << 20:
return abi.RegisteredSealProof_StackedDrg512MiBV1, nil
case 32 << 30:
return abi.RegisteredSealProof_StackedDrg32GiBV1, nil
case 64 << 30:
return abi.RegisteredSealProof_StackedDrg64GiBV1, nil
default:
return 0, xerrors.Errorf("unsupported sector size for miner: %v", ssize)
}
}
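With Config deleted, nothing stores a sector size up front; each call site derives it from the proof type carried by the storage.SectorRef, exactly as the sealer_cgo.go hunks below do. A minimal sketch of that recurring pattern:

```go
// Sketch: the per-call replacement for the deleted sizeFromConfig helper.
func maxPieceSize(sector storage.SectorRef) (abi.PaddedPieceSize, error) {
	ssize, err := sector.ProofType.SectorSize()
	if err != nil {
		return 0, err
	}
	return abi.PaddedPieceSize(ssize), nil
}
```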
12 extern/sector-storage/ffiwrapper/sealer.go vendored
@ -1,16 +1,12 @@
package ffiwrapper

import (
"github.com/filecoin-project/go-state-types/abi"
logging "github.com/ipfs/go-log/v2"
)

var log = logging.Logger("ffiwrapper")

type Sealer struct {
sealProofType abi.RegisteredSealProof
ssize abi.SectorSize // a function of sealProofType and postProofType

sectors SectorProvider
stopping chan struct{}
}
@ -18,11 +14,3 @@ type Sealer struct {
func (sb *Sealer) Stop() {
close(sb.stopping)
}

func (sb *Sealer) SectorSize() abi.SectorSize {
return sb.ssize
}

func (sb *Sealer) SealProofType() abi.RegisteredSealProof {
return sb.sealProofType
}
101 extern/sector-storage/ffiwrapper/sealer_cgo.go vendored
@ -27,16 +27,8 @@ import (

var _ Storage = &Sealer{}

func New(sectors SectorProvider, cfg *Config) (*Sealer, error) {
sectorSize, err := sizeFromConfig(*cfg)
if err != nil {
return nil, err
}

func New(sectors SectorProvider) (*Sealer, error) {
sb := &Sealer{
sealProofType: cfg.SealProofType,
ssize: sectorSize,

sectors: sectors,

stopping: make(chan struct{}),
@ -45,25 +37,29 @@ func New(sectors SectorProvider, cfg *Config) (*Sealer, error) {
return sb, nil
}

func (sb *Sealer) NewSector(ctx context.Context, sector abi.SectorID) error {
func (sb *Sealer) NewSector(ctx context.Context, sector storage.SectorRef) error {
// TODO: Allocate the sector here instead of in addpiece

return nil
}

func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPieceSizes []abi.UnpaddedPieceSize, pieceSize abi.UnpaddedPieceSize, file storage.Data) (abi.PieceInfo, error) {
func (sb *Sealer) AddPiece(ctx context.Context, sector storage.SectorRef, existingPieceSizes []abi.UnpaddedPieceSize, pieceSize abi.UnpaddedPieceSize, file storage.Data) (abi.PieceInfo, error) {
var offset abi.UnpaddedPieceSize
for _, size := range existingPieceSizes {
offset += size
}

maxPieceSize := abi.PaddedPieceSize(sb.ssize)
ssize, err := sector.ProofType.SectorSize()
if err != nil {
return abi.PieceInfo{}, err
}

maxPieceSize := abi.PaddedPieceSize(ssize)

if offset.Padded()+pieceSize.Padded() > maxPieceSize {
return abi.PieceInfo{}, xerrors.Errorf("can't add %d byte piece to sector %v with %d bytes of existing pieces", pieceSize, sector, offset)
}

var err error
var done func()
var stagedFile *partialFile

@ -135,7 +131,7 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie
break
}

c, err := sb.pieceCid(buf[:read])
c, err := sb.pieceCid(sector.ProofType, buf[:read])
if err != nil {
return abi.PieceInfo{}, xerrors.Errorf("pieceCid error: %w", err)
}
@ -162,7 +158,7 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie
return pieceCids[0], nil
}

pieceCID, err := ffi.GenerateUnsealedCID(sb.sealProofType, pieceCids)
pieceCID, err := ffi.GenerateUnsealedCID(sector.ProofType, pieceCids)
if err != nil {
return abi.PieceInfo{}, xerrors.Errorf("generate unsealed CID: %w", err)
}
@ -178,13 +174,13 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie
}, nil
}

func (sb *Sealer) pieceCid(in []byte) (cid.Cid, error) {
func (sb *Sealer) pieceCid(spt abi.RegisteredSealProof, in []byte) (cid.Cid, error) {
prf, werr, err := ToReadableFile(bytes.NewReader(in), int64(len(in)))
if err != nil {
return cid.Undef, xerrors.Errorf("getting tee reader pipe: %w", err)
}

pieceCID, err := ffi.GeneratePieceCIDFromFile(sb.sealProofType, prf, abi.UnpaddedPieceSize(len(in)))
pieceCID, err := ffi.GeneratePieceCIDFromFile(spt, prf, abi.UnpaddedPieceSize(len(in)))
if err != nil {
return cid.Undef, xerrors.Errorf("generating piece commitment: %w", err)
}
@ -194,8 +190,12 @@ func (sb *Sealer) pieceCid(in []byte) (cid.Cid, error) {
return pieceCID, werr()
}

func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error {
maxPieceSize := abi.PaddedPieceSize(sb.ssize)
func (sb *Sealer) UnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error {
ssize, err := sector.ProofType.SectorSize()
if err != nil {
return err
}
maxPieceSize := abi.PaddedPieceSize(ssize)

// try finding existing
unsealedPath, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, storiface.FTNone, storiface.PathStorage)
@ -317,12 +317,12 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s
// </eww>

// TODO: This may be possible to do in parallel
err = ffi.UnsealRange(sb.sealProofType,
err = ffi.UnsealRange(sector.ProofType,
srcPaths.Cache,
sealed,
opw,
sector.Number,
sector.Miner,
sector.ID.Number,
sector.ID.Miner,
randomness,
commd,
uint64(at.Unpadded()),
@ -356,14 +356,18 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s
return nil
}

func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) {
func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) {
path, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, storiface.FTNone, storiface.PathStorage)
if err != nil {
return false, xerrors.Errorf("acquire unsealed sector path: %w", err)
}
defer done()

maxPieceSize := abi.PaddedPieceSize(sb.ssize)
ssize, err := sector.ProofType.SectorSize()
if err != nil {
return false, err
}
maxPieceSize := abi.PaddedPieceSize(ssize)

pf, err := openPartialFile(maxPieceSize, path.Unsealed)
if err != nil {
@ -408,7 +412,7 @@ func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector abi.Se
return true, nil
}

func (sb *Sealer) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) {
func (sb *Sealer) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) {
paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, storiface.FTSealed|storiface.FTCache, storiface.PathSealing)
if err != nil {
return nil, xerrors.Errorf("acquiring sector paths: %w", err)
@ -443,29 +447,33 @@ func (sb *Sealer) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke
for _, piece := range pieces {
sum += piece.Size.Unpadded()
}
ussize := abi.PaddedPieceSize(sb.ssize).Unpadded()
ssize, err := sector.ProofType.SectorSize()
if err != nil {
return nil, err
}
ussize := abi.PaddedPieceSize(ssize).Unpadded()
if sum != ussize {
return nil, xerrors.Errorf("aggregated piece sizes don't match sector size: %d != %d (%d)", sum, ussize, int64(ussize-sum))
}

// TODO: context cancellation respect
p1o, err := ffi.SealPreCommitPhase1(
sb.sealProofType,
sector.ProofType,
paths.Cache,
paths.Unsealed,
paths.Sealed,
sector.Number,
sector.Miner,
sector.ID.Number,
sector.ID.Miner,
ticket,
pieces,
)
if err != nil {
return nil, xerrors.Errorf("presealing sector %d (%s): %w", sector.Number, paths.Unsealed, err)
return nil, xerrors.Errorf("presealing sector %d (%s): %w", sector.ID.Number, paths.Unsealed, err)
}
return p1o, nil
}

func (sb *Sealer) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.PreCommit1Out) (storage.SectorCids, error) {
func (sb *Sealer) SealPreCommit2(ctx context.Context, sector storage.SectorRef, phase1Out storage.PreCommit1Out) (storage.SectorCids, error) {
paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, 0, storiface.PathSealing)
if err != nil {
return storage.SectorCids{}, xerrors.Errorf("acquiring sector paths: %w", err)
@ -474,7 +482,7 @@ func (sb *Sealer) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase

sealedCID, unsealedCID, err := ffi.SealPreCommitPhase2(phase1Out, paths.Cache, paths.Sealed)
if err != nil {
return storage.SectorCids{}, xerrors.Errorf("presealing sector %d (%s): %w", sector.Number, paths.Unsealed, err)
return storage.SectorCids{}, xerrors.Errorf("presealing sector %d (%s): %w", sector.ID.Number, paths.Unsealed, err)
}

return storage.SectorCids{
@ -483,40 +491,45 @@ func (sb *Sealer) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase
}, nil
}

func (sb *Sealer) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) {
func (sb *Sealer) SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) {
paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, 0, storiface.PathSealing)
if err != nil {
return nil, xerrors.Errorf("acquire sector paths: %w", err)
}
defer done()
output, err := ffi.SealCommitPhase1(
sb.sealProofType,
sector.ProofType,
cids.Sealed,
cids.Unsealed,
paths.Cache,
paths.Sealed,
sector.Number,
sector.Miner,
sector.ID.Number,
sector.ID.Miner,
ticket,
seed,
pieces,
)
if err != nil {
log.Warn("StandaloneSealCommit error: ", err)
log.Warnf("num:%d tkt:%v seed:%v, pi:%v sealedCID:%v, unsealedCID:%v", sector.Number, ticket, seed, pieces, cids.Sealed, cids.Unsealed)
log.Warnf("num:%d tkt:%v seed:%v, pi:%v sealedCID:%v, unsealedCID:%v", sector.ID.Number, ticket, seed, pieces, cids.Sealed, cids.Unsealed)

return nil, xerrors.Errorf("StandaloneSealCommit: %w", err)
}
return output, nil
}

func (sb *Sealer) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.Commit1Out) (storage.Proof, error) {
return ffi.SealCommitPhase2(phase1Out, sector.Number, sector.Miner)
func (sb *Sealer) SealCommit2(ctx context.Context, sector storage.SectorRef, phase1Out storage.Commit1Out) (storage.Proof, error) {
return ffi.SealCommitPhase2(phase1Out, sector.ID.Number, sector.ID.Miner)
}

func (sb *Sealer) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error {
func (sb *Sealer) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) error {
ssize, err := sector.ProofType.SectorSize()
if err != nil {
return err
}
maxPieceSize := abi.PaddedPieceSize(ssize)

if len(keepUnsealed) > 0 {
maxPieceSize := abi.PaddedPieceSize(sb.ssize)

sr := pieceRun(0, maxPieceSize)

@ -580,10 +593,10 @@ func (sb *Sealer) FinalizeSector(ctx context.Context, sector abi.SectorID, keepU
}
defer done()

return ffi.ClearCache(uint64(sb.ssize), paths.Cache)
return ffi.ClearCache(uint64(ssize), paths.Cache)
}

func (sb *Sealer) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error {
func (sb *Sealer) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) error {
// This call is meant to mark storage as 'freeable'. Given that unsealing is
// very expensive, we don't remove data as soon as we can - instead we only
// do that when we don't have free space for data that really needs it
@ -593,7 +606,7 @@ func (sb *Sealer) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safe
return xerrors.Errorf("not supported at this layer")
}

func (sb *Sealer) Remove(ctx context.Context, sector abi.SectorID) error {
func (sb *Sealer) Remove(ctx context.Context, sector storage.SectorRef) error {
return xerrors.Errorf("not supported at this layer") // happens in localworker
}
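The constructor change ripples into every caller: New no longer takes a *Config, so a Sealer is built from just a SectorProvider and the proof type travels with each SectorRef instead. A sketch matching the test updates that follow (dir, ctx, and someReader are placeholders):

```go
// Sketch: constructing a Sealer after this change.
sp := &basicfs.Provider{Root: dir}
sb, err := ffiwrapper.New(sp)
if err != nil {
	return err
}

ref := storage.SectorRef{
	ID:        abi.SectorID{Miner: 123, Number: 1},
	ProofType: abi.RegisteredSealProof_StackedDrg2KiBV1,
}
_, err = sb.AddPiece(ctx, ref, nil, abi.PaddedPieceSize(2048).Unpadded(), someReader)
```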
83 extern/sector-storage/ffiwrapper/sealer_test.go vendored
@ -43,7 +43,7 @@ var sectorSize, _ = sealProofType.SectorSize()
var sealRand = abi.SealRandomness{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2}

type seal struct {
id abi.SectorID
ref storage.SectorRef
cids storage.SectorCids
pi abi.PieceInfo
ticket abi.SealRandomness
@ -56,12 +56,12 @@ func data(sn abi.SectorNumber, dlen abi.UnpaddedPieceSize) io.Reader {
)
}

func (s *seal) precommit(t *testing.T, sb *Sealer, id abi.SectorID, done func()) {
func (s *seal) precommit(t *testing.T, sb *Sealer, id storage.SectorRef, done func()) {
defer done()
dlen := abi.PaddedPieceSize(sectorSize).Unpadded()

var err error
r := data(id.Number, dlen)
r := data(id.ID.Number, dlen)
s.pi, err = sb.AddPiece(context.TODO(), id, []abi.UnpaddedPieceSize{}, dlen, r)
if err != nil {
t.Fatalf("%+v", err)
@ -84,19 +84,19 @@ func (s *seal) commit(t *testing.T, sb *Sealer, done func()) {
defer done()
seed := abi.InteractiveSealRandomness{0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9, 8, 7, 6, 45, 3, 2, 1, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9}

pc1, err := sb.SealCommit1(context.TODO(), s.id, s.ticket, seed, []abi.PieceInfo{s.pi}, s.cids)
pc1, err := sb.SealCommit1(context.TODO(), s.ref, s.ticket, seed, []abi.PieceInfo{s.pi}, s.cids)
if err != nil {
t.Fatalf("%+v", err)
}
proof, err := sb.SealCommit2(context.TODO(), s.id, pc1)
proof, err := sb.SealCommit2(context.TODO(), s.ref, pc1)
if err != nil {
t.Fatalf("%+v", err)
}

ok, err := ProofVerifier.VerifySeal(proof2.SealVerifyInfo{
SectorID: s.id,
SectorID: s.ref.ID,
SealedCID: s.cids.Sealed,
SealProof: sealProofType,
SealProof: s.ref.ProofType,
Proof: proof,
Randomness: s.ticket,
InteractiveRandomness: seed,
@ -111,7 +111,7 @@ func (s *seal) commit(t *testing.T, sb *Sealer, done func()) {
}
}

func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si abi.SectorID, done func()) {
func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si storage.SectorRef, done func()) {
defer done()

var b bytes.Buffer
@ -120,7 +120,7 @@ func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si abi.Sec
t.Fatal(err)
}

expect, _ := ioutil.ReadAll(data(si.Number, 1016))
expect, _ := ioutil.ReadAll(data(si.ID.Number, 1016))
if !bytes.Equal(b.Bytes(), expect) {
t.Fatal("read wrong bytes")
}
@ -150,7 +150,7 @@ func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si abi.Sec
t.Fatal(err)
}

expect, _ = ioutil.ReadAll(data(si.Number, 1016))
expect, _ = ioutil.ReadAll(data(si.ID.Number, 1016))
require.Equal(t, expect, b.Bytes())

b.Reset()
@ -174,13 +174,13 @@ func post(t *testing.T, sealer *Sealer, skipped []abi.SectorID, seals ...seal) {
sis := make([]proof2.SectorInfo, len(seals))
for i, s := range seals {
sis[i] = proof2.SectorInfo{
SealProof: sealProofType,
SectorNumber: s.id.Number,
SealProof: s.ref.ProofType,
SectorNumber: s.ref.ID.Number,
SealedCID: s.cids.Sealed,
}
}

proofs, skp, err := sealer.GenerateWindowPoSt(context.TODO(), seals[0].id.Miner, sis, randomness)
proofs, skp, err := sealer.GenerateWindowPoSt(context.TODO(), seals[0].ref.ID.Miner, sis, randomness)
if len(skipped) > 0 {
require.Error(t, err)
require.EqualValues(t, skipped, skp)
@ -195,7 +195,7 @@ func post(t *testing.T, sealer *Sealer, skipped []abi.SectorID, seals ...seal) {
Randomness: randomness,
Proofs: proofs,
ChallengedSectors: sis,
Prover: seals[0].id.Miner,
Prover: seals[0].ref.ID.Miner,
})
if err != nil {
t.Fatalf("%+v", err)
@ -205,7 +205,7 @@ func post(t *testing.T, sealer *Sealer, skipped []abi.SectorID, seals ...seal) {
}
}

func corrupt(t *testing.T, sealer *Sealer, id abi.SectorID) {
func corrupt(t *testing.T, sealer *Sealer, id storage.SectorRef) {
paths, done, err := sealer.sectors.AcquireSector(context.Background(), id, storiface.FTSealed, 0, storiface.PathStorage)
require.NoError(t, err)
defer done()
@ -264,14 +264,10 @@ func TestSealAndVerify(t *testing.T) {
}
miner := abi.ActorID(123)

cfg := &Config{
SealProofType: sealProofType,
}

sp := &basicfs.Provider{
Root: cdir,
}
sb, err := New(sp, cfg)
sb, err := New(sp)
if err != nil {
t.Fatalf("%+v", err)
}
@ -286,9 +282,12 @@ func TestSealAndVerify(t *testing.T) {
}
defer cleanup()

si := abi.SectorID{Miner: miner, Number: 1}
si := storage.SectorRef{
ID: abi.SectorID{Miner: miner, Number: 1},
ProofType: sealProofType,
}

s := seal{id: si}
s := seal{ref: si}

start := time.Now()

@ -338,13 +337,10 @@ func TestSealPoStNoCommit(t *testing.T) {

miner := abi.ActorID(123)

cfg := &Config{
SealProofType: sealProofType,
}
sp := &basicfs.Provider{
Root: dir,
}
sb, err := New(sp, cfg)
sb, err := New(sp)
if err != nil {
t.Fatalf("%+v", err)
}
@ -360,9 +356,12 @@ func TestSealPoStNoCommit(t *testing.T) {
}
defer cleanup()

si := abi.SectorID{Miner: miner, Number: 1}
si := storage.SectorRef{
ID: abi.SectorID{Miner: miner, Number: 1},
ProofType: sealProofType,
}

s := seal{id: si}
s := seal{ref: si}

start := time.Now()

@ -403,13 +402,10 @@ func TestSealAndVerify3(t *testing.T) {

miner := abi.ActorID(123)

cfg := &Config{
SealProofType: sealProofType,
}
sp := &basicfs.Provider{
Root: dir,
}
sb, err := New(sp, cfg)
sb, err := New(sp)
if err != nil {
t.Fatalf("%+v", err)
}
@ -424,13 +420,22 @@ func TestSealAndVerify3(t *testing.T) {

var wg sync.WaitGroup

si1 := abi.SectorID{Miner: miner, Number: 1}
si2 := abi.SectorID{Miner: miner, Number: 2}
si3 := abi.SectorID{Miner: miner, Number: 3}
si1 := storage.SectorRef{
ID: abi.SectorID{Miner: miner, Number: 1},
ProofType: sealProofType,
}
si2 := storage.SectorRef{
ID: abi.SectorID{Miner: miner, Number: 2},
ProofType: sealProofType,
}
si3 := storage.SectorRef{
ID: abi.SectorID{Miner: miner, Number: 3},
ProofType: sealProofType,
}

s1 := seal{id: si1}
s2 := seal{id: si2}
s3 := seal{id: si3}
s1 := seal{ref: si1}
s2 := seal{ref: si2}
s3 := seal{ref: si3}

wg.Add(3)
go s1.precommit(t, sb, si1, wg.Done) //nolint: staticcheck
@ -451,7 +456,7 @@ func TestSealAndVerify3(t *testing.T) {
corrupt(t, sb, si1)
corrupt(t, sb, si2)

post(t, sb, []abi.SectorID{si1, si2}, s1, s2, s3)
post(t, sb, []abi.SectorID{si1.ID, si2.ID}, s1, s2, s3)
}

func BenchmarkWriteWithAlignment(b *testing.B) {
6 extern/sector-storage/ffiwrapper/types.go vendored
@ -29,8 +29,8 @@ type Storage interface {
storage.Prover
StorageSealer

UnsealPiece(ctx context.Context, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error
ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error)
UnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error
ReadPiece(ctx context.Context, writer io.Writer, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error)
}

type Verifier interface {
@ -44,7 +44,7 @@ type Verifier interface {
type SectorProvider interface {
// * returns storiface.ErrSectorNotFound if a requested existing sector doesn't exist
// * returns an error when allocate is set, and existing isn't, and the sector exists
AcquireSector(ctx context.Context, id abi.SectorID, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error)
AcquireSector(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error)
}

var _ SectorProvider = &basicfs.Provider{}
10 extern/sector-storage/ffiwrapper/verifier_cgo.go vendored
@ -11,6 +11,7 @@ import (
ffi "github.com/filecoin-project/filecoin-ffi"
"github.com/filecoin-project/go-state-types/abi"
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
"github.com/filecoin-project/specs-storage/storage"

"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)
@ -74,12 +75,15 @@ func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorIn
continue
}

sid := abi.SectorID{Miner: mid, Number: s.SectorNumber}
sid := storage.SectorRef{
ID: abi.SectorID{Miner: mid, Number: s.SectorNumber},
ProofType: s.SealProof,
}

paths, d, err := sb.sectors.AcquireSector(ctx, sid, storiface.FTCache|storiface.FTSealed, 0, storiface.PathStorage)
if err != nil {
log.Warnw("failed to acquire sector, skipping", "sector", sid, "error", err)
skipped = append(skipped, sid)
log.Warnw("failed to acquire sector, skipping", "sector", sid.ID, "error", err)
skipped = append(skipped, sid.ID)
continue
}
doneFuncs = append(doneFuncs, d)
106 extern/sector-storage/manager.go vendored
@ -47,9 +47,7 @@ type Worker interface {
}

type SectorManager interface {
SectorSize() abi.SectorSize

ReadPiece(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error
ReadPiece(context.Context, io.Writer, storage.SectorRef, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error

ffiwrapper.StorageSealer
storage.Prover
@ -61,8 +59,6 @@ type WorkerID uuid.UUID // worker session UUID
var ClosedWorkerID = uuid.UUID{}

type Manager struct {
scfg *ffiwrapper.Config

ls stores.LocalStorage
storage *stores.Remote
localStore *stores.Local
@ -105,13 +101,13 @@ type StorageAuth http.Header
type WorkerStateStore *statestore.StateStore
type ManagerStateStore *statestore.StateStore

func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg *ffiwrapper.Config, sc SealerConfig, urls URLs, sa StorageAuth, wss WorkerStateStore, mss ManagerStateStore) (*Manager, error) {
func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, sc SealerConfig, urls URLs, sa StorageAuth, wss WorkerStateStore, mss ManagerStateStore) (*Manager, error) {
lstor, err := stores.NewLocal(ctx, ls, si, urls)
if err != nil {
return nil, err
}

prover, err := ffiwrapper.New(&readonlyProvider{stor: lstor, index: si, spt: cfg.SealProofType}, cfg)
prover, err := ffiwrapper.New(&readonlyProvider{stor: lstor, index: si})
if err != nil {
return nil, xerrors.Errorf("creating prover instance: %w", err)
}
@ -119,15 +115,13 @@ func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg
stor := stores.NewRemote(lstor, si, http.Header(sa), sc.ParallelFetchLimit)

m := &Manager{
scfg: cfg,

ls: ls,
storage: stor,
localStore: lstor,
remoteHnd: &stores.FetchHandler{Local: lstor},
index: si,

sched: newScheduler(cfg.SealProofType),
sched: newScheduler(),

Prover: prover,

@ -162,7 +156,6 @@ func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg
}

err = m.AddWorker(ctx, NewLocalWorker(WorkerConfig{
SealProof: cfg.SealProofType,
TaskTypes: localTasks,
}, stor, lstor, si, m, wss))
if err != nil {
@ -198,23 +191,18 @@ func (m *Manager) ServeHTTP(w http.ResponseWriter, r *http.Request) {
m.remoteHnd.ServeHTTP(w, r)
}

func (m *Manager) SectorSize() abi.SectorSize {
sz, _ := m.scfg.SealProofType.SectorSize()
return sz
}

func schedNop(context.Context, Worker) error {
return nil
}

func (m *Manager) schedFetch(sector abi.SectorID, ft storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) func(context.Context, Worker) error {
func (m *Manager) schedFetch(sector storage.SectorRef, ft storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) func(context.Context, Worker) error {
return func(ctx context.Context, worker Worker) error {
_, err := m.waitSimpleCall(ctx)(worker.Fetch(ctx, sector, ft, ptype, am))
return err
}
}

func (m *Manager) readPiece(sink io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, rok *bool) func(ctx context.Context, w Worker) error {
func (m *Manager) readPiece(sink io.Writer, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, rok *bool) func(ctx context.Context, w Worker) error {
return func(ctx context.Context, w Worker) error {
r, err := m.waitSimpleCall(ctx)(w.ReadPiece(ctx, sink, sector, offset, size))
if err != nil {
@ -227,19 +215,19 @@ func (m *Manager) readPiece(sink io.Writer, sector abi.SectorID, offset storifac
}
}

func (m *Manager) tryReadUnsealedPiece(ctx context.Context, sink io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (foundUnsealed bool, readOk bool, selector WorkerSelector, returnErr error) {
func (m *Manager) tryReadUnsealedPiece(ctx context.Context, sink io.Writer, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (foundUnsealed bool, readOk bool, selector WorkerSelector, returnErr error) {

// acquire a lock purely for reading unsealed sectors
ctx, cancel := context.WithCancel(ctx)
defer cancel()

if err := m.index.StorageLock(ctx, sector, storiface.FTUnsealed, storiface.FTNone); err != nil {
if err := m.index.StorageLock(ctx, sector.ID, storiface.FTUnsealed, storiface.FTNone); err != nil {
returnErr = xerrors.Errorf("acquiring read sector lock: %w", err)
return
}

// passing 0 spt because we only need it when allowFetch is true
best, err := m.index.StorageFindSector(ctx, sector, storiface.FTUnsealed, 0, false)
best, err := m.index.StorageFindSector(ctx, sector.ID, storiface.FTUnsealed, 0, false)
if err != nil {
returnErr = xerrors.Errorf("read piece: checking for already existing unsealed sector: %w", err)
return
@ -249,7 +237,7 @@ func (m *Manager) tryReadUnsealedPiece(ctx context.Context, sink io.Writer, sect
if foundUnsealed { // append to existing
// There is unsealed sector, see if we can read from it

selector = newExistingSelector(m.index, sector, storiface.FTUnsealed, false)
selector = newExistingSelector(m.index, sector.ID, storiface.FTUnsealed, false)

err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, m.schedFetch(sector, storiface.FTUnsealed, storiface.PathSealing, storiface.AcquireMove),
m.readPiece(sink, sector, offset, size, &readOk))
@ -262,7 +250,7 @@ func (m *Manager) tryReadUnsealedPiece(ctx context.Context, sink io.Writer, sect
return
}

func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) error {
func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) error {
foundUnsealed, readOk, selector, err := m.tryReadUnsealedPiece(ctx, sink, sector, offset, size)
if err != nil {
return err
@ -273,7 +261,7 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect
ctx, cancel := context.WithCancel(ctx)
defer cancel()

if err := m.index.StorageLock(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.FTUnsealed); err != nil {
if err := m.index.StorageLock(ctx, sector.ID, storiface.FTSealed|storiface.FTCache, storiface.FTUnsealed); err != nil {
return xerrors.Errorf("acquiring unseal sector lock: %w", err)
}

@ -302,7 +290,7 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect
return err
}

selector = newExistingSelector(m.index, sector, storiface.FTUnsealed, false)
selector = newExistingSelector(m.index, sector.ID, storiface.FTUnsealed, false)

err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, m.schedFetch(sector, storiface.FTUnsealed, storiface.PathSealing, storiface.AcquireMove),
m.readPiece(sink, sector, offset, size, &readOk))
@ -317,16 +305,16 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect
return nil
}

func (m *Manager) NewSector(ctx context.Context, sector abi.SectorID) error {
func (m *Manager) NewSector(ctx context.Context, sector storage.SectorRef) error {
log.Warnf("stub NewSector")
return nil
}

func (m *Manager) AddPiece(ctx context.Context, sector abi.SectorID, existingPieces []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) {
func (m *Manager) AddPiece(ctx context.Context, sector storage.SectorRef, existingPieces []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()

if err := m.index.StorageLock(ctx, sector, storiface.FTNone, storiface.FTUnsealed); err != nil {
if err := m.index.StorageLock(ctx, sector.ID, storiface.FTNone, storiface.FTUnsealed); err != nil {
return abi.PieceInfo{}, xerrors.Errorf("acquiring sector lock: %w", err)
}

@ -335,7 +323,7 @@ func (m *Manager) AddPiece(ctx context.Context, sector abi.SectorID, existingPie
if len(existingPieces) == 0 { // new
selector = newAllocSelector(m.index, storiface.FTUnsealed, storiface.PathSealing)
} else { // use existing
selector = newExistingSelector(m.index, sector, storiface.FTUnsealed, false)
selector = newExistingSelector(m.index, sector.ID, storiface.FTUnsealed, false)
}

var out abi.PieceInfo
@ -353,7 +341,7 @@ func (m *Manager) AddPiece(ctx context.Context, sector abi.SectorID, existingPie
return out, err
}

func (m *Manager) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) {
func (m *Manager) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()

@ -380,7 +368,7 @@ func (m *Manager) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke
return out, waitErr
}

if err := m.index.StorageLock(ctx, sector, storiface.FTUnsealed, storiface.FTSealed|storiface.FTCache); err != nil {
if err := m.index.StorageLock(ctx, sector.ID, storiface.FTUnsealed, storiface.FTSealed|storiface.FTCache); err != nil {
return nil, xerrors.Errorf("acquiring sector lock: %w", err)
}

@ -404,7 +392,7 @@ func (m *Manager) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke
return out, waitErr
}

func (m *Manager) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.PreCommit1Out) (out storage.SectorCids, err error) {
func (m *Manager) SealPreCommit2(ctx context.Context, sector storage.SectorRef, phase1Out storage.PreCommit1Out) (out storage.SectorCids, err error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()

@ -431,11 +419,11 @@ func (m *Manager) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase
return out, waitErr
}

if err := m.index.StorageLock(ctx, sector, storiface.FTSealed, storiface.FTCache); err != nil {
if err := m.index.StorageLock(ctx, sector.ID, storiface.FTSealed, storiface.FTCache); err != nil {
return storage.SectorCids{}, xerrors.Errorf("acquiring sector lock: %w", err)
}

selector := newExistingSelector(m.index, sector, storiface.FTCache|storiface.FTSealed, true)
selector := newExistingSelector(m.index, sector.ID, storiface.FTCache|storiface.FTSealed, true)

err = m.sched.Schedule(ctx, sector, sealtasks.TTPreCommit2, selector, m.schedFetch(sector, storiface.FTCache|storiface.FTSealed, storiface.PathSealing, storiface.AcquireMove), func(ctx context.Context, w Worker) error {
err := m.startWork(ctx, w, wk)(w.SealPreCommit2(ctx, sector, phase1Out))
@ -453,7 +441,7 @@ func (m *Manager) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase
return out, waitErr
}

func (m *Manager) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (out storage.Commit1Out, err error) {
func (m *Manager) SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (out storage.Commit1Out, err error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()

@ -480,14 +468,14 @@ func (m *Manager) SealCommit1(ctx context.Context, sector abi.SectorID, ticket a
return out, waitErr
}

if err := m.index.StorageLock(ctx, sector, storiface.FTSealed, storiface.FTCache); err != nil {
if err := m.index.StorageLock(ctx, sector.ID, storiface.FTSealed, storiface.FTCache); err != nil {
return storage.Commit1Out{}, xerrors.Errorf("acquiring sector lock: %w", err)
}

// NOTE: We set allowFetch to false so that we always execute on a worker
// with direct access to the data. We want to do that because this step is
// generally very cheap / fast, and transferring data is not worth the effort
selector := newExistingSelector(m.index, sector, storiface.FTCache|storiface.FTSealed, false)
selector := newExistingSelector(m.index, sector.ID, storiface.FTCache|storiface.FTSealed, false)

err = m.sched.Schedule(ctx, sector, sealtasks.TTCommit1, selector, m.schedFetch(sector, storiface.FTCache|storiface.FTSealed, storiface.PathSealing, storiface.AcquireMove), func(ctx context.Context, w Worker) error {
err := m.startWork(ctx, w, wk)(w.SealCommit1(ctx, sector, ticket, seed, pieces, cids))
@ -505,7 +493,7 @@ func (m *Manager) SealCommit1(ctx context.Context, sector abi.SectorID, ticket a
return out, waitErr
}

func (m *Manager) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.Commit1Out) (out storage.Proof, err error) {
func (m *Manager) SealCommit2(ctx context.Context, sector storage.SectorRef, phase1Out storage.Commit1Out) (out storage.Proof, err error) {
wk, wait, cancel, err := m.getWork(ctx, sealtasks.TTCommit2, sector, phase1Out)
if err != nil {
return storage.Proof{}, xerrors.Errorf("getWork: %w", err)
@ -548,17 +536,17 @@ func (m *Manager) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Ou
return out, waitErr
}

func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error {
func (m *Manager) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) error {
ctx, cancel := context.WithCancel(ctx)
defer cancel()

if err := m.index.StorageLock(ctx, sector, storiface.FTNone, storiface.FTSealed|storiface.FTUnsealed|storiface.FTCache); err != nil {
if err := m.index.StorageLock(ctx, sector.ID, storiface.FTNone, storiface.FTSealed|storiface.FTUnsealed|storiface.FTCache); err != nil {
return xerrors.Errorf("acquiring sector lock: %w", err)
}

unsealed := storiface.FTUnsealed
{
unsealedStores, err := m.index.StorageFindSector(ctx, sector, storiface.FTUnsealed, 0, false)
unsealedStores, err := m.index.StorageFindSector(ctx, sector.ID, storiface.FTUnsealed, 0, false)
if err != nil {
return xerrors.Errorf("finding unsealed sector: %w", err)
}
@ -568,7 +556,7 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID, keepU
}
}

selector := newExistingSelector(m.index, sector, storiface.FTCache|storiface.FTSealed, false)
selector := newExistingSelector(m.index, sector.ID, storiface.FTCache|storiface.FTSealed, false)

err := m.sched.Schedule(ctx, sector, sealtasks.TTFinalize, selector,
m.schedFetch(sector, storiface.FTCache|storiface.FTSealed|unsealed, storiface.PathSealing, storiface.AcquireMove),
@ -601,75 +589,75 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID, keepU
return nil
}

func (m *Manager) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error {
func (m *Manager) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) error {
log.Warnw("ReleaseUnsealed todo")
return nil
}

func (m *Manager) Remove(ctx context.Context, sector abi.SectorID) error {
func (m *Manager) Remove(ctx context.Context, sector storage.SectorRef) error {
ctx, cancel := context.WithCancel(ctx)
defer cancel()

if err := m.index.StorageLock(ctx, sector, storiface.FTNone, storiface.FTSealed|storiface.FTUnsealed|storiface.FTCache); err != nil {
if err := m.index.StorageLock(ctx, sector.ID, storiface.FTNone, storiface.FTSealed|storiface.FTUnsealed|storiface.FTCache); err != nil {
return xerrors.Errorf("acquiring sector lock: %w", err)
}

var err error

if rerr := m.storage.Remove(ctx, sector, storiface.FTSealed, true); rerr != nil {
if rerr := m.storage.Remove(ctx, sector.ID, storiface.FTSealed, true); rerr != nil {
err = multierror.Append(err, xerrors.Errorf("removing sector (sealed): %w", rerr))
}
if rerr := m.storage.Remove(ctx, sector, storiface.FTCache, true); rerr != nil {
if rerr := m.storage.Remove(ctx, sector.ID, storiface.FTCache, true); rerr != nil {
err = multierror.Append(err, xerrors.Errorf("removing sector (cache): %w", rerr))
}
if rerr := m.storage.Remove(ctx, sector, storiface.FTUnsealed, true); rerr != nil {
if rerr := m.storage.Remove(ctx, sector.ID, storiface.FTUnsealed, true); rerr != nil {
err = multierror.Append(err, xerrors.Errorf("removing sector (unsealed): %w", rerr))
}

return err
}

func (m *Manager) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err string) error {
func (m *Manager) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error {
return m.returnResult(callID, pi, err)
}

func (m *Manager) ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err string) error {
func (m *Manager) ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err *storiface.CallError) error {
return m.returnResult(callID, p1o, err)
}

func (m *Manager) ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err string) error {
func (m *Manager) ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err *storiface.CallError) error {
return m.returnResult(callID, sealed, err)
}

func (m *Manager) ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err string) error {
func (m *Manager) ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err *storiface.CallError) error {
return m.returnResult(callID, out, err)
}

func (m *Manager) ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err string) error {
func (m *Manager) ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err *storiface.CallError) error {
return m.returnResult(callID, proof, err)
}

func (m *Manager) ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err string) error {
func (m *Manager) ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error {
return m.returnResult(callID, nil, err)
}

func (m *Manager) ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err string) error {
func (m *Manager) ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error {
return m.returnResult(callID, nil, err)
}

func (m *Manager) ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err string) error {
func (m *Manager) ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error {
return m.returnResult(callID, nil, err)
}

func (m *Manager) ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err string) error {
func (m *Manager) ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error {
return m.returnResult(callID, nil, err)
}

func (m *Manager) ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err string) error {
func (m *Manager) ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err *storiface.CallError) error {
return m.returnResult(callID, ok, err)
}

func (m *Manager) ReturnFetch(ctx context.Context, callID storiface.CallID, err string) error {
func (m *Manager) ReturnFetch(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error {
return m.returnResult(callID, nil, err)
}
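The net effect for embedders is one fewer constructor argument: the *ffiwrapper.Config parameter is dropped from New, and the scheduler and local worker are created without a global seal proof type. A hedged sketch of the updated call (the surrounding values are placeholders; SealerConfig and ParallelFetchLimit are taken from the hunk above):

```go
// Sketch: wiring a Manager without the removed *ffiwrapper.Config argument.
m, err := sectorstorage.New(ctx, ls, si, sectorstorage.SealerConfig{
	ParallelFetchLimit: 10,
}, urls, sa, wss, mss)
if err != nil {
	return err
}
```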
extern/sector-storage/manager_calltracker.go (vendored, 19 changes)

@@ -5,7 +5,6 @@ import (
 	"crypto/sha256"
 	"encoding/hex"
 	"encoding/json"
-	"errors"
 	"fmt"
 	"os"
 	"time"
@@ -350,15 +349,12 @@ func (m *Manager) waitCall(ctx context.Context, callID storiface.CallID) (interf
 	}
 }
 
-func (m *Manager) returnResult(callID storiface.CallID, r interface{}, serr string) error {
-	var err error
-	if serr != "" {
-		err = errors.New(serr)
-	}
-
+func (m *Manager) returnResult(callID storiface.CallID, r interface{}, cerr *storiface.CallError) error {
 	res := result{
-		r:   r,
-		err: err,
+		r: r,
 	}
+	if cerr != nil {
+		res.err = cerr
+	}
 
 	m.sched.workTracker.onDone(callID)
@@ -392,7 +388,7 @@ func (m *Manager) returnResult(callID storiface.CallID, r interface{}, serr stri
 
 	m.results[wid] = res
 
-	err = m.work.Get(wid).Mutate(func(ws *WorkState) error {
+	err := m.work.Get(wid).Mutate(func(ws *WorkState) error {
 		ws.Status = wsDone
 		return nil
 	})
@@ -416,5 +412,6 @@ func (m *Manager) returnResult(callID storiface.CallID, r interface{}, serr stri
 }
 
 func (m *Manager) Abort(ctx context.Context, call storiface.CallID) error {
-	return m.returnResult(call, nil, "task aborted")
+	// TODO: Allow temp error
+	return m.returnResult(call, nil, storiface.Err(storiface.ErrUnknown, xerrors.New("task aborted")))
 }
extern/sector-storage/manager_test.go (vendored, 36 changes)

@@ -21,6 +21,7 @@ import (
 
 	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/go-statestore"
+	"github.com/filecoin-project/specs-storage/storage"
 
 	"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
 	"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
@@ -90,28 +91,23 @@ func newTestMgr(ctx context.Context, t *testing.T, ds datastore.Datastore) (*Man
 	st := newTestStorage(t)
 
 	si := stores.NewIndex()
-	cfg := &ffiwrapper.Config{
-		SealProofType: abi.RegisteredSealProof_StackedDrg2KiBV1,
-	}
 
 	lstor, err := stores.NewLocal(ctx, st, si, nil)
 	require.NoError(t, err)
 
-	prover, err := ffiwrapper.New(&readonlyProvider{stor: lstor, spt: cfg.SealProofType}, cfg)
+	prover, err := ffiwrapper.New(&readonlyProvider{stor: lstor, index: si})
 	require.NoError(t, err)
 
 	stor := stores.NewRemote(lstor, si, nil, 6000)
 
 	m := &Manager{
-		scfg: cfg,
-
 		ls:         st,
 		storage:    stor,
 		localStore: lstor,
 		remoteHnd:  &stores.FetchHandler{Local: lstor},
 		index:      si,
 
-		sched: newScheduler(cfg.SealProofType),
+		sched: newScheduler(),
 
 		Prover: prover,
@@ -141,12 +137,14 @@ func TestSimple(t *testing.T) {
 	}
 
 	err := m.AddWorker(ctx, newTestWorker(WorkerConfig{
-		SealProof: abi.RegisteredSealProof_StackedDrg2KiBV1,
 		TaskTypes: localTasks,
 	}, lstor, m))
 	require.NoError(t, err)
 
-	sid := abi.SectorID{Miner: 1000, Number: 1}
+	sid := storage.SectorRef{
+		ID:        abi.SectorID{Miner: 1000, Number: 1},
+		ProofType: abi.RegisteredSealProof_StackedDrg2KiBV1,
+	}
 
 	pi, err := m.AddPiece(ctx, sid, nil, 1016, strings.NewReader(strings.Repeat("testthis", 127)))
 	require.NoError(t, err)
@@ -176,14 +174,16 @@ func TestRedoPC1(t *testing.T) {
 	}
 
 	tw := newTestWorker(WorkerConfig{
-		SealProof: abi.RegisteredSealProof_StackedDrg2KiBV1,
 		TaskTypes: localTasks,
 	}, lstor, m)
 
 	err := m.AddWorker(ctx, tw)
 	require.NoError(t, err)
 
-	sid := abi.SectorID{Miner: 1000, Number: 1}
+	sid := storage.SectorRef{
+		ID:        abi.SectorID{Miner: 1000, Number: 1},
+		ProofType: abi.RegisteredSealProof_StackedDrg2KiBV1,
+	}
 
 	pi, err := m.AddPiece(ctx, sid, nil, 1016, strings.NewReader(strings.Repeat("testthis", 127)))
 	require.NoError(t, err)
@@ -228,14 +228,16 @@ func TestRestartManager(t *testing.T) {
 	}
 
 	tw := newTestWorker(WorkerConfig{
-		SealProof: abi.RegisteredSealProof_StackedDrg2KiBV1,
 		TaskTypes: localTasks,
 	}, lstor, m)
 
 	err := m.AddWorker(ctx, tw)
 	require.NoError(t, err)
 
-	sid := abi.SectorID{Miner: 1000, Number: 1}
+	sid := storage.SectorRef{
+		ID:        abi.SectorID{Miner: 1000, Number: 1},
+		ProofType: abi.RegisteredSealProof_StackedDrg2KiBV1,
+	}
 
 	pi, err := m.AddPiece(ctx, sid, nil, 1016, strings.NewReader(strings.Repeat("testthis", 127)))
 	require.NoError(t, err)
@@ -329,14 +331,16 @@ func TestRestartWorker(t *testing.T) {
 	w := newLocalWorker(func() (ffiwrapper.Storage, error) {
 		return &testExec{apch: arch}, nil
 	}, WorkerConfig{
-		SealProof: 0,
 		TaskTypes: localTasks,
 	}, stor, lstor, idx, m, statestore.New(wds))
 
 	err := m.AddWorker(ctx, w)
 	require.NoError(t, err)
 
-	sid := abi.SectorID{Miner: 1000, Number: 1}
+	sid := storage.SectorRef{
+		ID:        abi.SectorID{Miner: 1000, Number: 1},
+		ProofType: abi.RegisteredSealProof_StackedDrg2KiBV1,
+	}
 
 	apDone := make(chan struct{})
 
@@ -363,7 +367,6 @@ func TestRestartWorker(t *testing.T) {
 	w = newLocalWorker(func() (ffiwrapper.Storage, error) {
 		return &testExec{apch: arch}, nil
 	}, WorkerConfig{
-		SealProof: 0,
 		TaskTypes: localTasks,
 	}, stor, lstor, idx, m, statestore.New(wds))
 
@@ -400,7 +403,6 @@ func TestReenableWorker(t *testing.T) {
 	w := newLocalWorker(func() (ffiwrapper.Storage, error) {
 		return &testExec{apch: arch}, nil
 	}, WorkerConfig{
-		SealProof: 0,
 		TaskTypes: localTasks,
 	}, stor, lstor, idx, m, statestore.New(wds))
 
extern/sector-storage/mock/mock.go (vendored, 131 changes)

@@ -27,21 +27,14 @@ var log = logging.Logger("sbmock")
 type SectorMgr struct {
 	sectors      map[abi.SectorID]*sectorState
 	pieces       map[cid.Cid][]byte
-	sectorSize   abi.SectorSize
 	nextSectorID abi.SectorNumber
-	proofType    abi.RegisteredSealProof
 
 	lk sync.Mutex
 }
 
 type mockVerif struct{}
 
-func NewMockSectorMgr(ssize abi.SectorSize, genesisSectors []abi.SectorID) *SectorMgr {
-	rt, err := ffiwrapper.SealProofTypeFromSectorSize(ssize)
-	if err != nil {
-		panic(err)
-	}
-
+func NewMockSectorMgr(genesisSectors []abi.SectorID) *SectorMgr {
 	sectors := make(map[abi.SectorID]*sectorState)
 	for _, sid := range genesisSectors {
 		sectors[sid] = &sectorState{
@@ -53,9 +46,7 @@ func NewMockSectorMgr(ssize abi.SectorSize, genesisSectors []abi.SectorID) *Sect
 	return &SectorMgr{
 		sectors:      sectors,
 		pieces:       map[cid.Cid][]byte{},
-		sectorSize:   ssize,
 		nextSectorID: 5,
-		proofType:    rt,
 	}
 }
 
@@ -75,17 +66,17 @@ type sectorState struct {
 	lk sync.Mutex
 }
 
-func (mgr *SectorMgr) NewSector(ctx context.Context, sector abi.SectorID) error {
+func (mgr *SectorMgr) NewSector(ctx context.Context, sector storage.SectorRef) error {
 	return nil
 }
 
-func (mgr *SectorMgr) AddPiece(ctx context.Context, sectorID abi.SectorID, existingPieces []abi.UnpaddedPieceSize, size abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) {
-	log.Warn("Add piece: ", sectorID, size, mgr.proofType)
+func (mgr *SectorMgr) AddPiece(ctx context.Context, sectorID storage.SectorRef, existingPieces []abi.UnpaddedPieceSize, size abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) {
+	log.Warn("Add piece: ", sectorID, size, sectorID.ProofType)
 
 	var b bytes.Buffer
 	tr := io.TeeReader(r, &b)
 
-	c, err := ffiwrapper.GeneratePieceCIDFromFile(mgr.proofType, tr, size)
+	c, err := ffiwrapper.GeneratePieceCIDFromFile(sectorID.ProofType, tr, size)
 	if err != nil {
 		return abi.PieceInfo{}, xerrors.Errorf("failed to generate piece cid: %w", err)
 	}
@@ -95,12 +86,12 @@ func (mgr *SectorMgr) AddPiece(ctx context.Context, sectorID abi.SectorID, exist
 	mgr.lk.Lock()
 	mgr.pieces[c] = b.Bytes()
 
-	ss, ok := mgr.sectors[sectorID]
+	ss, ok := mgr.sectors[sectorID.ID]
 	if !ok {
 		ss = &sectorState{
 			state: statePacking,
 		}
-		mgr.sectors[sectorID] = ss
+		mgr.sectors[sectorID.ID] = ss
 	}
 	mgr.lk.Unlock()
 
@@ -115,10 +106,6 @@ func (mgr *SectorMgr) AddPiece(ctx context.Context, sectorID abi.SectorID, exist
 	}, nil
 }
 
-func (mgr *SectorMgr) SectorSize() abi.SectorSize {
-	return mgr.sectorSize
-}
-
 func (mgr *SectorMgr) AcquireSectorNumber() (abi.SectorNumber, error) {
 	mgr.lk.Lock()
 	defer mgr.lk.Unlock()
@@ -127,9 +114,9 @@ func (mgr *SectorMgr) AcquireSectorNumber() (abi.SectorNumber, error) {
 	return id, nil
 }
 
-func (mgr *SectorMgr) ForceState(sid abi.SectorID, st int) error {
+func (mgr *SectorMgr) ForceState(sid storage.SectorRef, st int) error {
 	mgr.lk.Lock()
-	ss, ok := mgr.sectors[sid]
+	ss, ok := mgr.sectors[sid.ID]
 	mgr.lk.Unlock()
 	if !ok {
 		return xerrors.Errorf("no sector with id %d in storage", sid)
@@ -140,18 +127,23 @@ func (mgr *SectorMgr) ForceState(sid abi.SectorID, st int) error {
 	return nil
 }
 
-func (mgr *SectorMgr) SealPreCommit1(ctx context.Context, sid abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) {
+func (mgr *SectorMgr) SealPreCommit1(ctx context.Context, sid storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) {
 	mgr.lk.Lock()
-	ss, ok := mgr.sectors[sid]
+	ss, ok := mgr.sectors[sid.ID]
 	mgr.lk.Unlock()
 	if !ok {
 		return nil, xerrors.Errorf("no sector with id %d in storage", sid)
 	}
 
+	ssize, err := sid.ProofType.SectorSize()
+	if err != nil {
+		return nil, xerrors.Errorf("failed to get proof sector size: %w", err)
+	}
+
 	ss.lk.Lock()
 	defer ss.lk.Unlock()
 
-	ussize := abi.PaddedPieceSize(mgr.sectorSize).Unpadded()
+	ussize := abi.PaddedPieceSize(ssize).Unpadded()
 
 	// TODO: verify pieces in sinfo.pieces match passed in pieces
 
@@ -180,7 +172,7 @@ func (mgr *SectorMgr) SealPreCommit1(ctx context.Context, sid abi.SectorID, tick
 		}
 	}
 
-	commd, err := MockVerifier.GenerateDataCommitment(mgr.proofType, pis)
+	commd, err := MockVerifier.GenerateDataCommitment(sid.ProofType, pis)
 	if err != nil {
 		return nil, err
 	}
@@ -195,7 +187,7 @@ func (mgr *SectorMgr) SealPreCommit1(ctx context.Context, sid abi.SectorID, tick
 	return cc, nil
 }
 
-func (mgr *SectorMgr) SealPreCommit2(ctx context.Context, sid abi.SectorID, phase1Out storage.PreCommit1Out) (cids storage.SectorCids, err error) {
+func (mgr *SectorMgr) SealPreCommit2(ctx context.Context, sid storage.SectorRef, phase1Out storage.PreCommit1Out) (cids storage.SectorCids, err error) {
 	db := []byte(string(phase1Out))
 	db[0] ^= 'd'
 
@@ -214,9 +206,9 @@ func (mgr *SectorMgr) SealPreCommit2(ctx context.Context, sid abi.SectorID, phas
 	}, nil
 }
 
-func (mgr *SectorMgr) SealCommit1(ctx context.Context, sid abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (output storage.Commit1Out, err error) {
+func (mgr *SectorMgr) SealCommit1(ctx context.Context, sid storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (output storage.Commit1Out, err error) {
 	mgr.lk.Lock()
-	ss, ok := mgr.sectors[sid]
+	ss, ok := mgr.sectors[sid.ID]
 	mgr.lk.Unlock()
 	if !ok {
 		return nil, xerrors.Errorf("no such sector %d", sid)
@@ -236,16 +228,16 @@ func (mgr *SectorMgr) SealCommit1(ctx context.Context, sid abi.SectorID, ticket
 
 	var out [32]byte
 	for i := range out {
-		out[i] = cids.Unsealed.Bytes()[i] + cids.Sealed.Bytes()[31-i] - ticket[i]*seed[i] ^ byte(sid.Number&0xff)
+		out[i] = cids.Unsealed.Bytes()[i] + cids.Sealed.Bytes()[31-i] - ticket[i]*seed[i] ^ byte(sid.ID.Number&0xff)
 	}
 
 	return out[:], nil
 }
 
-func (mgr *SectorMgr) SealCommit2(ctx context.Context, sid abi.SectorID, phase1Out storage.Commit1Out) (proof storage.Proof, err error) {
+func (mgr *SectorMgr) SealCommit2(ctx context.Context, sid storage.SectorRef, phase1Out storage.Commit1Out) (proof storage.Proof, err error) {
 	var out [1920]byte
 	for i := range out[:len(phase1Out)] {
-		out[i] = phase1Out[i] ^ byte(sid.Number&0xff)
+		out[i] = phase1Out[i] ^ byte(sid.ID.Number&0xff)
 	}
 
 	return out[:], nil
@@ -253,10 +245,10 @@ func (mgr *SectorMgr) SealCommit2(ctx context.Context, sid abi.SectorID, phase1O
 
 // Test Instrumentation Methods
 
-func (mgr *SectorMgr) MarkFailed(sid abi.SectorID, failed bool) error {
+func (mgr *SectorMgr) MarkFailed(sid storage.SectorRef, failed bool) error {
 	mgr.lk.Lock()
 	defer mgr.lk.Unlock()
-	ss, ok := mgr.sectors[sid]
+	ss, ok := mgr.sectors[sid.ID]
 	if !ok {
 		return fmt.Errorf("no such sector in storage")
 	}
@@ -265,10 +257,10 @@ func (mgr *SectorMgr) MarkFailed(sid abi.SectorID, failed bool) error {
 	return nil
 }
 
-func (mgr *SectorMgr) MarkCorrupted(sid abi.SectorID, corrupted bool) error {
+func (mgr *SectorMgr) MarkCorrupted(sid storage.SectorRef, corrupted bool) error {
 	mgr.lk.Lock()
 	defer mgr.lk.Unlock()
-	ss, ok := mgr.sectors[sid]
+	ss, ok := mgr.sectors[sid.ID]
 	if !ok {
 		return fmt.Errorf("no such sector in storage")
 	}
@@ -353,113 +345,120 @@ func generateFakePoSt(sectorInfo []proof2.SectorInfo, rpt func(abi.RegisteredSea
 	}
 }
 
-func (mgr *SectorMgr) ReadPiece(ctx context.Context, w io.Writer, sectorID abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, c cid.Cid) error {
-	if len(mgr.sectors[sectorID].pieces) > 1 || offset != 0 {
+func (mgr *SectorMgr) ReadPiece(ctx context.Context, w io.Writer, sectorID storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, c cid.Cid) error {
+	if len(mgr.sectors[sectorID.ID].pieces) > 1 || offset != 0 {
 		panic("implme")
 	}
 
-	_, err := io.CopyN(w, bytes.NewReader(mgr.pieces[mgr.sectors[sectorID].pieces[0]]), int64(size))
+	_, err := io.CopyN(w, bytes.NewReader(mgr.pieces[mgr.sectors[sectorID.ID].pieces[0]]), int64(size))
 	return err
 }
 
-func (mgr *SectorMgr) StageFakeData(mid abi.ActorID) (abi.SectorID, []abi.PieceInfo, error) {
-	usize := abi.PaddedPieceSize(mgr.sectorSize).Unpadded()
+func (mgr *SectorMgr) StageFakeData(mid abi.ActorID, spt abi.RegisteredSealProof) (storage.SectorRef, []abi.PieceInfo, error) {
+	psize, err := spt.SectorSize()
+	if err != nil {
+		return storage.SectorRef{}, nil, err
+	}
+	usize := abi.PaddedPieceSize(psize).Unpadded()
 	sid, err := mgr.AcquireSectorNumber()
 	if err != nil {
-		return abi.SectorID{}, nil, err
+		return storage.SectorRef{}, nil, err
 	}
 
 	buf := make([]byte, usize)
 	_, _ = rand.Read(buf) // nolint:gosec
 
-	id := abi.SectorID{
-		Miner:  mid,
-		Number: sid,
+	id := storage.SectorRef{
+		ID: abi.SectorID{
+			Miner:  mid,
+			Number: sid,
+		},
+		ProofType: spt,
 	}
 
 	pi, err := mgr.AddPiece(context.TODO(), id, nil, usize, bytes.NewReader(buf))
 	if err != nil {
-		return abi.SectorID{}, nil, err
+		return storage.SectorRef{}, nil, err
 	}
 
 	return id, []abi.PieceInfo{pi}, nil
 }
 
-func (mgr *SectorMgr) FinalizeSector(context.Context, abi.SectorID, []storage.Range) error {
+func (mgr *SectorMgr) FinalizeSector(context.Context, storage.SectorRef, []storage.Range) error {
 	return nil
 }
 
-func (mgr *SectorMgr) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error {
+func (mgr *SectorMgr) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) error {
 	return nil
 }
 
-func (mgr *SectorMgr) Remove(ctx context.Context, sector abi.SectorID) error {
+func (mgr *SectorMgr) Remove(ctx context.Context, sector storage.SectorRef) error {
 	mgr.lk.Lock()
 	defer mgr.lk.Unlock()
 
-	if _, has := mgr.sectors[sector]; !has {
+	if _, has := mgr.sectors[sector.ID]; !has {
 		return xerrors.Errorf("sector not found")
 	}
 
-	delete(mgr.sectors, sector)
+	delete(mgr.sectors, sector.ID)
 	return nil
 }
 
-func (mgr *SectorMgr) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, ids []abi.SectorID) ([]abi.SectorID, error) {
+func (mgr *SectorMgr) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, ids []storage.SectorRef) ([]abi.SectorID, error) {
 	var bad []abi.SectorID
 
 	for _, sid := range ids {
-		_, found := mgr.sectors[sid]
+		_, found := mgr.sectors[sid.ID]
 
-		if !found || mgr.sectors[sid].failed {
-			bad = append(bad, sid)
+		if !found || mgr.sectors[sid.ID].failed {
+			bad = append(bad, sid.ID)
 		}
 	}
 
 	return bad, nil
}
 
-func (mgr *SectorMgr) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err string) error {
+func (mgr *SectorMgr) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error {
 	panic("not supported")
 }
 
-func (mgr *SectorMgr) ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err string) error {
+func (mgr *SectorMgr) ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err *storiface.CallError) error {
 	panic("not supported")
 }
 
-func (mgr *SectorMgr) ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err string) error {
+func (mgr *SectorMgr) ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err *storiface.CallError) error {
 	panic("not supported")
 }
 
-func (mgr *SectorMgr) ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err string) error {
+func (mgr *SectorMgr) ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err *storiface.CallError) error {
 	panic("not supported")
 }
 
-func (mgr *SectorMgr) ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err string) error {
+func (mgr *SectorMgr) ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err *storiface.CallError) error {
 	panic("not supported")
 }
 
-func (mgr *SectorMgr) ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err string) error {
+func (mgr *SectorMgr) ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error {
 	panic("not supported")
 }
 
-func (mgr *SectorMgr) ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err string) error {
+func (mgr *SectorMgr) ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error {
 	panic("not supported")
 }
 
-func (mgr *SectorMgr) ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err string) error {
+func (mgr *SectorMgr) ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error {
 	panic("not supported")
 }
 
-func (mgr *SectorMgr) ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err string) error {
+func (mgr *SectorMgr) ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error {
 	panic("not supported")
 }
 
-func (mgr *SectorMgr) ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err string) error {
+func (mgr *SectorMgr) ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err *storiface.CallError) error {
 	panic("not supported")
 }
 
-func (mgr *SectorMgr) ReturnFetch(ctx context.Context, callID storiface.CallID, err string) error {
+func (mgr *SectorMgr) ReturnFetch(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error {
 	panic("not supported")
 }
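
The constructor and StageFakeData changes above move the proof type from the mock's state to each call site; the mock_test.go change below exercises exactly this. Shown in isolation as a usage sketch:

package example

import (
	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/extern/sector-storage/mock"
)

func stage() error {
	// Sector size is no longer fixed at construction...
	mgr := mock.NewMockSectorMgr(nil)

	// ...it is now implied by the seal proof type passed per call.
	_, _, err := mgr.StageFakeData(123, abi.RegisteredSealProof_StackedDrg2KiBV1_1)
	return err
}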
extern/sector-storage/mock/mock_test.go (vendored, 4 changes)

@@ -9,9 +9,9 @@ import (
 )
 
 func TestOpFinish(t *testing.T) {
-	sb := NewMockSectorMgr(2048, nil)
+	sb := NewMockSectorMgr(nil)
 
-	sid, pieces, err := sb.StageFakeData(123)
+	sid, pieces, err := sb.StageFakeData(123, abi.RegisteredSealProof_StackedDrg2KiBV1_1)
 	if err != nil {
 		t.Fatal(err)
 	}
extern/sector-storage/request_queue.go (vendored, 2 changes)

@@ -20,7 +20,7 @@ func (q requestQueue) Less(i, j int) bool {
 		return q[i].taskType.Less(q[j].taskType)
 	}
 
-	return q[i].sector.Number < q[j].sector.Number // optimize minerActor.NewSectors bitfield
+	return q[i].sector.ID.Number < q[j].sector.ID.Number // optimize minerActor.NewSectors bitfield
 }
 
 func (q requestQueue) Swap(i, j int) {
extern/sector-storage/resources.go (vendored, 9 changes)

@@ -314,4 +314,13 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
 func init() {
 	ResourceTable[sealtasks.TTUnseal] = ResourceTable[sealtasks.TTPreCommit1] // TODO: measure accurately
 	ResourceTable[sealtasks.TTReadUnsealed] = ResourceTable[sealtasks.TTFetch]
+
+	// V1_1 is the same as V1
+	for _, m := range ResourceTable {
+		m[abi.RegisteredSealProof_StackedDrg2KiBV1_1] = m[abi.RegisteredSealProof_StackedDrg2KiBV1]
+		m[abi.RegisteredSealProof_StackedDrg8MiBV1_1] = m[abi.RegisteredSealProof_StackedDrg8MiBV1]
+		m[abi.RegisteredSealProof_StackedDrg512MiBV1_1] = m[abi.RegisteredSealProof_StackedDrg512MiBV1]
+		m[abi.RegisteredSealProof_StackedDrg32GiBV1_1] = m[abi.RegisteredSealProof_StackedDrg32GiBV1]
+		m[abi.RegisteredSealProof_StackedDrg64GiBV1_1] = m[abi.RegisteredSealProof_StackedDrg64GiBV1]
+	}
 }
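
In effect (editor's note): once this init() runs, a lookup keyed by a V1_1 proof type yields the same Resources entry as its V1 counterpart, so the scheduler changes further down can index the table by sector.ProofType without new measurements. A sketch, assuming sectorstorage package scope:

// Equal by construction, since init() aliases the V1_1 keys to the V1 entries:
needRes := ResourceTable[sealtasks.TTPreCommit1][abi.RegisteredSealProof_StackedDrg32GiBV1_1]
_ = needRes // same Resources value as the ...StackedDrg32GiBV1 entry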
extern/sector-storage/roprov.go (vendored, 14 changes)

@@ -5,7 +5,7 @@ import (
 
 	"golang.org/x/xerrors"
 
-	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/specs-storage/storage"
 
 	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
 	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
@@ -14,23 +14,17 @@ import (
 type readonlyProvider struct {
 	index stores.SectorIndex
 	stor  *stores.Local
-	spt   abi.RegisteredSealProof
 }
 
-func (l *readonlyProvider) AcquireSector(ctx context.Context, id abi.SectorID, existing storiface.SectorFileType, allocate storiface.SectorFileType, sealing storiface.PathType) (storiface.SectorPaths, func(), error) {
+func (l *readonlyProvider) AcquireSector(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, sealing storiface.PathType) (storiface.SectorPaths, func(), error) {
 	if allocate != storiface.FTNone {
 		return storiface.SectorPaths{}, nil, xerrors.New("read-only storage")
 	}
 
-	ssize, err := l.spt.SectorSize()
-	if err != nil {
-		return storiface.SectorPaths{}, nil, xerrors.Errorf("failed to determine sector size: %w", err)
-	}
-
 	ctx, cancel := context.WithCancel(ctx)
 
 	// use TryLock to avoid blocking
-	locked, err := l.index.StorageTryLock(ctx, id, existing, storiface.FTNone)
+	locked, err := l.index.StorageTryLock(ctx, id.ID, existing, storiface.FTNone)
 	if err != nil {
 		cancel()
 		return storiface.SectorPaths{}, nil, xerrors.Errorf("acquiring sector lock: %w", err)
@@ -40,7 +34,7 @@ func (l *readonlyProvider) AcquireSector(ctx context.Context, id abi.SectorID, e
 		return storiface.SectorPaths{}, nil, xerrors.Errorf("failed to acquire sector lock")
 	}
 
-	p, _, err := l.stor.AcquireSector(ctx, id, ssize, existing, allocate, sealing, storiface.AcquireMove)
+	p, _, err := l.stor.AcquireSector(ctx, id, existing, allocate, sealing, storiface.AcquireMove)
 
 	return p, cancel, err
 }
extern/sector-storage/sched.go (vendored, 23 changes)

@@ -11,6 +11,7 @@ import (
 	"golang.org/x/xerrors"
 
 	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/specs-storage/storage"
 
 	"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
 	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
@@ -51,8 +52,6 @@ type WorkerSelector interface {
 }
 
 type scheduler struct {
-	spt abi.RegisteredSealProof
-
 	workersLk sync.RWMutex
 	workers   map[WorkerID]*workerHandle
 
@@ -122,7 +121,7 @@ type activeResources struct {
 }
 
 type workerRequest struct {
-	sector   abi.SectorID
+	sector   storage.SectorRef
 	taskType sealtasks.TaskType
 	priority int // larger values more important
 	sel      WorkerSelector
@@ -143,10 +142,8 @@ type workerResponse struct {
 	err error
 }
 
-func newScheduler(spt abi.RegisteredSealProof) *scheduler {
+func newScheduler() *scheduler {
 	return &scheduler{
-		spt: spt,
-
 		workers: map[WorkerID]*workerHandle{},
 
 		schedule: make(chan *workerRequest),
@@ -168,7 +165,7 @@ func newScheduler(spt abi.RegisteredSealProof) *scheduler {
 	}
 }
 
-func (sh *scheduler) Schedule(ctx context.Context, sector abi.SectorID, taskType sealtasks.TaskType, sel WorkerSelector, prepare WorkerAction, work WorkerAction) error {
+func (sh *scheduler) Schedule(ctx context.Context, sector storage.SectorRef, taskType sealtasks.TaskType, sel WorkerSelector, prepare WorkerAction, work WorkerAction) error {
 	ret := make(chan workerResponse)
 
 	select {
@@ -315,7 +312,7 @@ func (sh *scheduler) diag() SchedDiagInfo {
 		task := (*sh.schedQueue)[sqi]
 
 		out.Requests = append(out.Requests, SchedDiagRequestInfo{
-			Sector:   task.sector,
+			Sector:   task.sector.ID,
 			TaskType: task.taskType,
 			Priority: task.priority,
 		})
@@ -378,7 +375,7 @@ func (sh *scheduler) trySched() {
 		}()
 
 		task := (*sh.schedQueue)[sqi]
-		needRes := ResourceTable[task.taskType][sh.spt]
+		needRes := ResourceTable[task.taskType][task.sector.ProofType]
 
 		task.indexHeap = sqi
 		for wnd, windowRequest := range sh.openWindows {
@@ -400,7 +397,7 @@ func (sh *scheduler) trySched() {
 			}
 
 			rpcCtx, cancel := context.WithTimeout(task.ctx, SelectorTimeout)
-			ok, err := task.sel.Ok(rpcCtx, task.taskType, sh.spt, worker)
+			ok, err := task.sel.Ok(rpcCtx, task.taskType, task.sector.ProofType, worker)
 			cancel()
 			if err != nil {
 				log.Errorf("trySched(1) req.sel.Ok error: %+v", err)
@@ -456,21 +453,21 @@ func (sh *scheduler) trySched() {
 
 	for sqi := 0; sqi < sh.schedQueue.Len(); sqi++ {
 		task := (*sh.schedQueue)[sqi]
-		needRes := ResourceTable[task.taskType][sh.spt]
+		needRes := ResourceTable[task.taskType][task.sector.ProofType]
 
 		selectedWindow := -1
 		for _, wnd := range acceptableWindows[task.indexHeap] {
 			wid := sh.openWindows[wnd].worker
 			wr := sh.workers[wid].info.Resources
 
-			log.Debugf("SCHED try assign sqi:%d sector %d to window %d", sqi, task.sector.Number, wnd)
+			log.Debugf("SCHED try assign sqi:%d sector %d to window %d", sqi, task.sector.ID.Number, wnd)
 
 			// TODO: allow bigger windows
 			if !windows[wnd].allocated.canHandleRequest(needRes, wid, "schedAssign", wr) {
 				continue
 			}
 
-			log.Debugf("SCHED ASSIGNED sqi:%d sector %d task %s to window %d", sqi, task.sector.Number, task.taskType, wnd)
+			log.Debugf("SCHED ASSIGNED sqi:%d sector %d task %s to window %d", sqi, task.sector.ID.Number, task.taskType, wnd)
 
 			windows[wnd].allocated.add(wr, needRes)
 			// TODO: We probably want to re-sort acceptableWindows here based on new
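
With spt gone from the scheduler, every request carries its proof type in the SectorRef, and resource lookups become per-task (ResourceTable[taskType][sector.ProofType]). A hypothetical call-site sketch, assuming a scheduler sched, a context ctx, a selector sel, and actions prepare/work in scope:

ref := storage.SectorRef{
	ID:        abi.SectorID{Miner: 1000, Number: 1},
	ProofType: abi.RegisteredSealProof_StackedDrg32GiBV1_1,
}

// The scheduler no longer needs to be told the proof type up front:
err := sched.Schedule(ctx, ref, sealtasks.TTPreCommit1, sel, prepare, work)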
extern/sector-storage/sched_test.go (vendored, 59 changes)

@@ -47,55 +47,55 @@ type schedTestWorker struct {
 	session uuid.UUID
 }
 
-func (s *schedTestWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) {
+func (s *schedTestWorker) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) {
 	panic("implement me")
 }
 
-func (s *schedTestWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storiface.CallID, error) {
+func (s *schedTestWorker) SealPreCommit2(ctx context.Context, sector storage.SectorRef, pc1o storage.PreCommit1Out) (storiface.CallID, error) {
 	panic("implement me")
 }
 
-func (s *schedTestWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) {
+func (s *schedTestWorker) SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) {
 	panic("implement me")
 }
 
-func (s *schedTestWorker) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storiface.CallID, error) {
+func (s *schedTestWorker) SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (storiface.CallID, error) {
 	panic("implement me")
 }
 
-func (s *schedTestWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) (storiface.CallID, error) {
+func (s *schedTestWorker) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) {
 	panic("implement me")
 }
 
-func (s *schedTestWorker) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) (storiface.CallID, error) {
+func (s *schedTestWorker) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (storiface.CallID, error) {
 	panic("implement me")
 }
 
-func (s *schedTestWorker) Remove(ctx context.Context, sector abi.SectorID) (storiface.CallID, error) {
+func (s *schedTestWorker) Remove(ctx context.Context, sector storage.SectorRef) (storiface.CallID, error) {
 	panic("implement me")
 }
 
-func (s *schedTestWorker) NewSector(ctx context.Context, sector abi.SectorID) (storiface.CallID, error) {
+func (s *schedTestWorker) NewSector(ctx context.Context, sector storage.SectorRef) (storiface.CallID, error) {
 	panic("implement me")
 }
 
-func (s *schedTestWorker) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) {
+func (s *schedTestWorker) AddPiece(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) {
 	panic("implement me")
 }
 
-func (s *schedTestWorker) MoveStorage(ctx context.Context, sector abi.SectorID, types storiface.SectorFileType) (storiface.CallID, error) {
+func (s *schedTestWorker) MoveStorage(ctx context.Context, sector storage.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) {
 	panic("implement me")
 }
 
-func (s *schedTestWorker) Fetch(ctx context.Context, id abi.SectorID, ft storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) {
+func (s *schedTestWorker) Fetch(ctx context.Context, id storage.SectorRef, ft storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) {
 	panic("implement me")
 }
 
-func (s *schedTestWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) (storiface.CallID, error) {
+func (s *schedTestWorker) UnsealPiece(ctx context.Context, id storage.SectorRef, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) (storiface.CallID, error) {
 	panic("implement me")
 }
 
-func (s *schedTestWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) {
+func (s *schedTestWorker) ReadPiece(ctx context.Context, writer io.Writer, id storage.SectorRef, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) {
 	panic("implement me")
 }
 
@@ -165,8 +165,7 @@ func addTestWorker(t *testing.T, sched *scheduler, index *stores.Index, name str
 }
 
 func TestSchedStartStop(t *testing.T) {
-	spt := abi.RegisteredSealProof_StackedDrg32GiBV1
-	sched := newScheduler(spt)
+	sched := newScheduler()
 	go sched.runSched()
 
 	addTestWorker(t, sched, stores.NewIndex(), "fred", nil)
@@ -211,12 +210,15 @@ func TestSched(t *testing.T) {
 			go func() {
 				defer rm.wg.Done()
 
-				sectorNum := abi.SectorID{
-					Miner:  8,
-					Number: sid,
+				sectorRef := storage.SectorRef{
+					ID: abi.SectorID{
+						Miner:  8,
+						Number: sid,
+					},
+					ProofType: spt,
 				}
 
-				err := sched.Schedule(ctx, sectorNum, taskType, sel, func(ctx context.Context, w Worker) error {
+				err := sched.Schedule(ctx, sectorRef, taskType, sel, func(ctx context.Context, w Worker) error {
 					wi, err := w.Info(ctx)
 					require.NoError(t, err)
 
@@ -286,7 +288,7 @@ func TestSched(t *testing.T) {
 		return func(t *testing.T) {
 			index := stores.NewIndex()
 
-			sched := newScheduler(spt)
+			sched := newScheduler()
 			sched.testSync = make(chan struct{})
 
 			go sched.runSched()
@@ -518,7 +520,6 @@ func (s slowishSelector) Cmp(ctx context.Context, task sealtasks.TaskType, a, b
 var _ WorkerSelector = slowishSelector(true)
 
 func BenchmarkTrySched(b *testing.B) {
-	spt := abi.RegisteredSealProof_StackedDrg32GiBV1
 	logging.SetAllLoggers(logging.LevelInfo)
 	defer logging.SetAllLoggers(logging.LevelDebug)
 	ctx := context.Background()
@@ -528,7 +529,7 @@ func BenchmarkTrySched(b *testing.B) {
 	for i := 0; i < b.N; i++ {
 		b.StopTimer()
 
-		sched := newScheduler(spt)
+		sched := newScheduler()
 		sched.workers[WorkerID{}] = &workerHandle{
 			workerRpc: nil,
 			info: storiface.WorkerInfo{
@@ -568,9 +569,8 @@ func BenchmarkTrySched(b *testing.B) {
 }
 
 func TestWindowCompact(t *testing.T) {
-	sh := scheduler{
-		spt: abi.RegisteredSealProof_StackedDrg32GiBV1,
-	}
+	sh := scheduler{}
+	spt := abi.RegisteredSealProof_StackedDrg32GiBV1
 
 	test := func(start [][]sealtasks.TaskType, expect [][]sealtasks.TaskType) func(t *testing.T) {
 		return func(t *testing.T) {
@@ -584,8 +584,11 @@ func TestWindowCompact(t *testing.T) {
 			window := &schedWindow{}
 
 			for _, task := range windowTasks {
-				window.todo = append(window.todo, &workerRequest{taskType: task})
-				window.allocated.add(wh.info.Resources, ResourceTable[task][sh.spt])
+				window.todo = append(window.todo, &workerRequest{
+					taskType: task,
+					sector:   storage.SectorRef{ProofType: spt},
+				})
+				window.allocated.add(wh.info.Resources, ResourceTable[task][spt])
 			}
 
 			wh.activeWindows = append(wh.activeWindows, window)
@@ -604,7 +607,7 @@ func TestWindowCompact(t *testing.T) {
 
 			for ti, task := range tasks {
 				require.Equal(t, task, wh.activeWindows[wi].todo[ti].taskType, "%d, %d", wi, ti)
-				expectRes.add(wh.info.Resources, ResourceTable[task][sh.spt])
+				expectRes.add(wh.info.Resources, ResourceTable[task][spt])
 			}
 
 			require.Equal(t, expectRes.cpuUse, wh.activeWindows[wi].allocated.cpuUse, "%d", wi)
extern/sector-storage/sched_worker.go (vendored, 8 changes)

@@ -294,7 +294,7 @@ func (sw *schedWorker) workerCompactWindows() {
 		var moved []int
 
 		for ti, todo := range window.todo {
-			needRes := ResourceTable[todo.taskType][sw.sched.spt]
+			needRes := ResourceTable[todo.taskType][todo.sector.ProofType]
 			if !lower.allocated.canHandleRequest(needRes, sw.wid, "compactWindows", worker.info.Resources) {
 				continue
 			}
@@ -350,7 +350,7 @@ assignLoop:
 
 		worker.lk.Lock()
 		for t, todo := range firstWindow.todo {
-			needRes := ResourceTable[todo.taskType][sw.sched.spt]
+			needRes := ResourceTable[todo.taskType][todo.sector.ProofType]
 			if worker.preparing.canHandleRequest(needRes, sw.wid, "startPreparing", worker.info.Resources) {
 				tidx = t
 				break
@@ -364,7 +364,7 @@ assignLoop:
 
 		todo := firstWindow.todo[tidx]
 
-		log.Debugf("assign worker sector %d", todo.sector.Number)
+		log.Debugf("assign worker sector %d", todo.sector.ID.Number)
 		err := sw.startProcessingTask(sw.taskDone, todo)
 
 		if err != nil {
@@ -389,7 +389,7 @@ assignLoop:
 func (sw *schedWorker) startProcessingTask(taskDone chan struct{}, req *workerRequest) error {
 	w, sh := sw.worker, sw.sched
 
-	needRes := ResourceTable[req.taskType][sh.spt]
+	needRes := ResourceTable[req.taskType][req.sector.ProofType]
 
 	w.lk.Lock()
 	w.preparing.add(w.info.Resources, needRes)
extern/sector-storage/stats.go (vendored, 2 changes)

@@ -46,7 +46,7 @@ func (m *Manager) WorkerJobs() map[uuid.UUID][]storiface.WorkerJob {
 		for _, request := range window.todo {
 			out[uuid.UUID(id)] = append(out[uuid.UUID(id)], storiface.WorkerJob{
 				ID:      storiface.UndefCall,
-				Sector:  request.sector,
+				Sector:  request.sector.ID,
 				Task:    request.taskType,
 				RunWait: wi + 1,
 				Start:   request.start,
extern/sector-storage/stores/http_handler.go (vendored, 9 changes)

@@ -12,6 +12,8 @@ import (
 
 	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 	"github.com/filecoin-project/lotus/extern/sector-storage/tarutil"
+
+	"github.com/filecoin-project/specs-storage/storage"
 )
 
 var log = logging.Logger("stores")
@@ -73,7 +75,12 @@ func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Requ
 	// The caller has a lock on this sector already, no need to get one here
 
 	// passing 0 spt because we don't allocate anything
-	paths, _, err := handler.Local.AcquireSector(r.Context(), id, 0, ft, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
+	si := storage.SectorRef{
+		ID:        id,
+		ProofType: 0,
+	}
+
+	paths, _, err := handler.Local.AcquireSector(r.Context(), si, ft, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
 	if err != nil {
 		log.Errorf("%+v", err)
 		w.WriteHeader(500)
extern/sector-storage/stores/interface.go (vendored, 6 changes)

@@ -5,12 +5,14 @@ import (
 
 	"github.com/filecoin-project/go-state-types/abi"
 
+	"github.com/filecoin-project/specs-storage/storage"
+
 	"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
 	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 )
 
 type Store interface {
-	AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.SectorSize, existing storiface.SectorFileType, allocate storiface.SectorFileType, sealing storiface.PathType, op storiface.AcquireMode) (paths storiface.SectorPaths, stores storiface.SectorPaths, err error)
+	AcquireSector(ctx context.Context, s storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, sealing storiface.PathType, op storiface.AcquireMode) (paths storiface.SectorPaths, stores storiface.SectorPaths, err error)
 	Remove(ctx context.Context, s abi.SectorID, types storiface.SectorFileType, force bool) error
 
 	// like remove, but doesn't remove the primary sector copy, nor the last
@@ -18,7 +20,7 @@ type Store interface {
 	RemoveCopies(ctx context.Context, s abi.SectorID, types storiface.SectorFileType) error
 
 	// move sectors into storage
-	MoveStorage(ctx context.Context, s abi.SectorID, ssize abi.SectorSize, types storiface.SectorFileType) error
+	MoveStorage(ctx context.Context, s storage.SectorRef, types storiface.SectorFileType) error
 
 	FsStat(ctx context.Context, id ID) (fsutil.FsStat, error)
 }
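
The Store interface drops the explicit sector-size argument because a SectorRef already determines it; the Local and Remote implementations below recover it from the proof type. A sketch of that derivation, mirroring what local.go now does:

// What AcquireSector implementations now do internally
// (s is the storage.SectorRef argument):
ssize, err := s.ProofType.SectorSize()
if err != nil {
	return storiface.SectorPaths{}, storiface.SectorPaths{}, err
}
// ssize then feeds index lookups such as
// st.index.StorageFindSector(ctx, s.ID, fileType, ssize, false)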
extern/sector-storage/stores/local.go (vendored, 33 changes)

@@ -14,6 +14,7 @@ import (
 	"golang.org/x/xerrors"
 
 	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/specs-storage/storage"
 
 	"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
 	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
@@ -325,7 +326,12 @@ func (st *Local) reportStorage(ctx context.Context) {
 	}
 }
 
-func (st *Local) Reserve(ctx context.Context, sid abi.SectorID, ssize abi.SectorSize, ft storiface.SectorFileType, storageIDs storiface.SectorPaths, overheadTab map[storiface.SectorFileType]int) (func(), error) {
+func (st *Local) Reserve(ctx context.Context, sid storage.SectorRef, ft storiface.SectorFileType, storageIDs storiface.SectorPaths, overheadTab map[storiface.SectorFileType]int) (func(), error) {
+	ssize, err := sid.ProofType.SectorSize()
+	if err != nil {
+		return nil, err
+	}
+
 	st.localLk.Lock()
 
 	done := func() {}
@@ -355,7 +361,7 @@ func (st *Local) Reserve(ctx context.Context, sid abi.SectorID, ssize abi.Sector
 		overhead := int64(overheadTab[fileType]) * int64(ssize) / storiface.FSOverheadDen
 
 		if stat.Available < overhead {
-			return nil, xerrors.Errorf("can't reserve %d bytes in '%s' (id:%s), only %d available", overhead, p.local, id, stat.Available)
+			return nil, storiface.Err(storiface.ErrTempAllocateSpace, xerrors.Errorf("can't reserve %d bytes in '%s' (id:%s), only %d available", overhead, p.local, id, stat.Available))
 		}
 
 		p.reserved += overhead
@@ -375,11 +381,16 @@ func (st *Local) Reserve(ctx context.Context, sid abi.SectorID, ssize abi.Sector
 	return done, nil
 }
 
-func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, ssize abi.SectorSize, existing storiface.SectorFileType, allocate storiface.SectorFileType, pathType storiface.PathType, op storiface.AcquireMode) (storiface.SectorPaths, storiface.SectorPaths, error) {
+func (st *Local) AcquireSector(ctx context.Context, sid storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, pathType storiface.PathType, op storiface.AcquireMode) (storiface.SectorPaths, storiface.SectorPaths, error) {
 	if existing|allocate != existing^allocate {
 		return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.New("can't both find and allocate a sector")
 	}
 
+	ssize, err := sid.ProofType.SectorSize()
+	if err != nil {
+		return storiface.SectorPaths{}, storiface.SectorPaths{}, err
+	}
+
 	st.localLk.RLock()
 	defer st.localLk.RUnlock()
 
@@ -391,7 +402,7 @@ func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, ssize abi.
 			continue
 		}
 
-		si, err := st.index.StorageFindSector(ctx, sid, fileType, ssize, false)
+		si, err := st.index.StorageFindSector(ctx, sid.ID, fileType, ssize, false)
 		if err != nil {
 			log.Warnf("finding existing sector %d(t:%d) failed: %+v", sid, fileType, err)
 			continue
@@ -407,7 +418,7 @@ func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, ssize abi.
 			continue
 		}
 
-		spath := p.sectorPath(sid, fileType)
+		spath := p.sectorPath(sid.ID, fileType)
 		storiface.SetPathByType(&out, fileType, spath)
 		storiface.SetPathByType(&storageIDs, fileType, string(info.ID))
 
@@ -449,7 +460,7 @@ func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, ssize abi.
 
 		// TODO: Check free space
 
-		best = p.sectorPath(sid, fileType)
+		best = p.sectorPath(sid.ID, fileType)
 		bestID = si.ID
 		break
 	}
@@ -578,13 +589,13 @@ func (st *Local) removeSector(ctx context.Context, sid abi.SectorID, typ storifa
 	return nil
 }
 
-func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, ssize abi.SectorSize, types storiface.SectorFileType) error {
-	dest, destIds, err := st.AcquireSector(ctx, s, ssize, storiface.FTNone, types, storiface.PathStorage, storiface.AcquireMove)
+func (st *Local) MoveStorage(ctx context.Context, s storage.SectorRef, types storiface.SectorFileType) error {
+	dest, destIds, err := st.AcquireSector(ctx, s, storiface.FTNone, types, storiface.PathStorage, storiface.AcquireMove)
 	if err != nil {
 		return xerrors.Errorf("acquire dest storage: %w", err)
 	}
 
-	src, srcIds, err := st.AcquireSector(ctx, s, ssize, types, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
+	src, srcIds, err := st.AcquireSector(ctx, s, types, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
 	if err != nil {
 		return xerrors.Errorf("acquire src storage: %w", err)
 	}
@@ -616,7 +627,7 @@ func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, ssize abi.Sect
 
 		log.Debugf("moving %v(%d) to storage: %s(se:%t; st:%t) -> %s(se:%t; st:%t)", s, fileType, sst.ID, sst.CanSeal, sst.CanStore, dst.ID, dst.CanSeal, dst.CanStore)
 
-		if err := st.index.StorageDropSector(ctx, ID(storiface.PathByType(srcIds, fileType)), s, fileType); err != nil {
+		if err := st.index.StorageDropSector(ctx, ID(storiface.PathByType(srcIds, fileType)), s.ID, fileType); err != nil {
			return xerrors.Errorf("dropping source sector from index: %w", err)
 		}
 
@@ -625,7 +636,7 @@ func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, ssize abi.Sect
 			return xerrors.Errorf("moving sector %v(%d): %w", s, fileType, err)
 		}
 
-		if err := st.index.StorageDeclareSector(ctx, ID(storiface.PathByType(destIds, fileType)), s, fileType, true); err != nil {
+		if err := st.index.StorageDeclareSector(ctx, ID(storiface.PathByType(destIds, fileType)), s.ID, fileType, true); err != nil {
 			return xerrors.Errorf("declare sector %d(t:%d) -> %s: %w", s, fileType, ID(storiface.PathByType(destIds, fileType)), err)
 		}
 	}
extern/sector-storage/stores/remote.go (vendored, 27 changes)

@@ -19,6 +19,7 @@ import (
 	"github.com/filecoin-project/lotus/extern/sector-storage/tarutil"
 
 	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/specs-storage/storage"
 
 	"github.com/hashicorp/go-multierror"
 	files "github.com/ipfs/go-ipfs-files"
@@ -58,7 +59,7 @@ func NewRemote(local *Local, index SectorIndex, auth http.Header, fetchLimit int
 	}
 }
 
-func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.SectorSize, existing storiface.SectorFileType, allocate storiface.SectorFileType, pathType storiface.PathType, op storiface.AcquireMode) (storiface.SectorPaths, storiface.SectorPaths, error) {
+func (r *Remote) AcquireSector(ctx context.Context, s storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, pathType storiface.PathType, op storiface.AcquireMode) (storiface.SectorPaths, storiface.SectorPaths, error) {
 	if existing|allocate != existing^allocate {
 		return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.New("can't both find and allocate a sector")
 	}
@@ -66,9 +67,9 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.Se
 	for {
 		r.fetchLk.Lock()
 
-		c, locked := r.fetching[s]
+		c, locked := r.fetching[s.ID]
 		if !locked {
-			r.fetching[s] = make(chan struct{})
+			r.fetching[s.ID] = make(chan struct{})
 			r.fetchLk.Unlock()
 			break
 		}
@@ -85,12 +86,12 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.Se
 
 	defer func() {
 		r.fetchLk.Lock()
-		close(r.fetching[s])
-		delete(r.fetching, s)
+		close(r.fetching[s.ID])
+		delete(r.fetching, s.ID)
 		r.fetchLk.Unlock()
 	}()
 
-	paths, stores, err := r.local.AcquireSector(ctx, s, ssize, existing, allocate, pathType, op)
+	paths, stores, err := r.local.AcquireSector(ctx, s, existing, allocate, pathType, op)
 	if err != nil {
 		return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.Errorf("local acquire error: %w", err)
 	}
@@ -106,7 +107,7 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.Se
 		}
 	}
 
-	apaths, ids, err := r.local.AcquireSector(ctx, s, ssize, storiface.FTNone, toFetch, pathType, op)
+	apaths, ids, err := r.local.AcquireSector(ctx, s, storiface.FTNone, toFetch, pathType, op)
 	if err != nil {
 		return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.Errorf("allocate local sector for fetching: %w", err)
 	}
@@ -116,7 +117,7 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.Se
 		odt = storiface.FsOverheadFinalized
 	}
 
-	releaseStorage, err := r.local.Reserve(ctx, s, ssize, toFetch, ids, odt)
+	releaseStorage, err := r.local.Reserve(ctx, s, toFetch, ids, odt)
 	if err != nil {
 		return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.Errorf("reserving storage space: %w", err)
 	}
@@ -134,7 +135,7 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.Se
 		dest := storiface.PathByType(apaths, fileType)
 		storageID := storiface.PathByType(ids, fileType)
 
-		url, err := r.acquireFromRemote(ctx, s, fileType, dest)
+		url, err := r.acquireFromRemote(ctx, s.ID, fileType, dest)
 		if err != nil {
 			return storiface.SectorPaths{}, storiface.SectorPaths{}, err
 		}
@@ -142,7 +143,7 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.Se
 		storiface.SetPathByType(&paths, fileType, dest)
 		storiface.SetPathByType(&stores, fileType, storageID)
 
-		if err := r.index.StorageDeclareSector(ctx, ID(storageID), s, fileType, op == storiface.AcquireMove); err != nil {
+		if err := r.index.StorageDeclareSector(ctx, ID(storageID), s.ID, fileType, op == storiface.AcquireMove); err != nil {
 			log.Warnf("declaring sector %v in %s failed: %+v", s, storageID, err)
 			continue
 		}
@@ -281,14 +282,14 @@ func (r *Remote) fetch(ctx context.Context, url, outname string) error {
 	}
 }
 
-func (r *Remote) MoveStorage(ctx context.Context, s abi.SectorID, ssize abi.SectorSize, types storiface.SectorFileType) error {
+func (r *Remote) MoveStorage(ctx context.Context, s storage.SectorRef, types storiface.SectorFileType) error {
 	// Make sure we have the data local
-	_, _, err := r.AcquireSector(ctx, s, ssize, types, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
+	_, _, err := r.AcquireSector(ctx, s, types, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
 	if err != nil {
 		return xerrors.Errorf("acquire src storage (remote): %w", err)
 	}
 
-	return r.local.MoveStorage(ctx, s, ssize, types)
+	return r.local.MoveStorage(ctx, s, types)
 }
 
 func (r *Remote) Remove(ctx context.Context, sid abi.SectorID, typ storiface.SectorFileType, force bool) error {
85
extern/sector-storage/storiface/worker.go
vendored
85
extern/sector-storage/storiface/worker.go
vendored
@ -2,6 +2,7 @@ package storiface

import (
    "context"
    "errors"
    "fmt"
    "io"
    "time"

@ -77,29 +78,69 @@ var _ fmt.Stringer = &CallID{}

var UndefCall CallID

type WorkerCalls interface {
    AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (CallID, error)
    SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (CallID, error)
    SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (CallID, error)
    SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (CallID, error)
    SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (CallID, error)
    FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) (CallID, error)
    ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) (CallID, error)
    MoveStorage(ctx context.Context, sector abi.SectorID, types SectorFileType) (CallID, error)
    UnsealPiece(context.Context, abi.SectorID, UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (CallID, error)
    ReadPiece(context.Context, io.Writer, abi.SectorID, UnpaddedByteIndex, abi.UnpaddedPieceSize) (CallID, error)
    Fetch(context.Context, abi.SectorID, SectorFileType, PathType, AcquireMode) (CallID, error)
    AddPiece(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (CallID, error)
    SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (CallID, error)
    SealPreCommit2(ctx context.Context, sector storage.SectorRef, pc1o storage.PreCommit1Out) (CallID, error)
    SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (CallID, error)
    SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (CallID, error)
    FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (CallID, error)
    ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (CallID, error)
    MoveStorage(ctx context.Context, sector storage.SectorRef, types SectorFileType) (CallID, error)
    UnsealPiece(context.Context, storage.SectorRef, UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (CallID, error)
    ReadPiece(context.Context, io.Writer, storage.SectorRef, UnpaddedByteIndex, abi.UnpaddedPieceSize) (CallID, error)
    Fetch(context.Context, storage.SectorRef, SectorFileType, PathType, AcquireMode) (CallID, error)
}
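The substitution running through this diff replaces the old (abi.SectorID, abi.SectorSize) pairs with storage.SectorRef, which carries the seal proof type alongside the sector identity so the size no longer has to be threaded through every call. A minimal runnable sketch of the new type (the miner ID, sector number, and proof type below are made-up example values, not taken from this diff):

```go
package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/specs-storage/storage"
)

func main() {
	// storage.SectorRef bundles the sector identity with its seal proof
	// type, replacing the separate (abi.SectorID, abi.SectorSize) pairs.
	ref := storage.SectorRef{
		ID:        abi.SectorID{Miner: 1000, Number: 1},
		ProofType: abi.RegisteredSealProof_StackedDrg32GiBV1_1,
	}

	// The sector size is now derived from the proof type where needed,
	// instead of being passed as a separate argument.
	ssize, err := ref.ProofType.SectorSize()
	if err != nil {
		panic(err)
	}
	fmt.Println(ssize) // 34359738368 (32 GiB)
}
```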
type ErrorCode int

const (
    ErrUnknown ErrorCode = iota
)

const (
    // Temp Errors
    ErrTempUnknown ErrorCode = iota + 100
    ErrTempWorkerRestart
    ErrTempAllocateSpace
)

type CallError struct {
    Code    ErrorCode
    Message string
    sub     error
}

func (c *CallError) Error() string {
    return fmt.Sprintf("storage call error %d: %s", c.Code, c.Message)
}

func (c *CallError) Unwrap() error {
    if c.sub != nil {
        return c.sub
    }

    return errors.New(c.Message)
}

func Err(code ErrorCode, sub error) *CallError {
    return &CallError{
        Code:    code,
        Message: sub.Error(),

        sub: sub,
    }
}

type WorkerReturn interface {
    ReturnAddPiece(ctx context.Context, callID CallID, pi abi.PieceInfo, err string) error
    ReturnSealPreCommit1(ctx context.Context, callID CallID, p1o storage.PreCommit1Out, err string) error
    ReturnSealPreCommit2(ctx context.Context, callID CallID, sealed storage.SectorCids, err string) error
    ReturnSealCommit1(ctx context.Context, callID CallID, out storage.Commit1Out, err string) error
    ReturnSealCommit2(ctx context.Context, callID CallID, proof storage.Proof, err string) error
    ReturnFinalizeSector(ctx context.Context, callID CallID, err string) error
    ReturnReleaseUnsealed(ctx context.Context, callID CallID, err string) error
    ReturnMoveStorage(ctx context.Context, callID CallID, err string) error
    ReturnUnsealPiece(ctx context.Context, callID CallID, err string) error
    ReturnReadPiece(ctx context.Context, callID CallID, ok bool, err string) error
    ReturnFetch(ctx context.Context, callID CallID, err string) error
    ReturnAddPiece(ctx context.Context, callID CallID, pi abi.PieceInfo, err *CallError) error
    ReturnSealPreCommit1(ctx context.Context, callID CallID, p1o storage.PreCommit1Out, err *CallError) error
    ReturnSealPreCommit2(ctx context.Context, callID CallID, sealed storage.SectorCids, err *CallError) error
    ReturnSealCommit1(ctx context.Context, callID CallID, out storage.Commit1Out, err *CallError) error
    ReturnSealCommit2(ctx context.Context, callID CallID, proof storage.Proof, err *CallError) error
    ReturnFinalizeSector(ctx context.Context, callID CallID, err *CallError) error
    ReturnReleaseUnsealed(ctx context.Context, callID CallID, err *CallError) error
    ReturnMoveStorage(ctx context.Context, callID CallID, err *CallError) error
    ReturnUnsealPiece(ctx context.Context, callID CallID, err *CallError) error
    ReturnReadPiece(ctx context.Context, callID CallID, ok bool, err *CallError) error
    ReturnFetch(ctx context.Context, callID CallID, err *CallError) error
}
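The new WorkerReturn signatures replace stringly-typed errors with *CallError, which keeps a machine-readable code while Unwrap preserves the original cause for %w-style matching. A small runnable sketch of producing and matching one (the retry framing is illustrative, not something this diff implements):

```go
package main

import (
	"errors"
	"fmt"

	"golang.org/x/xerrors"

	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)

func main() {
	// Wrap a low-level failure with a typed code; previously only the
	// error string crossed the worker return boundary.
	err := storiface.Err(storiface.ErrTempAllocateSpace, xerrors.New("no space left on device"))

	// errors.As recovers the *CallError (and its code) anywhere up the
	// chain; the Temp codes (>= 100) mark failures a caller might retry.
	var callErr *storiface.CallError
	if errors.As(err, &callErr) && callErr.Code == storiface.ErrTempAllocateSpace {
		fmt.Println("temporary failure:", callErr)
	}
}
```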

22
extern/sector-storage/teststorage_test.go
vendored
@ -31,50 +31,50 @@ func (t *testExec) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID,
    panic("implement me")
}

func (t *testExec) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) {
func (t *testExec) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) {
    panic("implement me")
}

func (t *testExec) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storage.SectorCids, error) {
func (t *testExec) SealPreCommit2(ctx context.Context, sector storage.SectorRef, pc1o storage.PreCommit1Out) (storage.SectorCids, error) {
    panic("implement me")
}

func (t *testExec) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) {
func (t *testExec) SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) {
    panic("implement me")
}

func (t *testExec) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storage.Proof, error) {
func (t *testExec) SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (storage.Proof, error) {
    panic("implement me")
}

func (t *testExec) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error {
func (t *testExec) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) error {
    panic("implement me")
}

func (t *testExec) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error {
func (t *testExec) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) error {
    panic("implement me")
}

func (t *testExec) Remove(ctx context.Context, sector abi.SectorID) error {
func (t *testExec) Remove(ctx context.Context, sector storage.SectorRef) error {
    panic("implement me")
}

func (t *testExec) NewSector(ctx context.Context, sector abi.SectorID) error {
func (t *testExec) NewSector(ctx context.Context, sector storage.SectorRef) error {
    panic("implement me")
}

func (t *testExec) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) {
func (t *testExec) AddPiece(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) {
    resp := make(chan apres)
    t.apch <- resp
    ar := <-resp
    return ar.pi, ar.err
}

func (t *testExec) UnsealPiece(ctx context.Context, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error {
func (t *testExec) UnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error {
    panic("implement me")
}

func (t *testExec) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) {
func (t *testExec) ReadPiece(ctx context.Context, writer io.Writer, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) {
    panic("implement me")
}

23
extern/sector-storage/testworker_test.go
vendored
@ -31,11 +31,6 @@ type testWorker struct {
}

func newTestWorker(wcfg WorkerConfig, lstor *stores.Local, ret storiface.WorkerReturn) *testWorker {
    ssize, err := wcfg.SealProof.SectorSize()
    if err != nil {
        panic(err)
    }

    acceptTasks := map[sealtasks.TaskType]struct{}{}
    for _, taskType := range wcfg.TaskTypes {
        acceptTasks[taskType] = struct{}{}

@ -46,15 +41,15 @@ func newTestWorker(wcfg WorkerConfig, lstor *stores.Local, ret storiface.WorkerR
        lstor: lstor,
        ret:   ret,

        mockSeal: mock.NewMockSectorMgr(ssize, nil),
        mockSeal: mock.NewMockSectorMgr(nil),

        session: uuid.New(),
    }
}

func (t *testWorker) asyncCall(sector abi.SectorID, work func(ci storiface.CallID)) (storiface.CallID, error) {
func (t *testWorker) asyncCall(sector storage.SectorRef, work func(ci storiface.CallID)) (storiface.CallID, error) {
    ci := storiface.CallID{
        Sector: sector,
        Sector: sector.ID,
        ID:     uuid.New(),
    }

@ -63,16 +58,16 @@ func (t *testWorker) asyncCall(sector abi.SectorID, work func(ci storiface.CallI
    return ci, nil
}

func (t *testWorker) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) {
func (t *testWorker) AddPiece(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) {
    return t.asyncCall(sector, func(ci storiface.CallID) {
        p, err := t.mockSeal.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData)
        if err := t.ret.ReturnAddPiece(ctx, ci, p, errstr(err)); err != nil {
        if err := t.ret.ReturnAddPiece(ctx, ci, p, toCallError(err)); err != nil {
            log.Error(err)
        }
    })
}

func (t *testWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) {
func (t *testWorker) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) {
    return t.asyncCall(sector, func(ci storiface.CallID) {
        t.pc1s++

@ -84,15 +79,15 @@ func (t *testWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ti
        defer t.pc1lk.Unlock()

        p1o, err := t.mockSeal.SealPreCommit1(ctx, sector, ticket, pieces)
        if err := t.ret.ReturnSealPreCommit1(ctx, ci, p1o, errstr(err)); err != nil {
        if err := t.ret.ReturnSealPreCommit1(ctx, ci, p1o, toCallError(err)); err != nil {
            log.Error(err)
        }
    })
}

func (t *testWorker) Fetch(ctx context.Context, sector abi.SectorID, fileType storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) {
func (t *testWorker) Fetch(ctx context.Context, sector storage.SectorRef, fileType storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) {
    return t.asyncCall(sector, func(ci storiface.CallID) {
        if err := t.ret.ReturnFetch(ctx, ci, ""); err != nil {
        if err := t.ret.ReturnFetch(ctx, ci, nil); err != nil {
            log.Error(err)
        }
    })

98
extern/sector-storage/worker_local.go
vendored
@ -20,7 +20,7 @@ import (
    ffi "github.com/filecoin-project/filecoin-ffi"
    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/go-statestore"
    storage2 "github.com/filecoin-project/specs-storage/storage"
    storage "github.com/filecoin-project/specs-storage/storage"

    "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
    "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"

@ -31,7 +31,6 @@ import (
var pathTypes = []storiface.SectorFileType{storiface.FTUnsealed, storiface.FTSealed, storiface.FTCache}

type WorkerConfig struct {
    SealProof abi.RegisteredSealProof
    TaskTypes []sealtasks.TaskType
    NoSwap    bool
}

@ -40,7 +39,6 @@ type WorkerConfig struct {
type ExecutorFunc func() (ffiwrapper.Storage, error)

type LocalWorker struct {
    scfg       *ffiwrapper.Config
    storage    stores.Store
    localStore *stores.Local
    sindex     stores.SectorIndex

@ -64,9 +62,6 @@ func newLocalWorker(executor ExecutorFunc, wcfg WorkerConfig, store stores.Store
    }

    w := &LocalWorker{
        scfg: &ffiwrapper.Config{
            SealProofType: wcfg.SealProof,
        },
        storage:    store,
        localStore: local,
        sindex:     sindex,

@ -95,7 +90,7 @@ func newLocalWorker(executor ExecutorFunc, wcfg WorkerConfig, store stores.Store

    go func() {
        for _, call := range unfinished {
            err := xerrors.Errorf("worker restarted")
            err := storiface.Err(storiface.ErrTempWorkerRestart, xerrors.New("worker restarted"))

            // TODO: Handle restarting PC1 once support is merged

@ -119,18 +114,13 @@ type localWorkerPathProvider struct {
    op storiface.AcquireMode
}

func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector abi.SectorID, existing storiface.SectorFileType, allocate storiface.SectorFileType, sealing storiface.PathType) (storiface.SectorPaths, func(), error) {
    ssize, err := l.w.scfg.SealProofType.SectorSize()
func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, sealing storiface.PathType) (storiface.SectorPaths, func(), error) {
    paths, storageIDs, err := l.w.storage.AcquireSector(ctx, sector, existing, allocate, sealing, l.op)
    if err != nil {
        return storiface.SectorPaths{}, nil, err
    }

    paths, storageIDs, err := l.w.storage.AcquireSector(ctx, sector, ssize, existing, allocate, sealing, l.op)
    if err != nil {
        return storiface.SectorPaths{}, nil, err
    }

    releaseStorage, err := l.w.localStore.Reserve(ctx, sector, ssize, allocate, storageIDs, storiface.FSOverheadSeal)
    releaseStorage, err := l.w.localStore.Reserve(ctx, sector, allocate, storageIDs, storiface.FSOverheadSeal)
    if err != nil {
        return storiface.SectorPaths{}, nil, xerrors.Errorf("reserving storage space: %w", err)
    }

@ -147,7 +137,7 @@ func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector abi.

        sid := storiface.PathByType(storageIDs, fileType)

        if err := l.w.sindex.StorageDeclareSector(ctx, stores.ID(sid), sector, fileType, l.op == storiface.AcquireMove); err != nil {
        if err := l.w.sindex.StorageDeclareSector(ctx, stores.ID(sid), sector.ID, fileType, l.op == storiface.AcquireMove); err != nil {
            log.Errorf("declare sector error: %+v", err)
        }
    }

@ -155,7 +145,7 @@ func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector abi.
}

func (l *LocalWorker) ffiExec() (ffiwrapper.Storage, error) {
    return ffiwrapper.New(&localWorkerPathProvider{w: l}, l.scfg)
    return ffiwrapper.New(&localWorkerPathProvider{w: l})
}

type ReturnType string

@ -176,15 +166,15 @@ const (

// in: func(WorkerReturn, context.Context, CallID, err string)
// in: func(WorkerReturn, context.Context, CallID, ret T, err string)
func rfunc(in interface{}) func(context.Context, storiface.CallID, storiface.WorkerReturn, interface{}, error) error {
func rfunc(in interface{}) func(context.Context, storiface.CallID, storiface.WorkerReturn, interface{}, *storiface.CallError) error {
    rf := reflect.ValueOf(in)
    ft := rf.Type()
    withRet := ft.NumIn() == 5

    return func(ctx context.Context, ci storiface.CallID, wr storiface.WorkerReturn, i interface{}, err error) error {
    return func(ctx context.Context, ci storiface.CallID, wr storiface.WorkerReturn, i interface{}, err *storiface.CallError) error {
        rctx := reflect.ValueOf(ctx)
        rwr := reflect.ValueOf(wr)
        rerr := reflect.ValueOf(errstr(err))
        rerr := reflect.ValueOf(err)
        rci := reflect.ValueOf(ci)

        var ro []reflect.Value

@ -208,7 +198,7 @@ func rfunc(in interface{}) func(context.Context, storiface.CallID, storiface.Wor
    }
}

var returnFunc = map[ReturnType]func(context.Context, storiface.CallID, storiface.WorkerReturn, interface{}, error) error{
var returnFunc = map[ReturnType]func(context.Context, storiface.CallID, storiface.WorkerReturn, interface{}, *storiface.CallError) error{
    AddPiece:       rfunc(storiface.WorkerReturn.ReturnAddPiece),
    SealPreCommit1: rfunc(storiface.WorkerReturn.ReturnSealPreCommit1),
    SealPreCommit2: rfunc(storiface.WorkerReturn.ReturnSealPreCommit2),

@ -222,9 +212,9 @@ var returnFunc = map[ReturnType]func(context.Context, storiface.CallID, storifac
    Fetch: rfunc(storiface.WorkerReturn.ReturnFetch),
}
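For context, rfunc builds (via reflection) a uniform adapter around each typed WorkerReturn method so the returnFunc map can share one signature. Written by hand for a single method, the generated closure is roughly the sketch below; the reflection just saves spelling out eleven of these, and the nil-result handling from the elided hunk is omitted here:

```go
package sectorstorage // sketch: same package as worker_local.go above

import (
	"context"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)

// Hand-written equivalent of rfunc(storiface.WorkerReturn.ReturnAddPiece):
// unpack the untyped result and forward it, together with the typed
// *CallError, to the concrete return method.
func returnAddPiece(ctx context.Context, ci storiface.CallID, wr storiface.WorkerReturn,
	i interface{}, err *storiface.CallError) error {
	return wr.ReturnAddPiece(ctx, ci, i.(abi.PieceInfo), err)
}
```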

func (l *LocalWorker) asyncCall(ctx context.Context, sector abi.SectorID, rt ReturnType, work func(ctx context.Context, ci storiface.CallID) (interface{}, error)) (storiface.CallID, error) {
func (l *LocalWorker) asyncCall(ctx context.Context, sector storage.SectorRef, rt ReturnType, work func(ctx context.Context, ci storiface.CallID) (interface{}, error)) (storiface.CallID, error) {
    ci := storiface.CallID{
        Sector: sector,
        Sector: sector.ID,
        ID:     uuid.New(),
    }

@ -255,7 +245,7 @@ func (l *LocalWorker) asyncCall(ctx context.Context, sector abi.SectorID, rt Ret
            }
        }

        if doReturn(ctx, rt, ci, l.ret, res, err) {
        if doReturn(ctx, rt, ci, l.ret, res, toCallError(err)) {
            if err := l.ct.onReturned(ci); err != nil {
                log.Errorf("tracking call (done): %+v", err)
            }

@ -265,8 +255,17 @@
    return ci, nil
}

func toCallError(err error) *storiface.CallError {
    var serr *storiface.CallError
    if err != nil && !xerrors.As(err, &serr) {
        serr = storiface.Err(storiface.ErrUnknown, err)
    }

    return serr
}

// doReturn tries to send the result to manager, returns true if successful
func doReturn(ctx context.Context, rt ReturnType, ci storiface.CallID, ret storiface.WorkerReturn, res interface{}, rerr error) bool {
func doReturn(ctx context.Context, rt ReturnType, ci storiface.CallID, ret storiface.WorkerReturn, res interface{}, rerr *storiface.CallError) bool {
    for {
        err := returnFunc[rt](ctx, ci, ret, res, rerr)
        if err == nil {

@ -289,15 +288,7 @@
    return true
}

func errstr(err error) string {
    if err != nil {
        return err.Error()
    }

    return ""
}

func (l *LocalWorker) NewSector(ctx context.Context, sector abi.SectorID) error {
func (l *LocalWorker) NewSector(ctx context.Context, sector storage.SectorRef) error {
    sb, err := l.executor()
    if err != nil {
        return err

@ -306,7 +297,7 @@
    return sb.NewSector(ctx, sector)
}

func (l *LocalWorker) AddPiece(ctx context.Context, sector abi.SectorID, epcs []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, r io.Reader) (storiface.CallID, error) {
func (l *LocalWorker) AddPiece(ctx context.Context, sector storage.SectorRef, epcs []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, r io.Reader) (storiface.CallID, error) {
    sb, err := l.executor()
    if err != nil {
        return storiface.UndefCall, err

@ -317,7 +308,7 @@
    })
}

func (l *LocalWorker) Fetch(ctx context.Context, sector abi.SectorID, fileType storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) {
func (l *LocalWorker) Fetch(ctx context.Context, sector storage.SectorRef, fileType storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) {
    return l.asyncCall(ctx, sector, Fetch, func(ctx context.Context, ci storiface.CallID) (interface{}, error) {
        _, done, err := (&localWorkerPathProvider{w: l, op: am}).AcquireSector(ctx, sector, fileType, storiface.FTNone, ptype)
        if err == nil {

@ -328,16 +319,16 @@
    })
}

func (l *LocalWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) {
func (l *LocalWorker) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) {
    return l.asyncCall(ctx, sector, SealPreCommit1, func(ctx context.Context, ci storiface.CallID) (interface{}, error) {

        {
            // cleanup previous failed attempts if they exist
            if err := l.storage.Remove(ctx, sector, storiface.FTSealed, true); err != nil {
            if err := l.storage.Remove(ctx, sector.ID, storiface.FTSealed, true); err != nil {
                return nil, xerrors.Errorf("cleaning up sealed data: %w", err)
            }

            if err := l.storage.Remove(ctx, sector, storiface.FTCache, true); err != nil {
            if err := l.storage.Remove(ctx, sector.ID, storiface.FTCache, true); err != nil {
                return nil, xerrors.Errorf("cleaning up cache data: %w", err)
            }
        }

@ -351,7 +342,7 @@
    })
}

func (l *LocalWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage2.PreCommit1Out) (storiface.CallID, error) {
func (l *LocalWorker) SealPreCommit2(ctx context.Context, sector storage.SectorRef, phase1Out storage.PreCommit1Out) (storiface.CallID, error) {
    sb, err := l.executor()
    if err != nil {
        return storiface.UndefCall, err

@ -362,7 +353,7 @@
    })
}

func (l *LocalWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage2.SectorCids) (storiface.CallID, error) {
func (l *LocalWorker) SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) {
    sb, err := l.executor()
    if err != nil {
        return storiface.UndefCall, err

@ -373,7 +364,7 @@
    })
}

func (l *LocalWorker) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage2.Commit1Out) (storiface.CallID, error) {
func (l *LocalWorker) SealCommit2(ctx context.Context, sector storage.SectorRef, phase1Out storage.Commit1Out) (storiface.CallID, error) {
    sb, err := l.executor()
    if err != nil {
        return storiface.UndefCall, err

@ -384,7 +375,7 @@
    })
}

func (l *LocalWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage2.Range) (storiface.CallID, error) {
func (l *LocalWorker) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) {
    sb, err := l.executor()
    if err != nil {
        return storiface.UndefCall, err

@ -396,7 +387,7 @@
        }

        if len(keepUnsealed) == 0 {
            if err := l.storage.Remove(ctx, sector, storiface.FTUnsealed, true); err != nil {
            if err := l.storage.Remove(ctx, sector.ID, storiface.FTUnsealed, true); err != nil {
                return nil, xerrors.Errorf("removing unsealed data: %w", err)
            }
        }

@ -405,7 +396,7 @@
    })
}

func (l *LocalWorker) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage2.Range) (storiface.CallID, error) {
func (l *LocalWorker) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (storiface.CallID, error) {
    return storiface.UndefCall, xerrors.Errorf("implement me")
}

@ -425,18 +416,13 @@ func (l *LocalWorker) Remove(ctx context.Context, sector abi.SectorID) error {
    return err
}

func (l *LocalWorker) MoveStorage(ctx context.Context, sector abi.SectorID, types storiface.SectorFileType) (storiface.CallID, error) {
func (l *LocalWorker) MoveStorage(ctx context.Context, sector storage.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) {
    return l.asyncCall(ctx, sector, MoveStorage, func(ctx context.Context, ci storiface.CallID) (interface{}, error) {
        ssize, err := l.scfg.SealProofType.SectorSize()
        if err != nil {
            return nil, err
        }

        return nil, l.storage.MoveStorage(ctx, sector, ssize, types)
        return nil, l.storage.MoveStorage(ctx, sector, types)
    })
}

func (l *LocalWorker) UnsealPiece(ctx context.Context, sector abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) (storiface.CallID, error) {
func (l *LocalWorker) UnsealPiece(ctx context.Context, sector storage.SectorRef, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) (storiface.CallID, error) {
    sb, err := l.executor()
    if err != nil {
        return storiface.UndefCall, err

@ -447,11 +433,11 @@
            return nil, xerrors.Errorf("unsealing sector: %w", err)
        }

        if err = l.storage.RemoveCopies(ctx, sector, storiface.FTSealed); err != nil {
        if err = l.storage.RemoveCopies(ctx, sector.ID, storiface.FTSealed); err != nil {
            return nil, xerrors.Errorf("removing source data: %w", err)
        }

        if err = l.storage.RemoveCopies(ctx, sector, storiface.FTCache); err != nil {
        if err = l.storage.RemoveCopies(ctx, sector.ID, storiface.FTCache); err != nil {
            return nil, xerrors.Errorf("removing source data: %w", err)
        }

@ -459,7 +445,7 @@
    })
}

func (l *LocalWorker) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) {
func (l *LocalWorker) ReadPiece(ctx context.Context, writer io.Writer, sector storage.SectorRef, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) {
    sb, err := l.executor()
    if err != nil {
        return storiface.UndefCall, err

22
extern/sector-storage/worker_tracked.go
vendored
@ -42,7 +42,7 @@ func (wt *workTracker) onDone(callID storiface.CallID) {
    delete(wt.running, callID)
}

func (wt *workTracker) track(wid WorkerID, sid abi.SectorID, task sealtasks.TaskType) func(storiface.CallID, error) (storiface.CallID, error) {
func (wt *workTracker) track(wid WorkerID, sid storage.SectorRef, task sealtasks.TaskType) func(storiface.CallID, error) (storiface.CallID, error) {
    return func(callID storiface.CallID, err error) (storiface.CallID, error) {
        if err != nil {
            return callID, err

@ -60,7 +60,7 @@ func (wt *workTracker) track(wid WorkerID, sid abi.SectorID, task sealtasks.Task
        wt.running[callID] = trackedWork{
            job: storiface.WorkerJob{
                ID:     callID,
                Sector: sid,
                Sector: sid.ID,
                Task:   task,
                Start:  time.Now(),
            },

@ -99,39 +99,39 @@ type trackedWorker struct {
    tracker *workTracker
}

func (t *trackedWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) {
func (t *trackedWorker) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) {
    return t.tracker.track(t.wid, sector, sealtasks.TTPreCommit1)(t.Worker.SealPreCommit1(ctx, sector, ticket, pieces))
}

func (t *trackedWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storiface.CallID, error) {
func (t *trackedWorker) SealPreCommit2(ctx context.Context, sector storage.SectorRef, pc1o storage.PreCommit1Out) (storiface.CallID, error) {
    return t.tracker.track(t.wid, sector, sealtasks.TTPreCommit2)(t.Worker.SealPreCommit2(ctx, sector, pc1o))
}

func (t *trackedWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) {
func (t *trackedWorker) SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) {
    return t.tracker.track(t.wid, sector, sealtasks.TTCommit1)(t.Worker.SealCommit1(ctx, sector, ticket, seed, pieces, cids))
}

func (t *trackedWorker) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storiface.CallID, error) {
func (t *trackedWorker) SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (storiface.CallID, error) {
    return t.tracker.track(t.wid, sector, sealtasks.TTCommit2)(t.Worker.SealCommit2(ctx, sector, c1o))
}

func (t *trackedWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) (storiface.CallID, error) {
func (t *trackedWorker) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) {
    return t.tracker.track(t.wid, sector, sealtasks.TTFinalize)(t.Worker.FinalizeSector(ctx, sector, keepUnsealed))
}

func (t *trackedWorker) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) {
func (t *trackedWorker) AddPiece(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) {
    return t.tracker.track(t.wid, sector, sealtasks.TTAddPiece)(t.Worker.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData))
}

func (t *trackedWorker) Fetch(ctx context.Context, s abi.SectorID, ft storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) {
func (t *trackedWorker) Fetch(ctx context.Context, s storage.SectorRef, ft storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) {
    return t.tracker.track(t.wid, s, sealtasks.TTFetch)(t.Worker.Fetch(ctx, s, ft, ptype, am))
}

func (t *trackedWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) (storiface.CallID, error) {
func (t *trackedWorker) UnsealPiece(ctx context.Context, id storage.SectorRef, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) (storiface.CallID, error) {
    return t.tracker.track(t.wid, id, sealtasks.TTUnseal)(t.Worker.UnsealPiece(ctx, id, index, size, randomness, cid))
}

func (t *trackedWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) {
func (t *trackedWorker) ReadPiece(ctx context.Context, writer io.Writer, id storage.SectorRef, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) {
    return t.tracker.track(t.wid, id, sealtasks.TTReadUnsealed)(t.Worker.ReadPiece(ctx, writer, id, index, size))
}

14
extern/storage-sealing/checks.go
vendored
@ -14,7 +14,6 @@ import (
    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/go-state-types/crypto"
    "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
    "github.com/filecoin-project/lotus/extern/sector-storage/zerocomm"
)

@ -166,23 +165,14 @@ func (m *Sealing) checkCommit(ctx context.Context, si SectorInfo, proof []byte,
        return &ErrBadSeed{xerrors.Errorf("seed has changed")}
    }

    ss, err := m.api.StateMinerSectorSize(ctx, m.maddr, tok)
    if err != nil {
        return &ErrApi{err}
    }
    spt, err := ffiwrapper.SealProofTypeFromSectorSize(ss)
    if err != nil {
        return err
    }

    if *si.CommR != pci.Info.SealedCID {
        log.Warn("on-chain sealed CID doesn't match!")
    }

    ok, err := m.verif.VerifySeal(proof2.SealVerifyInfo{
        SectorID:              m.minerSector(si.SectorNumber),
        SectorID:              m.minerSectorID(si.SectorNumber),
        SealedCID:             pci.Info.SealedCID,
        SealProof:             spt,
        SealProof:             pci.Info.SealProof,
        Proof:                 proof,
        Randomness:            si.TicketValue,
        InteractiveRandomness: si.SeedValue,

15
extern/storage-sealing/fsm.go
vendored
@ -269,7 +269,7 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta

    */

    m.stats.updateSector(m.minerSector(state.SectorNumber), state.State)
    m.stats.updateSector(m.minerSectorID(state.SectorNumber), state.State)

    switch state.State {
    // Happy path

@ -396,6 +396,15 @@ func (m *Sealing) restartSectors(ctx context.Context) error {
        return xerrors.Errorf("getting the sealing delay: %w", err)
    }

    spt, err := m.currentSealProof(ctx)
    if err != nil {
        return xerrors.Errorf("getting current seal proof: %w", err)
    }
    ssize, err := spt.SectorSize()
    if err != nil {
        return err
    }

    m.unsealedInfoMap.lk.Lock()
    defer m.unsealedInfoMap.lk.Unlock()
    for _, sector := range trackedSectors {

@ -410,7 +419,9 @@ func (m *Sealing) restartSectors(ctx context.Context) error {
            // something's funky here, but probably safe to move on
            log.Warnf("sector %v was already in the unsealedInfoMap when restarting", sector.SectorNumber)
        } else {
            ui := UnsealedSectorInfo{}
            ui := UnsealedSectorInfo{
                ssize: ssize,
            }
            for _, p := range sector.Pieces {
                if p.DealInfo != nil {
                    ui.numDeals++

21
extern/storage-sealing/garbage.go
vendored
@ -6,9 +6,10 @@ import (
    "golang.org/x/xerrors"

    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/specs-storage/storage"
)

func (m *Sealing) pledgeSector(ctx context.Context, sectorID abi.SectorID, existingPieceSizes []abi.UnpaddedPieceSize, sizes ...abi.UnpaddedPieceSize) ([]abi.PieceInfo, error) {
func (m *Sealing) pledgeSector(ctx context.Context, sectorID storage.SectorRef, existingPieceSizes []abi.UnpaddedPieceSize, sizes ...abi.UnpaddedPieceSize) ([]abi.PieceInfo, error) {
    if len(sizes) == 0 {
        return nil, nil
    }

@ -47,21 +48,31 @@ func (m *Sealing) PledgeSector() error {
        // this, as we run everything here async, and it's cancelled when the
        // command exits

        size := abi.PaddedPieceSize(m.sealer.SectorSize()).Unpadded()
        spt, err := m.currentSealProof(ctx)
        if err != nil {
            log.Errorf("%+v", err)
            return
        }

        size, err := spt.SectorSize()
        if err != nil {
            log.Errorf("%+v", err)
            return
        }

        sid, err := m.sc.Next()
        if err != nil {
            log.Errorf("%+v", err)
            return
        }
        sectorID := m.minerSector(sid)
        sectorID := m.minerSector(spt, sid)
        err = m.sealer.NewSector(ctx, sectorID)
        if err != nil {
            log.Errorf("%+v", err)
            return
        }

        pieces, err := m.pledgeSector(ctx, sectorID, []abi.UnpaddedPieceSize{}, size)
        pieces, err := m.pledgeSector(ctx, sectorID, []abi.UnpaddedPieceSize{}, abi.PaddedPieceSize(size).Unpadded())
        if err != nil {
            log.Errorf("%+v", err)
            return

@ -75,7 +86,7 @@ func (m *Sealing) PledgeSector() error {
            }
        }

        if err := m.newSectorCC(sid, ps); err != nil {
        if err := m.newSectorCC(ctx, sid, ps); err != nil {
            log.Errorf("%+v", err)
            return
        }
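PledgeSector now asks the chain for the seal proof type and derives sizes from it, rather than reading a static sector size from the sealer. A runnable sketch of the size arithmetic used above (the proof type here is a stand-in for whatever currentSealProof returns):

```go
package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
)

func main() {
	// Stand-in for the result of currentSealProof (read from miner info).
	spt := abi.RegisteredSealProof_StackedDrg32GiBV1_1
	size, err := spt.SectorSize()
	if err != nil {
		panic(err)
	}

	// The pledge piece fills the sector's usable (unpadded) space:
	// the padded size minus 1/128 for FR32 padding.
	fmt.Println(abi.PaddedPieceSize(size).Unpadded()) // 34091302912 bytes
}
```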

101
extern/storage-sealing/sealing.go
vendored
@ -8,13 +8,15 @@ import (
    "sync"
    "time"

    "github.com/filecoin-project/go-state-types/network"
    "github.com/ipfs/go-cid"
    "github.com/ipfs/go-datastore"
    "github.com/ipfs/go-datastore/namespace"
    logging "github.com/ipfs/go-log/v2"
    "golang.org/x/xerrors"

    "github.com/filecoin-project/go-state-types/network"
    "github.com/filecoin-project/specs-storage/storage"

    "github.com/filecoin-project/go-address"
    padreader "github.com/filecoin-project/go-padreader"
    "github.com/filecoin-project/go-state-types/abi"

@ -53,6 +55,7 @@ type SealingAPI interface {
    StateMinerWorkerAddress(ctx context.Context, maddr address.Address, tok TipSetToken) (address.Address, error)
    StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, TipSetToken) (big.Int, error)
    StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, TipSetToken) (big.Int, error)
    StateMinerInfo(context.Context, address.Address, TipSetToken) (miner.MinerInfo, error)
    StateMinerSectorAllocated(context.Context, address.Address, abi.SectorNumber, TipSetToken) (bool, error)
    StateMarketStorageDeal(context.Context, abi.DealID, TipSetToken) (market.DealProposal, error)
    StateNetworkVersion(ctx context.Context, tok TipSetToken) (network.Version, error)

@ -105,6 +108,7 @@ type UnsealedSectorInfo struct {
    // stored should always equal sum of pieceSizes.Padded()
    stored     abi.PaddedPieceSize
    pieceSizes []abi.UnpaddedPieceSize
    ssize      abi.SectorSize
}

func New(api SealingAPI, fc FeeConfig, events Events, maddr address.Address, ds datastore.Batching, sealer sectorstorage.SectorManager, sc SectorIDCounter, verif ffiwrapper.Verifier, pcp PreCommitPolicy, gc GetSealingConfigFunc, notifee SectorStateNotifee) *Sealing {

@ -151,19 +155,30 @@ func (m *Sealing) Run(ctx context.Context) error {
func (m *Sealing) Stop(ctx context.Context) error {
    return m.sectors.Stop(ctx)
}

func (m *Sealing) AddPieceToAnySector(ctx context.Context, size abi.UnpaddedPieceSize, r io.Reader, d DealInfo) (abi.SectorNumber, abi.PaddedPieceSize, error) {
    log.Infof("Adding piece for deal %d (publish msg: %s)", d.DealID, d.PublishCid)
    if (padreader.PaddedSize(uint64(size))) != size {
        return 0, 0, xerrors.Errorf("cannot allocate unpadded piece")
    }

    if size > abi.PaddedPieceSize(m.sealer.SectorSize()).Unpadded() {
    sp, err := m.currentSealProof(ctx)
    if err != nil {
        return 0, 0, xerrors.Errorf("getting current seal proof type: %w", err)
    }

    ssize, err := sp.SectorSize()
    if err != nil {
        return 0, 0, err
    }

    if size > abi.PaddedPieceSize(ssize).Unpadded() {
        return 0, 0, xerrors.Errorf("piece cannot fit into a sector")
    }

    m.unsealedInfoMap.lk.Lock()

    sid, pads, err := m.getSectorAndPadding(size)
    sid, pads, err := m.getSectorAndPadding(ctx, size)
    if err != nil {
        m.unsealedInfoMap.lk.Unlock()
        return 0, 0, xerrors.Errorf("getting available sector: %w", err)

@ -185,7 +200,7 @@ func (m *Sealing) AddPieceToAnySector(ctx context.Context, size abi.UnpaddedPiec
        return 0, 0, xerrors.Errorf("adding piece to sector: %w", err)
    }

    startPacking := m.unsealedInfoMap.infos[sid].numDeals >= getDealPerSectorLimit(m.sealer.SectorSize())
    startPacking := m.unsealedInfoMap.infos[sid].numDeals >= getDealPerSectorLimit(ssize)

    m.unsealedInfoMap.lk.Unlock()

@ -201,7 +216,16 @@ func (m *Sealing) AddPieceToAnySector(ctx context.Context, size abi.UnpaddedPiec
// Caller should hold m.unsealedInfoMap.lk
func (m *Sealing) addPiece(ctx context.Context, sectorID abi.SectorNumber, size abi.UnpaddedPieceSize, r io.Reader, di *DealInfo) error {
    log.Infof("Adding piece to sector %d", sectorID)
    ppi, err := m.sealer.AddPiece(sectorstorage.WithPriority(ctx, DealSectorPriority), m.minerSector(sectorID), m.unsealedInfoMap.infos[sectorID].pieceSizes, size, r)
    sp, err := m.currentSealProof(ctx)
    if err != nil {
        return xerrors.Errorf("getting current seal proof type: %w", err)
    }
    ssize, err := sp.SectorSize()
    if err != nil {
        return err
    }

    ppi, err := m.sealer.AddPiece(sectorstorage.WithPriority(ctx, DealSectorPriority), m.minerSector(sp, sectorID), m.unsealedInfoMap.infos[sectorID].pieceSizes, size, r)
    if err != nil {
        return xerrors.Errorf("writing piece: %w", err)
    }

@ -224,6 +248,7 @@ func (m *Sealing) addPiece(ctx context.Context, sectorID abi.SectorNumber, size
        numDeals:   num,
        stored:     ui.stored + piece.Piece.Size,
        pieceSizes: append(ui.pieceSizes, piece.Piece.Size.Unpadded()),
        ssize:      ssize,
    }

    return nil

@ -257,16 +282,16 @@ func (m *Sealing) StartPacking(sectorID abi.SectorNumber) error {
}

// Caller should hold m.unsealedInfoMap.lk
func (m *Sealing) getSectorAndPadding(size abi.UnpaddedPieceSize) (abi.SectorNumber, []abi.PaddedPieceSize, error) {
    ss := abi.PaddedPieceSize(m.sealer.SectorSize())
func (m *Sealing) getSectorAndPadding(ctx context.Context, size abi.UnpaddedPieceSize) (abi.SectorNumber, []abi.PaddedPieceSize, error) {
    for k, v := range m.unsealedInfoMap.infos {
        pads, padLength := ffiwrapper.GetRequiredPadding(v.stored, size.Padded())
        if v.stored+size.Padded()+padLength <= ss {

        if v.stored+size.Padded()+padLength <= abi.PaddedPieceSize(v.ssize) {
            return k, pads, nil
        }
    }

    ns, err := m.newDealSector()
    ns, ssize, err := m.newDealSector(ctx)
    if err != nil {
        return 0, nil, err
    }

@ -275,23 +300,24 @@ func (m *Sealing) getSectorAndPadding(size abi.UnpaddedPieceSize) (abi.SectorNum
        numDeals:   0,
        stored:     0,
        pieceSizes: nil,
        ssize:      ssize,
    }

    return ns, nil, nil
}

// newDealSector creates a new sector for deal storage
func (m *Sealing) newDealSector() (abi.SectorNumber, error) {
func (m *Sealing) newDealSector(ctx context.Context) (abi.SectorNumber, abi.SectorSize, error) {
    // First make sure we don't have too many 'open' sectors

    cfg, err := m.getConfig()
    if err != nil {
        return 0, xerrors.Errorf("getting config: %w", err)
        return 0, 0, xerrors.Errorf("getting config: %w", err)
    }

    if cfg.MaxSealingSectorsForDeals > 0 {
        if m.stats.curSealing() > cfg.MaxSealingSectorsForDeals {
            return 0, ErrTooManySectorsSealing
            return 0, 0, ErrTooManySectorsSealing
        }
    }

@ -338,36 +364,36 @@ func (m *Sealing) newDealSector(
        }
    }

    spt, err := m.currentSealProof(ctx)
    if err != nil {
        return 0, 0, xerrors.Errorf("getting current seal proof type: %w", err)
    }

    // Now actually create a new sector

    sid, err := m.sc.Next()
    if err != nil {
        return 0, xerrors.Errorf("getting sector number: %w", err)
        return 0, 0, xerrors.Errorf("getting sector number: %w", err)
    }

    err = m.sealer.NewSector(context.TODO(), m.minerSector(sid))
    err = m.sealer.NewSector(context.TODO(), m.minerSector(spt, sid))
    if err != nil {
        return 0, xerrors.Errorf("initializing sector: %w", err)
    }

    rt, err := ffiwrapper.SealProofTypeFromSectorSize(m.sealer.SectorSize())
    if err != nil {
        return 0, xerrors.Errorf("bad sector size: %w", err)
        return 0, 0, xerrors.Errorf("initializing sector: %w", err)
    }

    log.Infof("Creating sector %d", sid)
    err = m.sectors.Send(uint64(sid), SectorStart{
        ID:         sid,
        SectorType: rt,
        SectorType: spt,
    })

    if err != nil {
        return 0, xerrors.Errorf("starting the sector fsm: %w", err)
        return 0, 0, xerrors.Errorf("starting the sector fsm: %w", err)
    }

    cf, err := m.getConfig()
    if err != nil {
        return 0, xerrors.Errorf("getting the sealing delay: %w", err)
        return 0, 0, xerrors.Errorf("getting the sealing delay: %w", err)
    }

    if cf.WaitDealsDelay > 0 {

@ -380,25 +406,42 @@ func (m *Sealing) newDealSector(
        }()
    }

    return sid, nil
    ssize, err := spt.SectorSize()
    return sid, ssize, err
}

// newSectorCC accepts a slice of pieces with no deal (junk data)
func (m *Sealing) newSectorCC(sid abi.SectorNumber, pieces []Piece) error {
    rt, err := ffiwrapper.SealProofTypeFromSectorSize(m.sealer.SectorSize())
func (m *Sealing) newSectorCC(ctx context.Context, sid abi.SectorNumber, pieces []Piece) error {
    spt, err := m.currentSealProof(ctx)
    if err != nil {
        return xerrors.Errorf("bad sector size: %w", err)
        return xerrors.Errorf("getting current seal proof type: %w", err)
    }

    log.Infof("Creating CC sector %d", sid)
    return m.sectors.Send(uint64(sid), SectorStartCC{
        ID:         sid,
        Pieces:     pieces,
        SectorType: rt,
        SectorType: spt,
    })
}

func (m *Sealing) minerSector(num abi.SectorNumber) abi.SectorID {
func (m *Sealing) currentSealProof(ctx context.Context) (abi.RegisteredSealProof, error) {
    mi, err := m.api.StateMinerInfo(ctx, m.maddr, nil)
    if err != nil {
        return 0, err
    }

    return mi.SealProofType, nil
}

func (m *Sealing) minerSector(spt abi.RegisteredSealProof, num abi.SectorNumber) storage.SectorRef {
    return storage.SectorRef{
        ID:        m.minerSectorID(num),
        ProofType: spt,
    }
}

func (m *Sealing) minerSectorID(num abi.SectorNumber) abi.SectorID {
    mid, err := address.IDFromAddress(m.maddr)
    if err != nil {
        panic(err)
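currentSealProof, minerSector, and minerSectorID together turn a miner address plus sector number into the storage.SectorRef used throughout this file. A runnable sketch with hard-coded inputs (t01000 and the 32GiB proof type are examples; in the real code the proof type comes from StateMinerInfo):

```go
package main

import (
	"fmt"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/specs-storage/storage"
)

func main() {
	// Resolve the miner's actor ID from its address, as minerSectorID does.
	maddr, err := address.NewFromString("t01000")
	if err != nil {
		panic(err)
	}
	mid, err := address.IDFromAddress(maddr)
	if err != nil {
		panic(err)
	}

	// Pair it with a sector number and the (here hard-coded) proof type,
	// as minerSector does with the result of currentSealProof.
	ref := storage.SectorRef{
		ID:        abi.SectorID{Miner: abi.ActorID(mid), Number: 1},
		ProofType: abi.RegisteredSealProof_StackedDrg32GiBV1_1,
	}
	fmt.Printf("%+v\n", ref)
}
```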

2
extern/storage-sealing/states_proving.go
vendored
@ -32,7 +32,7 @@ func (m *Sealing) handleFaultReported(ctx statemachine.Context, sector SectorInf
}

func (m *Sealing) handleRemoving(ctx statemachine.Context, sector SectorInfo) error {
    if err := m.sealer.Remove(ctx.Context(), m.minerSector(sector.SectorNumber)); err != nil {
    if err := m.sealer.Remove(ctx.Context(), m.minerSector(sector.SectorType, sector.SectorNumber)); err != nil {
        return ctx.Send(SectorRemoveFailed{err})
    }

21
extern/storage-sealing/states_sealing.go
vendored
@ -31,7 +31,12 @@ func (m *Sealing) handlePacking(ctx statemachine.Context, sector SectorInfo) err
        allocated += piece.Piece.Size.Unpadded()
    }

    ubytes := abi.PaddedPieceSize(m.sealer.SectorSize()).Unpadded()
    ssize, err := sector.SectorType.SectorSize()
    if err != nil {
        return err
    }

    ubytes := abi.PaddedPieceSize(ssize).Unpadded()

    if allocated > ubytes {
        return xerrors.Errorf("too much data in sector: %d > %d", allocated, ubytes)

@ -46,7 +51,7 @@
        log.Warnf("Creating %d filler pieces for sector %d", len(fillerSizes), sector.SectorNumber)
    }

    fillerPieces, err := m.pledgeSector(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorNumber), sector.existingPieceSizes(), fillerSizes...)
    fillerPieces, err := m.pledgeSector(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.existingPieceSizes(), fillerSizes...)
    if err != nil {
        return xerrors.Errorf("filling up the sector (%v): %w", fillerSizes, err)
    }

@ -148,7 +153,7 @@ func (m *Sealing) handlePreCommit1(ctx statemachine.Context, sector SectorInfo)
    // process has just restarted and the worker had the result ready)
    }

    pc1o, err := m.sealer.SealPreCommit1(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorNumber), sector.TicketValue, sector.pieceInfos())
    pc1o, err := m.sealer.SealPreCommit1(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.TicketValue, sector.pieceInfos())
    if err != nil {
        return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("seal pre commit(1) failed: %w", err)})
    }

@ -159,7 +164,7 @@
}

func (m *Sealing) handlePreCommit2(ctx statemachine.Context, sector SectorInfo) error {
    cids, err := m.sealer.SealPreCommit2(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorNumber), sector.PreCommit1Out)
    cids, err := m.sealer.SealPreCommit2(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.PreCommit1Out)
    if err != nil {
        return ctx.Send(SectorSealPreCommit2Failed{xerrors.Errorf("seal pre commit(2) failed: %w", err)})
    }

@ -386,12 +391,12 @@ func (m *Sealing) handleCommitting(ctx statemachine.Context, sector SectorInfo)
        Unsealed: *sector.CommD,
        Sealed:   *sector.CommR,
    }
    c2in, err := m.sealer.SealCommit1(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorNumber), sector.TicketValue, sector.SeedValue, sector.pieceInfos(), cids)
    c2in, err := m.sealer.SealCommit1(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.TicketValue, sector.SeedValue, sector.pieceInfos(), cids)
    if err != nil {
        return ctx.Send(SectorComputeProofFailed{xerrors.Errorf("computing seal proof failed(1): %w", err)})
    }

    proof, err := m.sealer.SealCommit2(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorNumber), c2in)
    proof, err := m.sealer.SealCommit2(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), c2in)
    if err != nil {
        return ctx.Send(SectorComputeProofFailed{xerrors.Errorf("computing seal proof failed(2): %w", err)})
    }

@ -492,7 +497,7 @@ func (m *Sealing) handleCommitWait(ctx statemachine.Context, sector SectorInfo)
func (m *Sealing) handleFinalizeSector(ctx statemachine.Context, sector SectorInfo) error {
    // TODO: Maybe wait for some finality

    if err := m.sealer.FinalizeSector(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorNumber), sector.keepUnsealedRanges(false)); err != nil {
    if err := m.sealer.FinalizeSector(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.keepUnsealedRanges(false)); err != nil {
        return ctx.Send(SectorFinalizeFailed{xerrors.Errorf("finalize sector: %w", err)})
    }

@ -503,7 +508,7 @@ func (m *Sealing) handleProvingSector(ctx statemachine.Context, sector SectorInf
    // TODO: track sector health / expiration
    log.Infof("Proving sector %d", sector.SectorNumber)

    if err := m.sealer.ReleaseUnsealed(ctx.Context(), m.minerSector(sector.SectorNumber), sector.keepUnsealedRanges(true)); err != nil {
    if err := m.sealer.ReleaseUnsealed(ctx.Context(), m.minerSector(sector.SectorType, sector.SectorNumber), sector.keepUnsealedRanges(true)); err != nil {
        log.Error(err)
    }

16
go.mod

@ -30,18 +30,18 @@ require (
    github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03
    github.com/filecoin-project/go-data-transfer v1.2.0
    github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f
    github.com/filecoin-project/go-fil-markets v1.0.4
    github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335
    github.com/filecoin-project/go-jsonrpc v0.1.2-0.20201008195726-68c6a2704e49
    github.com/filecoin-project/go-multistore v0.0.3
    github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20
    github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261
    github.com/filecoin-project/go-state-types v0.0.0-20201013222834-41ea465f274f
    github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc
    github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe
    github.com/filecoin-project/go-statestore v0.1.0
    github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b
    github.com/filecoin-project/specs-actors v0.9.12
    github.com/filecoin-project/specs-actors/v2 v2.2.0
    github.com/filecoin-project/specs-storage v0.1.1-0.20200907031224-ed2e5cd13796
    github.com/filecoin-project/specs-actors v0.9.13
    github.com/filecoin-project/specs-actors/v2 v2.3.2
    github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506
    github.com/filecoin-project/test-vectors/schema v0.0.5
    github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1
    github.com/go-kit/kit v0.10.0

@ -136,9 +136,9 @@ require (
    go.uber.org/fx v1.9.0
    go.uber.org/multierr v1.5.0
    go.uber.org/zap v1.16.0
    golang.org/x/net v0.0.0-20200707034311-ab3426394381
    golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208
    golang.org/x/sys v0.0.0-20200926100807-9d91bd62050c
    golang.org/x/net v0.0.0-20201021035429-f5854403a974
    golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9
    golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f
    golang.org/x/time v0.0.0-20191024005414-555d28b269f0
    golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1
    gopkg.in/cheggaaa/pb.v1 v1.0.28

39
go.sum
@ -255,8 +255,8 @@ github.com/filecoin-project/go-ds-versioning v0.1.0 h1:y/X6UksYTsK8TLCI7rttCKEvl
github.com/filecoin-project/go-ds-versioning v0.1.0/go.mod h1:mp16rb4i2QPmxBnmanUx8i/XANp+PFCCJWiAb+VW4/s=
github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f h1:GxJzR3oRIMTPtpZ0b7QF8FKPK6/iPAc7trhlL5k/g+s=
github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ=
github.com/filecoin-project/go-fil-markets v1.0.4 h1:OGEoNppGcAjzIznDHFb/yy7ypVgHMO2CQZg6E9nViWI=
github.com/filecoin-project/go-fil-markets v1.0.4/go.mod h1:AJySOJC00JRWEZzRG2KsfUnqEf5ITXxeX09BE9N4f9c=
github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335 h1:DF8eu0WdEBnSVdu71+jfT4YMk6fO7AIJk2ZiWd3l15c=
github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335/go.mod h1:AJySOJC00JRWEZzRG2KsfUnqEf5ITXxeX09BE9N4f9c=
github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM=
github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24=
@ -275,8 +275,8 @@ github.com/filecoin-project/go-state-types v0.0.0-20200903145444-247639ffa6ad/go
github.com/filecoin-project/go-state-types v0.0.0-20200904021452-1883f36ca2f4/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I=
github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab h1:cEDC5Ei8UuT99hPWhCjA72SM9AuRtnpvdSTIYbnzN8I=
github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-state-types v0.0.0-20201013222834-41ea465f274f h1:TZDTu4MtBKSFLXWGKLy+cvC3nHfMFIrVgWLAz/+GgZQ=
github.com/filecoin-project/go-state-types v0.0.0-20201013222834-41ea465f274f/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc h1:+hbMY4Pcx2oizrfH08VWXwrj5mU8aJT6g0UNxGHFCGU=
github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe h1:dF8u+LEWeIcTcfUcCf3WFVlc81Fr2JKg8zPzIbBDKDw=
github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig=
github.com/filecoin-project/go-statestore v0.1.0 h1:t56reH59843TwXHkMcwyuayStBIiWBRilQjQ+5IiwdQ=
@ -286,11 +286,13 @@ github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b/
github.com/filecoin-project/specs-actors v0.9.4/go.mod h1:BStZQzx5x7TmCkLv0Bpa07U6cPKol6fd3w9KjMPZ6Z4=
github.com/filecoin-project/specs-actors v0.9.12 h1:iIvk58tuMtmloFNHhAOQHG+4Gci6Lui0n7DYQGi3cJk=
github.com/filecoin-project/specs-actors v0.9.12/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao=
github.com/filecoin-project/specs-actors v0.9.13 h1:rUEOQouefi9fuVY/2HOroROJlZbOzWYXXeIh41KF2M4=
github.com/filecoin-project/specs-actors v0.9.13/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao=
github.com/filecoin-project/specs-actors/v2 v2.0.1/go.mod h1:v2NZVYinNIKA9acEMBm5wWXxqv5+frFEbekBFemYghY=
github.com/filecoin-project/specs-actors/v2 v2.2.0 h1:IyCICb0NHYeD0sdSqjVGwWydn/7r7xXuxdpvGAcRCGY=
github.com/filecoin-project/specs-actors/v2 v2.2.0/go.mod h1:rlv5Mx9wUhV8Qsz+vUezZNm+zL4tK08O0HreKKPB2Wc=
github.com/filecoin-project/specs-storage v0.1.1-0.20200907031224-ed2e5cd13796 h1:dJsTPWpG2pcTeojO2pyn0c6l+x/3MZYCBgo/9d11JEk=
github.com/filecoin-project/specs-storage v0.1.1-0.20200907031224-ed2e5cd13796/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g=
github.com/filecoin-project/specs-actors/v2 v2.3.2 h1:2Vcf4CGa29kRh4JJ02m+FbvD/p3YNnLGsaHfw7Uj49g=
github.com/filecoin-project/specs-actors/v2 v2.3.2/go.mod h1:UuJQLoTx/HPvvWeqlIFmC/ywlOLHNe8SNQ3OunFbu2Y=
github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 h1:Ur/l2+6qN+lQiqjozWWc5p9UDaAMDZKTlDS98oRnlIw=
github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g=
github.com/filecoin-project/test-vectors/schema v0.0.5 h1:w3zHQhzM4pYxJDl21avXjOKBLF8egrvwUwjpT8TquDg=
github.com/filecoin-project/test-vectors/schema v0.0.5/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
@ -1495,8 +1497,8 @@ github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1:
github.com/whyrusleeping/yamux v1.1.5/go.mod h1:E8LnQQ8HKx5KD29HZFUwM1PxCOdPRzGwur1mcYhXcD8=
github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xlab/c-for-go v0.0.0-20201002084316-c134bfab968f h1:nMhj+x/m7ZQsHBz0L3gpytp0v6ogokdbrQDnhB8Kh7s=
github.com/xlab/c-for-go v0.0.0-20201002084316-c134bfab968f/go.mod h1:h/1PEBwj7Ym/8kOuMWvO2ujZ6Lt+TMbySEXNhjjR87I=
github.com/xlab/c-for-go v0.0.0-20201112171043-ea6dce5809cb h1:/7/dQyiKnxAOj9L69FhST7uMe17U015XPzX7cy+5ykM=
github.com/xlab/c-for-go v0.0.0-20201112171043-ea6dce5809cb/go.mod h1:pbNsDSxn1ICiNn9Ct4ZGNrwzfkkwYbx/lw8VuyutFIg=
github.com/xlab/pkgconfig v0.0.0-20170226114623-cea12a0fd245 h1:Sw125DKxZhPUI4JLlWugkzsrlB50jR9v2khiD9FxuSo=
github.com/xlab/pkgconfig v0.0.0-20170226114623-cea12a0fd245/go.mod h1:C+diUUz7pxhNY6KAoLgrTYARGWnt82zWTylZlxT92vk=
github.com/xorcare/golden v0.6.0/go.mod h1:7T39/ZMvaSEZlBPoYfVFmsBLmUl3uz9IuzWj/U6FtvQ=
@ -1504,6 +1506,7 @@ github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542 h1:oWgZJmC1DorFZD
github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542/go.mod h1:7T39/ZMvaSEZlBPoYfVFmsBLmUl3uz9IuzWj/U6FtvQ=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/zondax/hid v0.9.0 h1:eiT3P6vNxAEVxXMw66eZUAAnU2zD33JBkfG/EnfAKl8=
github.com/zondax/hid v0.9.0/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM=
github.com/zondax/ledger-go v0.12.1 h1:hYRcyznPRJp+5mzF2sazTLP2nGvGjYDD2VzhHhFomLU=
@ -1594,6 +1597,7 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20181106170214-d68db9428509/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -1668,6 +1672,8 @@ golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@ -1685,6 +1691,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180202135801-37707fdb30a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -1751,11 +1759,15 @@ golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200812155832-6a926be9bd1d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200926100807-9d91bd62050c h1:38q6VNPWR010vN82/SB121GujZNIfAUb4YttE2rhGuc=
golang.org/x/sys v0.0.0-20200926100807-9d91bd62050c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -1801,6 +1813,8 @@ golang.org/x/tools v0.0.0-20200216192241-b320d3a0f5a2/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20200711155855-7342f9734a7d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3 h1:r3P/5xOq/dK1991B65Oy6E1fRF/2d/fSYZJ/fXGVfJc=
golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20201112185108-eeaa07dd7696 h1:Bfazo+enXJET5SbHeh95NtxabJF6fJ9r/jpfRJgd3j4=
golang.org/x/tools v0.0.0-20201112185108-eeaa07dd7696/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -1929,8 +1943,13 @@ launchpad.net/gocheck v0.0.0-20140225173054-000000000087 h1:Izowp2XBH6Ya6rv+hqbc
launchpad.net/gocheck v0.0.0-20140225173054-000000000087/go.mod h1:hj7XX3B/0A+80Vse0e+BUHsHMTEhd0O4cpUHr/e/BUM=
modernc.org/cc v1.0.0 h1:nPibNuDEx6tvYrUAtvDTTw98rx5juGsa5zuDnKwEEQQ=
modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
modernc.org/fileutil v1.0.0/go.mod h1:JHsWpkrk/CnVV1H/eGlFf85BEpfkrp56ro8nojIq9Q8=
modernc.org/golex v1.0.0 h1:wWpDlbK8ejRfSyi0frMyhilD3JBvtcx2AdGDnU+JtsE=
modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
modernc.org/golex v1.0.1 h1:EYKY1a3wStt0RzHaH8mdSRNg78Ub0OHxYfCRWw35YtM=
modernc.org/golex v1.0.1/go.mod h1:QCA53QtsT1NdGkaZZkF5ezFwk4IXh4BGNafAARTC254=
modernc.org/lex v1.0.0/go.mod h1:G6rxMTy3cH2iA0iXL/HRRv4Znu8MK4higxph/lE7ypk=
modernc.org/lexer v1.0.0/go.mod h1:F/Dld0YKYdZCLQ7bD0USbWL4YKCyTDRDHiDTOs0q0vk=
modernc.org/mathutil v1.1.1 h1:FeylZSVX8S+58VsyJlkEj2bcpdytmp9MmDKZkKx8OIE=
modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
modernc.org/strutil v1.1.0 h1:+1/yCzZxY2pZwwrsbH+4T7BQMoLQ9QiBshRC9eicYsc=
@ -15,6 +15,7 @@ import (
	"github.com/filecoin-project/go-fil-markets/retrievalmarket"
	"github.com/filecoin-project/go-fil-markets/shared"
	"github.com/filecoin-project/go-state-types/abi"
	specstorage "github.com/filecoin-project/specs-storage/storage"

	"github.com/ipfs/go-cid"
)
@ -52,9 +53,12 @@ func (rpn *retrievalProviderNode) UnsealSector(ctx context.Context, sectorID abi
		return nil, err
	}

	sid := abi.SectorID{
		Miner:  abi.ActorID(mid),
		Number: sectorID,
	ref := specstorage.SectorRef{
		ID: abi.SectorID{
			Miner:  abi.ActorID(mid),
			Number: sectorID,
		},
		ProofType: si.SectorType,
	}

	r, w := io.Pipe()
@ -63,7 +67,7 @@ func (rpn *retrievalProviderNode) UnsealSector(ctx context.Context, sectorID abi
	if si.CommD != nil {
		commD = *si.CommD
	}
	err := rpn.sealer.ReadPiece(ctx, w, sid, storiface.UnpaddedByteIndex(offset), length, si.TicketValue, commD)
	err := rpn.sealer.ReadPiece(ctx, w, ref, storiface.UnpaddedByteIndex(offset), length, si.TicketValue, commD)
	_ = w.CloseWithError(err)
}()
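The ReadPiece call above runs against the write end of an io.Pipe so the unsealed bytes can be streamed to the caller without buffering the whole piece. The general pattern in isolation (an illustrative helper, not lotus code):

package main

import "io"

// pipeReader runs produce in a goroutine against the write half of a pipe;
// any error it returns is surfaced to the reader via CloseWithError, which
// is exactly how the UnsealSector hunk propagates ReadPiece failures.
// CloseWithError(nil) simply closes the pipe, so the reader sees io.EOF.
func pipeReader(produce func(io.Writer) error) io.Reader {
	r, w := io.Pipe()
	go func() {
		_ = w.CloseWithError(produce(w))
	}()
	return r
}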
@ -167,6 +167,19 @@ func (n *ProviderNodeAdapter) GetMinerWorkerAddress(ctx context.Context, miner a
	return mi.Worker, nil
}

func (n *ProviderNodeAdapter) GetProofType(ctx context.Context, miner address.Address, tok shared.TipSetToken) (abi.RegisteredSealProof, error) {
	tsk, err := types.TipSetKeyFromBytes(tok)
	if err != nil {
		return 0, err
	}

	mi, err := n.StateMinerInfo(ctx, miner, tsk)
	if err != nil {
		return 0, err
	}
	return mi.SealProofType, nil
}

func (n *ProviderNodeAdapter) SignBytes(ctx context.Context, signer address.Address, b []byte) (*crypto.Signature, error) {
	signer, err := n.StateAccountKey(ctx, signer, types.EmptyTSK)
	if err != nil {
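GetProofType gives the markets provider a direct way to learn a miner's registered seal proof from chain state rather than inferring it from sector size. A hypothetical call site (spn, tok, minerAddr, minerID and sectorNum are assumptions for illustration):

// Resolve the proof type, then attach it to a SectorRef for an unseal request.
spt, err := spn.GetProofType(ctx, minerAddr, tok)
if err != nil {
	return xerrors.Errorf("getting miner proof type: %w", err)
}
ref := specstorage.SectorRef{
	ID:        abi.SectorID{Miner: minerID, Number: sectorNum},
	ProofType: spt,
}
_ = ref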
@ -53,6 +53,8 @@ var (
	PubsubSendRPC      = stats.Int64("pubsub/send_rpc", "Counter for total sent RPCs", stats.UnitDimensionless)
	PubsubDropRPC      = stats.Int64("pubsub/drop_rpc", "Counter for total dropped RPCs", stats.UnitDimensionless)
	APIRequestDuration = stats.Float64("api/request_duration_ms", "Duration of API requests", stats.UnitMilliseconds)
	VMFlushCopyDuration = stats.Float64("vm/flush_copy_ms", "Time spent in VM Flush Copy", stats.UnitMilliseconds)
	VMFlushCopyCount    = stats.Int64("vm/flush_copy_count", "Number of copied objects", stats.UnitDimensionless)
)

var (
@ -146,6 +148,14 @@ var (
		Aggregation: defaultMillisecondsDistribution,
		TagKeys:     []tag.Key{APIInterface, Endpoint},
	}
	VMFlushCopyDurationView = &view.View{
		Measure:     VMFlushCopyDuration,
		Aggregation: view.Sum(),
	}
	VMFlushCopyCountView = &view.View{
		Measure:     VMFlushCopyCount,
		Aggregation: view.Sum(),
	}
)

// DefaultViews is an array of OpenCensus views for metric gathering purposes
@ -171,6 +181,8 @@ var DefaultViews = append([]*view.View{
	PubsubSendRPCView,
	PubsubDropRPCView,
	APIRequestDurationView,
	VMFlushCopyCountView,
	VMFlushCopyDurationView,
},
	rpcmetrics.DefaultViews...)
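Both new measures use a Sum aggregation, so the exported values are running totals rather than distributions. A minimal sketch of the recording side (the actual call site inside the VM flush path is not shown in this diff, so the function below is an assumption):

package main

import (
	"context"
	"time"

	"go.opencensus.io/stats"

	"github.com/filecoin-project/lotus/metrics"
)

// recordFlushCopy feeds both new measures after a copy of `copied` objects;
// with the Sum views above, dashboards see cumulative copy time and
// cumulative object counts across all flushes.
func recordFlushCopy(ctx context.Context, copied int64, start time.Time) {
	stats.Record(ctx,
		metrics.VMFlushCopyDuration.M(float64(time.Since(start).Milliseconds())),
		metrics.VMFlushCopyCount.M(copied),
	)
}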
@ -8,6 +8,7 @@ import (

	metricsi "github.com/ipfs/go-metrics-interface"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/lotus/chain"
	"github.com/filecoin-project/lotus/chain/exchange"
	"github.com/filecoin-project/lotus/chain/store"
@ -339,7 +340,7 @@ func Online() Option {
			Override(new(stores.SectorIndex), From(new(*stores.Index))),
			Override(new(dtypes.MinerID), modules.MinerID),
			Override(new(dtypes.MinerAddress), modules.MinerAddress),
			Override(new(*ffiwrapper.Config), modules.ProofsConfig),
			Override(new(abi.RegisteredSealProof), modules.SealProofType),
			Override(new(stores.LocalStorage), From(new(repo.LockedRepo))),
			Override(new(sealing.SectorIDCounter), modules.SectorIDCounter),
			Override(new(*sectorstorage.Manager), modules.SectorStorage),
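With this binding, the seal proof type becomes an injectable dependency in its own right instead of a field on ffiwrapper.Config. A sketch of what any consumer now looks like (Widget and NewWidget are illustrative only; the wiring style mirrors lotus's fx-based Override pattern):

// Once Override(new(abi.RegisteredSealProof), modules.SealProofType) is in
// place, the DI container can satisfy this parameter automatically.
type Widget struct{ ssize abi.SectorSize }

func NewWidget(spt abi.RegisteredSealProof) (*Widget, error) {
	ssize, err := spt.SectorSize()
	if err != nil {
		return nil, err
	}
	return &Widget{ssize: ssize}, nil
}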
@ -43,7 +43,6 @@ import (
	"github.com/filecoin-project/go-multistore"
	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
	marketevents "github.com/filecoin-project/lotus/markets/loggers"

	"github.com/filecoin-project/lotus/api"
@ -141,11 +140,6 @@ func (a *API) ClientStartDeal(ctx context.Context, params *api.StartDealParams)
		return nil, xerrors.Errorf("failed getting miner's deadline info: %w", err)
	}

	rt, err := ffiwrapper.SealProofTypeFromSectorSize(mi.SectorSize)
	if err != nil {
		return nil, xerrors.Errorf("bad sector size: %w", err)
	}

	if uint64(params.Data.PieceSize.Padded()) > uint64(mi.SectorSize) {
		return nil, xerrors.New("data doesn't fit in a sector")
	}
@ -171,7 +165,7 @@ func (a *API) ClientStartDeal(ctx context.Context, params *api.StartDealParams)
		EndEpoch:      calcDealExpiration(params.MinBlocksDuration, md, dealStart),
		Price:         params.EpochPrice,
		Collateral:    params.ProviderCollateral,
		Rt:            rt,
		Rt:            mi.SealProofType,
		FastRetrieval: params.FastRetrieval,
		VerifiedDeal:  params.VerifiedDeal,
		StoreID:       storeID,
@ -647,7 +641,7 @@ func (a *API) ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Addre

func (a *API) ClientCalcCommP(ctx context.Context, inpath string) (*api.CommPRet, error) {

	// Hard-code the sector size to 32GiB, because:
	// Hard-code the sector type to 32GiBV1_1, because:
	// - pieceio.GeneratePieceCommitment requires a RegisteredSealProof
	// - commP itself is sector-size independent, with rather low probability of that changing
	//   ( note how the final rust call is identical for every RegSP type )
@ -655,12 +649,7 @@ func (a *API) ClientCalcCommP(ctx context.Context, inpath string) (*api.CommPRet
	//
	// IF/WHEN this changes in the future we will have to be able to calculate
	// "old style" commP, and thus will need to introduce a version switch or similar
	arbitrarySectorSize := abi.SectorSize(32 << 30)

	rt, err := ffiwrapper.SealProofTypeFromSectorSize(arbitrarySectorSize)
	if err != nil {
		return nil, xerrors.Errorf("bad sector size: %w", err)
	}
	arbitraryProofType := abi.RegisteredSealProof_StackedDrg32GiBV1_1

	rdr, err := os.Open(inpath)
	if err != nil {
@ -673,7 +662,7 @@ func (a *API) ClientCalcCommP(ctx context.Context, inpath string) (*api.CommPRet
		return nil, err
	}

	commP, pieceSize, err := pieceio.GeneratePieceCommitment(rt, rdr, uint64(stat.Size()))
	commP, pieceSize, err := pieceio.GeneratePieceCommitment(arbitraryProofType, rdr, uint64(stat.Size()))

	if err != nil {
		return nil, xerrors.Errorf("computing commP failed: %w", err)
@ -123,7 +123,12 @@ func (a *StateAPI) StateMinerActiveSectors(ctx context.Context, maddr address.Ad
}

func (m *StateModule) StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error) {
	act, err := m.StateManager.LoadActorTsk(ctx, actor, tsk)
	ts, err := m.Chain.GetTipSetFromKey(tsk)
	if err != nil {
		return miner.MinerInfo{}, xerrors.Errorf("failed to load tipset: %w", err)
	}

	act, err := m.StateManager.LoadActor(ctx, actor, ts)
	if err != nil {
		return miner.MinerInfo{}, xerrors.Errorf("failed to load miner actor: %w", err)
	}
@ -133,7 +138,16 @@ func (m *StateModule) StateMinerInfo(ctx context.Context, actor address.Address,
		return miner.MinerInfo{}, xerrors.Errorf("failed to load miner actor state: %w", err)
	}

	return mas.Info()
	// TODO: You know, this is terrible.
	// I mean, we _really_ shouldn't do this. Maybe we should convert somewhere else?
	info, err := mas.Info()
	if err != nil {
		return miner.MinerInfo{}, err
	}
	if m.StateManager.GetNtwkVersion(ctx, ts.Height()) >= network.Version7 && info.SealProofType < abi.RegisteredSealProof_StackedDrg2KiBV1_1 {
		info.SealProofType += abi.RegisteredSealProof_StackedDrg2KiBV1_1
	}
	return info, nil
}

func (a *StateAPI) StateMinerDeadlines(ctx context.Context, m address.Address, tsk types.TipSetKey) ([]api.Deadline, error) {
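The `+=` in the StateMinerInfo hunk above leans on the enum layout in go-state-types, where the V1_1 seal proof constants immediately follow the V1 constants, so RegisteredSealProof_StackedDrg2KiBV1_1 doubles as the offset between the two families. A sketch of the same mapping, with the assumed iota values spelled out (worth checking against the pinned go-state-types version):

// Assumed layout in go-state-types/abi:
//   StackedDrg2KiBV1 = 0 ... StackedDrg64GiBV1 = 4
//   StackedDrg2KiBV1_1 = 5 ... StackedDrg64GiBV1_1 = 9
// so adding StackedDrg2KiBV1_1 (5) maps each V1 proof to its V1_1 twin,
// e.g. StackedDrg32GiBV1 (3) -> StackedDrg32GiBV1_1 (8).
func proofToV1_1(spt abi.RegisteredSealProof) abi.RegisteredSealProof {
	if spt < abi.RegisteredSealProof_StackedDrg2KiBV1_1 {
		spt += abi.RegisteredSealProof_StackedDrg2KiBV1_1
	}
	return spt
}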
@ -24,7 +24,6 @@ import (
	"github.com/filecoin-project/go-state-types/big"

	sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
	"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
	"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
@ -43,7 +42,6 @@ import (
type StorageMinerAPI struct {
	common.CommonAPI

	ProofsConfig *ffiwrapper.Config
	SectorBlocks *sectorblocks.SectorBlocks

	PieceStore dtypes.ProviderPieceStore
@ -85,8 +85,8 @@ func minerAddrFromDS(ds dtypes.MetadataDS) (address.Address, error) {
	return address.NewFromBytes(maddrb)
}

func GetParams(sbc *ffiwrapper.Config) error {
	ssize, err := sbc.SealProofType.SectorSize()
func GetParams(spt abi.RegisteredSealProof) error {
	ssize, err := spt.SectorSize()
	if err != nil {
		return err
	}
@ -97,6 +97,7 @@ func GetParams(sbc *ffiwrapper.Config) error {
		return nil
	}

	// TODO: We should fetch the params for the actual proof type, not just based on the size.
	if err := paramfetch.GetParams(context.TODO(), build.ParametersJSON(), uint64(ssize)); err != nil {
		return xerrors.Errorf("fetching proof parameters: %w", err)
	}
@ -121,22 +122,13 @@ func StorageNetworkName(ctx helpers.MetricsCtx, a lapi.FullNode) (dtypes.Network
	return a.StateNetworkName(ctx)
}

func ProofsConfig(maddr dtypes.MinerAddress, fnapi lapi.FullNode) (*ffiwrapper.Config, error) {
func SealProofType(maddr dtypes.MinerAddress, fnapi lapi.FullNode) (abi.RegisteredSealProof, error) {
	mi, err := fnapi.StateMinerInfo(context.TODO(), address.Address(maddr), types.EmptyTSK)
	if err != nil {
		return nil, err
		return 0, err
	}

	spt, err := ffiwrapper.SealProofTypeFromSectorSize(mi.SectorSize)
	if err != nil {
		return nil, xerrors.Errorf("bad sector size: %w", err)
	}

	sb := &ffiwrapper.Config{
		SealProofType: spt,
	}

	return sb, nil
	return mi.SealProofType, nil
}

type sidsc struct {
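Under the new signature, callers hand GetParams a registered seal proof directly; the parameter fetch itself is still keyed on sector size, as the TODO notes. A hypothetical call for a 32 GiB V1_1 miner:

// Fetch the proof parameters needed to seal and prove 32 GiB sectors.
// modules.GetParams derives the sector size from the proof type internally.
if err := modules.GetParams(abi.RegisteredSealProof_StackedDrg32GiBV1_1); err != nil {
	return xerrors.Errorf("fetching proof parameters: %w", err)
}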
@ -519,7 +511,6 @@ func BasicDealFilter(user dtypes.StorageDealFilter) func(onlineOk dtypes.Conside
}

func StorageProvider(minerAddress dtypes.MinerAddress,
	ffiConfig *ffiwrapper.Config,
	storedAsk *storedask.StoredAsk,
	h host.Host, ds dtypes.MetadataDS,
	mds dtypes.StagingMultiDstore,
@ -537,7 +528,7 @@ func StorageProvider(minerAddress dtypes.MinerAddress,

	opt := storageimpl.CustomDealDecisionLogic(storageimpl.DealDeciderFunc(df))

	return storageimpl.NewProvider(net, namespace.Wrap(ds, datastore.NewKey("/deals/provider")), store, mds, pieceStore, dataTransfer, spn, address.Address(minerAddress), ffiConfig.SealProofType, storedAsk, opt)
	return storageimpl.NewProvider(net, namespace.Wrap(ds, datastore.NewKey("/deals/provider")), store, mds, pieceStore, dataTransfer, spn, address.Address(minerAddress), storedAsk, opt)
}

func RetrievalDealFilter(userFilter dtypes.RetrievalDealFilter) func(onlineOk dtypes.ConsiderOnlineRetrievalDealsConfigFunc,
@ -602,13 +593,13 @@ func RetrievalProvider(h host.Host,
var WorkerCallsPrefix = datastore.NewKey("/worker/calls")
var ManagerWorkPrefix = datastore.NewKey("/stmgr/calls")

func SectorStorage(mctx helpers.MetricsCtx, lc fx.Lifecycle, ls stores.LocalStorage, si stores.SectorIndex, cfg *ffiwrapper.Config, sc sectorstorage.SealerConfig, urls sectorstorage.URLs, sa sectorstorage.StorageAuth, ds dtypes.MetadataDS) (*sectorstorage.Manager, error) {
func SectorStorage(mctx helpers.MetricsCtx, lc fx.Lifecycle, ls stores.LocalStorage, si stores.SectorIndex, sc sectorstorage.SealerConfig, urls sectorstorage.URLs, sa sectorstorage.StorageAuth, ds dtypes.MetadataDS) (*sectorstorage.Manager, error) {
	ctx := helpers.LifecycleCtx(mctx, lc)

	wsts := statestore.New(namespace.Wrap(ds, WorkerCallsPrefix))
	smsts := statestore.New(namespace.Wrap(ds, ManagerWorkPrefix))

	sst, err := sectorstorage.New(ctx, ls, si, cfg, sc, urls, sa, wsts, smsts)
	sst, err := sectorstorage.New(ctx, ls, si, sc, urls, sa, wsts, smsts)
	if err != nil {
		return nil, err
	}
@ -93,6 +93,22 @@ func TestDealMining(t *testing.T) {
	test.TestDealMining(t, builder.MockSbBuilder, 50*time.Millisecond, false)
}

func TestSDRUpgrade(t *testing.T) {
	logging.SetLogLevel("miner", "ERROR")
	logging.SetLogLevel("chainstore", "ERROR")
	logging.SetLogLevel("chain", "ERROR")
	logging.SetLogLevel("sub", "ERROR")
	logging.SetLogLevel("storageminer", "ERROR")

	oldDelay := policy.GetPreCommitChallengeDelay()
	policy.SetPreCommitChallengeDelay(5)
	t.Cleanup(func() {
		policy.SetPreCommitChallengeDelay(oldDelay)
	})

	test.TestSDRUpgrade(t, builder.MockSbBuilder, 50*time.Millisecond)
}

func TestPledgeSectors(t *testing.T) {
	logging.SetLogLevel("miner", "ERROR")
	logging.SetLogLevel("chainstore", "ERROR")
@ -26,7 +26,6 @@ import (
	"github.com/filecoin-project/lotus/chain"
	"github.com/filecoin-project/lotus/chain/actors"
	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
	"github.com/filecoin-project/lotus/chain/actors/policy"
	"github.com/filecoin-project/lotus/chain/gen"
	genesis2 "github.com/filecoin-project/lotus/chain/gen/genesis"
	"github.com/filecoin-project/lotus/chain/messagepool"
@ -356,7 +355,7 @@ func mockSbBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []tes
		preseals = test.GenesisPreseals
	}

	genm, k, err := mockstorage.PreSeal(2048, maddr, preseals)
	genm, k, err := mockstorage.PreSeal(abi.RegisteredSealProof_StackedDrg2KiBV1, maddr, preseals)
	if err != nil {
		t.Fatal(err)
	}
@ -458,7 +457,7 @@ func mockSbBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []tes

	storers[i] = CreateTestStorageNode(ctx, t, genms[i].Worker, maddrs[i], pidKeys[i], f, mn, node.Options(
		node.Override(new(sectorstorage.SectorManager), func() (sectorstorage.SectorManager, error) {
			return mock.NewMockSectorMgr(policy.GetDefaultSectorSize(), sectors), nil
			return mock.NewMockSectorMgr(sectors), nil
		}),
		node.Override(new(ffiwrapper.Verifier), mock.MockVerifier),
		node.Unset(new(*sectorstorage.Manager)),
@ -39,13 +39,8 @@ func NewSealingAPIAdapter(api storageMinerApi) SealingAPIAdapter {
}

func (s SealingAPIAdapter) StateMinerSectorSize(ctx context.Context, maddr address.Address, tok sealing.TipSetToken) (abi.SectorSize, error) {
	tsk, err := types.TipSetKeyFromBytes(tok)
	if err != nil {
		return 0, xerrors.Errorf("failed to unmarshal TipSetToken to TipSetKey: %w", err)
	}

	// TODO: update storage-fsm to just StateMinerInfo
	mi, err := s.delegate.StateMinerInfo(ctx, maddr, tsk)
	mi, err := s.StateMinerInfo(ctx, maddr, tok)
	if err != nil {
		return 0, err
	}
@ -70,14 +65,19 @@ func (s SealingAPIAdapter) StateMinerInitialPledgeCollateral(ctx context.Context
	return s.delegate.StateMinerInitialPledgeCollateral(ctx, a, pci, tsk)
}

func (s SealingAPIAdapter) StateMinerWorkerAddress(ctx context.Context, maddr address.Address, tok sealing.TipSetToken) (address.Address, error) {
func (s SealingAPIAdapter) StateMinerInfo(ctx context.Context, maddr address.Address, tok sealing.TipSetToken) (miner.MinerInfo, error) {
	tsk, err := types.TipSetKeyFromBytes(tok)
	if err != nil {
		return address.Undef, xerrors.Errorf("failed to unmarshal TipSetToken to TipSetKey: %w", err)
		return miner.MinerInfo{}, xerrors.Errorf("failed to unmarshal TipSetToken to TipSetKey: %w", err)
	}

	// TODO: update storage-fsm to just StateMinerInfo
	mi, err := s.delegate.StateMinerInfo(ctx, maddr, tsk)
	return s.delegate.StateMinerInfo(ctx, maddr, tsk)
}

func (s SealingAPIAdapter) StateMinerWorkerAddress(ctx context.Context, maddr address.Address, tok sealing.TipSetToken) (address.Address, error) {
	// TODO: update storage-fsm to just StateMinerInfo
	mi, err := s.StateMinerInfo(ctx, maddr, tok)
	if err != nil {
		return address.Undef, err
	}
@ -214,12 +214,7 @@ func NewWinningPoStProver(api api.FullNode, prover storage.Prover, verifier ffiw
		return nil, xerrors.Errorf("getting sector size: %w", err)
	}

	spt, err := ffiwrapper.SealProofTypeFromSectorSize(mi.SectorSize)
	if err != nil {
		return nil, err
	}

	wpt, err := spt.RegisteredWinningPoStProof()
	wpt, err := mi.SealProofType.RegisteredWinningPoStProof()
	if err != nil {
		return nil, err
	}
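RegisteredWinningPoStProof maps a seal proof onto the winning-PoSt proof for the same sector size, so the prover no longer round-trips through the sector size. A small sketch; the concrete output identifier is an assumption based on the go-state-types mapping:

// Both the V1 and V1_1 seal proofs of a given size map to the same (V1)
// winning PoSt proof, e.g. (assumed):
wpt, err := abi.RegisteredSealProof_StackedDrg32GiBV1_1.RegisteredWinningPoStProof()
if err != nil {
	panic(err)
}
fmt.Println(wpt == abi.RegisteredPoStProof_StackedDrgWinning32GiBV1) // expected: true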
@ -13,17 +13,21 @@ import (

	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/chain/wallet"
	"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
	"github.com/filecoin-project/lotus/extern/sector-storage/zerocomm"
	"github.com/filecoin-project/lotus/genesis"
)

func PreSeal(ssize abi.SectorSize, maddr address.Address, sectors int) (*genesis.Miner, *types.KeyInfo, error) {
func PreSeal(spt abi.RegisteredSealProof, maddr address.Address, sectors int) (*genesis.Miner, *types.KeyInfo, error) {
	k, err := wallet.GenerateKey(types.KTBLS)
	if err != nil {
		return nil, nil, err
	}

	ssize, err := spt.SectorSize()
	if err != nil {
		return nil, nil, err
	}

	genm := &genesis.Miner{
		ID:    maddr,
		Owner: k.Address,
@ -34,15 +38,10 @@ func PreSeal(ssize abi.SectorSize, maddr address.Address, sectors int) (*genesis
		Sectors: make([]*genesis.PreSeal, sectors),
	}

	st, err := ffiwrapper.SealProofTypeFromSectorSize(ssize)
	if err != nil {
		return nil, nil, err
	}

	for i := range genm.Sectors {
		preseal := &genesis.PreSeal{}

		preseal.ProofType = st
		preseal.ProofType = spt
		preseal.CommD = zerocomm.ZeroPieceCommitment(abi.PaddedPieceSize(ssize).Unpadded())
		d, _ := commcid.CIDToPieceCommitmentV1(preseal.CommD)
		r := mock.CommDR(d)
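The ZeroPieceCommitment call converts the sector's padded size into an unpadded piece size. With FR32 padding, 127 of every 128 bytes are payload (254 data bits per 256), so unpadded = padded - padded/128. A worked example for the 2 KiB test sectors:

// abi.PaddedPieceSize.Unpadded() computes s - s/128.
// For a 2 KiB sector: 2048 - 16 = 2032 bytes of payload.
fmt.Println(abi.PaddedPieceSize(2048).Unpadded()) // 2032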
@ -6,6 +6,7 @@ import (
	"time"

	"github.com/filecoin-project/go-bitfield"
	"github.com/filecoin-project/specs-storage/storage"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
@ -188,24 +189,30 @@ func (s *WindowPoStScheduler) runSubmitPoST(
	return submitErr
}

func (s *WindowPoStScheduler) checkSectors(ctx context.Context, check bitfield.BitField) (bitfield.BitField, error) {
func (s *WindowPoStScheduler) checkSectors(ctx context.Context, check bitfield.BitField, tsk types.TipSetKey) (bitfield.BitField, error) {
	mid, err := address.IDFromAddress(s.actor)
	if err != nil {
		return bitfield.BitField{}, err
	}

	sectors := make(map[abi.SectorID]struct{})
	var tocheck []abi.SectorID
	err = check.ForEach(func(snum uint64) error {
		s := abi.SectorID{
			Miner:  abi.ActorID(mid),
			Number: abi.SectorNumber(snum),
		}
	sectorInfos, err := s.api.StateMinerSectors(ctx, s.actor, &check, tsk)
	if err != nil {
		return bitfield.BitField{}, err
	}

		tocheck = append(tocheck, s)
		sectors[s] = struct{}{}
		return nil
	})
	sectors := make(map[abi.SectorNumber]struct{})
	var tocheck []storage.SectorRef
	for _, info := range sectorInfos {
		sectors[info.SectorNumber] = struct{}{}
		tocheck = append(tocheck, storage.SectorRef{
			ProofType: info.SealProof,
			ID: abi.SectorID{
				Miner:  abi.ActorID(mid),
				Number: info.SectorNumber,
			},
		})
	}
	if err != nil {
		return bitfield.BitField{}, xerrors.Errorf("iterating over bitfield: %w", err)
	}
@ -215,20 +222,20 @@ func (s *WindowPoStScheduler) checkSectors(ctx context.Context, check bitfield.B
		return bitfield.BitField{}, xerrors.Errorf("checking provable sectors: %w", err)
	}
	for _, id := range bad {
		delete(sectors, id)
		delete(sectors, id.Number)
	}

	log.Warnw("Checked sectors", "checked", len(tocheck), "good", len(sectors))

	sbf := bitfield.New()
	for s := range sectors {
		sbf.Set(uint64(s.Number))
		sbf.Set(uint64(s))
	}

	return sbf, nil
}

func (s *WindowPoStScheduler) checkNextRecoveries(ctx context.Context, dlIdx uint64, partitions []api.Partition) ([]miner.RecoveryDeclaration, *types.SignedMessage, error) {
func (s *WindowPoStScheduler) checkNextRecoveries(ctx context.Context, dlIdx uint64, partitions []api.Partition, tsk types.TipSetKey) ([]miner.RecoveryDeclaration, *types.SignedMessage, error) {
	ctx, span := trace.StartSpan(ctx, "storage.checkNextRecoveries")
	defer span.End()
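Since the old and new lines are interleaved in the hunks above, here is the new checkSectors flow assembled in one piece. This is a sketch reconstructed from the added lines only; the CheckProvable call site is elided between the two hunks, so the field names s.faultTracker and s.proofType are assumptions:

func (s *WindowPoStScheduler) checkSectors(ctx context.Context, check bitfield.BitField, tsk types.TipSetKey) (bitfield.BitField, error) {
	mid, err := address.IDFromAddress(s.actor)
	if err != nil {
		return bitfield.BitField{}, err
	}

	// Resolve the bitfield against chain state so each sector's registered
	// seal proof is known; terminated sectors simply don't come back.
	sectorInfos, err := s.api.StateMinerSectors(ctx, s.actor, &check, tsk)
	if err != nil {
		return bitfield.BitField{}, err
	}

	sectors := make(map[abi.SectorNumber]struct{})
	var tocheck []storage.SectorRef
	for _, info := range sectorInfos {
		sectors[info.SectorNumber] = struct{}{}
		tocheck = append(tocheck, storage.SectorRef{
			ProofType: info.SealProof,
			ID:        abi.SectorID{Miner: abi.ActorID(mid), Number: info.SectorNumber},
		})
	}

	// Probe provability; field names assumed, the call itself is elided above.
	bad, err := s.faultTracker.CheckProvable(ctx, s.proofType, tocheck)
	if err != nil {
		return bitfield.BitField{}, xerrors.Errorf("checking provable sectors: %w", err)
	}
	for _, id := range bad {
		delete(sectors, id.Number)
	}

	// Re-encode the surviving sector numbers as a bitfield.
	sbf := bitfield.New()
	for s := range sectors {
		sbf.Set(uint64(s))
	}
	return sbf, nil
}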
@ -254,7 +261,7 @@ func (s *WindowPoStScheduler) checkNextRecoveries(ctx context.Context, dlIdx uin

	faulty += uc

	recovered, err := s.checkSectors(ctx, unrecovered)
	recovered, err := s.checkSectors(ctx, unrecovered, tsk)
	if err != nil {
		return nil, nil, xerrors.Errorf("checking unrecovered sectors: %w", err)
	}
@ -320,7 +327,7 @@ func (s *WindowPoStScheduler) checkNextRecoveries(ctx context.Context, dlIdx uin
	return recoveries, sm, nil
}

func (s *WindowPoStScheduler) checkNextFaults(ctx context.Context, dlIdx uint64, partitions []api.Partition) ([]miner.FaultDeclaration, *types.SignedMessage, error) {
func (s *WindowPoStScheduler) checkNextFaults(ctx context.Context, dlIdx uint64, partitions []api.Partition, tsk types.TipSetKey) ([]miner.FaultDeclaration, *types.SignedMessage, error) {
	ctx, span := trace.StartSpan(ctx, "storage.checkNextFaults")
	defer span.End()

@ -335,7 +342,7 @@ func (s *WindowPoStScheduler) checkNextFaults(ctx context.Context, dlIdx uint64,
		return nil, nil, xerrors.Errorf("determining non faulty sectors: %w", err)
	}

	good, err := s.checkSectors(ctx, nonFaulty)
	good, err := s.checkSectors(ctx, nonFaulty, tsk)
	if err != nil {
		return nil, nil, xerrors.Errorf("checking sectors: %w", err)
	}
@ -438,7 +445,7 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty
	}
)

	if recoveries, sigmsg, err = s.checkNextRecoveries(context.TODO(), declDeadline, partitions); err != nil {
	if recoveries, sigmsg, err = s.checkNextRecoveries(context.TODO(), declDeadline, partitions, ts.Key()); err != nil {
		// TODO: This is potentially quite bad, but not even trying to post when this fails is objectively worse
		log.Errorf("checking sector recoveries: %v", err)
	}
@ -457,7 +464,7 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty
		return // FORK: declaring faults after ignition upgrade makes no sense
	}

	if faults, sigmsg, err = s.checkNextFaults(context.TODO(), declDeadline, partitions); err != nil {
	if faults, sigmsg, err = s.checkNextFaults(context.TODO(), declDeadline, partitions, ts.Key()); err != nil {
		// TODO: This is also potentially really bad, but we try to post anyways
		log.Errorf("checking sector faults: %v", err)
	}
@ -527,7 +534,7 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty
		return nil, xerrors.Errorf("adding recoveries to set of sectors to prove: %w", err)
	}

	good, err := s.checkSectors(ctx, toProve)
	good, err := s.checkSectors(ctx, toProve, ts.Key())
	if err != nil {
		return nil, xerrors.Errorf("checking sectors to skip: %w", err)
	}
@ -11,6 +11,7 @@ import (

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-bitfield"
	"github.com/filecoin-project/specs-storage/storage"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
@ -116,7 +117,7 @@ func (m *mockProver) GenerateWindowPoSt(ctx context.Context, aid abi.ActorID, si
type mockFaultTracker struct {
}

func (m mockFaultTracker) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []abi.SectorID) ([]abi.SectorID, error) {
func (m mockFaultTracker) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef) ([]abi.SectorID, error) {
	// Returns "bad" sectors so just return nil meaning all sectors are good
	return nil, nil
}