Merge branch 'master' into inmem-journal
commit 3206f92063
@@ -113,7 +113,7 @@ jobs:
test: &test
description: |
Run tests with gotestsum.
parameters:
parameters: &test-params
executor:
type: executor
default: golang
@@ -161,6 +161,7 @@ jobs:
name: go test
environment:
LOTUS_TEST_WINDOW_POST: << parameters.winpost-test >>
SKIP_CONFORMANCE: "1"
command: |
mkdir -p /tmp/test-reports/<< parameters.test-suite-name >>
mkdir -p /tmp/test-artifacts
@@ -191,6 +192,63 @@ jobs:
<<: *test
test-window-post:
<<: *test
test-conformance:
description: |
Run tests using a corpus of interoperable test vectors for Filecoin
implementations to test their correctness and compliance with the Filecoin
specifications.
parameters:
<<: *test-params
vectors-branch:
type: string
default: ""
description: |
Branch on github.com/filecoin-project/test-vectors to checkout and
test with. If empty (the default) the commit defined by the git
submodule is used.
executor: << parameters.executor >>
steps:
- install-deps
- prepare
- run:
command: make deps lotus
no_output_timeout: 30m
- download-params
- when:
condition:
not:
equal: [ "", << parameters.vectors-branch >> ]
steps:
- run:
name: checkout vectors branch
command: |
cd extern/test-vectors
git fetch
git checkout origin/<< parameters.vectors-branch >>
- run:
name: go get vectors branch
command: go get github.com/filecoin-project/test-vectors@<< parameters.vectors-branch >>
- go/install-gotestsum:
gobin: $HOME/.local/bin
version: 0.5.2
- run:
name: go test
environment:
SKIP_CONFORMANCE: "0"
command: |
mkdir -p /tmp/test-reports
mkdir -p /tmp/test-artifacts
gotestsum \
--format pkgname-and-test-fails \
--junitfile /tmp/test-reports/junit.xml \
-- \
-v -coverpkg ./chain/vm/,github.com/filecoin-project/specs-actors/... -coverprofile=/tmp/conformance.out ./conformance/
go tool cover -html=/tmp/conformance.out -o /tmp/test-artifacts/conformance-coverage.html
no_output_timeout: 30m
- store_test_results:
path: /tmp/test-reports
- store_artifacts:
path: /tmp/test-artifacts/conformance-coverage.html

build-macos:
description: build darwin lotus binary
@@ -356,6 +414,14 @@ workflows:
tags:
only:
- /^v\d+\.\d+\.\d+$/
- test-conformance:
test-suite-name: conformance
packages: "./conformance"
- test-conformance:
name: test-conformance-bleeding-edge
test-suite-name: conformance-bleeding-edge
packages: "./conformance"
vectors-branch: master
- build-debug
- build-all:
requires:
.github/labels.yml (10 changes, vendored)
@@ -26,11 +26,14 @@
color: 00A4E2
description: "Area: Chain/VM"
- name: area/chain/sync
color: 00A4E2
color: 00A4E4
description: "Area: Chain/Sync"
- name: area/chain/misc
color: 00A4E2
color: 00A4E6
description: "Area: Chain/Misc"
- name: area/markets
color: 00A4E8
description: "Area: Markets"
- name: area/sealing/fsm
color: 0bb1ed
description: "Area: Sealing/FSM"
@@ -149,6 +152,9 @@
- name: impact/test-flakiness
color: DDE1E4
description: "Impact: Test Flakiness"
- name: impact/consensus
color: b20014
description: "Impact: Consensus"

###
### Topics
.gitmodules (3 changes, vendored)
@@ -5,3 +5,6 @@
[submodule "extern/serialization-vectors"]
path = extern/serialization-vectors
url = https://github.com/filecoin-project/serialization-vectors
[submodule "extern/test-vectors"]
path = extern/test-vectors
url = https://github.com/filecoin-project/test-vectors.git
CHANGELOG.md (55 changes)
@@ -1,5 +1,60 @@
# Lotus changelog

# 0.5.7 / 2020-08-31

This patch release includes some bugfixes and enhancements to the sector lifecycle and message pool logic.

## Changes

- Rebuild unsealed infos on miner restart (https://github.com/filecoin-project/lotus/pull/3401)
- CLI to attach storage paths to workers (https://github.com/filecoin-project/lotus/pull/3405)
- Do not select negative performing message chains for inclusion (https://github.com/filecoin-project/lotus/pull/3392)
- Remove a redundant error-check (https://github.com/filecoin-project/lotus/pull/3421)
- Correctly move unsealed sectors in `FinalizeSectors` (https://github.com/filecoin-project/lotus/pull/3424)
- Improve worker selection logic (https://github.com/filecoin-project/lotus/pull/3425)
- Don't use context to close bitswap (https://github.com/filecoin-project/lotus/pull/3430)
- Correctly estimate gas premium when there is only one message on chain (https://github.com/filecoin-project/lotus/pull/3428)

# 0.5.6 / 2020-08-29

Hotfix release that fixes a panic in the sealing scheduler (https://github.com/filecoin-project/lotus/pull/3389).

# 0.5.5

This patch release introduces a large number of improvements to the sealing process.
It also updates go-fil-markets to
[version 0.5.8](https://github.com/filecoin-project/go-fil-markets/releases/tag/v0.5.8),
and go-libp2p-pubsub to [v0.3.5](https://github.com/libp2p/go-libp2p-pubsub/releases/tag/v0.3.5).

#### Downstream upgrades

- Upgrades markets to v0.5.8 (https://github.com/filecoin-project/lotus/pull/3384)
- Upgrades go-libp2p-pubsub to v0.3.5 (https://github.com/filecoin-project/lotus/pull/3305)

#### Sector sealing

- The following improvements were introduced in https://github.com/filecoin-project/lotus/pull/3350.

  - Allow `lotus-miner sectors remove` to remove a sector in any state.
  - Create a separate state in the storage FSM dedicated to submitting the Commit message.
  - Recovery for when the Deal IDs of deals in a sector get changed in a reorg.
  - Auto-retry sending Precommit and Commit messages if they run out of gas
  - Auto-retry sector remove tasks when they fail
  - Compact worker windows, and allow their tasks to be executed in any order

- Don't simply skip PoSt for bad sectors (https://github.com/filecoin-project/lotus/pull/3323)

#### Message Pool

- Spam Protection: Track required funds for pending messages (https://github.com/filecoin-project/lotus/pull/3313)

#### Chainwatch

- Add more power and reward metrics (https://github.com/filecoin-project/lotus/pull/3367)
- Fix raciness in sector deal table (https://github.com/filecoin-project/lotus/pull/3275)
- Parallelize miner processing (https://github.com/filecoin-project/lotus/pull/3380)
- Accept Lotus API and token (https://github.com/filecoin-project/lotus/pull/3337)

# 0.5.4

A patch release, containing a few nice bugfixes and improvements:
@@ -40,9 +40,6 @@ All work is tracked via issues. An attempt at keeping an up-to-date view on rema
The lotus Filecoin implementation unfolds into the following packages:

- [This repo](https://github.com/filecoin-project/lotus)
- [storage-fsm](https://github.com/filecoin-project/storage-fsm)
- [sector-storage](https://github.com/filecoin-project/sector-storage)
- [specs-storage](https://github.com/filecoin-project/specs-storage)
- [go-fil-markets](https://github.com/filecoin-project/go-fil-markets) which has its own [kanban work tracker available here](https://app.zenhub.com/workspaces/markets-shared-components-5daa144a7046a60001c6e253/board)
- [spec-actors](https://github.com/filecoin-project/specs-actors) which has its own [kanban work tracker available here](https://app.zenhub.com/workspaces/actors-5ee6f3aa87591f0016c05685/board)

@@ -109,7 +109,10 @@ type FullNode interface {
ChainGetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*HeadChange, error)

// ChainExport returns a stream of bytes with CAR dump of chain data.
ChainExport(context.Context, types.TipSetKey) (<-chan []byte, error)
// The exported chain data includes the header chain from the given tipset
// back to genesis, the entire genesis state, and the most recent 'nroots'
// state trees.
ChainExport(ctx context.Context, nroots abi.ChainEpoch, tsk types.TipSetKey) (<-chan []byte, error)

// MethodGroup: Beacon
// The Beacon method group contains methods for interacting with the random beacon (DRAND)
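For illustration, a minimal sketch of how a client might drive the new two-argument ChainExport. Only the call shape comes from this commit; the file handling and the nroots value of 900 are hypothetical:

    package example

    import (
        "context"
        "os"

        "github.com/filecoin-project/lotus/api"
        "github.com/filecoin-project/specs-actors/actors/abi"
    )

    // exportChain streams a CAR dump of the chain to disk. nroots controls how
    // many recent state trees are included along with the header chain and the
    // genesis state.
    func exportChain(ctx context.Context, node api.FullNode, path string) error {
        f, err := os.Create(path)
        if err != nil {
            return err
        }
        defer f.Close()

        head, err := node.ChainHead(ctx)
        if err != nil {
            return err
        }

        // 900 recent state roots is an arbitrary illustrative value.
        stream, err := node.ChainExport(ctx, abi.ChainEpoch(900), head.Key())
        if err != nil {
            return err
        }
        for chunk := range stream {
            if _, err := f.Write(chunk); err != nil {
                return err
            }
        }
        return nil
    }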
@@ -243,6 +246,8 @@ type FullNode interface {
ClientGetDealInfo(context.Context, cid.Cid) (*DealInfo, error)
// ClientListDeals returns information about the deals made by the local client.
ClientListDeals(ctx context.Context) ([]DealInfo, error)
// ClientGetDealUpdates returns the status of updated deals
ClientGetDealUpdates(ctx context.Context) (<-chan DealInfo, error)
// ClientHasLocal indicates whether a certain CID is locally stored.
ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error)
// ClientFindData identifies peers that have a certain file, and returns QueryOffers (one per peer).
@@ -323,7 +328,7 @@ type FullNode interface {
StateMinerAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error)
// StateSectorPreCommitInfo returns the PreCommit info for the specified miner's sector
StateSectorPreCommitInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error)
// StateSectorGetInfo returns the on-chain info for the specified miner's sector
// StateSectorGetInfo returns the on-chain info for the specified miner's sector. Returns null in case the sector info isn't found
// NOTE: returned info.Expiration may not be accurate in some cases, use StateSectorExpiration to get accurate
// expiration epoch
StateSectorGetInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error)
@@ -427,7 +432,7 @@ type FullNode interface {
PaychVoucherCreate(context.Context, address.Address, types.BigInt, uint64) (*paych.SignedVoucher, error)
PaychVoucherAdd(context.Context, address.Address, *paych.SignedVoucher, []byte, types.BigInt) (types.BigInt, error)
PaychVoucherList(context.Context, address.Address) ([]*paych.SignedVoucher, error)
PaychVoucherSubmit(context.Context, address.Address, *paych.SignedVoucher) (cid.Cid, error)
PaychVoucherSubmit(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (cid.Cid, error)
}

type FileRef struct {
@@ -120,16 +120,18 @@ type SectorLog struct {
}

type SectorInfo struct {
SectorID abi.SectorNumber
State SectorState
CommD *cid.Cid
CommR *cid.Cid
Proof []byte
Deals []abi.DealID
Ticket SealTicket
Seed SealSeed
Retries uint64
ToUpgrade bool
SectorID abi.SectorNumber
State SectorState
CommD *cid.Cid
CommR *cid.Cid
Proof []byte
Deals []abi.DealID
Ticket SealTicket
Seed SealSeed
PreCommitMsg *cid.Cid
CommitMsg *cid.Cid
Retries uint64
ToUpgrade bool

LastErr string
@@ -27,11 +27,13 @@ type WorkerAPI interface {

storage.Sealer

MoveStorage(ctx context.Context, sector abi.SectorID) error
MoveStorage(ctx context.Context, sector abi.SectorID, types stores.SectorFileType) error

UnsealPiece(context.Context, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error
ReadPiece(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) (bool, error)

StorageAddLocal(ctx context.Context, path string) error

Fetch(context.Context, abi.SectorID, stores.SectorFileType, stores.PathType, stores.AcquireMode) error

Closing(context.Context) (<-chan struct{}, error)
@@ -86,7 +86,7 @@ type FullNodeStruct struct {
ChainGetNode func(ctx context.Context, p string) (*api.IpldObject, error) `perm:"read"`
ChainGetMessage func(context.Context, cid.Cid) (*types.Message, error) `perm:"read"`
ChainGetPath func(context.Context, types.TipSetKey, types.TipSetKey) ([]*api.HeadChange, error) `perm:"read"`
ChainExport func(context.Context, types.TipSetKey) (<-chan []byte, error) `perm:"read"`
ChainExport func(context.Context, abi.ChainEpoch, types.TipSetKey) (<-chan []byte, error) `perm:"read"`

BeaconGetEntry func(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) `perm:"read"`

@@ -139,6 +139,7 @@ type FullNodeStruct struct {
ClientStartDeal func(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) `perm:"admin"`
ClientGetDealInfo func(context.Context, cid.Cid) (*api.DealInfo, error) `perm:"read"`
ClientListDeals func(ctx context.Context) ([]api.DealInfo, error) `perm:"write"`
ClientGetDealUpdates func(ctx context.Context) (<-chan api.DealInfo, error) `perm:"read"`
ClientRetrieve func(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) error `perm:"admin"`
ClientRetrieveWithEvents func(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) `perm:"admin"`
ClientQueryAsk func(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.SignedStorageAsk, error) `perm:"read"`
@@ -214,7 +215,7 @@ type FullNodeStruct struct {
PaychVoucherAdd func(context.Context, address.Address, *paych.SignedVoucher, []byte, types.BigInt) (types.BigInt, error) `perm:"write"`
PaychVoucherCreate func(context.Context, address.Address, big.Int, uint64) (*paych.SignedVoucher, error) `perm:"sign"`
PaychVoucherList func(context.Context, address.Address) ([]*paych.SignedVoucher, error) `perm:"write"`
PaychVoucherSubmit func(context.Context, address.Address, *paych.SignedVoucher) (cid.Cid, error) `perm:"sign"`
PaychVoucherSubmit func(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (cid.Cid, error) `perm:"sign"`
}
}

@@ -316,7 +317,8 @@ type WorkerStruct struct {
FinalizeSector func(context.Context, abi.SectorID, []storage.Range) error `perm:"admin"`
ReleaseUnsealed func(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error `perm:"admin"`
Remove func(ctx context.Context, sector abi.SectorID) error `perm:"admin"`
MoveStorage func(ctx context.Context, sector abi.SectorID) error `perm:"admin"`
MoveStorage func(ctx context.Context, sector abi.SectorID, types stores.SectorFileType) error `perm:"admin"`
StorageAddLocal func(ctx context.Context, path string) error `perm:"admin"`

UnsealPiece func(context.Context, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error `perm:"admin"`
ReadPiece func(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) (bool, error) `perm:"admin"`
@@ -433,6 +435,10 @@ func (c *FullNodeStruct) ClientListDeals(ctx context.Context) ([]api.DealInfo, e
return c.Internal.ClientListDeals(ctx)
}

func (c *FullNodeStruct) ClientGetDealUpdates(ctx context.Context) (<-chan api.DealInfo, error) {
return c.Internal.ClientGetDealUpdates(ctx)
}

func (c *FullNodeStruct) ClientRetrieve(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) error {
return c.Internal.ClientRetrieve(ctx, order, ref)
}
@@ -648,8 +654,8 @@ func (c *FullNodeStruct) ChainGetPath(ctx context.Context, from types.TipSetKey,
return c.Internal.ChainGetPath(ctx, from, to)
}

func (c *FullNodeStruct) ChainExport(ctx context.Context, tsk types.TipSetKey) (<-chan []byte, error) {
return c.Internal.ChainExport(ctx, tsk)
func (c *FullNodeStruct) ChainExport(ctx context.Context, nroots abi.ChainEpoch, tsk types.TipSetKey) (<-chan []byte, error) {
return c.Internal.ChainExport(ctx, nroots, tsk)
}

func (c *FullNodeStruct) BeaconGetEntry(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) {
@@ -920,8 +926,8 @@ func (c *FullNodeStruct) PaychNewPayment(ctx context.Context, from, to address.A
return c.Internal.PaychNewPayment(ctx, from, to, vouchers)
}

func (c *FullNodeStruct) PaychVoucherSubmit(ctx context.Context, ch address.Address, sv *paych.SignedVoucher) (cid.Cid, error) {
return c.Internal.PaychVoucherSubmit(ctx, ch, sv)
func (c *FullNodeStruct) PaychVoucherSubmit(ctx context.Context, ch address.Address, sv *paych.SignedVoucher, secret []byte, proof []byte) (cid.Cid, error) {
return c.Internal.PaychVoucherSubmit(ctx, ch, sv, secret, proof)
}

// StorageMinerStruct
@@ -1214,8 +1220,12 @@ func (w *WorkerStruct) Remove(ctx context.Context, sector abi.SectorID) error {
return w.Internal.Remove(ctx, sector)
}

func (w *WorkerStruct) MoveStorage(ctx context.Context, sector abi.SectorID) error {
return w.Internal.MoveStorage(ctx, sector)
func (w *WorkerStruct) MoveStorage(ctx context.Context, sector abi.SectorID, types stores.SectorFileType) error {
return w.Internal.MoveStorage(ctx, sector, types)
}

func (w *WorkerStruct) StorageAddLocal(ctx context.Context, path string) error {
return w.Internal.StorageAddLocal(ctx, path)
}

func (w *WorkerStruct) UnsealPiece(ctx context.Context, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, c cid.Cid) error {
@@ -41,7 +41,7 @@ func (bm *BlockMiner) MineBlocks() {
nulls := atomic.SwapInt64(&bm.nulls, 0)
if err := bm.miner.MineOne(bm.ctx, miner.MineReq{
InjectNulls: abi.ChainEpoch(nulls),
Done: func(bool, error) {},
Done: func(bool, abi.ChainEpoch, error) {},
}); err != nil {
bm.t.Error(err)
}

@@ -24,6 +24,7 @@ import (
"github.com/filecoin-project/lotus/build"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
"github.com/filecoin-project/lotus/miner"
"github.com/filecoin-project/specs-actors/actors/abi"
dag "github.com/ipfs/go-merkledag"
dstest "github.com/ipfs/go-merkledag/test"
unixfile "github.com/ipfs/go-unixfs/file"
@@ -35,7 +36,7 @@ import (

var MineNext = miner.MineReq{
InjectNulls: 0,
Done: func(bool, error) {},
Done: func(bool, abi.ChainEpoch, error) {},
}

func init() {
@@ -31,22 +31,20 @@ func (ts *testSuite) testMining(t *testing.T) {
newHeads, err := api.ChainNotify(ctx)
require.NoError(t, err)
initHead := (<-newHeads)[0]
if initHead.Val.Height() != 2 {
<-newHeads
}
baseHeight := initHead.Val.Height()

h1, err := api.ChainHead(ctx)
require.NoError(t, err)
require.Equal(t, abi.ChainEpoch(2), h1.Height())
require.Equal(t, int64(h1.Height()), int64(baseHeight))

MineUntilBlock(ctx, t, sn[0], nil)
MineUntilBlock(ctx, t, apis[0], sn[0], nil)
require.NoError(t, err)

<-newHeads

h2, err := api.ChainHead(ctx)
require.NoError(t, err)
require.Equal(t, abi.ChainEpoch(3), h2.Height())
require.Greater(t, int64(h2.Height()), int64(h1.Height()))
}

func (ts *testSuite) testMiningReal(t *testing.T) {
@@ -70,7 +68,7 @@ func (ts *testSuite) testMiningReal(t *testing.T) {
require.NoError(t, err)
require.Equal(t, abi.ChainEpoch(2), h1.Height())

MineUntilBlock(ctx, t, sn[0], nil)
MineUntilBlock(ctx, t, apis[0], sn[0], nil)
require.NoError(t, err)

<-newHeads
@@ -79,7 +77,7 @@ func (ts *testSuite) testMiningReal(t *testing.T) {
require.NoError(t, err)
require.Equal(t, abi.ChainEpoch(3), h2.Height())

MineUntilBlock(ctx, t, sn[0], nil)
MineUntilBlock(ctx, t, apis[0], sn[0], nil)
require.NoError(t, err)

<-newHeads
@@ -144,7 +142,7 @@ func TestDealMining(t *testing.T, b APIBuilder, blocktime time.Duration, carExpo
complChan := minedTwo
for atomic.LoadInt32(&mine) != 0 {
wait := make(chan int)
mdone := func(mined bool, err error) {
mdone := func(mined bool, _ abi.ChainEpoch, err error) {
n := 0
if mined {
n = 1

@@ -3,6 +3,7 @@ package test
import (
"context"
"testing"
"time"

"github.com/filecoin-project/specs-actors/actors/abi"
@@ -36,25 +37,46 @@ func SendFunds(ctx context.Context, t *testing.T, sender TestNode, addr address.
}
}

func MineUntilBlock(ctx context.Context, t *testing.T, sn TestStorageNode, cb func()) {
func MineUntilBlock(ctx context.Context, t *testing.T, fn TestNode, sn TestStorageNode, cb func(abi.ChainEpoch)) {
for i := 0; i < 1000; i++ {
var success bool
var err error
var epoch abi.ChainEpoch
wait := make(chan struct{})
sn.MineOne(ctx, miner.MineReq{
Done: func(win bool, e error) {
mineErr := sn.MineOne(ctx, miner.MineReq{
Done: func(win bool, ep abi.ChainEpoch, e error) {
success = win
err = e
epoch = ep
wait <- struct{}{}
},
})
if mineErr != nil {
t.Fatal(mineErr)
}
<-wait
if err != nil {
t.Fatal(err)
}
if success {
// Wait until it shows up on the given full nodes ChainHead
nloops := 50
for i := 0; i < nloops; i++ {
ts, err := fn.ChainHead(ctx)
if err != nil {
t.Fatal(err)
}
if ts.Height() == epoch {
break
}
if i == nloops-1 {
t.Fatal("block never managed to sync to node")
}
time.Sleep(time.Millisecond * 10)
}

if cb != nil {
cb()
cb(epoch)
}
return
}
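A hedged usage sketch of the widened helper; the surrounding test harness (TestNode, TestStorageNode, MineUntilBlock) is assumed from this package, and the wrapper itself is hypothetical:

    // mineOneAndReport mines a single block and records the epoch it landed at.
    // The callback now receives the epoch, and the helper waits until the block
    // is visible on the given full node's chain head before returning.
    func mineOneAndReport(ctx context.Context, t *testing.T, fn TestNode, sn TestStorageNode) abi.ChainEpoch {
        var minedAt abi.ChainEpoch
        MineUntilBlock(ctx, t, fn, sn, func(epoch abi.ChainEpoch) {
            minedAt = epoch
        })
        t.Logf("block mined and synced at epoch %d", minedAt)
        return minedAt
    }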
@@ -53,7 +53,7 @@ func TestPledgeSector(t *testing.T, b APIBuilder, blocktime time.Duration, nSect
defer close(done)
for mine {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, error) {
if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {

}}); err != nil {
t.Error(err)
@@ -216,6 +216,10 @@ func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSector
sn, err := parts[0].Sectors.First()
require.NoError(t, err)

all, err := parts[0].Sectors.All(2)
require.NoError(t, err)
fmt.Println("the sectors", all)

s = abi.SectorID{
Miner: abi.ActorID(mid),
Number: abi.SectorNumber(sn),

@@ -25,7 +25,7 @@ func buildType() string {
}

// BuildVersion is the local build version, set by build system
const BuildVersion = "0.5.4"
const BuildVersion = "0.5.7"

func UserVersion() string {
return BuildVersion + buildType() + CurrentCommit
@@ -53,7 +53,7 @@ func (ve Version) EqMajorMinor(v2 Version) bool {
}

// APIVersion is a semver version of the rpc api exposed
var APIVersion Version = newVer(0, 12, 0)
var APIVersion Version = newVer(0, 14, 0)

//nolint:varcheck,deadcode
const (
@@ -37,6 +37,10 @@ func ValidateBlockValues(b RandomBeacon, h *types.BlockHeader, prevEntry types.B
return nil
}

if len(h.BeaconEntries) == 0 {
return xerrors.Errorf("expected to have beacon entries in this block, but didn't find any")
}

last := h.BeaconEntries[len(h.BeaconEntries)-1]
if last.Round != maxRound {
return xerrors.Errorf("expected final beacon entry in block to be at round %d, got %d", maxRound, last.Round)
@@ -6,6 +6,7 @@ import (
"errors"
"fmt"
"math"
stdbig "math/big"
"sort"
"sync"
"time"
@@ -48,6 +49,10 @@ const RbfDenom = 256

var RepublishInterval = pubsub.TimeCacheDuration + time.Duration(5*build.BlockDelaySecs+build.PropagationDelaySecs)*time.Second

var minimumBaseFee = types.NewInt(uint64(build.MinimumBaseFee))

var MaxActorPendingMessages = 1000

var (
ErrMessageTooBig = errors.New("message too big")

@@ -55,12 +60,15 @@ var (

ErrNonceTooLow = errors.New("message nonce too low")

ErrGasFeeCapTooLow = errors.New("gas fee cap too low")

ErrNotEnoughFunds = errors.New("not enough funds to execute transaction")

ErrInvalidToAddr = errors.New("message had invalid to address")

ErrBroadcastAnyway = errors.New("broadcasting message despite validation fail")
ErrRBFTooLowPremium = errors.New("replace by fee has too low GasPremium")
ErrSoftValidationFailure = errors.New("validation failure")
ErrRBFTooLowPremium = errors.New("replace by fee has too low GasPremium")
ErrTooManyPendingMessages = errors.New("too many pending messages for actor")

ErrTryAgain = errors.New("state inconsistency while pushing message; please try again")
)
@@ -142,17 +150,19 @@ type MessagePool struct {
}

type msgSet struct {
msgs map[uint64]*types.SignedMessage
nextNonce uint64
msgs map[uint64]*types.SignedMessage
nextNonce uint64
requiredFunds *stdbig.Int
}

func newMsgSet() *msgSet {
return &msgSet{
msgs: make(map[uint64]*types.SignedMessage),
msgs: make(map[uint64]*types.SignedMessage),
requiredFunds: stdbig.NewInt(0),
}
}

func (ms *msgSet) add(m *types.SignedMessage, mp *MessagePool) (bool, error) {
func (ms *msgSet) add(m *types.SignedMessage, mp *MessagePool, limit bool) (bool, error) {
if len(ms.msgs) == 0 || m.Message.Nonce >= ms.nextNonce {
ms.nextNonce = m.Message.Nonce + 1
}
@@ -174,21 +184,51 @@ func (ms *msgSet) add(m *types.SignedMessage, mp *MessagePool) (bool, error) {
ErrRBFTooLowPremium)
}
}

ms.requiredFunds.Sub(ms.requiredFunds, exms.Message.RequiredFunds().Int)
//ms.requiredFunds.Sub(ms.requiredFunds, exms.Message.Value.Int)
}

if !has && limit && len(ms.msgs) > MaxActorPendingMessages {
log.Errorf("too many pending messages from actor %s", m.Message.From)
return false, ErrTooManyPendingMessages
}

ms.msgs[m.Message.Nonce] = m
ms.requiredFunds.Add(ms.requiredFunds, m.Message.RequiredFunds().Int)
//ms.requiredFunds.Add(ms.requiredFunds, m.Message.Value.Int)

return !has, nil
}

func (ms *msgSet) rm(nonce uint64) {
m, has := ms.msgs[nonce]
if has {
ms.requiredFunds.Sub(ms.requiredFunds, m.Message.RequiredFunds().Int)
//ms.requiredFunds.Sub(ms.requiredFunds, m.Message.Value.Int)
delete(ms.msgs, nonce)
}
}

func (ms *msgSet) getRequiredFunds(nonce uint64) types.BigInt {
requiredFunds := new(stdbig.Int).Set(ms.requiredFunds)

m, has := ms.msgs[nonce]
if has {
requiredFunds.Sub(requiredFunds, m.Message.RequiredFunds().Int)
//requiredFunds.Sub(requiredFunds, m.Message.Value.Int)
}

return types.BigInt{Int: requiredFunds}
}
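A hedged sketch of the invariant the new requiredFunds field maintains: it should always equal the sum of RequiredFunds() over the pending messages (value tracking is commented out in this commit, so only gas costs are counted). The checker below is illustrative, not part of the commit:

    // checkRequiredFundsInvariant recomputes the running total from scratch and
    // compares it against the incrementally maintained sum in ms.requiredFunds.
    func checkRequiredFundsInvariant(ms *msgSet) bool {
        sum := stdbig.NewInt(0)
        for _, m := range ms.msgs {
            sum.Add(sum, m.Message.RequiredFunds().Int)
        }
        return sum.Cmp(ms.requiredFunds) == 0
    }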
func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName) (*MessagePool, error) {
cache, _ := lru.New2Q(build.BlsSignatureCacheSize)
verifcache, _ := lru.New2Q(build.VerifSigCacheSize)

cfg, err := loadConfig(ds)
if err != nil {
if err != nil {
return nil, xerrors.Errorf("error loading mpool config: %w", err)
}
return nil, xerrors.Errorf("error loading mpool config: %w", err)
}

mp := &MessagePool{
@@ -288,7 +328,7 @@ func (mp *MessagePool) addLocal(m *types.SignedMessage, msgb []byte) error {
return nil
}

func (mp *MessagePool) verifyMsgBeforePush(m *types.SignedMessage, epoch abi.ChainEpoch) error {
func (mp *MessagePool) verifyMsgBeforeAdd(m *types.SignedMessage, epoch abi.ChainEpoch) error {
minGas := vm.PricelistByEpoch(epoch).OnChainMessage(m.ChainLength())

if err := m.VMMessage().ValidForBlockInclusion(minGas.Total()); err != nil {
@@ -309,25 +349,12 @@ func (mp *MessagePool) Push(m *types.SignedMessage) (cid.Cid, error) {
<-mp.addSema
}()

mp.curTsLk.Lock()
curTs := mp.curTs
epoch := curTs.Height()
mp.curTsLk.Unlock()
if err := mp.verifyMsgBeforePush(m, epoch); err != nil {
return cid.Undef, err
}

msgb, err := m.Serialize()
if err != nil {
return cid.Undef, err
}

mp.curTsLk.Lock()
if mp.curTs != curTs {
mp.curTsLk.Unlock()
return cid.Undef, ErrTryAgain
}

if err := mp.addTs(m, mp.curTs); err != nil {
mp.curTsLk.Unlock()
return cid.Undef, err
@@ -350,7 +377,7 @@ func (mp *MessagePool) checkMessage(m *types.SignedMessage) error {
return xerrors.Errorf("mpool message too large (%dB): %w", m.Size(), ErrMessageTooBig)
}

// Perform syntaxtic validation, minGas=0 as we check if correctly in select messages
// Perform syntactic validation, minGas=0 as we check the actual mingas before we add it
if err := m.Message.ValidForBlockInclusion(0); err != nil {
return xerrors.Errorf("message not valid for block inclusion: %w", err)
}
@@ -363,8 +390,12 @@ func (mp *MessagePool) checkMessage(m *types.SignedMessage) error {
return ErrMessageValueTooHigh
}

if m.Message.GasFeeCap.LessThan(minimumBaseFee) {
return ErrGasFeeCapTooLow
}

if err := mp.VerifyMsgSig(m); err != nil {
log.Warnf("mpooladd signature verification failed: %s", err)
log.Warnf("signature verification failed: %s", err)
return err
}
@@ -424,48 +455,71 @@ func (mp *MessagePool) VerifyMsgSig(m *types.SignedMessage) error {
return nil
}

func (mp *MessagePool) checkBalance(m *types.SignedMessage, curTs *types.TipSet) error {
balance, err := mp.getStateBalance(m.Message.From, curTs)
if err != nil {
return xerrors.Errorf("failed to check sender balance: %s: %w", err, ErrSoftValidationFailure)
}

requiredFunds := m.Message.RequiredFunds()
if balance.LessThan(requiredFunds) {
return xerrors.Errorf("not enough funds (required: %s, balance: %s): %w", types.FIL(requiredFunds), types.FIL(balance), ErrNotEnoughFunds)
}

// add Value for soft failure check
//requiredFunds = types.BigAdd(requiredFunds, m.Message.Value)

mset, ok := mp.pending[m.Message.From]
if ok {
requiredFunds = types.BigAdd(requiredFunds, mset.getRequiredFunds(m.Message.Nonce))
}

if balance.LessThan(requiredFunds) {
// Note: we fail here for ErrSoftValidationFailure to signal a soft failure because we might
// be out of sync.
return xerrors.Errorf("not enough funds including pending messages (required: %s, balance: %s): %w", types.FIL(requiredFunds), types.FIL(balance), ErrSoftValidationFailure)
}

return nil
}

func (mp *MessagePool) addTs(m *types.SignedMessage, curTs *types.TipSet) error {
snonce, err := mp.getStateNonce(m.Message.From, curTs)
if err != nil {
return xerrors.Errorf("failed to look up actor state nonce: %s: %w", err, ErrBroadcastAnyway)
return xerrors.Errorf("failed to look up actor state nonce: %s: %w", err, ErrSoftValidationFailure)
}

if snonce > m.Message.Nonce {
return xerrors.Errorf("minimum expected nonce is %d: %w", snonce, ErrNonceTooLow)
}

balance, err := mp.getStateBalance(m.Message.From, curTs)
if err != nil {
return xerrors.Errorf("failed to check sender balance: %s: %w", err, ErrBroadcastAnyway)
}

if balance.LessThan(m.Message.RequiredFunds()) {
return xerrors.Errorf("not enough funds (required: %s, balance: %s): %w", types.FIL(m.Message.RequiredFunds()), types.FIL(balance), ErrNotEnoughFunds)
}

mp.lk.Lock()
defer mp.lk.Unlock()

return mp.addLocked(m)
if err := mp.verifyMsgBeforeAdd(m, curTs.Height()); err != nil {
return err
}

if err := mp.checkBalance(m, curTs); err != nil {
return err
}

return mp.addLocked(m, true)
}

func (mp *MessagePool) addSkipChecks(m *types.SignedMessage) error {
mp.lk.Lock()
defer mp.lk.Unlock()

return mp.addLocked(m)
return mp.addLocked(m, false)
}

func (mp *MessagePool) addLocked(m *types.SignedMessage) error {
func (mp *MessagePool) addLocked(m *types.SignedMessage, limit bool) error {
log.Debugf("mpooladd: %s %d", m.Message.From, m.Message.Nonce)
if m.Signature.Type == crypto.SigTypeBLS {
mp.blsSigCache.Add(m.Cid(), m.Signature)
}

if m.Message.GasLimit > build.BlockGasLimit {
return xerrors.Errorf("given message has too high of a gas limit")
}

if _, err := mp.api.PutMessage(m); err != nil {
log.Warnf("mpooladd cs.PutMessage failed: %s", err)
return err
@@ -482,7 +536,7 @@ func (mp *MessagePool) addLocked(m *types.SignedMessage) error {
mp.pending[m.Message.From] = mset
}

incr, err := mset.add(m, mp)
incr, err := mset.add(m, mp, limit)
if err != nil {
log.Info(err)
return err
@@ -601,6 +655,16 @@ func (mp *MessagePool) PushWithNonce(ctx context.Context, addr address.Address,
return nil, err
}

err = mp.checkMessage(msg)
if err != nil {
return nil, err
}

msgb, err := msg.Serialize()
if err != nil {
return nil, err
}

// reacquire the locks and check state for consistency
mp.curTsLk.Lock()
defer mp.curTsLk.Unlock()
@@ -621,16 +685,15 @@ func (mp *MessagePool) PushWithNonce(ctx context.Context, addr address.Address,
return nil, ErrTryAgain
}

if err := mp.verifyMsgBeforePush(msg, mp.curTs.Height()); err != nil {
if err := mp.verifyMsgBeforeAdd(msg, curTs.Height()); err != nil {
return nil, err
}

msgb, err := msg.Serialize()
if err != nil {
if err := mp.checkBalance(msg, curTs); err != nil {
return nil, err
}

if err := mp.addLocked(msg); err != nil {
if err := mp.addLocked(msg, true); err != nil {
return nil, xerrors.Errorf("add locked failed: %w", err)
}
if err := mp.addLocal(msg, msgb); err != nil {
@@ -670,7 +733,7 @@ func (mp *MessagePool) remove(from address.Address, nonce uint64) {

// NB: This deletes any message with the given nonce. This makes sense
// as two messages with the same sender cannot have the same nonce
delete(mset.msgs, nonce)
mset.rm(nonce)

if len(mset.msgs) == 0 {
delete(mp.pending, from)
@@ -373,6 +373,7 @@ func TestPruningSimple(t *testing.T) {
if err != nil {
t.Fatal(err)
}
tma.setBalance(sender, 1) // in FIL
target := mock.Address(1001)

for i := 0; i < 5; i++ {
@@ -430,6 +431,8 @@ func TestLoadLocal(t *testing.T) {
t.Fatal(err)
}

tma.setBalance(a1, 1) // in FIL
tma.setBalance(a2, 1) // in FIL
gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}]
msgs := make(map[cid.Cid]struct{})
for i := 0; i < 10; i++ {
@@ -500,6 +503,8 @@ func TestClearAll(t *testing.T) {
t.Fatal(err)
}

tma.setBalance(a1, 1) // in FIL
tma.setBalance(a2, 1) // in FIL
gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}]
for i := 0; i < 10; i++ {
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
@@ -552,6 +557,9 @@ func TestClearNonLocal(t *testing.T) {
t.Fatal(err)
}

tma.setBalance(a1, 1) // in FIL
tma.setBalance(a2, 1) // in FIL

gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}]
for i := 0; i < 10; i++ {
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
@@ -619,6 +627,10 @@ func TestUpdates(t *testing.T) {
}

gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}]

tma.setBalance(a1, 1) // in FIL
tma.setBalance(a2, 1) // in FIL

for i := 0; i < 10; i++ {
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
_, err := mp.Push(m)
@@ -109,7 +109,7 @@ func (mp *MessagePool) republishPendingMessages() error {

// we can't fit the current chain but there is gas to spare
// trim it and push it down
chain.Trim(gasLimit, mp, baseFee, ts, false)
chain.Trim(gasLimit, mp, baseFee, ts)
for j := i; j < len(chains)-1; j++ {
if chains[j].Before(chains[j+1]) {
break
@@ -217,7 +217,7 @@ tailLoop:
for gasLimit >= minGas && last < len(chains) {
// trim if necessary
if chains[last].gasLimit > gasLimit {
chains[last].Trim(gasLimit, mp, baseFee, ts, false)
chains[last].Trim(gasLimit, mp, baseFee, ts)
}

// push down if it hasn't been invalidated
@@ -284,7 +284,7 @@ tailLoop:
}

// dependencies fit, just trim it
chain.Trim(gasLimit-depGasLimit, mp, baseFee, ts, false)
chain.Trim(gasLimit-depGasLimit, mp, baseFee, ts)
last += i
continue tailLoop
}
@@ -389,7 +389,7 @@ func (mp *MessagePool) selectMessagesGreedy(curTs, ts *types.TipSet) ([]*types.S
tailLoop:
for gasLimit >= minGas && last < len(chains) {
// trim
chains[last].Trim(gasLimit, mp, baseFee, ts, false)
chains[last].Trim(gasLimit, mp, baseFee, ts)

// push down if it hasn't been invalidated
if chains[last].valid {
@@ -462,15 +462,27 @@ func (mp *MessagePool) selectPriorityMessages(pending map[address.Address]map[ui
}
}

if len(chains) == 0 {
return nil, gasLimit
}

// 2. Sort the chains
sort.Slice(chains, func(i, j int) bool {
return chains[i].Before(chains[j])
})

// 3. Merge chains until the block limit; we are willing to include negative performing chains
// as these are messages from our own miners
if len(chains) != 0 && chains[0].gasPerf < 0 {
log.Warnw("all priority messages in mpool have negative gas performance", "bestGasPerf", chains[0].gasPerf)
return nil, gasLimit
}

// 3. Merge chains until the block limit, as long as they have non-negative gas performance
last := len(chains)
for i, chain := range chains {
if chain.gasPerf < 0 {
break
}

if chain.gasLimit <= gasLimit {
gasLimit -= chain.gasLimit
result = append(result, chain.msgs...)
@@ -484,8 +496,8 @@ func (mp *MessagePool) selectPriorityMessages(pending map[address.Address]map[ui

tailLoop:
for gasLimit >= minGas && last < len(chains) {
// trim, without discarding negative performing messages
chains[last].Trim(gasLimit, mp, baseFee, ts, true)
// trim, discarding negative performing messages
chains[last].Trim(gasLimit, mp, baseFee, ts)

// push down if it hasn't been invalidated
if chains[last].valid {
@@ -503,6 +515,12 @@ tailLoop:
if !chain.valid {
continue
}

// if gasPerf < 0 we have no more profitable chains
if chain.gasPerf < 0 {
break tailLoop
}

// does it fit in the bock?
if chain.gasLimit <= gasLimit {
gasLimit -= chain.gasLimit
@@ -515,9 +533,9 @@ tailLoop:
continue tailLoop
}

// the merge loop ended after processing all the chains and we probably still have gas to spare
// -- mark the end.
last = len(chains)
// the merge loop ended after processing all the chains and we probably still have gas to spare;
// end the loop
break
}

return result, gasLimit
@@ -755,9 +773,9 @@ func (mc *msgChain) Before(other *msgChain) bool {
(mc.gasPerf == other.gasPerf && mc.gasReward.Cmp(other.gasReward) > 0)
}

func (mc *msgChain) Trim(gasLimit int64, mp *MessagePool, baseFee types.BigInt, ts *types.TipSet, priority bool) {
func (mc *msgChain) Trim(gasLimit int64, mp *MessagePool, baseFee types.BigInt, ts *types.TipSet) {
i := len(mc.msgs) - 1
for i >= 0 && (mc.gasLimit > gasLimit || (!priority && mc.gasPerf < 0)) {
for i >= 0 && (mc.gasLimit > gasLimit || mc.gasPerf < 0) {
gasReward := mp.getGasReward(mc.msgs[i], baseFee, ts)
mc.gasReward = new(big.Int).Sub(mc.gasReward, gasReward)
mc.gasLimit -= mc.msgs[i].Message.GasLimit
@@ -23,6 +23,11 @@ import (
logging "github.com/ipfs/go-log"
)

func init() {
// bump this for the selection tests
MaxActorPendingMessages = 1000000
}

func makeTestMessage(w *wallet.Wallet, from, to address.Address, nonce uint64, gasLimit int64, gasPrice uint64) *types.SignedMessage {
msg := &types.Message{
From: from,
@@ -95,7 +95,7 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types.

state := ts.ParentState()

r := store.NewChainRand(sm.cs, ts.Cids(), ts.Height())
r := store.NewChainRand(sm.cs, ts.Cids())

return sm.CallRaw(ctx, msg, state, r, ts.Height())
}
@@ -113,7 +113,7 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
return nil, xerrors.Errorf("computing tipset state: %w", err)
}

r := store.NewChainRand(sm.cs, ts.Cids(), ts.Height())
r := store.NewChainRand(sm.cs, ts.Cids())

if span.IsRecordingEvents() {
span.AddAttributes(

@@ -338,7 +338,7 @@ func (sm *StateManager) computeTipSetState(ctx context.Context, ts *types.TipSet
cids[i] = v.Cid()
}

r := store.NewChainRand(sm.cs, cids, blks[0].Height)
r := store.NewChainRand(sm.cs, cids)

blkmsgs, err := sm.cs.BlockMsgsForTipset(ts)
if err != nil {

@@ -157,7 +157,7 @@ func MinerSectorInfo(ctx context.Context, sm *StateManager, maddr address.Addres
return nil, err
}
if !ok {
return nil, xerrors.New("sector not found")
return nil, nil
}

return sectorInfo, nil
@@ -432,7 +432,7 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch,
return cid.Undef, nil, err
}

r := store.NewChainRand(sm.cs, ts.Cids(), height)
r := store.NewChainRand(sm.cs, ts.Cids())
vmopt := &vm.VMOpts{
StateBase: base,
Epoch: height,
@@ -1137,7 +1137,7 @@ func (cs *ChainStore) GetTipsetByHeight(ctx context.Context, h abi.ChainEpoch, t
return cs.LoadTipSet(lbts.Parents())
}

func recurseLinks(bs bstore.Blockstore, root cid.Cid, in []cid.Cid) ([]cid.Cid, error) {
func recurseLinks(bs bstore.Blockstore, walked *cid.Set, root cid.Cid, in []cid.Cid) ([]cid.Cid, error) {
if root.Prefix().Codec != cid.DagCBOR {
return in, nil
}
@@ -1154,9 +1154,14 @@ func recurseLinks(bs bstore.Blockstore, root cid.Cid, in []cid.Cid) ([]cid.Cid,
return
}

// traversed this already...
if !walked.Visit(c) {
return
}

in = append(in, c)
var err error
in, err = recurseLinks(bs, c, in)
in, err = recurseLinks(bs, walked, c, in)
if err != nil {
rerr = err
}
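The walked set makes the traversal idempotent: cid.Set.Visit adds the CID and returns false if it was already present, so shared subtrees are recursed into only once. A hedged, generic sketch of the same pattern (assuming the go-cid package; not the lotus code):

    // walkOnce visits every unique CID exactly once, even if the DAG links to
    // the same node from many places. links and visit are caller-supplied.
    func walkOnce(root cid.Cid, links func(cid.Cid) []cid.Cid, visit func(cid.Cid)) {
        walked := cid.NewSet()
        var rec func(c cid.Cid)
        rec = func(c cid.Cid) {
            if !walked.Visit(c) { // false means: traversed this already
                return
            }
            visit(c)
            for _, l := range links(c) {
                rec(l)
            }
        }
        rec(root)
    }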
@@ -1168,12 +1173,13 @@ func recurseLinks(bs bstore.Blockstore, root cid.Cid, in []cid.Cid) ([]cid.Cid,
return in, rerr
}

func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, w io.Writer) error {
func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRoots abi.ChainEpoch, w io.Writer) error {
if ts == nil {
ts = cs.GetHeaviestTipSet()
}

seen := cid.NewSet()
walked := cid.NewSet()

h := &car.CarHeader{
Roots: ts.Cids(),
@@ -1205,7 +1211,7 @@ func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, w io.Writer)
return xerrors.Errorf("unmarshaling block header (cid=%s): %w", blk, err)
}

cids, err := recurseLinks(cs.bs, b.Messages, []cid.Cid{b.Messages})
cids, err := recurseLinks(cs.bs, walked, b.Messages, []cid.Cid{b.Messages})
if err != nil {
return xerrors.Errorf("recursing messages failed: %w", err)
}
@@ -1221,8 +1227,8 @@ func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, w io.Writer)

out := cids

if b.Height == 0 {
cids, err := recurseLinks(cs.bs, b.ParentStateRoot, []cid.Cid{b.ParentStateRoot})
if b.Height == 0 || b.Height > ts.Height()-inclRecentRoots {
cids, err := recurseLinks(cs.bs, walked, b.ParentStateRoot, []cid.Cid{b.ParentStateRoot})
if err != nil {
return xerrors.Errorf("recursing genesis state failed: %w", err)
}
@@ -1305,14 +1311,12 @@ func (cs *ChainStore) GetLatestBeaconEntry(ts *types.TipSet) (*types.BeaconEntry
type chainRand struct {
cs *ChainStore
blks []cid.Cid
bh abi.ChainEpoch
}

func NewChainRand(cs *ChainStore, blks []cid.Cid, bheight abi.ChainEpoch) vm.Rand {
func NewChainRand(cs *ChainStore, blks []cid.Cid) vm.Rand {
return &chainRand{
cs: cs,
blks: blks,
bh: bheight,
}
}

@@ -97,7 +97,7 @@ func TestChainExportImport(t *testing.T) {
}

buf := new(bytes.Buffer)
if err := cg.ChainStore().Export(context.TODO(), last, buf); err != nil {
if err := cg.ChainStore().Export(context.TODO(), last, 0, buf); err != nil {
t.Fatal(err)
}

@@ -41,7 +41,7 @@ func (cs *ChainStore) Weight(ctx context.Context, ts *types.TipSet) (types.BigIn

var st power.State
if err := cst.Get(ctx, act.Head, &st); err != nil {
return types.NewInt(0), xerrors.Errorf("get power actor head: %w", err)
return types.NewInt(0), xerrors.Errorf("get power actor head (%s, height=%d): %w", act.Head, ts.Height(), err)
}
tpow = st.TotalQualityAdjPower // TODO: REVIEW: Is this correct?
}
@@ -3,6 +3,7 @@ package sub
import (
"bytes"
"context"
"errors"
"fmt"
"sync"
"time"
@@ -40,6 +41,9 @@ import (

var log = logging.Logger("sub")

var ErrSoftFailure = errors.New("soft validation failure")
var ErrInsufficientPower = errors.New("incoming block's miner does not have minimum power")

func HandleIncomingBlocks(ctx context.Context, bsub *pubsub.Subscription, s *chain.Syncer, bserv bserv.BlockService, cmgr connmgr.ConnManager) {
for {
msg, err := bsub.Next(ctx)
@@ -258,16 +262,15 @@ func (bv *BlockValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub

stats.Record(ctx, metrics.BlockReceived.M(1))

recordFailure := func(what string) {
ctx, _ = tag.New(ctx, tag.Insert(metrics.FailureType, what))
stats.Record(ctx, metrics.BlockValidationFailure.M(1))
recordFailureFlagPeer := func(what string) {
recordFailure(ctx, metrics.BlockValidationFailure, what)
bv.flagPeer(pid)
}

blk, what, err := bv.decodeAndCheckBlock(msg)
if err != nil {
log.Error("got invalid block over pubsub: ", err)
recordFailure(what)
recordFailureFlagPeer(what)
return pubsub.ValidationReject
}

@@ -275,7 +278,7 @@ func (bv *BlockValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub
err = bv.validateMsgMeta(ctx, blk)
if err != nil {
log.Warnf("error validating message metadata: %s", err)
recordFailure("invalid_block_meta")
recordFailureFlagPeer("invalid_block_meta")
return pubsub.ValidationReject
}

@@ -288,11 +291,12 @@ func (bv *BlockValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub
// if we are synced and the miner is unknown, then the block is rejcected.
key, err := bv.checkPowerAndGetWorkerKey(ctx, blk.Header)
if err != nil {
if bv.isChainNearSynced() {
if err != ErrSoftFailure && bv.isChainNearSynced() {
log.Warnf("received block from unknown miner or miner that doesn't meet min power over pubsub; rejecting message")
recordFailure("unknown_miner")
recordFailureFlagPeer("unknown_miner")
return pubsub.ValidationReject
}

log.Warnf("cannot validate block message; unknown miner or miner that doesn't meet min power in unsynced chain")
return pubsub.ValidationIgnore
}
@@ -300,13 +304,13 @@ func (bv *BlockValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub
err = sigs.CheckBlockSignature(ctx, blk.Header, key)
if err != nil {
log.Errorf("block signature verification failed: %s", err)
recordFailure("signature_verification_failed")
recordFailureFlagPeer("signature_verification_failed")
return pubsub.ValidationReject
}

if blk.Header.ElectionProof.WinCount < 1 {
log.Errorf("block is not claiming to be winning")
recordFailure("not_winning")
recordFailureFlagPeer("not_winning")
return pubsub.ValidationReject
}

@@ -473,19 +477,19 @@ func (bv *BlockValidator) checkPowerAndGetWorkerKey(ctx context.Context, bh *typ
baseTs := bv.chain.GetHeaviestTipSet()
lbts, err := stmgr.GetLookbackTipSetForRound(ctx, bv.stmgr, baseTs, bh.Height)
if err != nil {
log.Warnf("failed to load lookback tipset for incoming block")
return address.Undef, err
log.Warnf("failed to load lookback tipset for incoming block: %s", err)
return address.Undef, ErrSoftFailure
}

hmp, err := stmgr.MinerHasMinPower(ctx, bv.stmgr, bh.Miner, lbts)
if err != nil {
log.Warnf("failed to determine if incoming block's miner has minimum power")
return address.Undef, err
log.Warnf("failed to determine if incoming block's miner has minimum power: %s", err)
return address.Undef, ErrSoftFailure
}

if !hmp {
log.Warnf("incoming block's miner does not have minimum power")
return address.Undef, xerrors.New("incoming block's miner does not have minimum power")
return address.Undef, ErrInsufficientPower
}

return key, nil
@@ -541,14 +545,16 @@ func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubs
log.Debugf("failed to add message from network to message pool (From: %s, To: %s, Nonce: %d, Value: %s): %s", m.Message.From, m.Message.To, m.Message.Nonce, types.FIL(m.Message.Value), err)
ctx, _ = tag.New(
ctx,
tag.Insert(metrics.FailureType, "add"),
tag.Upsert(metrics.Local, "false"),
)
stats.Record(ctx, metrics.MessageValidationFailure.M(1))
recordFailure(ctx, metrics.MessageValidationFailure, "add")
switch {
case xerrors.Is(err, messagepool.ErrBroadcastAnyway):
case xerrors.Is(err, messagepool.ErrSoftValidationFailure):
fallthrough
case xerrors.Is(err, messagepool.ErrRBFTooLowPremium):
fallthrough
case xerrors.Is(err, messagepool.ErrTooManyPendingMessages):
fallthrough
case xerrors.Is(err, messagepool.ErrNonceTooLow):
return pubsub.ValidationIgnore
default:
@@ -560,37 +566,41 @@ func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubs
}

func (mv *MessageValidator) validateLocalMessage(ctx context.Context, msg *pubsub.Message) pubsub.ValidationResult {
ctx, _ = tag.New(
ctx,
tag.Upsert(metrics.Local, "true"),
)
// do some lightweight validation
stats.Record(ctx, metrics.MessagePublished.M(1))

m, err := types.DecodeSignedMessage(msg.Message.GetData())
if err != nil {
log.Warnf("failed to decode local message: %s", err)
stats.Record(ctx, metrics.MessageValidationFailure.M(1))
recordFailure(ctx, metrics.MessageValidationFailure, "decode")
return pubsub.ValidationIgnore
}

if m.Size() > 32*1024 {
log.Warnf("local message is too large! (%dB)", m.Size())
stats.Record(ctx, metrics.MessageValidationFailure.M(1))
recordFailure(ctx, metrics.MessageValidationFailure, "oversize")
return pubsub.ValidationIgnore
}

if m.Message.To == address.Undef {
log.Warn("local message has invalid destination address")
stats.Record(ctx, metrics.MessageValidationFailure.M(1))
recordFailure(ctx, metrics.MessageValidationFailure, "undef-addr")
return pubsub.ValidationIgnore
}

if !m.Message.Value.LessThan(types.TotalFilecoinInt) {
log.Warnf("local messages has too high value: %s", m.Message.Value)
stats.Record(ctx, metrics.MessageValidationFailure.M(1))
recordFailure(ctx, metrics.MessageValidationFailure, "value-too-high")
return pubsub.ValidationIgnore
}

if err := mv.mpool.VerifyMsgSig(m); err != nil {
log.Warnf("signature verification failed for local message: %s", err)
stats.Record(ctx, metrics.MessageValidationFailure.M(1))
recordFailure(ctx, metrics.MessageValidationFailure, "verify-sig")
return pubsub.ValidationIgnore
}

@@ -613,3 +623,11 @@ func HandleIncomingMessages(ctx context.Context, mpool *messagepool.MessagePool,
// Do nothing... everything happens in validate
}
}

func recordFailure(ctx context.Context, metric *stats.Int64Measure, failureType string) {
ctx, _ = tag.New(
ctx,
tag.Upsert(metrics.FailureType, failureType),
)
stats.Record(ctx, metric.M(1))
}
@@ -123,6 +123,8 @@ type Syncer struct {
receiptTracker *blockReceiptTracker

verifier ffiwrapper.Verifier

windowSize int
}

// NewSyncer creates a new Syncer object.
@@ -148,6 +150,7 @@ func NewSyncer(sm *stmgr.StateManager, bsync *blocksync.BlockSync, connmgr connm
receiptTracker: newBlockReceiptTracker(),
connmgr: connmgr,
verifier: verifier,
windowSize: defaultMessageFetchWindowSize,

incoming: pubsub.New(50),
}
@@ -655,7 +658,7 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) (er
validationStart := build.Clock.Now()
defer func() {
stats.Record(ctx, metrics.BlockValidationDurationMilliseconds.M(metrics.SinceInMilliseconds(validationStart)))
log.Infow("block validation", "took", time.Since(validationStart), "height", b.Header.Height)
log.Infow("block validation", "took", time.Since(validationStart), "height", b.Header.Height, "age", time.Since(time.Unix(int64(b.Header.Timestamp), 0)))
}()

ctx, span := trace.StartSpan(ctx, "validateBlock")
@@ -1413,7 +1416,8 @@ func (syncer *Syncer) iterFullTipsets(ctx context.Context, headers []*types.TipS

span.AddAttributes(trace.Int64Attribute("num_headers", int64(len(headers))))

windowSize := defaultMessageFetchWindowSize
windowSize := syncer.windowSize
mainLoop:
for i := len(headers) - 1; i >= 0; {
fts, err := syncer.store.TryFillTipSet(headers[i])
if err != nil {
@@ -1441,6 +1445,12 @@ func (syncer *Syncer) iterFullTipsets(ctx context.Context, headers []*types.TipS
nreq := batchSize - len(bstout)
bstips, err := syncer.Bsync.GetChainMessages(ctx, next, uint64(nreq))
if err != nil {
// TODO check errors for temporary nature
if windowSize > 1 {
windowSize /= 2
log.Infof("error fetching messages: %s; reducing window size to %d and trying again", err, windowSize)
continue mainLoop
}
return xerrors.Errorf("message processing failed: %w", err)
}

@@ -1475,9 +1485,24 @@ func (syncer *Syncer) iterFullTipsets(ctx context.Context, headers []*types.TipS
return xerrors.Errorf("message processing failed: %w", err)
}
}

if i >= windowSize {
newWindowSize := windowSize + 10
if newWindowSize > int(blocksync.MaxRequestLength) {
newWindowSize = int(blocksync.MaxRequestLength)
}
if newWindowSize > windowSize {
windowSize = newWindowSize
log.Infof("successfully fetched %d messages; increasing window size to %d", len(bstout), windowSize)
}
}

i -= batchSize
}

// remember our window size
syncer.windowSize = windowSize

return nil
}
|
||||
|
||||
|
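The window-size logic above is a classic additive-increase/multiplicative-decrease (AIMD) scheme: halve on a failed fetch, grow by a fixed step (capped at the protocol maximum) on success, and persist the result on the Syncer so the next sync round starts from the learned value. A minimal standalone sketch of that policy (the constants are illustrative, not lotus's):

package main

import "fmt"

const (
	maxWindow  = 200 // stand-in for blocksync.MaxRequestLength
	growthStep = 10
)

// nextWindow applies the AIMD policy used by the syncer: halve on failure
// (never below 1), otherwise grow additively up to the cap.
func nextWindow(cur int, failed bool) int {
	if failed {
		if cur > 1 {
			return cur / 2
		}
		return cur
	}
	if next := cur + growthStep; next < maxWindow {
		return next
	}
	return maxWindow
}

func main() {
	w := 30
	for _, failed := range []bool{false, true, true, false} {
		w = nextWindow(w, failed)
		fmt.Println("window:", w)
	}
}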
@ -34,7 +34,6 @@ type Runtime struct {

	vm     *VM
	state  *state.StateTree
	msg    *types.Message
	vmsg   vmr.Message
	height abi.ChainEpoch
	cst    cbor.IpldStore

@ -97,7 +97,6 @@ func (vm *VM) makeRuntime(ctx context.Context, msg *types.Message, origin addres
		ctx:         ctx,
		vm:          vm,
		state:       vm.cstate,
		msg:         msg,
		origin:      origin,
		originNonce: originNonce,
		height:      vm.blockHeight,
cli/chain.go

@ -859,6 +859,10 @@ var chainExportCmd = &cli.Command{
		&cli.StringFlag{
			Name: "tipset",
		},
		&cli.Int64Flag{
			Name:  "recent-stateroots",
			Usage: "specify the number of recent state roots to include in the export",
		},
	},
	Action: func(cctx *cli.Context) error {
		api, closer, err := GetFullNodeAPI(cctx)
@ -872,6 +876,11 @@ var chainExportCmd = &cli.Command{
			return fmt.Errorf("must specify filename to export chain to")
		}

		rsrs := abi.ChainEpoch(cctx.Int64("recent-stateroots"))
		if cctx.IsSet("recent-stateroots") && rsrs < build.Finality {
			return fmt.Errorf("\"recent-stateroots\" has to be greater than %d", build.Finality)
		}

		fi, err := os.Create(cctx.Args().First())
		if err != nil {
			return err
@ -888,7 +897,7 @@ var chainExportCmd = &cli.Command{
			return err
		}

		stream, err := api.ChainExport(ctx, ts.Key())
		stream, err := api.ChainExport(ctx, rsrs, ts.Key())
		if err != nil {
			return err
		}
cli/client.go

@ -1,6 +1,7 @@
package cli

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
@ -978,6 +979,10 @@ var clientListDeals = &cli.Command{
			Usage: "use color in display output",
			Value: true,
		},
		&cli.BoolFlag{
			Name:  "watch",
			Usage: "watch deal updates in real-time, rather than a one time list",
		},
	},
	Action: func(cctx *cli.Context) error {
		api, closer, err := GetFullNodeAPI(cctx)
@ -987,81 +992,97 @@ var clientListDeals = &cli.Command{
		defer closer()
		ctx := ReqContext(cctx)

		head, err := api.ChainHead(ctx)
		if err != nil {
			return err
		}
		verbose := cctx.Bool("verbose")
		color := cctx.Bool("color")
		watch := cctx.Bool("watch")

		localDeals, err := api.ClientListDeals(ctx)
		if err != nil {
			return err
		}

		sort.Slice(localDeals, func(i, j int) bool {
			return localDeals[i].CreationTime.Before(localDeals[j].CreationTime)
		})
		if watch {
			updates, err := api.ClientGetDealUpdates(ctx)
			if err != nil {
				return err
			}

		var deals []deal
		for _, v := range localDeals {
			if v.DealID == 0 {
				deals = append(deals, deal{
					LocalDeal: v,
					OnChainDealState: market.DealState{
						SectorStartEpoch: -1,
						LastUpdatedEpoch: -1,
						SlashEpoch:       -1,
					},
				})
			} else {
				onChain, err := api.StateMarketStorageDeal(ctx, v.DealID, head.Key())
			for {
				tm.Clear()
				tm.MoveCursor(1, 1)

				err = outputStorageDeals(ctx, tm.Screen, api, localDeals, verbose, color)
				if err != nil {
					deals = append(deals, deal{LocalDeal: v})
				} else {
					deals = append(deals, deal{
						LocalDeal:        v,
						OnChainDealState: onChain.State,
					})
					return err
				}

				tm.Flush()

				select {
				case <-ctx.Done():
					return nil
				case updated := <-updates:
					var found bool
					for i, existing := range localDeals {
						if existing.ProposalCid.Equals(updated.ProposalCid) {
							localDeals[i] = updated
							found = true
							break
						}
					}
					if !found {
						localDeals = append(localDeals, updated)
					}
				}
			}
		}

		color := cctx.Bool("color")
		return outputStorageDeals(ctx, os.Stdout, api, localDeals, cctx.Bool("verbose"), cctx.Bool("color"))
	},
}

		if cctx.Bool("verbose") {
			w := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0)
			fmt.Fprintf(w, "Created\tDealCid\tDealId\tProvider\tState\tOn Chain?\tSlashed?\tPieceCID\tSize\tPrice\tDuration\tMessage\n")
			for _, d := range deals {
				onChain := "N"
				if d.OnChainDealState.SectorStartEpoch != -1 {
					onChain = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SectorStartEpoch)
				}

				slashed := "N"
				if d.OnChainDealState.SlashEpoch != -1 {
					slashed = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SlashEpoch)
				}

				price := types.FIL(types.BigMul(d.LocalDeal.PricePerEpoch, types.NewInt(d.LocalDeal.Duration)))
				fmt.Fprintf(w, "%s\t%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%d\t%s\n", d.LocalDeal.CreationTime.Format(time.Stamp), d.LocalDeal.ProposalCid, d.LocalDeal.DealID, d.LocalDeal.Provider, dealStateString(color, d.LocalDeal.State), onChain, slashed, d.LocalDeal.PieceCID, types.SizeStr(types.NewInt(d.LocalDeal.Size)), price, d.LocalDeal.Duration, d.LocalDeal.Message)
			}
			return w.Flush()
func dealFromDealInfo(ctx context.Context, full api.FullNode, head *types.TipSet, v api.DealInfo) deal {
	if v.DealID == 0 {
		return deal{
			LocalDeal: v,
			OnChainDealState: market.DealState{
				SectorStartEpoch: -1,
				LastUpdatedEpoch: -1,
				SlashEpoch:       -1,
			},
		}
	}

	w := tablewriter.New(tablewriter.Col("DealCid"),
		tablewriter.Col("DealId"),
		tablewriter.Col("Provider"),
		tablewriter.Col("State"),
		tablewriter.Col("On Chain?"),
		tablewriter.Col("Slashed?"),
		tablewriter.Col("PieceCID"),
		tablewriter.Col("Size"),
		tablewriter.Col("Price"),
		tablewriter.Col("Duration"),
		tablewriter.NewLineCol("Message"))
	onChain, err := full.StateMarketStorageDeal(ctx, v.DealID, head.Key())
	if err != nil {
		return deal{LocalDeal: v}
	}

	return deal{
		LocalDeal:        v,
		OnChainDealState: onChain.State,
	}
}

func outputStorageDeals(ctx context.Context, out io.Writer, full api.FullNode, localDeals []api.DealInfo, verbose bool, color bool) error {
	sort.Slice(localDeals, func(i, j int) bool {
		return localDeals[i].CreationTime.Before(localDeals[j].CreationTime)
	})

	head, err := full.ChainHead(ctx)
	if err != nil {
		return err
	}

	var deals []deal
	for _, localDeal := range localDeals {
		deals = append(deals, dealFromDealInfo(ctx, full, head, localDeal))
	}

	if verbose {
		w := tabwriter.NewWriter(out, 2, 4, 2, ' ', 0)
		fmt.Fprintf(w, "Created\tDealCid\tDealId\tProvider\tState\tOn Chain?\tSlashed?\tPieceCID\tSize\tPrice\tDuration\tMessage\n")
		for _, d := range deals {
			propcid := ellipsis(d.LocalDeal.ProposalCid.String(), 8)

			onChain := "N"
			if d.OnChainDealState.SectorStartEpoch != -1 {
				onChain = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SectorStartEpoch)
@ -1072,27 +1093,57 @@ var clientListDeals = &cli.Command{
				slashed = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SlashEpoch)
			}

			piece := ellipsis(d.LocalDeal.PieceCID.String(), 8)

			price := types.FIL(types.BigMul(d.LocalDeal.PricePerEpoch, types.NewInt(d.LocalDeal.Duration)))
			fmt.Fprintf(w, "%s\t%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%d\t%s\n", d.LocalDeal.CreationTime.Format(time.Stamp), d.LocalDeal.ProposalCid, d.LocalDeal.DealID, d.LocalDeal.Provider, dealStateString(color, d.LocalDeal.State), onChain, slashed, d.LocalDeal.PieceCID, types.SizeStr(types.NewInt(d.LocalDeal.Size)), price, d.LocalDeal.Duration, d.LocalDeal.Message)
		}
		return w.Flush()
	}

		w.Write(map[string]interface{}{
			"DealCid":   propcid,
			"DealId":    d.LocalDeal.DealID,
			"Provider":  d.LocalDeal.Provider,
			"State":     dealStateString(color, d.LocalDeal.State),
			"On Chain?": onChain,
			"Slashed?":  slashed,
			"PieceCID":  piece,
			"Size":      types.SizeStr(types.NewInt(d.LocalDeal.Size)),
			"Price":     price,
			"Duration":  d.LocalDeal.Duration,
			"Message":   d.LocalDeal.Message,
		})
	w := tablewriter.New(tablewriter.Col("DealCid"),
		tablewriter.Col("DealId"),
		tablewriter.Col("Provider"),
		tablewriter.Col("State"),
		tablewriter.Col("On Chain?"),
		tablewriter.Col("Slashed?"),
		tablewriter.Col("PieceCID"),
		tablewriter.Col("Size"),
		tablewriter.Col("Price"),
		tablewriter.Col("Duration"),
		tablewriter.NewLineCol("Message"))

	for _, d := range deals {
		propcid := ellipsis(d.LocalDeal.ProposalCid.String(), 8)

		onChain := "N"
		if d.OnChainDealState.SectorStartEpoch != -1 {
			onChain = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SectorStartEpoch)
		}

		return w.Flush(os.Stdout)
	},
		slashed := "N"
		if d.OnChainDealState.SlashEpoch != -1 {
			slashed = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SlashEpoch)
		}

		piece := ellipsis(d.LocalDeal.PieceCID.String(), 8)

		price := types.FIL(types.BigMul(d.LocalDeal.PricePerEpoch, types.NewInt(d.LocalDeal.Duration)))

		w.Write(map[string]interface{}{
			"DealCid":   propcid,
			"DealId":    d.LocalDeal.DealID,
			"Provider":  d.LocalDeal.Provider,
			"State":     dealStateString(color, d.LocalDeal.State),
			"On Chain?": onChain,
			"Slashed?":  slashed,
			"PieceCID":  piece,
			"Size":      types.SizeStr(types.NewInt(d.LocalDeal.Size)),
			"Price":     price,
			"Duration":  d.LocalDeal.Duration,
			"Message":   d.LocalDeal.Message,
		})
	}

	return w.Flush(out)
}

func dealStateString(c bool, state storagemarket.StorageDealStatus) string {
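The --watch path above redraws the whole table on every update rather than patching individual lines. A minimal sketch of that screen-refresh loop, assuming a goterm-style API (github.com/buger/goterm) and an updates channel supplied by the caller:

package main

import (
	"context"
	"fmt"
	"time"

	tm "github.com/buger/goterm"
)

// watch clears the terminal and re-renders the full state whenever an
// update arrives — the same redraw-everything strategy as the deals watcher.
func watch(ctx context.Context, updates <-chan string) {
	var items []string
	for {
		tm.Clear()
		tm.MoveCursor(1, 1)
		for _, it := range items {
			fmt.Fprintln(tm.Screen, it)
		}
		tm.Flush()

		select {
		case <-ctx.Done():
			return
		case u := <-updates:
			items = append(items, u)
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	updates := make(chan string)
	go func() {
		for i := 0; ; i++ {
			updates <- fmt.Sprintf("deal %d updated", i)
			time.Sleep(time.Second)
		}
	}()
	watch(ctx, updates)
}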
cli/cmd.go

@ -75,6 +75,8 @@ func flagForAPI(t repo.RepoType) string {
		return "api"
	case repo.StorageMiner:
		return "miner-api"
	case repo.Worker:
		return "worker-api"
	default:
		panic(fmt.Sprintf("Unknown repo type: %v", t))
	}
@ -86,6 +88,8 @@ func flagForRepo(t repo.RepoType) string {
		return "repo"
	case repo.StorageMiner:
		return "miner-repo"
	case repo.Worker:
		return "worker-repo"
	default:
		panic(fmt.Sprintf("Unknown repo type: %v", t))
	}
@ -97,6 +101,8 @@ func envForRepo(t repo.RepoType) string {
		return "FULLNODE_API_INFO"
	case repo.StorageMiner:
		return "MINER_API_INFO"
	case repo.Worker:
		return "WORKER_API_INFO"
	default:
		panic(fmt.Sprintf("Unknown repo type: %v", t))
	}
@ -109,6 +115,8 @@ func envForRepoDeprecation(t repo.RepoType) string {
		return "FULLNODE_API_INFO"
	case repo.StorageMiner:
		return "STORAGE_API_INFO"
	case repo.Worker:
		return "WORKER_API_INFO"
	default:
		panic(fmt.Sprintf("Unknown repo type: %v", t))
	}
@ -234,6 +242,15 @@ func GetStorageMinerAPI(ctx *cli.Context, opts ...jsonrpc.Option) (api.StorageMi
	return client.NewStorageMinerRPC(ctx.Context, addr, headers, opts...)
}

func GetWorkerAPI(ctx *cli.Context) (api.WorkerAPI, jsonrpc.ClientCloser, error) {
	addr, headers, err := GetRawAPI(ctx, repo.Worker)
	if err != nil {
		return nil, nil, err
	}

	return client.NewWorkerRPC(ctx.Context, addr, headers)
}

func DaemonContext(cctx *cli.Context) context.Context {
	if mtCtx, ok := cctx.App.Metadata[metadataTraceContext]; ok {
		return mtCtx.(context.Context)
@ -20,7 +20,7 @@ var logList = &cli.Command{
	Name:  "list",
	Usage: "List log systems",
	Action: func(cctx *cli.Context) error {
		api, closer, err := GetFullNodeAPI(cctx)
		api, closer, err := GetAPI(cctx)
		if err != nil {
			return err
		}
@ -71,7 +71,7 @@ var logSetLevel = &cli.Command{
		},
	},
	Action: func(cctx *cli.Context) error {
		api, closer, err := GetFullNodeAPI(cctx)
		api, closer, err := GetAPI(cctx)
		if err != nil {
			return err
		}
@ -468,7 +468,7 @@ var paychVoucherSubmitCmd = &cli.Command{

		ctx := ReqContext(cctx)

		mcid, err := api.PaychVoucherSubmit(ctx, ch, sv)
		mcid, err := api.PaychVoucherSubmit(ctx, ch, sv, nil, nil)
		if err != nil {
			return err
		}
@ -107,12 +107,14 @@ func TestPaymentChannelVouchers(t *testing.T) {
	ctx := context.Background()
	nodes, addrs := startTwoNodesOneMiner(ctx, t, blocktime)
	paymentCreator := nodes[0]
	paymentReceiver := nodes[1]
	creatorAddr := addrs[0]
	receiverAddr := addrs[1]

	// Create mock CLI
	mockCLI := newMockCLI(t)
	creatorCLI := mockCLI.client(paymentCreator.ListenAddr)
	receiverCLI := mockCLI.client(paymentReceiver.ListenAddr)

	// creator: paych get <creator> <receiver> <amount>
	channelAmt := "100000"
@ -127,43 +129,123 @@ func TestPaymentChannelVouchers(t *testing.T) {
	// creator: paych voucher create <channel> <amount>
	// Note: implied --lane=0
	voucherAmt1 := 100
	vamt1 := strconv.Itoa(voucherAmt1)
	cmd = []string{chAddr.String(), vamt1}
	cmd = []string{chAddr.String(), strconv.Itoa(voucherAmt1)}
	voucher1 := creatorCLI.runCmd(paychVoucherCreateCmd, cmd)
	vouchers = append(vouchers, voucherSpec{serialized: voucher1, lane: 0, amt: voucherAmt1})

	// creator: paych voucher create <channel> <amount> --lane=5
	lane5 := "--lane=5"
	voucherAmt2 := 50
	vamt2 := strconv.Itoa(voucherAmt2)
	cmd = []string{lane5, chAddr.String(), vamt2}
	cmd = []string{lane5, chAddr.String(), strconv.Itoa(voucherAmt2)}
	voucher2 := creatorCLI.runCmd(paychVoucherCreateCmd, cmd)
	vouchers = append(vouchers, voucherSpec{serialized: voucher2, lane: 5, amt: voucherAmt2})

	// creator: paych voucher create <channel> <amount> --lane=5
	voucherAmt3 := 70
	vamt3 := strconv.Itoa(voucherAmt3)
	cmd = []string{lane5, chAddr.String(), vamt3}
	cmd = []string{lane5, chAddr.String(), strconv.Itoa(voucherAmt3)}
	voucher3 := creatorCLI.runCmd(paychVoucherCreateCmd, cmd)
	vouchers = append(vouchers, voucherSpec{serialized: voucher3, lane: 5, amt: voucherAmt3})

	// creator: paych voucher create <channel> <amount> --lane=5
	voucherAmt4 := 80
	cmd = []string{lane5, chAddr.String(), strconv.Itoa(voucherAmt4)}
	voucher4 := creatorCLI.runCmd(paychVoucherCreateCmd, cmd)
	vouchers = append(vouchers, voucherSpec{serialized: voucher4, lane: 5, amt: voucherAmt4})

	// creator: paych voucher list <channel> --export
	cmd = []string{"--export", chAddr.String()}
	list := creatorCLI.runCmd(paychVoucherListCmd, cmd)

	// Check that voucher list output is correct
	// Check that voucher list output is correct on creator
	checkVoucherOutput(t, list, vouchers)

	// creator: paych voucher best-spendable <channel>
	cmd = []string{"--export", chAddr.String()}
	bestSpendable := creatorCLI.runCmd(paychVoucherBestSpendableCmd, cmd)

	// Check that best spendable output is correct
	// Check that best spendable output is correct on creator
	bestVouchers := []voucherSpec{
		{serialized: voucher1, lane: 0, amt: voucherAmt1},
		{serialized: voucher3, lane: 5, amt: voucherAmt3},
		{serialized: voucher4, lane: 5, amt: voucherAmt4},
	}
	checkVoucherOutput(t, bestSpendable, bestVouchers)

	// receiver: paych voucher add <voucher>
	cmd = []string{chAddr.String(), voucher1}
	receiverCLI.runCmd(paychVoucherAddCmd, cmd)

	// receiver: paych voucher add <voucher>
	cmd = []string{chAddr.String(), voucher2}
	receiverCLI.runCmd(paychVoucherAddCmd, cmd)

	// receiver: paych voucher add <voucher>
	cmd = []string{chAddr.String(), voucher3}
	receiverCLI.runCmd(paychVoucherAddCmd, cmd)

	// receiver: paych voucher add <voucher>
	cmd = []string{chAddr.String(), voucher4}
	receiverCLI.runCmd(paychVoucherAddCmd, cmd)

	// receiver: paych voucher list <channel> --export
	cmd = []string{"--export", chAddr.String()}
	list = receiverCLI.runCmd(paychVoucherListCmd, cmd)

	// Check that voucher list output is correct on receiver
	checkVoucherOutput(t, list, vouchers)

	// receiver: paych voucher best-spendable <channel>
	cmd = []string{"--export", chAddr.String()}
	bestSpendable = receiverCLI.runCmd(paychVoucherBestSpendableCmd, cmd)

	// Check that best spendable output is correct on receiver
	bestVouchers = []voucherSpec{
		{serialized: voucher1, lane: 0, amt: voucherAmt1},
		{serialized: voucher4, lane: 5, amt: voucherAmt4},
	}
	checkVoucherOutput(t, bestSpendable, bestVouchers)

	// receiver: paych voucher submit <channel> <voucher>
	cmd = []string{chAddr.String(), voucher1}
	receiverCLI.runCmd(paychVoucherSubmitCmd, cmd)

	// receiver: paych voucher best-spendable <channel>
	cmd = []string{"--export", chAddr.String()}
	bestSpendable = receiverCLI.runCmd(paychVoucherBestSpendableCmd, cmd)

	// Check that best spendable output no longer includes submitted voucher
	bestVouchers = []voucherSpec{
		{serialized: voucher4, lane: 5, amt: voucherAmt4},
	}
	checkVoucherOutput(t, bestSpendable, bestVouchers)

	// There are three vouchers in lane 5: 50, 70, 80
	// Submit the voucher for 50. Best spendable should still be 80.
	// receiver: paych voucher submit <channel> <voucher>
	cmd = []string{chAddr.String(), voucher2}
	receiverCLI.runCmd(paychVoucherSubmitCmd, cmd)

	// receiver: paych voucher best-spendable <channel>
	cmd = []string{"--export", chAddr.String()}
	bestSpendable = receiverCLI.runCmd(paychVoucherBestSpendableCmd, cmd)

	// Check that best spendable output still includes the voucher for 80
	bestVouchers = []voucherSpec{
		{serialized: voucher4, lane: 5, amt: voucherAmt4},
	}
	checkVoucherOutput(t, bestSpendable, bestVouchers)

	// Submit the voucher for 80
	// receiver: paych voucher submit <channel> <voucher>
	cmd = []string{chAddr.String(), voucher4}
	receiverCLI.runCmd(paychVoucherSubmitCmd, cmd)

	// receiver: paych voucher best-spendable <channel>
	cmd = []string{"--export", chAddr.String()}
	bestSpendable = receiverCLI.runCmd(paychVoucherBestSpendableCmd, cmd)

	// Check that best spendable output no longer includes submitted voucher
	bestVouchers = []voucherSpec{}
	checkVoucherOutput(t, bestSpendable, bestVouchers)
}

func checkVoucherOutput(t *testing.T, list string, vouchers []voucherSpec) {
@ -171,14 +253,20 @@ func checkVoucherOutput(t *testing.T, list string, vouchers []voucherSpec) {
	listVouchers := make(map[string]string)
	for _, line := range lines {
		parts := strings.Split(line, ";")
		serialized := strings.TrimSpace(parts[1])
		listVouchers[serialized] = strings.TrimSpace(parts[0])
		if len(parts) == 2 {
			serialized := strings.TrimSpace(parts[1])
			listVouchers[serialized] = strings.TrimSpace(parts[0])
		}
	}
	for _, vchr := range vouchers {
		res, ok := listVouchers[vchr.serialized]
		require.True(t, ok)
		require.Regexp(t, fmt.Sprintf("Lane %d", vchr.lane), res)
		require.Regexp(t, fmt.Sprintf("%d", vchr.amt), res)
		delete(listVouchers, vchr.serialized)
	}
	for _, vchr := range listVouchers {
		require.Fail(t, "Extra voucher "+vchr)
	}
}
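The test encodes the expected "best spendable" semantics: per lane, only the highest-value voucher above what has already been redeemed counts, which is why submitting the 50 voucher leaves 80 as the best in lane 5. A small sketch of that selection rule, under the simplifying assumption that a voucher is just a lane plus an amount and that "redeemed" tracks the highest submitted amount per lane:

package main

import "fmt"

// bestSpendable returns, per lane, the highest voucher amount that exceeds
// what has already been redeemed on that lane — the rule the CLI test above
// is exercising.
func bestSpendable(vouchers map[uint64][]int64, redeemed map[uint64]int64) map[uint64]int64 {
	best := make(map[uint64]int64)
	for lane, amts := range vouchers {
		for _, amt := range amts {
			if amt > redeemed[lane] && amt > best[lane] {
				best[lane] = amt
			}
		}
	}
	return best
}

func main() {
	vouchers := map[uint64][]int64{
		0: {100},
		5: {50, 70, 80},
	}
	// Voucher for 50 was submitted on lane 5; lane 0 untouched.
	redeemed := map[uint64]int64{5: 50}
	fmt.Println(bestSpendable(vouchers, redeemed)) // map[0:100 5:80]
}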
@ -26,6 +26,11 @@ func main() {
			EnvVars: []string{"LOTUS_PATH"},
			Value:   "~/.lotus", // TODO: Consider XDG_DATA_HOME
		},
		&cli.StringFlag{
			Name:    "api",
			EnvVars: []string{"FULLNODE_API_INFO"},
			Value:   "",
		},
		&cli.StringFlag{
			Name:    "db",
			EnvVars: []string{"LOTUS_DB"},
@ -96,12 +96,6 @@ func (p *Processor) HandleMarketChanges(ctx context.Context, marketTips ActorTip
		log.Fatalw("Failed to persist market actors", "error", err)
	}

	// we persist the dealID <--> minerID,sectorID here since the dealID needs to be stored above first
	if err := p.storePreCommitDealInfo(p.sectorDealEvents); err != nil {
		close(p.sectorDealEvents)
		return err
	}

	if err := p.updateMarket(ctx, marketChanges); err != nil {
		log.Fatalw("Failed to update market actors", "error", err)
	}
@ -272,48 +266,6 @@ func (p *Processor) storeMarketActorDealProposals(ctx context.Context, marketTip

}

func (p *Processor) storePreCommitDealInfo(dealEvents <-chan *SectorDealEvent) error {
	tx, err := p.db.Begin()
	if err != nil {
		return err
	}

	if _, err := tx.Exec(`create temp table mds (like minerid_dealid_sectorid excluding constraints) on commit drop;`); err != nil {
		return xerrors.Errorf("Failed to create temp table for minerid_dealid_sectorid: %w", err)
	}

	stmt, err := tx.Prepare(`copy mds (deal_id, miner_id, sector_id) from STDIN`)
	if err != nil {
		return xerrors.Errorf("Failed to prepare minerid_dealid_sectorid statement: %w", err)
	}

	for sde := range dealEvents {
		for _, did := range sde.DealIDs {
			if _, err := stmt.Exec(
				uint64(did),
				sde.MinerID.String(),
				sde.SectorID,
			); err != nil {
				return err
			}
		}
	}

	if err := stmt.Close(); err != nil {
		return xerrors.Errorf("Failed to close miner sector deals statement: %w", err)
	}

	if _, err := tx.Exec(`insert into minerid_dealid_sectorid select * from mds on conflict do nothing`); err != nil {
		return xerrors.Errorf("Failed to insert into miner deal sector table: %w", err)
	}

	if err := tx.Commit(); err != nil {
		return xerrors.Errorf("Failed to commit miner deal sector table: %w", err)
	}
	return nil

}

func (p *Processor) updateMarketActorDealProposals(ctx context.Context, marketTip []marketActorInfo) error {
	start := time.Now()
	defer func() {
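This diff moves storePreCommitDealInfo (and the close of its input channel) out of the market handler and next to the goroutine that produces the events. The general Go rule it restores: the sender owns the channel and closes it, while the consumer simply ranges until the channel is drained. A minimal sketch of that ownership pattern:

package main

import (
	"fmt"
	"sync"
)

type event struct{ id int }

func main() {
	events := make(chan event, 8)

	var wg sync.WaitGroup
	wg.Add(1)
	// Consumer: ranges until the producer closes the channel.
	go func() {
		defer wg.Done()
		for ev := range events {
			fmt.Println("stored event", ev.id)
		}
	}()

	// Producer: owns the channel and closes it when done, mirroring the
	// defer close(dealEvents) placed beside the sending goroutine above.
	func() {
		defer close(events)
		for i := 0; i < 3; i++ {
			events <- event{id: i}
		}
	}()

	wg.Wait()
}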
@ -3,7 +3,6 @@ package processor

import (
	"context"
	"sync"
	"time"

	"golang.org/x/sync/errgroup"
	"golang.org/x/xerrors"
@ -120,10 +119,6 @@ func (p *Processor) persistMessagesAndReceipts(ctx context.Context, blocks map[c
}

func (p *Processor) storeReceipts(recs map[mrec]*types.MessageReceipt) error {
	start := time.Now()
	defer func() {
		log.Debugw("Persisted Receipts", "duration", time.Since(start).String())
	}()
	tx, err := p.db.Begin()
	if err != nil {
		return err
@ -164,10 +159,6 @@ create temp table recs (like receipts excluding constraints) on commit drop;
}

func (p *Processor) storeMsgInclusions(incls map[cid.Cid][]cid.Cid) error {
	start := time.Now()
	defer func() {
		log.Debugw("Persisted Message Inclusions", "duration", time.Since(start).String())
	}()
	tx, err := p.db.Begin()
	if err != nil {
		return err
@ -206,10 +197,6 @@ create temp table mi (like block_messages excluding constraints) on commit drop;
}

func (p *Processor) storeMessages(msgs map[cid.Cid]*types.Message) error {
	start := time.Now()
	defer func() {
		log.Debugw("Persisted Messages", "duration", time.Since(start).String())
	}()
	tx, err := p.db.Begin()
	if err != nil {
		return err
@ -271,7 +271,11 @@ func (p *Processor) persistMiners(ctx context.Context, miners []minerActorInfo)
	preCommitEvents := make(chan *MinerSectorsEvent, 8)
	sectorEvents := make(chan *MinerSectorsEvent, 8)
	partitionEvents := make(chan *MinerSectorsEvent, 8)
	p.sectorDealEvents = make(chan *SectorDealEvent, 8)
	dealEvents := make(chan *SectorDealEvent, 8)

	grp.Go(func() error {
		return p.storePreCommitDealInfo(dealEvents)
	})

	grp.Go(func() error {
		return p.storeMinerSectorEvents(ctx, sectorEvents, preCommitEvents, partitionEvents)
@ -280,9 +284,9 @@ func (p *Processor) persistMiners(ctx context.Context, miners []minerActorInfo)
	grp.Go(func() error {
		defer func() {
			close(preCommitEvents)
			close(p.sectorDealEvents)
			close(dealEvents)
		}()
		return p.storeMinerPreCommitInfo(ctx, miners, preCommitEvents, p.sectorDealEvents)
		return p.storeMinerPreCommitInfo(ctx, miners, preCommitEvents, dealEvents)
	})

	grp.Go(func() error {
@ -314,100 +318,107 @@ func (p *Processor) storeMinerPreCommitInfo(ctx context.Context, miners []minerA
		return xerrors.Errorf("Failed to prepare miner precommit info statement: %w", err)
	}

	grp, _ := errgroup.WithContext(ctx)
	for _, m := range miners {
		minerSectors, err := adt.AsArray(p.ctxStore, m.state.Sectors)
		if err != nil {
			return err
		}

		changes, err := p.getMinerPreCommitChanges(ctx, m)
		if err != nil {
			if strings.Contains(err.Error(), types.ErrActorNotFound.Error()) {
				continue
			} else {
		m := m
		grp.Go(func() error {
			minerSectors, err := adt.AsArray(p.ctxStore, m.state.Sectors)
			if err != nil {
				return err
			}
		}
		if changes == nil {
			continue
		}

		preCommitAdded := make([]uint64, len(changes.Added))
		for i, added := range changes.Added {
			if len(added.Info.DealIDs) > 0 {
				sectorDeals <- &SectorDealEvent{
					MinerID:  m.common.addr,
					SectorID: uint64(added.Info.SectorNumber),
					DealIDs:  added.Info.DealIDs,
			changes, err := p.getMinerPreCommitChanges(ctx, m)
			if err != nil {
				if strings.Contains(err.Error(), types.ErrActorNotFound.Error()) {
					return nil
				}
			}
			if added.Info.ReplaceCapacity {
				if _, err := stmt.Exec(
					m.common.addr.String(),
					added.Info.SectorNumber,
					added.Info.SealedCID.String(),
					m.common.stateroot.String(),
					added.Info.SealRandEpoch,
					added.Info.Expiration,
					added.PreCommitDeposit.String(),
					added.PreCommitEpoch,
					added.DealWeight.String(),
					added.VerifiedDealWeight.String(),
					added.Info.ReplaceCapacity,
					added.Info.ReplaceSectorDeadline,
					added.Info.ReplaceSectorPartition,
					added.Info.ReplaceSectorNumber,
				); err != nil {
					return err
				}
			} else {
				if _, err := stmt.Exec(
					m.common.addr.String(),
					added.Info.SectorNumber,
					added.Info.SealedCID.String(),
					m.common.stateroot.String(),
					added.Info.SealRandEpoch,
					added.Info.Expiration,
					added.PreCommitDeposit.String(),
					added.PreCommitEpoch,
					added.DealWeight.String(),
					added.VerifiedDealWeight.String(),
					added.Info.ReplaceCapacity,
					nil, // replace deadline
					nil, // replace partition
					nil, // replace sector
				); err != nil {
					return err
				}

			}
			preCommitAdded[i] = uint64(added.Info.SectorNumber)
		}
		if len(preCommitAdded) > 0 {
			sectorEvents <- &MinerSectorsEvent{
				MinerID:   m.common.addr,
				StateRoot: m.common.stateroot,
				SectorIDs: preCommitAdded,
				Event:     PreCommitAdded,
			}
		}
		var preCommitExpired []uint64
		for _, removed := range changes.Removed {
			var sector miner.SectorOnChainInfo
			if found, err := minerSectors.Get(uint64(removed.Info.SectorNumber), &sector); err != nil {
				return err
			} else if !found {
				preCommitExpired = append(preCommitExpired, uint64(removed.Info.SectorNumber))
			}
		}
		if len(preCommitExpired) > 0 {
			sectorEvents <- &MinerSectorsEvent{
				MinerID:   m.common.addr,
				StateRoot: m.common.stateroot,
				SectorIDs: preCommitExpired,
				Event:     PreCommitExpired,
			if changes == nil {
				return nil
			}
		}

			preCommitAdded := make([]uint64, len(changes.Added))
			for i, added := range changes.Added {
				if len(added.Info.DealIDs) > 0 {
					sectorDeals <- &SectorDealEvent{
						MinerID:  m.common.addr,
						SectorID: uint64(added.Info.SectorNumber),
						DealIDs:  added.Info.DealIDs,
					}
				}
				if added.Info.ReplaceCapacity {
					if _, err := stmt.Exec(
						m.common.addr.String(),
						added.Info.SectorNumber,
						added.Info.SealedCID.String(),
						m.common.stateroot.String(),
						added.Info.SealRandEpoch,
						added.Info.Expiration,
						added.PreCommitDeposit.String(),
						added.PreCommitEpoch,
						added.DealWeight.String(),
						added.VerifiedDealWeight.String(),
						added.Info.ReplaceCapacity,
						added.Info.ReplaceSectorDeadline,
						added.Info.ReplaceSectorPartition,
						added.Info.ReplaceSectorNumber,
					); err != nil {
						return err
					}
				} else {
					if _, err := stmt.Exec(
						m.common.addr.String(),
						added.Info.SectorNumber,
						added.Info.SealedCID.String(),
						m.common.stateroot.String(),
						added.Info.SealRandEpoch,
						added.Info.Expiration,
						added.PreCommitDeposit.String(),
						added.PreCommitEpoch,
						added.DealWeight.String(),
						added.VerifiedDealWeight.String(),
						added.Info.ReplaceCapacity,
						nil, // replace deadline
						nil, // replace partition
						nil, // replace sector
					); err != nil {
						return err
					}

				}
				preCommitAdded[i] = uint64(added.Info.SectorNumber)
			}
			if len(preCommitAdded) > 0 {
				sectorEvents <- &MinerSectorsEvent{
					MinerID:   m.common.addr,
					StateRoot: m.common.stateroot,
					SectorIDs: preCommitAdded,
					Event:     PreCommitAdded,
				}
			}
			var preCommitExpired []uint64
			for _, removed := range changes.Removed {
				var sector miner.SectorOnChainInfo
				if found, err := minerSectors.Get(uint64(removed.Info.SectorNumber), &sector); err != nil {
					return err
				} else if !found {
					preCommitExpired = append(preCommitExpired, uint64(removed.Info.SectorNumber))
				}
			}
			if len(preCommitExpired) > 0 {
				sectorEvents <- &MinerSectorsEvent{
					MinerID:   m.common.addr,
					StateRoot: m.common.stateroot,
					SectorIDs: preCommitExpired,
					Event:     PreCommitExpired,
				}
			}
			return nil
		})
	}
	if err := grp.Wait(); err != nil {
		return err
	}

	if err := stmt.Close(); err != nil {
@ -439,67 +450,75 @@ func (p *Processor) storeMinerSectorInfo(ctx context.Context, miners []minerActo
		return xerrors.Errorf("Failed to prepare miner sector info statement: %w", err)
	}

	grp, _ := errgroup.WithContext(ctx)
	for _, m := range miners {
		changes, err := p.getMinerSectorChanges(ctx, m)
		if err != nil {
			if strings.Contains(err.Error(), types.ErrActorNotFound.Error()) {
				continue
			} else {
		m := m
		grp.Go(func() error {
			changes, err := p.getMinerSectorChanges(ctx, m)
			if err != nil {
				if strings.Contains(err.Error(), types.ErrActorNotFound.Error()) {
					return nil
				}
				return err
			}
		}
		if changes == nil {
			continue
		}
		var sectorsAdded []uint64
		var ccAdded []uint64
		var extended []uint64
		for _, added := range changes.Added {
			// add the sector to the table
			if _, err := stmt.Exec(
				m.common.addr.String(),
				added.SectorNumber,
				added.SealedCID.String(),
				m.common.stateroot.String(),
				added.Activation.String(),
				added.Expiration.String(),
				added.DealWeight.String(),
				added.VerifiedDealWeight.String(),
				added.InitialPledge.String(),
				added.ExpectedDayReward.String(),
				added.ExpectedStoragePledge.String(),
			); err != nil {
				return err
			if changes == nil {
				return nil
			}
			if len(added.DealIDs) == 0 {
				ccAdded = append(ccAdded, uint64(added.SectorNumber))
			} else {
				sectorsAdded = append(sectorsAdded, uint64(added.SectorNumber))
			var sectorsAdded []uint64
			var ccAdded []uint64
			var extended []uint64
			for _, added := range changes.Added {
				// add the sector to the table
				if _, err := stmt.Exec(
					m.common.addr.String(),
					added.SectorNumber,
					added.SealedCID.String(),
					m.common.stateroot.String(),
					added.Activation.String(),
					added.Expiration.String(),
					added.DealWeight.String(),
					added.VerifiedDealWeight.String(),
					added.InitialPledge.String(),
					added.ExpectedDayReward.String(),
					added.ExpectedStoragePledge.String(),
				); err != nil {
					log.Errorw("writing miner sector changes statement", "error", err.Error())
				}
				if len(added.DealIDs) == 0 {
					ccAdded = append(ccAdded, uint64(added.SectorNumber))
				} else {
					sectorsAdded = append(sectorsAdded, uint64(added.SectorNumber))
				}
			}
		}

		for _, mod := range changes.Extended {
			extended = append(extended, uint64(mod.To.SectorNumber))
		}
			for _, mod := range changes.Extended {
				extended = append(extended, uint64(mod.To.SectorNumber))
			}

		events <- &MinerSectorsEvent{
			MinerID:   m.common.addr,
			StateRoot: m.common.stateroot,
			SectorIDs: ccAdded,
			Event:     CommitCapacityAdded,
		}
		events <- &MinerSectorsEvent{
			MinerID:   m.common.addr,
			StateRoot: m.common.stateroot,
			SectorIDs: sectorsAdded,
			Event:     SectorAdded,
		}
		events <- &MinerSectorsEvent{
			MinerID:   m.common.addr,
			StateRoot: m.common.stateroot,
			SectorIDs: extended,
			Event:     SectorExtended,
		}
			events <- &MinerSectorsEvent{
				MinerID:   m.common.addr,
				StateRoot: m.common.stateroot,
				SectorIDs: ccAdded,
				Event:     CommitCapacityAdded,
			}
			events <- &MinerSectorsEvent{
				MinerID:   m.common.addr,
				StateRoot: m.common.stateroot,
				SectorIDs: sectorsAdded,
				Event:     SectorAdded,
			}
			events <- &MinerSectorsEvent{
				MinerID:   m.common.addr,
				StateRoot: m.common.stateroot,
				SectorIDs: extended,
				Event:     SectorExtended,
			}
			return nil
		})
	}

	if err := grp.Wait(); err != nil {
		return err
	}

	if err := stmt.Close(); err != nil {
@ -911,6 +930,48 @@ func (p *Processor) storeMinersActorInfoState(ctx context.Context, miners []mine
	return tx.Commit()
}

func (p *Processor) storePreCommitDealInfo(dealEvents <-chan *SectorDealEvent) error {
	tx, err := p.db.Begin()
	if err != nil {
		return err
	}

	if _, err := tx.Exec(`create temp table mds (like minerid_dealid_sectorid excluding constraints) on commit drop;`); err != nil {
		return xerrors.Errorf("Failed to create temp table for minerid_dealid_sectorid: %w", err)
	}

	stmt, err := tx.Prepare(`copy mds (deal_id, miner_id, sector_id) from STDIN`)
	if err != nil {
		return xerrors.Errorf("Failed to prepare minerid_dealid_sectorid statement: %w", err)
	}

	for sde := range dealEvents {
		for _, did := range sde.DealIDs {
			if _, err := stmt.Exec(
				uint64(did),
				sde.MinerID.String(),
				sde.SectorID,
			); err != nil {
				return err
			}
		}
	}

	if err := stmt.Close(); err != nil {
		return xerrors.Errorf("Failed to close miner sector deals statement: %w", err)
	}

	if _, err := tx.Exec(`insert into minerid_dealid_sectorid select * from mds on conflict do nothing`); err != nil {
		return xerrors.Errorf("Failed to insert into miner deal sector table: %w", err)
	}

	if err := tx.Commit(); err != nil {
		return xerrors.Errorf("Failed to commit miner deal sector table: %w", err)
	}
	return nil

}

func (p *Processor) storeMinersPower(miners []minerActorInfo) error {
	start := time.Now()
	defer func() {
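The structural change in both store functions above is the same: a sequential for loop over miners becomes one errgroup goroutine per miner, with m := m capturing the loop variable and actor-not-found errors downgraded from continue to return nil so they don't cancel the group. A stripped-down sketch of the pattern:

package main

import (
	"context"
	"errors"
	"fmt"

	"golang.org/x/sync/errgroup"
)

var errActorNotFound = errors.New("actor not found")

func process(m string) error {
	if m == "t01002" {
		return errActorNotFound // tolerated, like types.ErrActorNotFound
	}
	fmt.Println("processed", m)
	return nil
}

func main() {
	miners := []string{"t01000", "t01001", "t01002"}

	grp, _ := errgroup.WithContext(context.Background())
	for _, m := range miners {
		m := m // capture the loop variable for the closure (pre-Go 1.22)
		grp.Go(func() error {
			if err := process(m); err != nil {
				if errors.Is(err, errActorNotFound) {
					return nil // skip this miner, keep the group alive
				}
				return err
			}
			return nil
		})
	}
	if err := grp.Wait(); err != nil {
		fmt.Println("failed:", err)
	}
}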
@ -47,8 +47,6 @@ func (p *Processor) subMpool(ctx context.Context) {
			msgs[v.Message.Message.Cid()] = &v.Message.Message
		}

		log.Debugf("Processing %d mpool updates", len(msgs))

		err := p.storeMessages(msgs)
		if err != nil {
			log.Error(err)
@ -7,6 +7,7 @@ import (

	"golang.org/x/xerrors"

	"github.com/filecoin-project/specs-actors/actors/abi/big"
	"github.com/filecoin-project/specs-actors/actors/builtin"
	"github.com/filecoin-project/specs-actors/actors/builtin/power"
	"github.com/filecoin-project/specs-actors/actors/util/smoothing"
@ -15,7 +16,19 @@ import (
type powerActorInfo struct {
	common actorInfo

	epochSmoothingEstimate *smoothing.FilterEstimate
	totalRawBytes                      big.Int
	totalRawBytesCommitted             big.Int
	totalQualityAdjustedBytes          big.Int
	totalQualityAdjustedBytesCommitted big.Int
	totalPledgeCollateral              big.Int

	newRawBytes             big.Int
	newQualityAdjustedBytes big.Int
	newPledgeCollateral     big.Int
	newQAPowerSmoothed      *smoothing.FilterEstimate

	minerCount                  int64
	minerCountAboveMinimumPower int64
}

func (p *Processor) setupPower() error {
@ -25,13 +38,27 @@ func (p *Processor) setupPower() error {
	}

	if _, err := tx.Exec(`
create table if not exists power_smoothing_estimates
create table if not exists chain_power
(
	state_root text not null
		constraint power_smoothing_estimates_pk
			primary key,
	position_estimate text not null,
	velocity_estimate text not null
	state_root text not null
		constraint power_smoothing_estimates_pk
			primary key,

	new_raw_bytes_power text not null,
	new_qa_bytes_power text not null,
	new_pledge_collateral text not null,

	total_raw_bytes_power text not null,
	total_raw_bytes_committed text not null,
	total_qa_bytes_power text not null,
	total_qa_bytes_committed text not null,
	total_pledge_collateral text not null,

	qa_smoothed_position_estimate text not null,
	qa_smoothed_velocity_estimate text not null,

	miner_count int not null,
	minimum_consensus_miner_count int not null
);
`); err != nil {
		return err
@ -60,8 +87,8 @@ func (p *Processor) processPowerActors(ctx context.Context, powerTips ActorTips)
	}()

	var out []powerActorInfo
	for tipset, powers := range powerTips {
		for _, act := range powers {
	for tipset, powerStates := range powerTips {
		for _, act := range powerStates {
			var pw powerActorInfo
			pw.common = act

@ -80,7 +107,19 @@ func (p *Processor) processPowerActors(ctx context.Context, powerTips ActorTips)
				return nil, xerrors.Errorf("unmarshal state (@ %s): %w", pw.common.stateroot.String(), err)
			}

			pw.epochSmoothingEstimate = powerActorState.ThisEpochQAPowerSmoothed
			pw.totalRawBytes = powerActorState.TotalRawBytePower
			pw.totalRawBytesCommitted = powerActorState.TotalBytesCommitted
			pw.totalQualityAdjustedBytes = powerActorState.TotalQualityAdjPower
			pw.totalQualityAdjustedBytesCommitted = powerActorState.TotalQABytesCommitted
			pw.totalPledgeCollateral = powerActorState.TotalPledgeCollateral

			pw.newRawBytes = powerActorState.ThisEpochRawBytePower
			pw.newQualityAdjustedBytes = powerActorState.ThisEpochQualityAdjPower
			pw.newPledgeCollateral = powerActorState.ThisEpochPledgeCollateral
			pw.newQAPowerSmoothed = powerActorState.ThisEpochQAPowerSmoothed

			pw.minerCount = powerActorState.MinerCount
			pw.minerCountAboveMinimumPower = powerActorState.MinerAboveMinPowerCount
			out = append(out, pw)
		}
	}
@ -88,46 +127,59 @@ func (p *Processor) processPowerActors(ctx context.Context, powerTips ActorTips)
	return out, nil
}

func (p *Processor) persistPowerActors(ctx context.Context, powers []powerActorInfo) error {
func (p *Processor) persistPowerActors(ctx context.Context, powerStates []powerActorInfo) error {
	// NB: use errgroup when there is more than a single store operation
	return p.storePowerSmoothingEstimates(powers)
	return p.storePowerSmoothingEstimates(powerStates)
}

func (p *Processor) storePowerSmoothingEstimates(powers []powerActorInfo) error {
func (p *Processor) storePowerSmoothingEstimates(powerStates []powerActorInfo) error {
	tx, err := p.db.Begin()
	if err != nil {
		return xerrors.Errorf("begin power_smoothing_estimates tx: %w", err)
		return xerrors.Errorf("begin chain_power tx: %w", err)
	}

	if _, err := tx.Exec(`create temp table rse (like power_smoothing_estimates) on commit drop`); err != nil {
		return xerrors.Errorf("prep power_smoothing_estimates: %w", err)
	if _, err := tx.Exec(`create temp table cp (like chain_power) on commit drop`); err != nil {
		return xerrors.Errorf("prep chain_power: %w", err)
	}

	stmt, err := tx.Prepare(`copy rse (state_root, position_estimate, velocity_estimate) from stdin;`)
	stmt, err := tx.Prepare(`copy cp (state_root, new_raw_bytes_power, new_qa_bytes_power, new_pledge_collateral, total_raw_bytes_power, total_raw_bytes_committed, total_qa_bytes_power, total_qa_bytes_committed, total_pledge_collateral, qa_smoothed_position_estimate, qa_smoothed_velocity_estimate, miner_count, minimum_consensus_miner_count) from stdin;`)
	if err != nil {
		return xerrors.Errorf("prepare tmp power_smoothing_estimates: %w", err)
		return xerrors.Errorf("prepare tmp chain_power: %w", err)
	}

	for _, powerState := range powers {
	for _, ps := range powerStates {
		if _, err := stmt.Exec(
			powerState.common.stateroot.String(),
			powerState.epochSmoothingEstimate.PositionEstimate.String(),
			powerState.epochSmoothingEstimate.VelocityEstimate.String(),
			ps.common.stateroot.String(),
			ps.newRawBytes.String(),
			ps.newQualityAdjustedBytes.String(),
			ps.newPledgeCollateral.String(),

			ps.totalRawBytes.String(),
			ps.totalRawBytesCommitted.String(),
			ps.totalQualityAdjustedBytes.String(),
			ps.totalQualityAdjustedBytesCommitted.String(),
			ps.totalPledgeCollateral.String(),

			ps.newQAPowerSmoothed.PositionEstimate.String(),
			ps.newQAPowerSmoothed.VelocityEstimate.String(),

			ps.minerCount,
			ps.minerCountAboveMinimumPower,
		); err != nil {
			return xerrors.Errorf("failed to store smoothing estimate: %w", err)
		}
	}

	if err := stmt.Close(); err != nil {
		return xerrors.Errorf("close prepared power_smoothing_estimates: %w", err)
		return xerrors.Errorf("close prepared chain_power: %w", err)
	}

	if _, err := tx.Exec(`insert into power_smoothing_estimates select * from rse on conflict do nothing`); err != nil {
		return xerrors.Errorf("insert power_smoothing_estimates from tmp: %w", err)
	if _, err := tx.Exec(`insert into chain_power select * from cp on conflict do nothing`); err != nil {
		return xerrors.Errorf("insert chain_power from tmp: %w", err)
	}

	if err := tx.Commit(); err != nil {
		return xerrors.Errorf("commit power_smoothing_estimates tx: %w", err)
		return xerrors.Errorf("commit chain_power tx: %w", err)
	}

	return nil
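Both the chain_power and chain_reward writers follow the same lib/pq idiom: COPY rows into a temp table that drops on commit, then insert ... select ... on conflict do nothing into the real table so replays don't violate the primary key. A condensed sketch of that pattern (the table and column names here are illustrative):

package main

import (
	"database/sql"

	_ "github.com/lib/pq"
)

// bulkUpsert COPYs rows into a commit-dropped temp table and then upserts
// them into the destination — the same shape as storePowerSmoothingEstimates.
func bulkUpsert(db *sql.DB, rows [][2]string) error {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	defer tx.Rollback() // no-op once Commit succeeds

	if _, err := tx.Exec(`create temp table tmp (like target excluding constraints) on commit drop`); err != nil {
		return err
	}

	stmt, err := tx.Prepare(`copy tmp (state_root, value) from stdin`)
	if err != nil {
		return err
	}
	for _, r := range rows {
		if _, err := stmt.Exec(r[0], r[1]); err != nil {
			return err
		}
	}
	// Closing the statement flushes the COPY before the insert runs.
	if err := stmt.Close(); err != nil {
		return err
	}

	if _, err := tx.Exec(`insert into target select * from tmp on conflict do nothing`); err != nil {
		return err
	}
	return tx.Commit()
}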
@ -35,9 +35,6 @@ type Processor struct {

	// number of blocks processed at a time
	batch int

	// process communication channels
	sectorDealEvents chan *SectorDealEvent
}

type ActorTips map[types.TipSetKey][]actorInfo
@ -152,7 +149,6 @@ func (p *Processor) Start(ctx context.Context) {
				log.Errorf("Failed to handle market changes: %w", err)
				return
			}
			log.Info("Processed Market Changes")
		}()

		grp.Add(1)
@ -162,7 +158,6 @@ func (p *Processor) Start(ctx context.Context) {
				log.Errorf("Failed to handle miner changes: %w", err)
				return
			}
			log.Info("Processed Miner Changes")
		}()

		grp.Add(1)
@ -172,7 +167,6 @@ func (p *Processor) Start(ctx context.Context) {
				log.Errorf("Failed to handle reward changes: %w", err)
				return
			}
			log.Info("Processed Reward Changes")
		}()

		grp.Add(1)
@ -182,7 +176,6 @@ func (p *Processor) Start(ctx context.Context) {
				log.Errorf("Failed to handle power actor changes: %w", err)
				return
			}
			log.Info("Processes Power Changes")
		}()

		grp.Add(1)
@ -192,7 +185,6 @@ func (p *Processor) Start(ctx context.Context) {
				log.Errorf("Failed to handle message changes: %w", err)
				return
			}
			log.Info("Processed Message Changes")
		}()

		grp.Add(1)
@ -202,7 +194,6 @@ func (p *Processor) Start(ctx context.Context) {
				log.Errorf("Failed to handle common actor changes: %w", err)
				return
			}
			log.Info("Processed CommonActor Changes")
		}()

		grp.Wait()
@ -214,7 +205,7 @@ func (p *Processor) Start(ctx context.Context) {
			if err := p.refreshViews(); err != nil {
				log.Errorw("Failed to refresh views", "error", err)
			}
			log.Infow("Processed Batch", "duration", time.Since(loopStart).String())
			log.Infow("Processed Batch Complete", "duration", time.Since(loopStart).String())
		}
	}
}()
@ -378,7 +369,9 @@ where rnk <= $1
			maxBlock = bh.Height
		}
	}
	log.Infow("Gathered Blocks to Process", "start", minBlock, "end", maxBlock)
	if minBlock <= maxBlock {
		log.Infow("Gathered Blocks to Process", "start", minBlock, "end", maxBlock)
	}
	return out, rows.Close()
}
@ -5,7 +5,6 @@ import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sync/errgroup"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/specs-actors/actors/abi/big"
|
||||
@ -13,20 +12,23 @@ import (
|
||||
"github.com/filecoin-project/specs-actors/actors/builtin/reward"
|
||||
"github.com/filecoin-project/specs-actors/actors/util/smoothing"
|
||||
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
)
|
||||
|
||||
type rewardActorInfo struct {
|
||||
common actorInfo
|
||||
|
||||
// expected power in bytes during this epoch
|
||||
baselinePower big.Int
|
||||
cumSumBaselinePower big.Int
|
||||
cumSumRealizedPower big.Int
|
||||
|
||||
// base reward in attofil for each block found during this epoch
|
||||
baseBlockReward big.Int
|
||||
effectiveNetworkTime int64
|
||||
effectiveBaselinePower big.Int
|
||||
|
||||
epochSmoothingEstimate *smoothing.FilterEstimate
|
||||
newBaselinePower big.Int
|
||||
newBaseReward big.Int
|
||||
newSmoothingEstimate *smoothing.FilterEstimate
|
||||
|
||||
totalMinedReward big.Int
|
||||
}
|
||||
|
||||
func (p *Processor) setupRewards() error {
|
||||
@ -36,34 +38,23 @@ func (p *Processor) setupRewards() error {
|
||||
}
|
||||
|
||||
if _, err := tx.Exec(`
|
||||
/*
|
||||
* captures base block reward per miner per state root and does not
|
||||
* include penalties or gas reward
|
||||
*/
|
||||
create table if not exists base_block_rewards
|
||||
(
|
||||
state_root text not null
|
||||
constraint block_rewards_pk
|
||||
primary key,
|
||||
base_block_reward numeric not null
|
||||
);
|
||||
|
||||
/* captures chain-specific power state for any given stateroot */
|
||||
create table if not exists chain_power
|
||||
create table if not exists chain_reward
|
||||
(
|
||||
state_root text not null
|
||||
constraint chain_power_pk
|
||||
constraint chain_reward_pk
|
||||
primary key,
|
||||
baseline_power text not null
|
||||
);
|
||||
cum_sum_baseline text not null,
|
||||
cum_sum_realized text not null,
|
||||
effective_network_time int not null,
|
||||
effective_baseline_power text not null,
|
||||
|
||||
create table if not exists reward_smoothing_estimates
|
||||
(
|
||||
state_root text not null
|
||||
constraint reward_smoothing_estimates_pk
|
||||
primary key,
|
||||
position_estimate text not null,
|
||||
velocity_estimate text not null
|
||||
new_baseline_power text not null,
|
||||
new_reward numeric not null,
|
||||
new_reward_smoothed_position_estimate text not null,
|
||||
new_reward_smoothed_velocity_estimate text not null,
|
||||
|
||||
total_mined_reward text not null
|
||||
);
|
||||
`); err != nil {
|
||||
return err
|
||||
@ -113,9 +104,14 @@ func (p *Processor) processRewardActors(ctx context.Context, rewardTips ActorTip
|
||||
return nil, xerrors.Errorf("unmarshal state (@ %s): %w", rw.common.stateroot.String(), err)
|
||||
}
|
||||
|
||||
rw.baseBlockReward = rewardActorState.ThisEpochReward
|
||||
rw.baselinePower = rewardActorState.ThisEpochBaselinePower
|
||||
rw.epochSmoothingEstimate = rewardActorState.ThisEpochRewardSmoothed
|
||||
rw.cumSumBaselinePower = rewardActorState.CumsumBaseline
|
||||
rw.cumSumRealizedPower = rewardActorState.CumsumRealized
|
||||
rw.effectiveNetworkTime = int64(rewardActorState.EffectiveNetworkTime)
|
||||
rw.effectiveBaselinePower = rewardActorState.EffectiveBaselinePower
|
||||
rw.newBaselinePower = rewardActorState.ThisEpochBaselinePower
|
||||
rw.newBaseReward = rewardActorState.ThisEpochReward
|
||||
rw.newSmoothingEstimate = rewardActorState.ThisEpochRewardSmoothed
|
||||
rw.totalMinedReward = rewardActorState.TotalMined
|
||||
out = append(out, rw)
|
||||
}
|
||||
}
|
||||
@ -145,8 +141,14 @@ func (p *Processor) processRewardActors(ctx context.Context, rewardTips ActorTip
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rw.baseBlockReward = rewardActorState.ThisEpochReward
|
||||
rw.baselinePower = rewardActorState.ThisEpochBaselinePower
|
||||
rw.cumSumBaselinePower = rewardActorState.CumsumBaseline
|
||||
rw.cumSumRealizedPower = rewardActorState.CumsumRealized
|
||||
rw.effectiveNetworkTime = int64(rewardActorState.EffectiveNetworkTime)
|
||||
rw.effectiveBaselinePower = rewardActorState.EffectiveBaselinePower
|
||||
rw.newBaselinePower = rewardActorState.ThisEpochBaselinePower
|
||||
rw.newBaseReward = rewardActorState.ThisEpochReward
|
||||
rw.newSmoothingEstimate = rewardActorState.ThisEpochRewardSmoothed
|
||||
rw.totalMinedReward = rewardActorState.TotalMined
|
||||
out = append(out, rw)
|
||||
}
|
||||
|
||||
@ -159,149 +161,47 @@ func (p *Processor) persistRewardActors(ctx context.Context, rewards []rewardAct
log.Debugw("Persisted Reward Actors", "duration", time.Since(start).String())
}()

grp, ctx := errgroup.WithContext(ctx) //nolint

grp.Go(func() error {
if err := p.storeChainPower(rewards); err != nil {
return err
}
return nil
})

grp.Go(func() error {
if err := p.storeBaseBlockReward(rewards); err != nil {
return err
}
return nil
})

grp.Go(func() error {
if err := p.storeRewardSmoothingEstimates(rewards); err != nil {
return err
}
return nil
})

return grp.Wait()
}

func (p *Processor) storeChainPower(rewards []rewardActorInfo) error {
tx, err := p.db.Begin()
if err != nil {
return xerrors.Errorf("begin chain_power tx: %w", err)
return xerrors.Errorf("begin chain_reward tx: %w", err)
}

if _, err := tx.Exec(`create temp table cp (like chain_power excluding constraints) on commit drop`); err != nil {
return xerrors.Errorf("prep chain_power temp: %w", err)
if _, err := tx.Exec(`create temp table cr (like chain_reward excluding constraints) on commit drop`); err != nil {
return xerrors.Errorf("prep chain_reward temp: %w", err)
}

stmt, err := tx.Prepare(`copy cp (state_root, baseline_power) from STDIN`)
stmt, err := tx.Prepare(`copy cr ( state_root, cum_sum_baseline, cum_sum_realized, effective_network_time, effective_baseline_power, new_baseline_power, new_reward, new_reward_smoothed_position_estimate, new_reward_smoothed_velocity_estimate, total_mined_reward) from STDIN`)
if err != nil {
return xerrors.Errorf("prepare tmp chain_power: %w", err)
return xerrors.Errorf("prepare tmp chain_reward: %w", err)
}

for _, rewardState := range rewards {
if _, err := stmt.Exec(
rewardState.common.stateroot.String(),
rewardState.baselinePower.String(),
rewardState.cumSumBaselinePower.String(),
rewardState.cumSumRealizedPower.String(),
rewardState.effectiveNetworkTime,
rewardState.effectiveBaselinePower.String(),
rewardState.newBaselinePower.String(),
rewardState.newBaseReward.String(),
rewardState.newSmoothingEstimate.PositionEstimate.String(),
rewardState.newSmoothingEstimate.VelocityEstimate.String(),
rewardState.totalMinedReward.String(),
); err != nil {
log.Errorw("failed to store chain power", "state_root", rewardState.common.stateroot, "error", err)
}
}

if err := stmt.Close(); err != nil {
return xerrors.Errorf("close prepared chain_power: %w", err)
return xerrors.Errorf("close prepared chain_reward: %w", err)
}

if _, err := tx.Exec(`insert into chain_power select * from cp on conflict do nothing`); err != nil {
return xerrors.Errorf("insert chain_power from tmp: %w", err)
if _, err := tx.Exec(`insert into chain_reward select * from cr on conflict do nothing`); err != nil {
return xerrors.Errorf("insert chain_reward from tmp: %w", err)
}

if err := tx.Commit(); err != nil {
return xerrors.Errorf("commit chain_power tx: %w", err)
}

return nil
}
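
The writer above uses chainwatch's usual bulk-load shape: stage rows into a session-temporary table with COPY, then fold them into the destination table with an insert that ignores conflicts. A minimal, generic sketch of the same pattern using lib/pq follows; the table and column names are placeholders, not chainwatch's schema:

```go
package main

import (
	"database/sql"

	"github.com/lib/pq"
	"golang.org/x/xerrors"
)

// bulkUpsert stages rows in a temp table via COPY, then inserts them into the
// destination table, skipping duplicates. "items" and its columns are
// illustrative placeholders.
func bulkUpsert(db *sql.DB, rows [][2]string) error {
	tx, err := db.Begin()
	if err != nil {
		return xerrors.Errorf("begin tx: %w", err)
	}

	if _, err := tx.Exec(`create temp table tmp_items (like items excluding constraints) on commit drop`); err != nil {
		return xerrors.Errorf("prep temp table: %w", err)
	}

	stmt, err := tx.Prepare(pq.CopyIn("tmp_items", "key", "value"))
	if err != nil {
		return xerrors.Errorf("prepare copy: %w", err)
	}
	for _, r := range rows {
		if _, err := stmt.Exec(r[0], r[1]); err != nil {
			return xerrors.Errorf("copy row: %w", err)
		}
	}
	if err := stmt.Close(); err != nil { // closing flushes the COPY stream
		return xerrors.Errorf("close copy: %w", err)
	}

	if _, err := tx.Exec(`insert into items select * from tmp_items on conflict do nothing`); err != nil {
		return xerrors.Errorf("insert from temp: %w", err)
	}
	return tx.Commit()
}
```

Staging through a temp table keeps the COPY fast while still getting on-conflict semantics, which COPY alone does not provide.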

func (p *Processor) storeBaseBlockReward(rewards []rewardActorInfo) error {
tx, err := p.db.Begin()
if err != nil {
return xerrors.Errorf("begin base_block_reward tx: %w", err)
}

if _, err := tx.Exec(`create temp table bbr (like base_block_rewards excluding constraints) on commit drop`); err != nil {
return xerrors.Errorf("prep base_block_reward temp: %w", err)
}

stmt, err := tx.Prepare(`copy bbr (state_root, base_block_reward) from STDIN`)
if err != nil {
return xerrors.Errorf("prepare tmp base_block_reward: %w", err)
}

for _, rewardState := range rewards {
baseBlockReward := big.Div(rewardState.baseBlockReward, big.NewIntUnsigned(build.BlocksPerEpoch))
if _, err := stmt.Exec(
rewardState.common.stateroot.String(),
baseBlockReward.String(),
); err != nil {
log.Errorw("failed to store base block reward", "state_root", rewardState.common.stateroot, "error", err)
}
}

if err := stmt.Close(); err != nil {
return xerrors.Errorf("close prepared base_block_reward: %w", err)
}

if _, err := tx.Exec(`insert into base_block_rewards select * from bbr on conflict do nothing`); err != nil {
return xerrors.Errorf("insert base_block_reward from tmp: %w", err)
}

if err := tx.Commit(); err != nil {
return xerrors.Errorf("commit base_block_reward tx: %w", err)
}

return nil
}

func (p *Processor) storeRewardSmoothingEstimates(rewards []rewardActorInfo) error {
tx, err := p.db.Begin()
if err != nil {
return xerrors.Errorf("begin reward_smoothing_estimates tx: %w", err)
}

if _, err := tx.Exec(`create temp table rse (like reward_smoothing_estimates) on commit drop`); err != nil {
return xerrors.Errorf("prep reward_smoothing_estimates: %w", err)
}

stmt, err := tx.Prepare(`copy rse (state_root, position_estimate, velocity_estimate) from stdin;`)
if err != nil {
return xerrors.Errorf("prepare tmp reward_smoothing_estimates: %w", err)
}

for _, rewardState := range rewards {
if rewardState.epochSmoothingEstimate == nil {
continue
}
if _, err := stmt.Exec(
rewardState.common.stateroot.String(),
rewardState.epochSmoothingEstimate.PositionEstimate.String(),
rewardState.epochSmoothingEstimate.VelocityEstimate.String(),
); err != nil {
return xerrors.Errorf("failed to store smoothing estimate: %w", err)
}
}

if err := stmt.Close(); err != nil {
return xerrors.Errorf("close prepared reward_smoothing_estimates: %w", err)
}

if _, err := tx.Exec(`insert into reward_smoothing_estimates select * from rse on conflict do nothing`); err != nil {
return return xerrors.Errorf("insert reward_smoothing_estimates from tmp: %w", err)
}

if err := tx.Commit(); err != nil {
return xerrors.Errorf("commit reward_smoothing_estimates tx: %w", err)
return xerrors.Errorf("commit chain_reward tx: %w", err)
}

return nil

@ -2,20 +2,25 @@ package main

import (
"database/sql"
"fmt"
"net/http"
_ "net/http/pprof"
"os"
"strings"

_ "github.com/lib/pq"

lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/go-jsonrpc"
logging "github.com/ipfs/go-log/v2"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"

"github.com/filecoin-project/lotus/api"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/cmd/lotus-chainwatch/processor"
"github.com/filecoin-project/lotus/cmd/lotus-chainwatch/scheduler"
"github.com/filecoin-project/lotus/cmd/lotus-chainwatch/syncer"
"github.com/filecoin-project/lotus/cmd/lotus-chainwatch/util"
)

var runCmd = &cli.Command{
@ -39,9 +44,24 @@ var runCmd = &cli.Command{
return err
}

api, closer, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return err
var api api.FullNode
var closer jsonrpc.ClientCloser
var err error
if tokenMaddr := cctx.String("api"); tokenMaddr != "" {
toks := strings.Split(tokenMaddr, ":")
if len(toks) != 2 {
return fmt.Errorf("invalid api tokens, expected <token>:<maddr>, got: %s", tokenMaddr)
}

api, closer, err = util.GetFullNodeAPIUsingCredentials(cctx.Context, toks[1], toks[0])
if err != nil {
return err
}
} else {
api, closer, err = lcli.GetFullNodeAPI(cctx)
if err != nil {
return err
}
}
defer closer()
ctx := lcli.ReqContext(cctx)
@ -70,7 +90,7 @@ var runCmd = &cli.Command{
}
db.SetMaxOpenConns(1350)

sync := syncer.NewSyncer(db, api)
sync := syncer.NewSyncer(db, api, 1400)
sync.Start(ctx)

proc := processor.NewProcessor(ctx, db, api, maxBatch)

@ -3,7 +3,6 @@ package scheduler
import (
"context"
"database/sql"
"time"

"golang.org/x/xerrors"
)
@ -24,9 +23,9 @@ func setupTopMinerByBaseRewardSchema(ctx context.Context, db *sql.DB) error {
with total_rewards_by_miner as (
select
b.miner,
sum(bbr.base_block_reward * b.win_count) as total_reward
sum(cr.new_reward * b.win_count) as total_reward
from blocks b
inner join base_block_rewards bbr on b.parentstateroot = bbr.state_root
inner join chain_reward cr on b.parentstateroot = cr.state_root
group by 1
) select
rank() over (order by total_reward desc),
@ -43,17 +42,17 @@ func setupTopMinerByBaseRewardSchema(ctx context.Context, db *sql.DB) error {
b."timestamp" as current_timestamp,
max(b.height) as current_height
from blocks b
join base_block_rewards bbr on b.parentstateroot = bbr.state_root
where bbr.base_block_reward is not null
join chain_reward cr on b.parentstateroot = cr.state_root
where cr.new_reward is not null
group by 1
order by 1 desc
limit 1;
`); err != nil {
return xerrors.Errorf("create top_miner_by_base_reward views: %w", err)
return xerrors.Errorf("create top_miners_by_base_reward views: %w", err)
}

if err := tx.Commit(); err != nil {
return xerrors.Errorf("committing top_miner_by_base_reward views; %w", err)
return xerrors.Errorf("committing top_miners_by_base_reward views; %w", err)
}
return nil
}
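
Once created, the materialized view can be read like any other relation. A small sketch; only the view name is taken from the schema above, the helper and its query are illustrative:

```go
package main

import (
	"database/sql"
)

// countTopMiners reads the top_miners_by_base_reward materialized view.
// Illustrative only; not part of the scheduler package.
func countTopMiners(db *sql.DB) (int, error) {
	var n int
	err := db.QueryRow(`select count(*) from top_miners_by_base_reward`).Scan(&n)
	return n, err
}
```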

@ -65,11 +64,6 @@ func refreshTopMinerByBaseReward(ctx context.Context, db *sql.DB) error {
default:
}

t := time.Now()
defer func() {
log.Debugw("refresh top_miners_by_base_reward", "duration", time.Since(t).String())
}()

_, err := db.Exec("refresh materialized view top_miners_by_base_reward;")
if err != nil {
return xerrors.Errorf("refresh top_miners_by_base_reward: %w", err)

@ -40,7 +40,7 @@ func (s *Scheduler) Start(ctx context.Context) {

go func() {
// run once on start after schema has initialized
time.Sleep(5 * time.Second)
time.Sleep(1 * time.Minute)
if err := refreshTopMinerByBaseReward(ctx, s.db); err != nil {
log.Errorw("failed to refresh top miner", "error", err)
}

@ -11,16 +11,17 @@ import (
func (s *Syncer) subBlocks(ctx context.Context) {
sub, err := s.node.SyncIncomingBlocks(ctx)
if err != nil {
log.Error(err)
log.Errorf("opening incoming block channel: %+v", err)
return
}

log.Infow("Capturing incoming blocks")
for bh := range sub {
err := s.storeHeaders(map[cid.Cid]*types.BlockHeader{
bh.Cid(): bh,
}, false, time.Now())
if err != nil {
log.Errorf("%+v", err)
log.Errorf("storing incoming block header: %+v", err)
}
}
}

@ -23,14 +23,17 @@ var log = logging.Logger("syncer")
type Syncer struct {
db *sql.DB

lookbackLimit uint64

headerLk sync.Mutex
node api.FullNode
}

func NewSyncer(db *sql.DB, node api.FullNode) *Syncer {
func NewSyncer(db *sql.DB, node api.FullNode, lookbackLimit uint64) *Syncer {
return &Syncer{
db: db,
node: node,
db: db,
node: node,
lookbackLimit: lookbackLimit,
}
}

@ -148,25 +151,28 @@ create index if not exists state_heights_parentstateroot_index
}

func (s *Syncer) Start(ctx context.Context) {
if err := logging.SetLogLevel("syncer", "info"); err != nil {
log.Fatal(err)
}
log.Debug("Starting Syncer")

if err := s.setupSchemas(); err != nil {
log.Fatal(err)
}

// doing the initial sync here lets us avoid the HCCurrent case in the switch
head, err := s.node.ChainHead(ctx)
if err != nil {
log.Fatalw("Failed to get chain head from lotus", "error", err)
}
// capture all reported blocks
go s.subBlocks(ctx)

unsynced, err := s.unsyncedBlocks(ctx, head, time.Unix(0, 0))
// we need to ensure that on a restart we don't reprocess the whole flarping chain
var sinceEpoch uint64
blkCID, height, err := s.mostRecentlySyncedBlockHeight()
if err != nil {
log.Fatalw("failed to gather unsynced blocks", "error", err)
}

if err := s.storeHeaders(unsynced, true, time.Now()); err != nil {
log.Fatalw("failed to store unsynced blocks", "error", err)
log.Fatalw("failed to find most recently synced block", "error", err)
} else {
if height > 0 {
log.Infow("Found starting point for syncing", "blockCID", blkCID.String(), "height", height)
sinceEpoch = uint64(height)
}
}

// continue to keep the block headers table up to date.
@ -175,13 +181,18 @@ func (s *Syncer) Start(ctx context.Context) {
log.Fatal(err)
}

lastSynced := time.Now()
go func() {
for notif := range notifs {
for _, change := range notif {
switch change.Type {
case store.HCCurrent:
// This case is important for capturing the initial state of a node
// which might be on a dead network with no new blocks being produced.
// It also allows a fresh Chainwatch instance to start walking the
// chain without waiting for a new block to come along.
fallthrough
case store.HCApply:
unsynced, err := s.unsyncedBlocks(ctx, change.Val, lastSynced)
unsynced, err := s.unsyncedBlocks(ctx, change.Val, sinceEpoch)
if err != nil {
log.Errorw("failed to gather unsynced blocks", "error", err)
}
@ -194,13 +205,13 @@ func (s *Syncer) Start(ctx context.Context) {
continue
}

if err := s.storeHeaders(unsynced, true, lastSynced); err != nil {
if err := s.storeHeaders(unsynced, true, time.Now()); err != nil {
// so this is pretty bad, need some kind of retry..
// for now just log an error and the blocks will be attempted again on next notif
log.Errorw("failed to store unsynced blocks", "error", err)
}

lastSynced = time.Now()
sinceEpoch = uint64(change.Val.Height())
case store.HCRevert:
log.Debug("revert todo")
}
@ -209,12 +220,8 @@ func (s *Syncer) Start(ctx context.Context) {
}()
}

func (s *Syncer) unsyncedBlocks(ctx context.Context, head *types.TipSet, since time.Time) (map[cid.Cid]*types.BlockHeader, error) {
// get a list of blocks we have already synced in the past 3 mins. This ensures we aren't returning the entire
// table every time.
lookback := since.Add(-(time.Minute * 3))
log.Debugw("Gathering unsynced blocks", "since", lookback.String())
hasList, err := s.syncedBlocks(lookback)
func (s *Syncer) unsyncedBlocks(ctx context.Context, head *types.TipSet, since uint64) (map[cid.Cid]*types.BlockHeader, error) {
hasList, err := s.syncedBlocks(since, s.lookbackLimit)
if err != nil {
return nil, err
}
@ -257,9 +264,8 @@ func (s *Syncer) unsyncedBlocks(ctx context.Context, head *types.TipSet, since t
return toSync, nil
}

func (s *Syncer) syncedBlocks(timestamp time.Time) (map[cid.Cid]struct{}, error) {
// timestamp is used to return a configurable amount of rows based on when they were last added.
rws, err := s.db.Query(`select cid FROM blocks_synced where synced_at > $1`, timestamp.Unix())
func (s *Syncer) syncedBlocks(since, limit uint64) (map[cid.Cid]struct{}, error) {
rws, err := s.db.Query(`select bs.cid FROM blocks_synced bs left join blocks b on b.cid = bs.cid where b.height <= $1 and bs.processed_at is not null limit $2`, since, limit)
if err != nil {
return nil, xerrors.Errorf("Failed to query blocks_synced: %w", err)
}
@ -281,14 +287,43 @@ func (s *Syncer) syncedBlocks(timestamp time.Time) (map[cid.Cid]struct{}, error)
return out, nil
}

func (s *Syncer) mostRecentlySyncedBlockHeight() (cid.Cid, int64, error) {
rw := s.db.QueryRow(`
select blocks_synced.cid, b.height
from blocks_synced
left join blocks b on blocks_synced.cid = b.cid
where processed_at is not null
order by height desc
limit 1
`)

var c string
var h int64
if err := rw.Scan(&c, &h); err != nil {
if err == sql.ErrNoRows {
return cid.Undef, 0, nil
}
return cid.Undef, -1, err
}

ci, err := cid.Parse(c)
if err != nil {
return cid.Undef, -1, err
}

return ci, h, nil
}

func (s *Syncer) storeCirculatingSupply(ctx context.Context, tipset *types.TipSet) error {
supply, err := s.node.StateCirculatingSupply(ctx, tipset.Key())
if err != nil {
return err
}

ceInsert := `insert into chain_economics (parent_state_root, circulating_fil, vested_fil, mined_fil, burnt_fil, locked_fil)` +
`values ('%s', '%s', '%s', '%s', '%s', '%s');`
ceInsert := `insert into chain_economics (parent_state_root, circulating_fil, vested_fil, mined_fil, burnt_fil, locked_fil) ` +
`values ('%s', '%s', '%s', '%s', '%s', '%s') on conflict on constraint chain_economics_pk do ` +
`update set (circulating_fil, vested_fil, mined_fil, burnt_fil, locked_fil) = ('%[2]s', '%[3]s', '%[4]s', '%[5]s', '%[6]s') ` +
`where chain_economics.parent_state_root = '%[1]s';`

if _, err := s.db.Exec(fmt.Sprintf(ceInsert,
tipset.ParentState().String(),

34
cmd/lotus-chainwatch/util/api.go
Normal file
@ -0,0 +1,34 @@
package util

import (
"context"
"net/http"

"github.com/filecoin-project/go-jsonrpc"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/client"
ma "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
)

func GetFullNodeAPIUsingCredentials(ctx context.Context, listenAddr, token string) (api.FullNode, jsonrpc.ClientCloser, error) {
parsedAddr, err := ma.NewMultiaddr(listenAddr)
if err != nil {
return nil, nil, err
}

_, addr, err := manet.DialArgs(parsedAddr)
if err != nil {
return nil, nil, err
}

return client.NewFullNodeRPC(ctx, apiURI(addr), apiHeaders(token))
}
func apiURI(addr string) string {
return "ws://" + addr + "/rpc/v0"
}
func apiHeaders(token string) http.Header {
headers := http.Header{}
headers.Add("Authorization", "Bearer "+token)
return headers
}
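
A usage sketch for this helper, mirroring how run.go parses its `--api` flag; the token and multiaddr literals below are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"strings"

	"github.com/filecoin-project/lotus/cmd/lotus-chainwatch/util"
)

// connect shows the <token>:<maddr> convention accepted by run.go's --api
// flag. Both values are fabricated for illustration.
func connect(ctx context.Context) error {
	tokenMaddr := "s3cr3t-jwt-token:/ip4/127.0.0.1/tcp/1234"
	toks := strings.Split(tokenMaddr, ":")
	if len(toks) != 2 {
		return fmt.Errorf("expected <token>:<maddr>, got: %s", tokenMaddr)
	}

	api, closer, err := util.GetFullNodeAPIUsingCredentials(ctx, toks[1], toks[0])
	if err != nil {
		return err
	}
	defer closer()

	head, err := api.ChainHead(ctx)
	if err != nil {
		return err
	}
	fmt.Println("connected; chain head height:", head.Height())
	return nil
}
```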

@ -133,6 +133,18 @@ var runCmd = &cli.Command{
Usage: "do not send any messages",
Value: false,
},
&cli.BoolFlag{
Name: "pre-commit",
EnvVars: []string{"LOTUS_PCR_PRE_COMMIT"},
Usage: "process PreCommitSector messages",
Value: true,
},
&cli.BoolFlag{
Name: "prove-commit",
EnvVars: []string{"LOTUS_PCR_PROVE_COMMIT"},
Usage: "process ProveCommitSector messages",
Value: true,
},
&cli.IntFlag{
Name: "head-delay",
EnvVars: []string{"LOTUS_PCR_HEAD_DELAY"},
@ -180,12 +192,16 @@ var runCmd = &cli.Command{
percentExtra := cctx.Int("percent-extra")
maxMessageQueue := cctx.Int("max-message-queue")
dryRun := cctx.Bool("dry-run")
preCommitEnabled := cctx.Bool("pre-commit")
proveCommitEnabled := cctx.Bool("prove-commit")

rf := &refunder{
api: api,
wallet: from,
percentExtra: percentExtra,
dryRun: dryRun,
api: api,
wallet: from,
percentExtra: percentExtra,
dryRun: dryRun,
preCommitEnabled: preCommitEnabled,
proveCommitEnabled: proveCommitEnabled,
}

for tipset := range tipsetsCh {
@ -281,10 +297,12 @@ type refunderNodeApi interface {
}

type refunder struct {
api refunderNodeApi
wallet address.Address
percentExtra int
dryRun bool
api refunderNodeApi
wallet address.Address
percentExtra int
dryRun bool
preCommitEnabled bool
proveCommitEnabled bool
}

func (r *refunder) ProcessTipset(ctx context.Context, tipset *types.TipSet) (*MinersRefund, error) {
@ -331,7 +349,12 @@ func (r *refunder) ProcessTipset(ctx context.Context, tipset *types.TipSet) (*Mi

switch m.Method {
case builtin.MethodsMiner.ProveCommitSector:
if !r.proveCommitEnabled {
continue
}

messageMethod = "ProveCommitSector"

if recps[i].ExitCode != exitcode.Ok {
log.Debugw("skipping non-ok exitcode message", "method", messageMethod, "cid", msg.Cid, "miner", m.To, "exitcode", recps[i].ExitCode)
continue
@ -369,6 +392,10 @@ func (r *refunder) ProcessTipset(ctx context.Context, tipset *types.TipSet) (*Mi

refundValue = collateral
case builtin.MethodsMiner.PreCommitSector:
if !r.preCommitEnabled {
continue
}

messageMethod = "PreCommitSector"

if recps[i].ExitCode != exitcode.Ok {

71
cmd/lotus-seal-worker/info.go
Normal file
@ -0,0 +1,71 @@
package main

import (
"fmt"

"github.com/urfave/cli/v2"
"golang.org/x/xerrors"

"github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
)

var infoCmd = &cli.Command{
Name: "info",
Usage: "Print worker info",
Action: func(cctx *cli.Context) error {
api, closer, err := lcli.GetWorkerAPI(cctx)
if err != nil {
return err
}
defer closer()

ctx := lcli.ReqContext(cctx)

ver, err := api.Version(ctx)
if err != nil {
return xerrors.Errorf("getting version: %w", err)
}

fmt.Println("Worker version: ", ver)
fmt.Print("CLI version: ")
cli.VersionPrinter(cctx)
fmt.Println()

info, err := api.Info(ctx)
if err != nil {
return xerrors.Errorf("getting info: %w", err)
}

fmt.Printf("Hostname: %s\n", info.Hostname)
fmt.Printf("CPUs: %d; GPUs: %v\n", info.Resources.CPUs, info.Resources.GPUs)
fmt.Printf("RAM: %s; Swap: %s\n", types.SizeStr(types.NewInt(info.Resources.MemPhysical)), types.SizeStr(types.NewInt(info.Resources.MemSwap)))
fmt.Printf("Reserved memory: %s\n", types.SizeStr(types.NewInt(info.Resources.MemReserved)))
fmt.Println()

paths, err := api.Paths(ctx)
if err != nil {
return xerrors.Errorf("getting path info: %w", err)
}

for _, path := range paths {
fmt.Printf("%s:\n", path.ID)
fmt.Printf("\tWeight: %d; Use: ", path.Weight)
if path.CanSeal || path.CanStore {
if path.CanSeal {
fmt.Print("Seal ")
}
if path.CanStore {
fmt.Print("Store")
}
fmt.Println("")
} else {
fmt.Print("Use: ReadOnly")
}
fmt.Printf("\tLocal: %s\n", path.LocalPath)
}

return nil
},
}

@ -16,6 +16,7 @@ import (
"github.com/google/uuid"
"github.com/gorilla/mux"
logging "github.com/ipfs/go-log/v2"
manet "github.com/multiformats/go-multiaddr/net"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"

@ -46,10 +47,10 @@ const FlagWorkerRepoDeprecation = "workerrepo"
func main() {
lotuslog.SetupLogLevels()

log.Info("Starting lotus worker")

local := []*cli.Command{
runCmd,
infoCmd,
storageCmd,
}

app := &cli.App{
@ -153,6 +154,8 @@ var runCmd = &cli.Command{
return nil
},
Action: func(cctx *cli.Context) error {
log.Info("Starting lotus worker")

if !cctx.Bool("enable-gpu-proving") {
if err := os.Setenv("BELLMAN_NO_GPU", "true"); err != nil {
return xerrors.Errorf("could not set no-gpu env: %+v", err)
@ -342,6 +345,8 @@ var runCmd = &cli.Command{
SealProof: spt,
TaskTypes: taskTypes,
}, remote, localStore, nodeApi),
localStore: localStore,
ls: lr,
}

mux := mux.NewRouter()
@ -383,6 +388,32 @@ var runCmd = &cli.Command{
return err
}

{
a, err := net.ResolveTCPAddr("tcp", address)
if err != nil {
return xerrors.Errorf("parsing address: %w", err)
}

ma, err := manet.FromNetAddr(a)
if err != nil {
return xerrors.Errorf("creating api multiaddress: %w", err)
}

if err := lr.SetAPIEndpoint(ma); err != nil {
return xerrors.Errorf("setting api endpoint: %w", err)
}

ainfo, err := lcli.GetAPIInfo(cctx, repo.StorageMiner)
if err != nil {
return xerrors.Errorf("could not get miner API info: %w", err)
}

// TODO: ideally this would be a token with some permissions dropped
if err := lr.SetAPIToken(ainfo.Token); err != nil {
return xerrors.Errorf("setting api token: %w", err)
}
}

log.Info("Waiting for tasks")

go func() {

@ -3,19 +3,44 @@ package main
import (
"context"

"github.com/mitchellh/go-homedir"
"golang.org/x/xerrors"

"github.com/filecoin-project/specs-storage/storage"

sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"

"github.com/filecoin-project/lotus/build"
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
)

type worker struct {
*sectorstorage.LocalWorker

localStore *stores.Local
ls stores.LocalStorage
}

func (w *worker) Version(context.Context) (build.Version, error) {
return build.APIVersion, nil
}

func (w *worker) StorageAddLocal(ctx context.Context, path string) error {
path, err := homedir.Expand(path)
if err != nil {
return xerrors.Errorf("expanding local path: %w", err)
}

if err := w.localStore.OpenPath(ctx, path); err != nil {
return xerrors.Errorf("opening local path: %w", err)
}

if err := w.ls.SetStorage(func(sc *stores.StorageConfig) {
sc.StoragePaths = append(sc.StoragePaths, stores.LocalPath{Path: path})
}); err != nil {
return xerrors.Errorf("get storage config: %w", err)
}

return nil
}

var _ storage.Sealer = &worker{}

105
cmd/lotus-seal-worker/storage.go
Normal file
@ -0,0 +1,105 @@
package main

import (
"encoding/json"
"io/ioutil"
"os"
"path/filepath"

"github.com/google/uuid"
"github.com/mitchellh/go-homedir"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"

lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
)

const metaFile = "sectorstore.json"

var storageCmd = &cli.Command{
Name: "storage",
Usage: "manage sector storage",
Subcommands: []*cli.Command{
storageAttachCmd,
},
}

var storageAttachCmd = &cli.Command{
Name: "attach",
Usage: "attach local storage path",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "init",
Usage: "initialize the path first",
},
&cli.Uint64Flag{
Name: "weight",
Usage: "(for init) path weight",
Value: 10,
},
&cli.BoolFlag{
Name: "seal",
Usage: "(for init) use path for sealing",
},
&cli.BoolFlag{
Name: "store",
Usage: "(for init) use path for long-term storage",
},
},
Action: func(cctx *cli.Context) error {
nodeApi, closer, err := lcli.GetWorkerAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := lcli.ReqContext(cctx)

if !cctx.Args().Present() {
return xerrors.Errorf("must specify storage path to attach")
}

p, err := homedir.Expand(cctx.Args().First())
if err != nil {
return xerrors.Errorf("expanding path: %w", err)
}

if cctx.Bool("init") {
if err := os.MkdirAll(p, 0755); err != nil {
if !os.IsExist(err) {
return err
}
}

_, err := os.Stat(filepath.Join(p, metaFile))
if !os.IsNotExist(err) {
if err == nil {
return xerrors.Errorf("path is already initialized")
}
return err
}

cfg := &stores.LocalStorageMeta{
ID: stores.ID(uuid.New().String()),
Weight: cctx.Uint64("weight"),
CanSeal: cctx.Bool("seal"),
CanStore: cctx.Bool("store"),
}

if !(cfg.CanStore || cfg.CanSeal) {
return xerrors.Errorf("must specify at least one of --store or --seal")
}

b, err := json.MarshalIndent(cfg, "", " ")
if err != nil {
return xerrors.Errorf("marshaling storage config: %w", err)
}

if err := ioutil.WriteFile(filepath.Join(p, metaFile), b, 0644); err != nil {
return xerrors.Errorf("persisting storage metadata (%s): %w", filepath.Join(p, metaFile), err)
}
}

return nodeApi.StorageAddLocal(ctx, p)
},
}

@ -236,8 +236,10 @@ var stateList = []stateMeta{
{col: 39, state: "Total"},
{col: color.FgGreen, state: sealing.Proving},

{col: color.FgBlue, state: sealing.Empty},
{col: color.FgBlue, state: sealing.WaitDeals},

{col: color.FgRed, state: sealing.UndefinedSectorState},
{col: color.FgYellow, state: sealing.Empty},
{col: color.FgYellow, state: sealing.Packing},
{col: color.FgYellow, state: sealing.PreCommit1},
{col: color.FgYellow, state: sealing.PreCommit2},
@ -245,9 +247,13 @@ var stateList = []stateMeta{
{col: color.FgYellow, state: sealing.PreCommitWait},
{col: color.FgYellow, state: sealing.WaitSeed},
{col: color.FgYellow, state: sealing.Committing},
{col: color.FgYellow, state: sealing.SubmitCommit},
{col: color.FgYellow, state: sealing.CommitWait},
{col: color.FgYellow, state: sealing.FinalizeSector},

{col: color.FgCyan, state: sealing.Removing},
{col: color.FgCyan, state: sealing.Removed},

{col: color.FgRed, state: sealing.FailedUnrecoverable},
{col: color.FgRed, state: sealing.SealPreCommit1Failed},
{col: color.FgRed, state: sealing.SealPreCommit2Failed},
@ -259,6 +265,9 @@ var stateList = []stateMeta{
{col: color.FgRed, state: sealing.Faulty},
{col: color.FgRed, state: sealing.FaultReported},
{col: color.FgRed, state: sealing.FaultedFinal},
{col: color.FgRed, state: sealing.RemoveFailed},
{col: color.FgRed, state: sealing.DealsExpired},
{col: color.FgRed, state: sealing.RecoverDealIDs},
}

func init() {

@ -155,6 +155,9 @@ var sealingJobsCmd = &cli.Command{

// oldest first
sort.Slice(lines, func(i, j int) bool {
if lines[i].RunWait != lines[j].RunWait {
return lines[i].RunWait < lines[j].RunWait
}
return lines[i].Start.Before(lines[j].Start)
})

@ -170,10 +173,14 @@ var sealingJobsCmd = &cli.Command{
}

tw := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0)
_, _ = fmt.Fprintf(tw, "ID\tSector\tWorker\tHostname\tTask\tTime\n")
_, _ = fmt.Fprintf(tw, "ID\tSector\tWorker\tHostname\tTask\tState\tTime\n")

for _, l := range lines {
_, _ = fmt.Fprintf(tw, "%d\t%d\t%d\t%s\t%s\t%s\n", l.ID, l.Sector.Number, l.wid, workerHostnames[l.wid], l.Task.Short(), time.Now().Sub(l.Start).Truncate(time.Millisecond*100))
state := "running"
if l.RunWait != 0 {
state = fmt.Sprintf("assigned(%d)", l.RunWait-1)
}
_, _ = fmt.Fprintf(tw, "%d\t%d\t%d\t%s\t%s\t%s\t%s\n", l.ID, l.Sector.Number, l.wid, workerHostnames[l.wid], l.Task.Short(), state, time.Now().Sub(l.Start).Truncate(time.Millisecond*100))
}

return tw.Flush()

@ -97,6 +97,8 @@ var sectorsStatusCmd = &cli.Command{
fmt.Printf("TicketH:\t%d\n", status.Ticket.Epoch)
fmt.Printf("Seed:\t\t%x\n", status.Seed.Value)
fmt.Printf("SeedH:\t\t%d\n", status.Seed.Epoch)
fmt.Printf("Precommit:\t%s\n", status.PreCommitMsg)
fmt.Printf("Commit:\t\t%s\n", status.CommitMsg)
fmt.Printf("Proof:\t\t%x\n", status.Proof)
fmt.Printf("Deals:\t\t%v\n", status.Deals)
fmt.Printf("Retries:\t%d\n", status.Retries)

@ -100,7 +100,11 @@ var DaemonCmd = &cli.Command{
},
&cli.StringFlag{
Name: "import-chain",
Usage: "on first run, load chain from given file",
Usage: "on first run, load chain from given file and validate",
},
&cli.StringFlag{
Name: "import-snapshot",
Usage: "import chain state from a given chain export file",
},
&cli.BoolFlag{
Name: "halt-after-import",
@ -191,13 +195,23 @@ var DaemonCmd = &cli.Command{
}

chainfile := cctx.String("import-chain")
if chainfile != "" {
snapshot := cctx.String("import-snapshot")
if chainfile != "" || snapshot != "" {
if chainfile != "" && snapshot != "" {
return fmt.Errorf("cannot specify both 'import-snapshot' and 'import-chain'")
}
var issnapshot bool
if chainfile == "" {
chainfile = snapshot
issnapshot = true
}

chainfile, err := homedir.Expand(chainfile)
if err != nil {
return err
}

if err := ImportChain(r, chainfile); err != nil {
if err := ImportChain(r, chainfile, issnapshot); err != nil {
return err
}
if cctx.Bool("halt-after-import") {
@ -312,7 +326,7 @@ func importKey(ctx context.Context, api api.FullNode, f string) error {
return nil
}

func ImportChain(r repo.Repo, fname string) error {
func ImportChain(r repo.Repo, fname string, snapshot bool) error {
fi, err := os.Open(fname)
if err != nil {
return err
@ -345,11 +359,23 @@ func ImportChain(r repo.Repo, fname string) error {
return xerrors.Errorf("importing chain failed: %w", err)
}

gb, err := cst.GetTipsetByHeight(context.TODO(), 0, ts, true)
if err != nil {
return err
}

err = cst.SetGenesis(gb.Blocks()[0])
if err != nil {
return err
}

stm := stmgr.NewStateManager(cst)

log.Infof("validating imported chain...")
if err := stm.ValidateChain(context.TODO(), ts); err != nil {
return xerrors.Errorf("chain validation failed: %w", err)
if !snapshot {
log.Infof("validating imported chain...")
if err := stm.ValidateChain(context.TODO(), ts); err != nil {
return xerrors.Errorf("chain validation failed: %w", err)
}
}

log.Infof("accepting %s as new head", ts.Cids())

@ -60,7 +60,12 @@ func init() {
}

}
// TODO: beacon

mbi, err := api.MinerGetBaseInfo(ctx, addr, head.Height()+1, head.Key())
if err != nil {
return xerrors.Errorf("getting base info: %w", err)
}

ep := &types.ElectionProof{}
ep.WinCount = ep.ComputeWinCount(types.NewInt(1), types.NewInt(1))
for ep.WinCount == 0 {
@ -75,7 +80,7 @@ func init() {
uts := head.MinTimestamp() + uint64(build.BlockDelaySecs)
nheight := head.Height() + 1
blk, err := api.MinerCreateBlock(ctx, &lapi.BlockTemplate{
addr, head.Key(), ticket, ep, nil, msgs, nheight, uts, gen.ValidWpostForTesting,
addr, head.Key(), ticket, ep, mbi.BeaconEntries, msgs, nheight, uts, gen.ValidWpostForTesting,
})
if err != nil {
return xerrors.Errorf("creating block: %w", err)

153
conformance/driver.go
Normal file
@ -0,0 +1,153 @@
package conformance

import (
"context"
"fmt"

"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/lib/blockstore"

"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/puppet"

"github.com/filecoin-project/test-vectors/chaos"
"github.com/filecoin-project/test-vectors/schema"

"github.com/filecoin-project/go-address"

"github.com/ipfs/go-cid"
ds "github.com/ipfs/go-datastore"
)

var (
// BaseFee to use in the VM.
// TODO make parametrisable through vector.
BaseFee = abi.NewTokenAmount(100)
)

type Driver struct {
ctx context.Context
selector schema.Selector
}

func NewDriver(ctx context.Context, selector schema.Selector) *Driver {
return &Driver{ctx: ctx, selector: selector}
}

type ExecuteTipsetResult struct {
ReceiptsRoot cid.Cid
PostStateRoot cid.Cid

// AppliedMessages stores the messages that were applied, in the order they
// were applied. It includes implicit messages (cron, rewards).
AppliedMessages []*types.Message
// AppliedResults stores the results of AppliedMessages, in the same order.
AppliedResults []*vm.ApplyRet
}

// ExecuteTipset executes the supplied tipset on top of the state represented
// by the preroot CID.
//
// parentEpoch is the last epoch in which an actual tipset was processed. This
// is used by Lotus for null block counting and cron firing.
//
// This method returns the receipts root, the poststate root, and the VM
// message results. The latter _include_ implicit messages, such as cron ticks
// and reward withdrawal per miner.
func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, preroot cid.Cid, parentEpoch abi.ChainEpoch, tipset *schema.Tipset) (*ExecuteTipsetResult, error) {
var (
syscalls = mkFakedSigSyscalls(vm.Syscalls(ffiwrapper.ProofVerifier))
vmRand = new(testRand)

cs = store.NewChainStore(bs, ds, syscalls)
sm = stmgr.NewStateManager(cs)
)

blocks := make([]store.BlockMessages, 0, len(tipset.Blocks))
for _, b := range tipset.Blocks {
sb := store.BlockMessages{
Miner: b.MinerAddr,
WinCount: b.WinCount,
}
for _, m := range b.Messages {
msg, err := types.DecodeMessage(m)
if err != nil {
return nil, err
}
switch msg.From.Protocol() {
case address.SECP256K1:
sb.SecpkMessages = append(sb.SecpkMessages, msg)
case address.BLS:
sb.BlsMessages = append(sb.BlsMessages, msg)
default:
return nil, fmt.Errorf("from account is not secpk nor bls: %s", msg.From)
}
}
blocks = append(blocks, sb)
}

var (
messages []*types.Message
results []*vm.ApplyRet
)

postcid, receiptsroot, err := sm.ApplyBlocks(context.Background(), parentEpoch, preroot, blocks, tipset.Epoch, vmRand, func(_ cid.Cid, msg *types.Message, ret *vm.ApplyRet) error {
messages = append(messages, msg)
results = append(results, ret)
return nil
}, tipset.BaseFee)

if err != nil {
return nil, err
}

ret := &ExecuteTipsetResult{
ReceiptsRoot: receiptsroot,
PostStateRoot: postcid,
AppliedMessages: messages,
AppliedResults: results,
}
return ret, nil
}

// ExecuteMessage executes a conformance test vector message in a temporary VM.
func (d *Driver) ExecuteMessage(bs blockstore.Blockstore, preroot cid.Cid, epoch abi.ChainEpoch, msg *types.Message) (*vm.ApplyRet, cid.Cid, error) {
vmOpts := &vm.VMOpts{
StateBase: preroot,
Epoch: epoch,
Rand: &testRand{}, // TODO always succeeds; need more flexibility.
Bstore: bs,
Syscalls: mkFakedSigSyscalls(vm.Syscalls(ffiwrapper.ProofVerifier)), // TODO always succeeds; need more flexibility.
CircSupplyCalc: nil,
BaseFee: BaseFee,
}

lvm, err := vm.NewVM(vmOpts)
if err != nil {
return nil, cid.Undef, err
}

invoker := vm.NewInvoker()

// add support for the puppet and chaos actors.
if puppetOn, ok := d.selector["puppet_actor"]; ok && puppetOn == "true" {
invoker.Register(puppet.PuppetActorCodeID, puppet.Actor{}, puppet.State{})
}
if chaosOn, ok := d.selector["chaos_actor"]; ok && chaosOn == "true" {
invoker.Register(chaos.ChaosActorCodeCID, chaos.Actor{}, chaos.State{})
}

lvm.SetInvoker(invoker)

ret, err := lvm.ApplyMessage(d.ctx, msg)
if err != nil {
return nil, cid.Undef, err
}

root, err := lvm.Flush(d.ctx)
return ret, root, err
}
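
As a usage sketch (not part of the driver): executing a single hand-built message against a preloaded blockstore. The message fields, gas values, and addresses below are illustrative only; it assumes bs already holds the pre-state tree rooted at preroot:

```go
package conformance

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/lib/blockstore"
	"github.com/filecoin-project/specs-actors/actors/abi"
	"github.com/filecoin-project/test-vectors/schema"
	"github.com/ipfs/go-cid"
)

// runOneMessage is a hypothetical caller for Driver.ExecuteMessage.
func runOneMessage(bs blockstore.Blockstore, preroot cid.Cid, from, to address.Address) error {
	driver := NewDriver(context.Background(), schema.Selector{})

	msg := &types.Message{
		From:       from,
		To:         to,
		Value:      abi.NewTokenAmount(1),
		GasLimit:   1000000,
		GasFeeCap:  abi.NewTokenAmount(200),
		GasPremium: abi.NewTokenAmount(1),
	}

	ret, postroot, err := driver.ExecuteMessage(bs, preroot, abi.ChainEpoch(1), msg)
	if err != nil {
		return err
	}
	fmt.Println("exit code:", ret.ExitCode, "post-state root:", postroot)
	return nil
}
```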

304
conformance/runner_test.go
Normal file
@ -0,0 +1,304 @@
package conformance

import (
"bytes"
"compress/gzip"
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"testing"

"github.com/ipfs/go-cid"
ds "github.com/ipfs/go-datastore"

"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
"github.com/filecoin-project/lotus/lib/blockstore"

"github.com/filecoin-project/statediff"
"github.com/filecoin-project/test-vectors/schema"

"github.com/fatih/color"
"github.com/ipld/go-car"
)

const (
// EnvSkipConformance, if 1, skips the conformance test suite.
EnvSkipConformance = "SKIP_CONFORMANCE"

// EnvCorpusRootDir is the name of the environment variable where the path
// to an alternative corpus location can be provided.
//
// The default is defaultCorpusRoot.
EnvCorpusRootDir = "CORPUS_DIR"

// defaultCorpusRoot is the directory where the test vector corpus is hosted.
// It is mounted on the Lotus repo as a git submodule.
//
// When running this test, the corpus root can be overridden through the
// -conformance.corpus CLI flag to run an alternate corpus.
defaultCorpusRoot = "../extern/test-vectors/corpus"
)

// ignore is a set of paths relative to root to skip.
var ignore = map[string]struct{}{
".git": {},
"schema.json": {},
}

// TestConformance is the entrypoint test that runs all test vectors found
// in the corpus root directory.
//
// It locates all json files via a recursive walk, skipping over the ignore set,
// as well as files beginning with _. It parses each file as a test vector, and
// runs it via the Driver.
func TestConformance(t *testing.T) {
if skip := strings.TrimSpace(os.Getenv(EnvSkipConformance)); skip == "1" {
t.SkipNow()
}
// corpusRoot is the effective corpus root path, taken from the `-conformance.corpus` CLI flag,
// falling back to defaultCorpusRoot if not provided.
corpusRoot := defaultCorpusRoot
if dir := strings.TrimSpace(os.Getenv(EnvCorpusRootDir)); dir != "" {
corpusRoot = dir
}

var vectors []string
err := filepath.Walk(corpusRoot+"/", func(path string, info os.FileInfo, err error) error {
if err != nil {
t.Fatal(err)
}

filename := filepath.Base(path)
rel, err := filepath.Rel(corpusRoot, path)
if err != nil {
t.Fatal(err)
}

if _, ok := ignore[rel]; ok {
// skip over using the right error.
if info.IsDir() {
return filepath.SkipDir
}
return nil
}
if info.IsDir() {
// dive into directories.
return nil
}
if filepath.Ext(path) != ".json" {
// skip if not .json.
return nil
}
if ignored := strings.HasPrefix(filename, "_"); ignored {
// ignore files starting with _.
t.Logf("ignoring: %s", rel)
return nil
}
vectors = append(vectors, rel)
return nil
})

if err != nil {
t.Fatal(err)
}

if len(vectors) == 0 {
t.Fatalf("no test vectors found")
}

// Run a test for each vector.
for _, v := range vectors {
path := filepath.Join(corpusRoot, v)
raw, err := ioutil.ReadFile(path)
if err != nil {
t.Fatalf("failed to read test raw file: %s", path)
}

var vector schema.TestVector
err = json.Unmarshal(raw, &vector)
if err != nil {
t.Errorf("failed to parse test vector %s: %s; skipping", path, err)
continue
}

t.Run(v, func(t *testing.T) {
for _, h := range vector.Hints {
if h == schema.HintIncorrect {
t.Logf("skipping vector marked as incorrect: %s", vector.Meta.ID)
t.SkipNow()
}
}

// dispatch the execution depending on the vector class.
switch vector.Class {
case "message":
executeMessageVector(t, &vector)
case "tipset":
executeTipsetVector(t, &vector)
default:
t.Fatalf("test vector class not supported: %s", vector.Class)
}
})
}
}

// executeMessageVector executes a message-class test vector.
func executeMessageVector(t *testing.T, vector *schema.TestVector) {
var (
ctx = context.Background()
epoch = vector.Pre.Epoch
root = vector.Pre.StateTree.RootCID
)

// Load the CAR into a new temporary Blockstore.
bs := loadCAR(t, vector.CAR)

// Create a new Driver.
driver := NewDriver(ctx, vector.Selector)

// Apply every message.
for i, m := range vector.ApplyMessages {
msg, err := types.DecodeMessage(m.Bytes)
if err != nil {
t.Fatalf("failed to deserialize message: %s", err)
}

// add an epoch if one's set.
if m.Epoch != nil {
epoch = *m.Epoch
}

// Execute the message.
var ret *vm.ApplyRet
ret, root, err = driver.ExecuteMessage(bs, root, epoch, msg)
if err != nil {
t.Fatalf("fatal failure when executing message: %s", err)
}

// Assert that the receipt matches what the test vector expects.
assertMsgResult(t, vector.Post.Receipts[i], ret, strconv.Itoa(i))
}

// Once all messages are applied, assert that the final state root matches
// the expected postcondition root.
if root != vector.Post.StateTree.RootCID {
dumpThreeWayStateDiff(t, vector, bs, root)
}
}

// executeTipsetVector executes a tipset-class test vector.
func executeTipsetVector(t *testing.T, vector *schema.TestVector) {
var (
ctx = context.Background()
prevEpoch = vector.Pre.Epoch
root = vector.Pre.StateTree.RootCID
tmpds = ds.NewMapDatastore()
)

// Load the CAR into a new temporary Blockstore.
bs := loadCAR(t, vector.CAR)

// Create a new Driver.
driver := NewDriver(ctx, vector.Selector)

// Apply every tipset.
var receiptsIdx int
for i, ts := range vector.ApplyTipsets {
ts := ts // capture
ret, err := driver.ExecuteTipset(bs, tmpds, root, prevEpoch, &ts)
if err != nil {
t.Fatalf("failed to apply tipset %d message: %s", i, err)
}

for j, v := range ret.AppliedResults {
assertMsgResult(t, vector.Post.Receipts[receiptsIdx], v, fmt.Sprintf("%d of tipset %d", j, i))
receiptsIdx++
}

// Compare the receipts root.
if expected, actual := vector.Post.ReceiptsRoots[i], ret.ReceiptsRoot; expected != actual {
t.Errorf("post receipts root doesn't match; expected: %s, was: %s", expected, actual)
}

prevEpoch = ts.Epoch
root = ret.PostStateRoot
}

// Once all messages are applied, assert that the final state root matches
// the expected postcondition root.
if root != vector.Post.StateTree.RootCID {
dumpThreeWayStateDiff(t, vector, bs, root)
}
}

// assertMsgResult compares a message result. It takes the expected receipt
// encoded in the vector, the actual receipt returned by Lotus, and a message
// label to log in the assertion failure message to facilitate debugging.
func assertMsgResult(t *testing.T, expected *schema.Receipt, actual *vm.ApplyRet, label string) {
t.Helper()

if expected, actual := expected.ExitCode, actual.ExitCode; expected != actual {
t.Errorf("exit code of msg %s did not match; expected: %s, got: %s", label, expected, actual)
}
if expected, actual := expected.GasUsed, actual.GasUsed; expected != actual {
t.Errorf("gas used of msg %s did not match; expected: %d, got: %d", label, expected, actual)
}
if expected, actual := []byte(expected.ReturnValue), actual.Return; !bytes.Equal(expected, actual) {
t.Errorf("return value of msg %s did not match; expected: %s, got: %s", label, base64.StdEncoding.EncodeToString(expected), base64.StdEncoding.EncodeToString(actual))
}
}

func dumpThreeWayStateDiff(t *testing.T, vector *schema.TestVector, bs blockstore.Blockstore, actual cid.Cid) {
color.NoColor = false // enable colouring.

t.Errorf("wrong post root cid; expected %v, but got %v", vector.Post.StateTree.RootCID, actual)

var (
a = color.New(color.FgMagenta, color.Bold).Sprint("(A) expected final state")
b = color.New(color.FgYellow, color.Bold).Sprint("(B) actual final state")
c = color.New(color.FgCyan, color.Bold).Sprint("(C) initial state")
d1 = color.New(color.FgGreen, color.Bold).Sprint("[Δ1]")
d2 = color.New(color.FgGreen, color.Bold).Sprint("[Δ2]")
d3 = color.New(color.FgGreen, color.Bold).Sprint("[Δ3]")
)

bold := color.New(color.Bold).SprintfFunc()

// run state diffs.
t.Log(bold("=== dumping 3-way diffs between %s, %s, %s ===", a, b, c))

t.Log(bold("--- %s left: %s; right: %s ---", d1, a, b))
t.Log(statediff.Diff(context.Background(), bs, vector.Post.StateTree.RootCID, actual))

t.Log(bold("--- %s left: %s; right: %s ---", d2, c, b))
t.Log(statediff.Diff(context.Background(), bs, vector.Pre.StateTree.RootCID, actual))

t.Log(bold("--- %s left: %s; right: %s ---", d3, c, a))
t.Log(statediff.Diff(context.Background(), bs, vector.Pre.StateTree.RootCID, vector.Post.StateTree.RootCID))
}

func loadCAR(t *testing.T, vectorCAR schema.Base64EncodedBytes) blockstore.Blockstore {
bs := blockstore.NewTemporary()

// Read the base64-encoded CAR from the vector, and inflate the gzip.
buf := bytes.NewReader(vectorCAR)
r, err := gzip.NewReader(buf)
if err != nil {
t.Fatalf("failed to inflate gzipped CAR: %s", err)
}
defer r.Close() // nolint

// Load the CAR embedded in the test vector into the Blockstore.
_, err = car.LoadCar(bs, r)
if err != nil {
t.Fatalf("failed to load state tree car from test vector: %s", err)
}
return bs
}

54
conformance/stubs.go
Normal file
@ -0,0 +1,54 @@
package conformance

import (
"context"

"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/vm"

"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/crypto"
"github.com/filecoin-project/specs-actors/actors/runtime"

cbor "github.com/ipfs/go-ipld-cbor"
)

type testRand struct{}

var _ vm.Rand = (*testRand)(nil)

func (r *testRand) GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes.
}

func (r *testRand) GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes.
}

type testSyscalls struct {
runtime.Syscalls
}

// TODO VerifySignature this will always succeed; but we want to be able to test failures too.
func (fss *testSyscalls) VerifySignature(_ crypto.Signature, _ address.Address, _ []byte) error {
return nil
}

// TODO VerifySeal this will always succeed; but we want to be able to test failures too.
func (fss *testSyscalls) VerifySeal(_ abi.SealVerifyInfo) error {
return nil
}

// TODO VerifyPoSt this will always succeed; but we want to be able to test failures too.
func (fss *testSyscalls) VerifyPoSt(_ abi.WindowPoStVerifyInfo) error {
return nil
}

func mkFakedSigSyscalls(base vm.SyscallBuilder) vm.SyscallBuilder {
return func(ctx context.Context, cstate *state.StateTree, cst cbor.IpldStore) runtime.Syscalls {
return &testSyscalls{
base(ctx, cstate, cst),
}
}
}

@ -36,6 +36,7 @@
* [ClientFindData](#ClientFindData)
* [ClientGenCar](#ClientGenCar)
* [ClientGetDealInfo](#ClientGetDealInfo)
* [ClientGetDealUpdates](#ClientGetDealUpdates)
* [ClientHasLocal](#ClientHasLocal)
* [ClientImport](#ClientImport)
* [ClientListDataTransfers](#ClientListDataTransfers)
@ -197,7 +198,7 @@ Response:
```json
{
"Version": "string value",
"APIVersion": 3072,
"APIVersion": 3584,
"BlockDelay": 42
}
```
@ -267,6 +268,9 @@ blockchain, but that do not require any form of state computation.

### ChainExport
ChainExport returns a stream of bytes with CAR dump of chain data.
The exported chain data includes the header chain from the given tipset
back to genesis, the entire genesis state, and the most recent 'nroots'
state trees.


Perms: read
@ -274,6 +278,7 @@ Perms: read
Inputs:
```json
[
10101,
[
{
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
@ -915,6 +920,42 @@ Response:
}
```

### ClientGetDealUpdates
ClientGetDealUpdates returns the status of updated deals


Perms: read

Inputs: `null`

Response:
```json
{
"ProposalCid": {
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
},
"State": 42,
"Message": "string value",
"Provider": "t01234",
"DataRef": {
"TransferType": "string value",
"Root": {
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
},
"PieceCid": null,
"PieceSize": 1024
},
"PieceCID": {
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
},
"Size": 42,
"PricePerEpoch": "0",
"Duration": 42,
"DealID": 5432,
"CreationTime": "0001-01-01T00:00:00Z"
}
```

### ClientHasLocal
ClientHasLocal indicates whether a certain CID is locally stored.

@ -2437,7 +2478,9 @@ Inputs:
"Type": 2,
"Data": "Ynl0ZSBhcnJheQ=="
}
}
},
"Ynl0ZSBhcnJheQ==",
"Ynl0ZSBhcnJheQ=="
]
```

@ -3603,7 +3646,7 @@ Response:
```

### StateSectorGetInfo
StateSectorGetInfo returns the on-chain info for the specified miner's sector
StateSectorGetInfo returns the on-chain info for the specified miner's sector. Returns null in case the sector info isn't found
NOTE: returned info.Expiration may not be accurate in some cases, use StateSectorExpiration to get accurate
expiration epoch

@ -26,7 +26,7 @@ The necessary permissions for each are in [api/struct.go](https://github.com/fil

## How do I make an API request?

To demonstrate making an API request, we will take the method `ChainHead` from [api/api.go](https://github.com/filecoin-project/lotus/blob/master/api/api_full.go).
To demonstrate making an API request, we will take the method `ChainHead` from [api/api_full.go](https://github.com/filecoin-project/lotus/blob/master/api/api_full.go).

```go
ChainHead(context.Context) (*types.TipSet, error)
```
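For illustration, a minimal sketch of invoking this method over the node's JSON-RPC endpoint from Go. The endpoint URL, port, and token handling are assumptions; adjust them to your node's configuration (read-level methods require an API token where auth is enforced):

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Filecoin.ChainHead takes no parameters and needs only read permissions.
	body := []byte(`{"jsonrpc":"2.0","method":"Filecoin.ChainHead","params":[],"id":1}`)
	req, err := http.NewRequest("POST", "http://127.0.0.1:1234/rpc/v0", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	// req.Header.Set("Authorization", "Bearer "+token) // if the endpoint enforces auth

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(out)) // JSON-RPC envelope containing the TipSet
}
```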
@ -133,7 +133,7 @@ type MpoolConfig struct {

The meaning of these fields is as follows:
- `PriorityAddrs` -- these are the addresses of actors whose pending messages should always
  be included in a block during message selection, regardless of profitability.
  be included in a block during message selection, as long as they are profitable.
  Miners should configure their own worker addresses so that they include their own messages
  when they produce a new block.
  Default is empty.
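As a sketch of how a miner might populate `PriorityAddrs` programmatically (the `MpoolGetConfig`/`MpoolSetConfig` calls and the `FullNode` interface are assumed from the full-node API; imports of the lotus `api` and `go-address` packages are elided):

```go
// prioritizeWorker fetches the current mempool config, appends the miner's
// worker address to PriorityAddrs, and writes the config back to the node,
// so the miner's own messages are preferred during block production.
func prioritizeWorker(ctx context.Context, node api.FullNode, worker address.Address) error {
	cfg, err := node.MpoolGetConfig(ctx)
	if err != nil {
		return err
	}
	cfg.PriorityAddrs = append(cfg.PriorityAddrs, worker)
	return node.MpoolSetConfig(ctx, cfg)
}
```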
@ -371,7 +371,11 @@ func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector abi.Se
	maxPieceSize := abi.PaddedPieceSize(sb.ssize)

	pf, err := openPartialFile(maxPieceSize, path.Unsealed)
	if xerrors.Is(err, os.ErrNotExist) {
	if err != nil {
		if xerrors.Is(err, os.ErrNotExist) {
			return false, nil
		}

		return false, xerrors.Errorf("opening partial file: %w", err)
	}

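The shape of the fix above, in isolation: branch on `err != nil` first, then use `xerrors.Is` to carve out the one benign case, and wrap everything else with context. A self-contained sketch with a plain file open standing in for the diff's `openPartialFile` helper:

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/xerrors"
)

// openOptional treats a missing file as "not present" rather than a failure,
// and wraps every other error with context, mirroring the corrected
// ReadPiece error handling.
func openOptional(path string) (*os.File, bool, error) {
	f, err := os.Open(path)
	if err != nil {
		if xerrors.Is(err, os.ErrNotExist) {
			return nil, false, nil
		}
		return nil, false, xerrors.Errorf("opening %s: %w", path, err)
	}
	return f, true, nil
}

func main() {
	_, ok, err := openOptional("/definitely/not/here")
	fmt.Println(ok, err) // false <nil>
}
```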
4
extern/sector-storage/localworker.go
vendored
@ -208,8 +208,8 @@ func (l *LocalWorker) Remove(ctx context.Context, sector abi.SectorID) error {
	return err
}

func (l *LocalWorker) MoveStorage(ctx context.Context, sector abi.SectorID) error {
	if err := l.storage.MoveStorage(ctx, sector, l.scfg.SealProofType, stores.FTSealed|stores.FTCache); err != nil {
func (l *LocalWorker) MoveStorage(ctx context.Context, sector abi.SectorID, types stores.SectorFileType) error {
	if err := l.storage.MoveStorage(ctx, sector, l.scfg.SealProofType, types); err != nil {
		return xerrors.Errorf("moving sealed data to storage: %w", err)
	}

34
extern/sector-storage/manager.go
vendored
@ -6,8 +6,7 @@ import (
	"io"
	"net/http"

	"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"

	"github.com/hashicorp/go-multierror"
	"github.com/ipfs/go-cid"
	logging "github.com/ipfs/go-log/v2"
	"github.com/mitchellh/go-homedir"
@ -17,6 +16,7 @@ import (
	"github.com/filecoin-project/specs-storage/storage"

	"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
	"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
	"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
@ -31,7 +31,7 @@ type URLs []string
type Worker interface {
	ffiwrapper.StorageSealer

	MoveStorage(ctx context.Context, sector abi.SectorID) error
	MoveStorage(ctx context.Context, sector abi.SectorID, types stores.SectorFileType) error

	Fetch(ctx context.Context, s abi.SectorID, ft stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) error
	UnsealPiece(context.Context, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error
@ -441,7 +441,7 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID, keepU
	err = m.sched.Schedule(ctx, sector, sealtasks.TTFetch, fetchSel,
		schedFetch(sector, stores.FTCache|stores.FTSealed|moveUnsealed, stores.PathStorage, stores.AcquireMove),
		func(ctx context.Context, w Worker) error {
			return w.MoveStorage(ctx, sector)
			return w.MoveStorage(ctx, sector, stores.FTCache|stores.FTSealed|moveUnsealed)
		})
	if err != nil {
		return xerrors.Errorf("moving sector to storage: %w", err)
@ -463,25 +463,19 @@ func (m *Manager) Remove(ctx context.Context, sector abi.SectorID) error {
		return xerrors.Errorf("acquiring sector lock: %w", err)
	}

	unsealed := stores.FTUnsealed
	{
		unsealedStores, err := m.index.StorageFindSector(ctx, sector, stores.FTUnsealed, 0, false)
		if err != nil {
			return xerrors.Errorf("finding unsealed sector: %w", err)
		}
	var err error

		if len(unsealedStores) == 0 { // can be already removed
			unsealed = stores.FTNone
		}
	if rerr := m.storage.Remove(ctx, sector, stores.FTSealed, true); rerr != nil {
		err = multierror.Append(err, xerrors.Errorf("removing sector (sealed): %w", rerr))
	}
	if rerr := m.storage.Remove(ctx, sector, stores.FTCache, true); rerr != nil {
		err = multierror.Append(err, xerrors.Errorf("removing sector (cache): %w", rerr))
	}
	if rerr := m.storage.Remove(ctx, sector, stores.FTUnsealed, true); rerr != nil {
		err = multierror.Append(err, xerrors.Errorf("removing sector (unsealed): %w", rerr))
	}

	selector := newExistingSelector(m.index, sector, stores.FTCache|stores.FTSealed, false)

	return m.sched.Schedule(ctx, sector, sealtasks.TTFinalize, selector,
		schedFetch(sector, stores.FTCache|stores.FTSealed|unsealed, stores.PathStorage, stores.AcquireMove),
		func(ctx context.Context, w Worker) error {
			return w.Remove(ctx, sector)
		})
	return err
}

func (m *Manager) StorageLocal(ctx context.Context) (map[stores.ID]string, error) {
61
extern/sector-storage/mock/mock.go
vendored
@ -3,12 +3,12 @@ package mock
import (
	"bytes"
	"context"
	"crypto/sha256"
	"fmt"
	"io"
	"math/rand"
	"sync"

	"github.com/filecoin-project/go-bitfield"
	commcid "github.com/filecoin-project/go-fil-commcid"
	"github.com/filecoin-project/specs-actors/actors/abi"
	"github.com/filecoin-project/specs-storage/storage"
@ -291,32 +291,29 @@ func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorI
	return generateFakePoSt(si, abi.RegisteredSealProof.RegisteredWindowPoStProof, randomness), skipped, nil
}

func generateFakePoSt(sectorInfo []abi.SectorInfo, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error), randomness abi.PoStRandomness) []abi.PoStProof {
	sectors := bitfield.New()
func generateFakePoStProof(sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) []byte {
	hasher := sha256.New()
	_, _ = hasher.Write(randomness)
	for _, info := range sectorInfo {
		sectors.Set(uint64(info.SectorNumber))
		err := info.MarshalCBOR(hasher)
		if err != nil {
			panic(err)
		}
	}
	return hasher.Sum(nil)

}

func generateFakePoSt(sectorInfo []abi.SectorInfo, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error), randomness abi.PoStRandomness) []abi.PoStProof {
	wp, err := rpt(sectorInfo[0].SealProof)
	if err != nil {
		panic(err)
	}

	var proofBuf bytes.Buffer

	_, err = proofBuf.Write(randomness)
	if err != nil {
		panic(err)
	}

	if err := sectors.MarshalCBOR(&proofBuf); err != nil {
		panic(err)
	}

	return []abi.PoStProof{
		{
			PoStProof:  wp,
			ProofBytes: proofBuf.Bytes(),
			ProofBytes: generateFakePoStProof(sectorInfo, randomness),
		},
	}
}
@ -412,36 +409,10 @@ func (m mockVerif) VerifyWindowPoSt(ctx context.Context, info abi.WindowPoStVeri

	proof := info.Proofs[0]

	if !bytes.Equal(proof.ProofBytes[:len(info.Randomness)], info.Randomness) {
		return false, xerrors.Errorf("bad randomness")
	expected := generateFakePoStProof(info.ChallengedSectors, info.Randomness)
	if !bytes.Equal(proof.ProofBytes, expected) {
		return false, xerrors.Errorf("bad proof")
	}

	sectors := bitfield.New()
	if err := sectors.UnmarshalCBOR(bytes.NewReader(proof.ProofBytes[len(info.Randomness):])); err != nil {
		return false, xerrors.Errorf("unmarshaling sectors bitfield from \"proof\": %w", err)
	}

	challenged := bitfield.New()
	for _, sector := range info.ChallengedSectors {
		challenged.Set(uint64(sector.SectorNumber))
	}

	{
		b1, err := sectors.MarshalJSON()
		if err != nil {
			return false, err
		}

		b2, err := challenged.MarshalJSON()
		if err != nil {
			return false, err
		}

		if !bytes.Equal(b1, b2) {
			return false, xerrors.Errorf("proven and challenged sector sets didn't match: %s != %s", string(b1), string(b2))
		}
	}

	return true, nil
}
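The refactor makes the mock proof a pure function of the challenged sectors and the randomness, so the verifier can simply recompute the digest instead of parsing structure out of the proof bytes. An illustrative standalone version of that derivation, with plain byte slices standing in for the CBOR-encoded `abi.SectorInfo` values:

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
)

// fakeProof hashes the randomness followed by each serialized sector info,
// mirroring generateFakePoStProof above. Identical inputs always yield an
// identical "proof", which is exactly the property VerifyWindowPoSt now checks.
func fakeProof(randomness []byte, sectorInfos ...[]byte) []byte {
	h := sha256.New()
	h.Write(randomness)
	for _, si := range sectorInfos {
		h.Write(si)
	}
	return h.Sum(nil)
}

func main() {
	r := []byte("i_am_random_____i_am_random_____")
	a := fakeProof(r, []byte("sector-1"), []byte("sector-2"))
	b := fakeProof(r, []byte("sector-1"), []byte("sector-2"))
	fmt.Printf("%x\ndeterministic: %v\n", a, bytes.Equal(a, b))
}
```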
5
extern/sector-storage/request_queue.go
vendored
@ -7,6 +7,11 @@ type requestQueue []*workerRequest
func (q requestQueue) Len() int { return len(q) }

func (q requestQueue) Less(i, j int) bool {
	oneMuchLess, muchLess := q[i].taskType.MuchLess(q[j].taskType)
	if oneMuchLess {
		return muchLess
	}

	if q[i].priority != q[j].priority {
		return q[i].priority > q[j].priority
	}
12
extern/sector-storage/resources.go
vendored
@ -22,17 +22,17 @@ func (r Resources) MultiThread() bool {

var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources{
	sealtasks.TTAddPiece: {
		abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{ // This is probably a bit conservative
			MaxMemory: 64 << 30,
			MinMemory: 64 << 30,
		abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{
			MaxMemory: 8 << 30,
			MinMemory: 8 << 30,

			Threads: 1,

			BaseMinMemory: 1 << 30,
		},
		abi.RegisteredSealProof_StackedDrg32GiBV1: Resources{ // This is probably a bit conservative
			MaxMemory: 32 << 30,
			MinMemory: 32 << 30,
		abi.RegisteredSealProof_StackedDrg32GiBV1: Resources{
			MaxMemory: 4 << 30,
			MinMemory: 4 << 30,

			Threads: 1,

166
extern/sector-storage/sched.go
vendored
@ -21,6 +21,7 @@ type schedPrioCtxKey int
var SchedPriorityKey schedPrioCtxKey
var DefaultSchedPriority = 0
var SelectorTimeout = 5 * time.Second
var InitWait = 3 * time.Second

var (
	SchedWindows = 2
@ -85,6 +86,9 @@ type workerHandle struct {

	lk sync.Mutex

	wndLk         sync.Mutex
	activeWindows []*schedWindow

	// stats / tracking
	wt *workTracker

@ -123,6 +127,8 @@ type workerRequest struct {
	prepare WorkerAction
	work    WorkerAction

	start time.Time

	index int // The index of the item in the heap.

	indexHeap int
@ -147,7 +153,7 @@ func newScheduler(spt abi.RegisteredSealProof) *scheduler {
		workerClosing: make(chan WorkerID),

		schedule:       make(chan *workerRequest),
		windowRequests: make(chan *schedWindowRequest),
		windowRequests: make(chan *schedWindowRequest, 20),

		schedQueue: &requestQueue{},

@ -171,6 +177,8 @@ func (sh *scheduler) Schedule(ctx context.Context, sector abi.SectorID, taskType
		prepare: prepare,
		work:    work,

		start: time.Now(),

		ret: ret,
		ctx: ctx,
	}:
@ -214,7 +222,12 @@ func (sh *scheduler) runSched() {

	go sh.runWorkerWatcher()

	iw := time.After(InitWait)
	var initialised bool

	for {
		var doSched bool

		select {
		case w := <-sh.newWorkers:
			sh.newWorker(w)
@ -224,22 +237,47 @@ func (sh *scheduler) runSched() {

		case req := <-sh.schedule:
			sh.schedQueue.Push(req)
			sh.trySched()
			doSched = true

			if sh.testSync != nil {
				sh.testSync <- struct{}{}
			}
		case req := <-sh.windowRequests:
			sh.openWindows = append(sh.openWindows, req)
			sh.trySched()

			doSched = true
		case ireq := <-sh.info:
			ireq(sh.diag())

		case <-iw:
			initialised = true
			iw = nil
			doSched = true
		case <-sh.closing:
			sh.schedClose()
			return
		}

		if doSched && initialised {
			// First gather any pending tasks, so we go through the scheduling loop
			// once for every added task
		loop:
			for {
				select {
				case req := <-sh.schedule:
					sh.schedQueue.Push(req)
					if sh.testSync != nil {
						sh.testSync <- struct{}{}
					}
				case req := <-sh.windowRequests:
					sh.openWindows = append(sh.openWindows, req)
				default:
					break loop
				}
			}

			sh.trySched()
		}

	}
}

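The reworked loop batches work: it only notes that scheduling is needed, drains everything already queued on the channels, and then runs a single `trySched` pass instead of one pass per event. The non-blocking drain uses a `select` with a `default` case and a labeled `break`; a stand-alone sketch of that pattern, with an `int` channel standing in for the request channels:

```go
package main

import "fmt"

// drain moves everything currently buffered on ch into queue without
// blocking: the default case fires as soon as the channel is empty,
// breaking out of the labeled loop.
func drain(ch chan int, queue *[]int) {
loop:
	for {
		select {
		case v := <-ch:
			*queue = append(*queue, v)
		default:
			break loop
		}
	}
}

func main() {
	ch := make(chan int, 8)
	for i := 0; i < 3; i++ {
		ch <- i
	}
	var q []int
	drain(ch, &q)
	fmt.Println(q) // [0 1 2]
}
```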
@ -321,7 +359,7 @@ func (sh *scheduler) trySched() {
	}

	// TODO: allow bigger windows
	if !windows[wnd].allocated.canHandleRequest(needRes, windowRequest.worker, worker.info.Resources) {
	if !windows[wnd].allocated.canHandleRequest(needRes, windowRequest.worker, "schedAcceptable", worker.info.Resources) {
		continue
	}

@ -392,13 +430,17 @@ func (sh *scheduler) trySched() {
		log.Debugf("SCHED try assign sqi:%d sector %d to window %d", sqi, task.sector.Number, wnd)

		// TODO: allow bigger windows
		if !windows[wnd].allocated.canHandleRequest(needRes, wid, wr) {
		if !windows[wnd].allocated.canHandleRequest(needRes, wid, "schedAssign", wr) {
			continue
		}

		log.Debugf("SCHED ASSIGNED sqi:%d sector %d to window %d", sqi, task.sector.Number, wnd)
		log.Debugf("SCHED ASSIGNED sqi:%d sector %d task %s to window %d", sqi, task.sector.Number, task.taskType, wnd)

		windows[wnd].allocated.add(wr, needRes)
		// TODO: We probably want to re-sort acceptableWindows here based on new
		// workerHandle.utilization + windows[wnd].allocated.utilization (workerHandle.utilization is used in all
		// task selectors, but not in the same way, so need to figure out how to do that in a non-O(n^2 way), and
		// without additional network roundtrips (O(n^2) could be avoided by turning acceptableWindows.[] into heaps))

		selectedWindow = wnd
		break
@ -475,8 +517,6 @@ func (sh *scheduler) runWorker(wid WorkerID) {
		taskDone := make(chan struct{}, 1)
		windowsRequested := 0

		var activeWindows []*schedWindow

		ctx, cancel := context.WithCancel(context.TODO())
		defer cancel()

@ -510,7 +550,9 @@ func (sh *scheduler) runWorker(wid WorkerID) {

			select {
			case w := <-scheduledWindows:
				activeWindows = append(activeWindows, w)
				worker.wndLk.Lock()
				worker.activeWindows = append(worker.activeWindows, w)
				worker.wndLk.Unlock()
			case <-taskDone:
				log.Debugw("task done", "workerid", wid)
			case <-sh.closing:
@ -521,46 +563,115 @@ func (sh *scheduler) runWorker(wid WorkerID) {
				return
			}

			sh.workersLk.RLock()
			worker.wndLk.Lock()

			windowsRequested -= sh.workerCompactWindows(worker, wid)

		assignLoop:
			// process windows in order
			for len(activeWindows) > 0 {
				// process tasks within a window in order
				for len(activeWindows[0].todo) > 0 {
					todo := activeWindows[0].todo[0]
					needRes := ResourceTable[todo.taskType][sh.spt]
			for len(worker.activeWindows) > 0 {
				firstWindow := worker.activeWindows[0]

				// process tasks within a window, preferring tasks at lower indexes
				for len(firstWindow.todo) > 0 {
					tidx := -1

					sh.workersLk.RLock()
					worker.lk.Lock()
					ok := worker.preparing.canHandleRequest(needRes, wid, worker.info.Resources)
					for t, todo := range firstWindow.todo {
						needRes := ResourceTable[todo.taskType][sh.spt]
						if worker.preparing.canHandleRequest(needRes, wid, "startPreparing", worker.info.Resources) {
							tidx = t
							break
						}
					}
					worker.lk.Unlock()

					if !ok {
						sh.workersLk.RUnlock()
					if tidx == -1 {
						break assignLoop
					}

					todo := firstWindow.todo[tidx]

					log.Debugf("assign worker sector %d", todo.sector.Number)
					err := sh.assignWorker(taskDone, wid, worker, todo)
					sh.workersLk.RUnlock()

					if err != nil {
						log.Errorf("assignWorker error: %+v", err)
						go todo.respond(xerrors.Errorf("assignWorker error: %w", err))
					}

					activeWindows[0].todo = activeWindows[0].todo[1:]
					// Note: we're not freeing window.allocated resources here very much on purpose
					copy(firstWindow.todo[tidx:], firstWindow.todo[tidx+1:])
					firstWindow.todo[len(firstWindow.todo)-1] = nil
					firstWindow.todo = firstWindow.todo[:len(firstWindow.todo)-1]
				}

				copy(activeWindows, activeWindows[1:])
				activeWindows[len(activeWindows)-1] = nil
				activeWindows = activeWindows[:len(activeWindows)-1]
				copy(worker.activeWindows, worker.activeWindows[1:])
				worker.activeWindows[len(worker.activeWindows)-1] = nil
				worker.activeWindows = worker.activeWindows[:len(worker.activeWindows)-1]

				windowsRequested--
			}

			worker.wndLk.Unlock()
			sh.workersLk.RUnlock()
		}
	}()
}

func (sh *scheduler) workerCompactWindows(worker *workerHandle, wid WorkerID) int {
	// move tasks from newer windows into older windows if the older
	// windows can still fit them
	if len(worker.activeWindows) > 1 {
		for wi, window := range worker.activeWindows[1:] {
			lower := worker.activeWindows[wi]
			var moved []int

			for ti, todo := range window.todo {
				needRes := ResourceTable[todo.taskType][sh.spt]
				if !lower.allocated.canHandleRequest(needRes, wid, "compactWindows", worker.info.Resources) {
					continue
				}

				moved = append(moved, ti)
				lower.todo = append(lower.todo, todo)
				lower.allocated.add(worker.info.Resources, needRes)
				window.allocated.free(worker.info.Resources, needRes)
			}

			if len(moved) > 0 {
				newTodo := make([]*workerRequest, 0, len(window.todo)-len(moved))
				for i, t := range window.todo {
					if len(moved) > 0 && moved[0] == i {
						moved = moved[1:]
						continue
					}

					newTodo = append(newTodo, t)
				}
				window.todo = newTodo
			}
		}
	}

	var compacted int
	var newWindows []*schedWindow

	for _, window := range worker.activeWindows {
		if len(window.todo) == 0 {
			compacted++
			continue
		}

		newWindows = append(newWindows, window)
	}

	worker.activeWindows = newWindows

	return compacted
}

func (sh *scheduler) assignWorker(taskDone chan struct{}, wid WorkerID, w *workerHandle, req *workerRequest) error {
	needRes := ResourceTable[req.taskType][sh.spt]

@ -663,14 +774,19 @@ func (sh *scheduler) dropWorker(wid WorkerID) {
}

func (sh *scheduler) workerCleanup(wid WorkerID, w *workerHandle) {
	if !w.cleanupStarted {
	select {
	case <-w.closingMgr:
	default:
		close(w.closingMgr)
	}

	sh.workersLk.Unlock()
	select {
	case <-w.closedMgr:
	case <-time.After(time.Second):
		log.Errorf("timeout closing worker manager goroutine %d", wid)
	}
	sh.workersLk.Lock()

	if !w.cleanupStarted {
		w.cleanupStarted = true

28
extern/sector-storage/sched_resources.go
vendored
@ -7,7 +7,7 @@ import (
)

func (a *activeResources) withResources(id WorkerID, wr storiface.WorkerResources, r Resources, locker sync.Locker, cb func() error) error {
	for !a.canHandleRequest(r, id, wr) {
	for !a.canHandleRequest(r, id, "withResources", wr) {
		if a.cond == nil {
			a.cond = sync.NewCond(locker)
		}
@ -52,37 +52,37 @@ func (a *activeResources) free(wr storiface.WorkerResources, r Resources) {
	a.memUsedMax -= r.MaxMemory
}

func (a *activeResources) canHandleRequest(needRes Resources, wid WorkerID, res storiface.WorkerResources) bool {
func (a *activeResources) canHandleRequest(needRes Resources, wid WorkerID, caller string, res storiface.WorkerResources) bool {

	// TODO: dedupe needRes.BaseMinMemory per task type (don't add if that task is already running)
	minNeedMem := res.MemReserved + a.memUsedMin + needRes.MinMemory + needRes.BaseMinMemory
	if minNeedMem > res.MemPhysical {
		log.Debugf("sched: not scheduling on worker %d; not enough physical memory - need: %dM, have %dM", wid, minNeedMem/mib, res.MemPhysical/mib)
		log.Debugf("sched: not scheduling on worker %d for %s; not enough physical memory - need: %dM, have %dM", wid, caller, minNeedMem/mib, res.MemPhysical/mib)
		return false
	}

	maxNeedMem := res.MemReserved + a.memUsedMax + needRes.MaxMemory + needRes.BaseMinMemory

	if maxNeedMem > res.MemSwap+res.MemPhysical {
		log.Debugf("sched: not scheduling on worker %d; not enough virtual memory - need: %dM, have %dM", wid, maxNeedMem/mib, (res.MemSwap+res.MemPhysical)/mib)
		log.Debugf("sched: not scheduling on worker %d for %s; not enough virtual memory - need: %dM, have %dM", wid, caller, maxNeedMem/mib, (res.MemSwap+res.MemPhysical)/mib)
		return false
	}

	if needRes.MultiThread() {
		if a.cpuUse > 0 {
			log.Debugf("sched: not scheduling on worker %d; multicore process needs %d threads, %d in use, target %d", wid, res.CPUs, a.cpuUse, res.CPUs)
			log.Debugf("sched: not scheduling on worker %d for %s; multicore process needs %d threads, %d in use, target %d", wid, caller, res.CPUs, a.cpuUse, res.CPUs)
			return false
		}
	} else {
		if a.cpuUse+uint64(needRes.Threads) > res.CPUs {
			log.Debugf("sched: not scheduling on worker %d; not enough threads, need %d, %d in use, target %d", wid, needRes.Threads, a.cpuUse, res.CPUs)
			log.Debugf("sched: not scheduling on worker %d for %s; not enough threads, need %d, %d in use, target %d", wid, caller, needRes.Threads, a.cpuUse, res.CPUs)
			return false
		}
	}

	if len(res.GPUs) > 0 && needRes.CanGPU {
		if a.gpuUsed {
			log.Debugf("sched: not scheduling on worker %d; GPU in use", wid)
			log.Debugf("sched: not scheduling on worker %d for %s; GPU in use", wid, caller)
			return false
		}
	}
@ -108,3 +108,17 @@ func (a *activeResources) utilization(wr storiface.WorkerResources) float64 {

	return max
}

func (wh *workerHandle) utilization() float64 {
	wh.lk.Lock()
	u := wh.active.utilization(wh.info.Resources)
	u += wh.preparing.utilization(wh.info.Resources)
	wh.lk.Unlock()
	wh.wndLk.Lock()
	for _, window := range wh.activeWindows {
		u += window.allocated.utilization(wh.info.Resources)
	}
	wh.wndLk.Unlock()

	return u
}
129
extern/sector-storage/sched_test.go
vendored
@ -5,6 +5,7 @@ import (
	"fmt"
	"io"
	"runtime"
	"sort"
	"sync"
	"testing"
	"time"
@ -22,6 +23,10 @@ import (
	"github.com/filecoin-project/specs-storage/storage"
)

func init() {
	InitWait = 10 * time.Millisecond
}

func TestWithPriority(t *testing.T) {
	ctx := context.Background()

@ -77,7 +82,7 @@ func (s *schedTestWorker) AddPiece(ctx context.Context, sector abi.SectorID, pie
	panic("implement me")
}

func (s *schedTestWorker) MoveStorage(ctx context.Context, sector abi.SectorID) error {
func (s *schedTestWorker) MoveStorage(ctx context.Context, sector abi.SectorID, types stores.SectorFileType) error {
	panic("implement me")
}

@ -301,7 +306,8 @@ func TestSched(t *testing.T) {
			done: map[string]chan struct{}{},
		}

		for _, task := range tasks {
		for i, task := range tasks {
			log.Info("TASK", i)
			task(t, sched, index, &rm)
		}

@ -415,6 +421,45 @@ func TestSched(t *testing.T) {
		)
	}

	diag := func() task {
		return func(t *testing.T, s *scheduler, index *stores.Index, meta *runMeta) {
			time.Sleep(20 * time.Millisecond)
			for _, request := range s.diag().Requests {
				log.Infof("!!! sDIAG: sid(%d) task(%s)", request.Sector.Number, request.TaskType)
			}

			wj := (&Manager{sched: s}).WorkerJobs()

			type line struct {
				storiface.WorkerJob
				wid uint64
			}

			lines := make([]line, 0)

			for wid, jobs := range wj {
				for _, job := range jobs {
					lines = append(lines, line{
						WorkerJob: job,
						wid:       wid,
					})
				}
			}

			// oldest first
			sort.Slice(lines, func(i, j int) bool {
				if lines[i].RunWait != lines[j].RunWait {
					return lines[i].RunWait < lines[j].RunWait
				}
				return lines[i].Start.Before(lines[j].Start)
			})

			for _, l := range lines {
				log.Infof("!!! wDIAG: rw(%d) sid(%d) t(%s)", l.RunWait, l.Sector.Number, l.Task)
			}
		}
	}

	// run this one a bunch of times, it had a very annoying tendency to fail randomly
	for i := 0; i < 40; i++ {
		t.Run("pc1-pc2-prio", testFunc([]workerSpec{
@ -423,6 +468,8 @@ func TestSched(t *testing.T) {
			// fill queues
			twoPC1("w0", 0, taskStarted),
			twoPC1("w1", 2, taskNotScheduled),
			sched("w2", "fred", 4, sealtasks.TTPreCommit1),
			taskNotScheduled("w2"),

			// windowed

@ -435,10 +482,18 @@ func TestSched(t *testing.T) {
			sched("t3", "fred", 10, sealtasks.TTPreCommit2),
			taskNotScheduled("t3"),

			diag(),

			twoPC1Act("w0", taskDone),
			twoPC1Act("w1", taskStarted),
			taskNotScheduled("w2"),

			twoPC1Act("w1", taskDone),
			taskStarted("w2"),

			taskDone("w2"),

			diag(),

			taskStarted("t3"),
			taskNotScheduled("t1"),
@ -518,3 +573,73 @@ func BenchmarkTrySched(b *testing.B) {
	b.Run("1w-500q", test(1, 500))
	b.Run("200w-400q", test(200, 400))
}

func TestWindowCompact(t *testing.T) {
	sh := scheduler{
		spt: abi.RegisteredSealProof_StackedDrg32GiBV1,
	}

	test := func(start [][]sealtasks.TaskType, expect [][]sealtasks.TaskType) func(t *testing.T) {
		return func(t *testing.T) {
			wh := &workerHandle{
				info: storiface.WorkerInfo{
					Resources: decentWorkerResources,
				},
			}

			for _, windowTasks := range start {
				window := &schedWindow{}

				for _, task := range windowTasks {
					window.todo = append(window.todo, &workerRequest{taskType: task})
					window.allocated.add(wh.info.Resources, ResourceTable[task][sh.spt])
				}

				wh.activeWindows = append(wh.activeWindows, window)
			}

			n := sh.workerCompactWindows(wh, 0)
			require.Equal(t, len(start)-len(expect), n)

			for wi, tasks := range expect {
				var expectRes activeResources

				for ti, task := range tasks {
					require.Equal(t, task, wh.activeWindows[wi].todo[ti].taskType, "%d, %d", wi, ti)
					expectRes.add(wh.info.Resources, ResourceTable[task][sh.spt])
				}

				require.Equal(t, expectRes.cpuUse, wh.activeWindows[wi].allocated.cpuUse, "%d", wi)
				require.Equal(t, expectRes.gpuUsed, wh.activeWindows[wi].allocated.gpuUsed, "%d", wi)
				require.Equal(t, expectRes.memUsedMin, wh.activeWindows[wi].allocated.memUsedMin, "%d", wi)
				require.Equal(t, expectRes.memUsedMax, wh.activeWindows[wi].allocated.memUsedMax, "%d", wi)
			}

		}
	}

	t.Run("2-pc1-windows", test(
		[][]sealtasks.TaskType{{sealtasks.TTPreCommit1}, {sealtasks.TTPreCommit1}},
		[][]sealtasks.TaskType{{sealtasks.TTPreCommit1, sealtasks.TTPreCommit1}}),
	)

	t.Run("1-window", test(
		[][]sealtasks.TaskType{{sealtasks.TTPreCommit1, sealtasks.TTPreCommit1}},
		[][]sealtasks.TaskType{{sealtasks.TTPreCommit1, sealtasks.TTPreCommit1}}),
	)

	t.Run("2-pc2-windows", test(
		[][]sealtasks.TaskType{{sealtasks.TTPreCommit2}, {sealtasks.TTPreCommit2}},
		[][]sealtasks.TaskType{{sealtasks.TTPreCommit2}, {sealtasks.TTPreCommit2}}),
	)

	t.Run("2pc1-pc1ap", test(
		[][]sealtasks.TaskType{{sealtasks.TTPreCommit1, sealtasks.TTPreCommit1}, {sealtasks.TTPreCommit1, sealtasks.TTAddPiece}},
		[][]sealtasks.TaskType{{sealtasks.TTPreCommit1, sealtasks.TTPreCommit1, sealtasks.TTAddPiece}, {sealtasks.TTPreCommit1}}),
	)

	t.Run("2pc1-pc1appc2", test(
		[][]sealtasks.TaskType{{sealtasks.TTPreCommit1, sealtasks.TTPreCommit1}, {sealtasks.TTPreCommit1, sealtasks.TTAddPiece, sealtasks.TTPreCommit2}},
		[][]sealtasks.TaskType{{sealtasks.TTPreCommit1, sealtasks.TTPreCommit1, sealtasks.TTAddPiece}, {sealtasks.TTPreCommit1, sealtasks.TTPreCommit2}}),
	)
}
24
extern/sector-storage/sealtasks/task.go
vendored
@ -17,15 +17,15 @@ const (
)

var order = map[TaskType]int{
	TTAddPiece:     7,
	TTPreCommit1:   6,
	TTPreCommit2:   5,
	TTCommit2:      4,
	TTCommit1:      3,
	TTFetch:        2,
	TTFinalize:     1,
	TTUnseal:       0,
	TTReadUnsealed: 0,
	TTAddPiece:     6, // least priority
	TTPreCommit1:   5,
	TTPreCommit2:   4,
	TTCommit2:      3,
	TTCommit1:      2,
	TTUnseal:       1,
	TTFetch:        -1,
	TTReadUnsealed: -1,
	TTFinalize:     -2, // most priority
}

var shortNames = map[TaskType]string{
@ -43,6 +43,12 @@ var shortNames = map[TaskType]string{
	TTReadUnsealed: "RD ",
}

func (a TaskType) MuchLess(b TaskType) (bool, bool) {
	oa, ob := order[a], order[b]
	oneNegative := oa^ob < 0
	return oneNegative, oa < ob
}

func (a TaskType) Less(b TaskType) bool {
	return order[a] < order[b]
}
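The new ordering splits task types into two bands: non-negative values for sealing work and negative values for "background" moves like Fetch, ReadUnsealed, and Finalize. `MuchLess` detects a cross-band comparison with `oa^ob < 0`, which in two's complement is true exactly when the two order values have different signs; `requestQueue.Less` above then only falls back to request priority when both tasks sit in the same band, so background tasks always jump the queue. A small check of that sign identity:

```go
package main

import "fmt"

// signsDiffer mirrors the oneNegative test in TaskType.MuchLess: XOR yields
// a negative result iff exactly one operand is negative.
func signsDiffer(a, b int) bool { return a^b < 0 }

func main() {
	fmt.Println(signsDiffer(-2, 6))  // true:  TTFinalize vs TTAddPiece (cross-band)
	fmt.Println(signsDiffer(5, 6))   // false: TTPreCommit1 vs TTAddPiece (same band)
	fmt.Println(signsDiffer(-1, -2)) // false: TTFetch vs TTFinalize (same band)
}
```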
2
extern/sector-storage/selector_alloc.go
vendored
@ -59,7 +59,7 @@ func (s *allocSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi
}

func (s *allocSelector) Cmp(ctx context.Context, task sealtasks.TaskType, a, b *workerHandle) (bool, error) {
	return a.active.utilization(a.info.Resources) < b.active.utilization(b.info.Resources), nil
	return a.utilization() < b.utilization(), nil
}

var _ WorkerSelector = &allocSelector{}

2
extern/sector-storage/selector_existing.go
vendored
@ -61,7 +61,7 @@ func (s *existingSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt
}

func (s *existingSelector) Cmp(ctx context.Context, task sealtasks.TaskType, a, b *workerHandle) (bool, error) {
	return a.active.utilization(a.info.Resources) < b.active.utilization(b.info.Resources), nil
	return a.utilization() < b.utilization(), nil
}

var _ WorkerSelector = &existingSelector{}

2
extern/sector-storage/selector_task.go
vendored
@ -42,7 +42,7 @@ func (s *taskSelector) Cmp(ctx context.Context, _ sealtasks.TaskType, a, b *work
		return len(atasks) < len(btasks), nil // prefer workers which can do less
	}

	return a.active.utilization(a.info.Resources) < b.active.utilization(b.info.Resources), nil
	return a.utilization() < b.utilization(), nil
}

var _ WorkerSelector = &allocSelector{}
18
extern/sector-storage/stats.go
vendored
@ -1,6 +1,8 @@
package sectorstorage

import "github.com/filecoin-project/lotus/extern/sector-storage/storiface"
import (
	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)

func (m *Manager) WorkerStats() map[uint64]storiface.WorkerStats {
	m.sched.workersLk.RLock()
@ -29,6 +31,20 @@ func (m *Manager) WorkerJobs() map[uint64][]storiface.WorkerJob {

	for id, handle := range m.sched.workers {
		out[uint64(id)] = handle.wt.Running()

		handle.wndLk.Lock()
		for wi, window := range handle.activeWindows {
			for _, request := range window.todo {
				out[uint64(id)] = append(out[uint64(id)], storiface.WorkerJob{
					ID:      0,
					Sector:  request.sector,
					Task:    request.taskType,
					RunWait: wi + 1,
					Start:   request.start,
				})
			}
		}
		handle.wndLk.Unlock()
	}

	return out
3
extern/sector-storage/storiface/worker.go
vendored
@ -37,5 +37,6 @@ type WorkerJob struct {
	Sector abi.SectorID
	Task   sealtasks.TaskType

	Start time.Time
	RunWait int // 0 - running, 1+ - assigned
	Start   time.Time
}
2
extern/sector-storage/testworker_test.go
vendored
@ -85,7 +85,7 @@ func (t *testWorker) Remove(ctx context.Context, sector abi.SectorID) error {
	panic("implement me")
}

func (t *testWorker) MoveStorage(ctx context.Context, sector abi.SectorID) error {
func (t *testWorker) MoveStorage(ctx context.Context, sector abi.SectorID, types stores.SectorFileType) error {
	panic("implement me")
}

85
extern/storage-sealing/cbor_gen.go
vendored
@ -135,12 +135,34 @@ func (t *DealInfo) MarshalCBOR(w io.Writer) error {
		_, err := w.Write(cbg.CborNull)
		return err
	}
	if _, err := w.Write([]byte{163}); err != nil {
	if _, err := w.Write([]byte{164}); err != nil {
		return err
	}

	scratch := make([]byte, 9)

	// t.PublishCid (cid.Cid) (struct)
	if len("PublishCid") > cbg.MaxLength {
		return xerrors.Errorf("Value in field \"PublishCid\" was too long")
	}

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PublishCid"))); err != nil {
		return err
	}
	if _, err := io.WriteString(w, string("PublishCid")); err != nil {
		return err
	}

	if t.PublishCid == nil {
		if _, err := w.Write(cbg.CborNull); err != nil {
			return err
		}
	} else {
		if err := cbg.WriteCidBuf(scratch, w, *t.PublishCid); err != nil {
			return xerrors.Errorf("failed to write cid field t.PublishCid: %w", err)
		}
	}

	// t.DealID (abi.DealID) (uint64)
	if len("DealID") > cbg.MaxLength {
		return xerrors.Errorf("Value in field \"DealID\" was too long")
@ -224,7 +246,30 @@ func (t *DealInfo) UnmarshalCBOR(r io.Reader) error {
		}

		switch name {
		// t.DealID (abi.DealID) (uint64)
		// t.PublishCid (cid.Cid) (struct)
		case "PublishCid":

			{

				b, err := br.ReadByte()
				if err != nil {
					return err
				}
				if b != cbg.CborNull[0] {
					if err := br.UnreadByte(); err != nil {
						return err
					}

					c, err := cbg.ReadCid(br)
					if err != nil {
						return xerrors.Errorf("failed to read cid field t.PublishCid: %w", err)
					}

					t.PublishCid = &c
				}

			}
			// t.DealID (abi.DealID) (uint64)
		case "DealID":

			{
@ -430,7 +475,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error {
		_, err := w.Write(cbg.CborNull)
		return err
	}
	if _, err := w.Write([]byte{182}); err != nil {
	if _, err := w.Write([]byte{183}); err != nil {
		return err
	}

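The bumped leading bytes in these generated marshallers are CBOR map headers: a map with n entries (for n < 24) is encoded as the single byte `0xa0 | n`. Adding `PublishCid` takes `DealInfo` from 3 to 4 fields (163 → 164), and adding `Return` below takes `SectorInfo` from 22 to 23 fields (182 → 183). A quick check of that arithmetic:

```go
package main

import "fmt"

func main() {
	// CBOR major type 5 (map) with a small length is 0xa0 plus the entry count.
	fmt.Println(0xa0|3, 0xa0|4)   // 163 164 -> DealInfo before/after PublishCid
	fmt.Println(0xa0|22, 0xa0|23) // 182 183 -> SectorInfo before/after Return
}
```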
@ -860,6 +905,29 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error {
		}
	}

	// t.Return (sealing.ReturnState) (string)
	if len("Return") > cbg.MaxLength {
		return xerrors.Errorf("Value in field \"Return\" was too long")
	}

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Return"))); err != nil {
		return err
	}
	if _, err := io.WriteString(w, string("Return")); err != nil {
		return err
	}

	if len(t.Return) > cbg.MaxLength {
		return xerrors.Errorf("Value in field t.Return was too long")
	}

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Return))); err != nil {
		return err
	}
	if _, err := io.WriteString(w, string(t.Return)); err != nil {
		return err
	}

	// t.LastErr (string) (string)
	if len("LastErr") > cbg.MaxLength {
		return xerrors.Errorf("Value in field \"LastErr\" was too long")
@ -1362,6 +1430,17 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) error {
			}

		}
		// t.Return (sealing.ReturnState) (string)
		case "Return":

			{
				sval, err := cbg.ReadStringBuf(br, scratch)
				if err != nil {
					return err
				}

				t.Return = ReturnState(sval)
			}
		// t.LastErr (string) (string)
		case "LastErr":

14
extern/storage-sealing/checks.go
vendored
@ -33,7 +33,7 @@ type ErrInvalidProof struct{ error }
type ErrNoPrecommit struct{ error }
type ErrCommitWaitFailed struct{ error }

func checkPieces(ctx context.Context, si SectorInfo, api SealingAPI) error {
func checkPieces(ctx context.Context, maddr address.Address, si SectorInfo, api SealingAPI) error {
	tok, height, err := api.ChainHead(ctx)
	if err != nil {
		return &ErrApi{xerrors.Errorf("getting chain head: %w", err)}
@ -55,6 +55,10 @@ func checkPieces(ctx context.Context, si SectorInfo, api SealingAPI) error {
			return &ErrInvalidDeals{xerrors.Errorf("getting deal %d for piece %d: %w", p.DealInfo.DealID, i, err)}
		}

		if proposal.Provider != maddr {
			return &ErrInvalidDeals{xerrors.Errorf("piece %d (of %d) of sector %d refers deal %d with wrong provider: %s != %s", i, len(si.Pieces), si.SectorNumber, p.DealInfo.DealID, proposal.Provider, maddr)}
		}

		if proposal.PieceCID != p.Piece.PieceCID {
			return &ErrInvalidDeals{xerrors.Errorf("piece %d (of %d) of sector %d refers deal %d with wrong PieceCID: %x != %x", i, len(si.Pieces), si.SectorNumber, p.DealInfo.DealID, p.Piece.PieceCID, proposal.PieceCID)}
		}
@ -74,6 +78,10 @@ func checkPieces(ctx context.Context, si SectorInfo, api SealingAPI) error {
// checkPrecommit checks that data commitment generated in the sealing process
// matches pieces, and that the seal ticket isn't expired
func checkPrecommit(ctx context.Context, maddr address.Address, si SectorInfo, tok TipSetToken, height abi.ChainEpoch, api SealingAPI) (err error) {
	if err := checkPieces(ctx, maddr, si, api); err != nil {
		return err
	}

	commD, err := api.StateComputeDataCommitment(ctx, maddr, si.SectorType, si.dealIDs(), tok)
	if err != nil {
		return &ErrApi{xerrors.Errorf("calling StateComputeDataCommitment: %w", err)}
@ -176,5 +184,9 @@ func (m *Sealing) checkCommit(ctx context.Context, si SectorInfo, proof []byte,
		return &ErrInvalidProof{xerrors.New("invalid proof (compute error?)")}
	}

	if err := checkPieces(ctx, m.maddr, si, m.api); err != nil {
		return err
	}

	return nil
}
263
extern/storage-sealing/fsm.go
vendored
@ -17,9 +17,9 @@ import (
)

func (m *Sealing) Plan(events []statemachine.Event, user interface{}) (interface{}, uint64, error) {
	next, err := m.plan(events, user.(*SectorInfo))
	next, processed, err := m.plan(events, user.(*SectorInfo))
	if err != nil || next == nil {
		return nil, uint64(len(events)), err
		return nil, processed, err
	}

	return func(ctx statemachine.Context, si SectorInfo) error {
@ -30,10 +30,10 @@ func (m *Sealing) Plan(events []statemachine.Event, user interface{}) (interface
	}

		return nil
	}, uint64(len(events)), nil // TODO: This processed event count is not very correct
	}, processed, nil // TODO: This processed event count is not very correct
}

var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *SectorInfo) error{
var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *SectorInfo) (uint64, error){
	// Sealing

	UndefinedSectorState: planOne(
@ -49,31 +49,39 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto
	PreCommit1: planOne(
		on(SectorPreCommit1{}, PreCommit2),
		on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed),
		on(SectorPackingFailed{}, PackingFailed),
		on(SectorDealsExpired{}, DealsExpired),
		on(SectorInvalidDealIDs{}, RecoverDealIDs),
	),
	PreCommit2: planOne(
		on(SectorPreCommit2{}, PreCommitting),
		on(SectorSealPreCommit2Failed{}, SealPreCommit2Failed),
		on(SectorPackingFailed{}, PackingFailed),
	),
	PreCommitting: planOne(
		on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed),
		on(SectorPreCommitted{}, PreCommitWait),
		on(SectorChainPreCommitFailed{}, PreCommitFailed),
		on(SectorPreCommitLanded{}, WaitSeed),
		on(SectorDealsExpired{}, DealsExpired),
		on(SectorInvalidDealIDs{}, RecoverDealIDs),
	),
	PreCommitWait: planOne(
		on(SectorChainPreCommitFailed{}, PreCommitFailed),
		on(SectorPreCommitLanded{}, WaitSeed),
		on(SectorRetryPreCommit{}, PreCommitting),
	),
	WaitSeed: planOne(
		on(SectorSeedReady{}, Committing),
		on(SectorChainPreCommitFailed{}, PreCommitFailed),
	),
	Committing: planCommitting,
	SubmitCommit: planOne(
		on(SectorCommitSubmitted{}, CommitWait),
		on(SectorCommitFailed{}, CommitFailed),
	),
	CommitWait: planOne(
		on(SectorProving{}, FinalizeSector),
		on(SectorCommitFailed{}, CommitFailed),
		on(SectorRetrySubmitCommit{}, SubmitCommit),
	),

	FinalizeSector: planOne(
@ -95,6 +103,8 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto
		on(SectorRetryWaitSeed{}, WaitSeed),
		on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed),
		on(SectorPreCommitLanded{}, WaitSeed),
		on(SectorDealsExpired{}, DealsExpired),
		on(SectorInvalidDealIDs{}, RecoverDealIDs),
	),
	ComputeProofFailed: planOne(
		on(SectorRetryComputeProof{}, Committing),
@ -109,22 +119,33 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto
		on(SectorChainPreCommitFailed{}, PreCommitFailed),
		on(SectorRetryPreCommit{}, PreCommitting),
		on(SectorRetryCommitWait{}, CommitWait),
		on(SectorDealsExpired{}, DealsExpired),
		on(SectorInvalidDealIDs{}, RecoverDealIDs),
	),
	FinalizeFailed: planOne(
		on(SectorRetryFinalize{}, FinalizeSector),
	),
	PackingFailed: planOne(), // TODO: Deprecated, remove
	DealsExpired:  planOne(
	// SectorRemove (global)
	),
	RecoverDealIDs: planOne(
		onReturning(SectorUpdateDealIDs{}),
	),

	// Post-seal

	Proving: planOne(
		on(SectorFaultReported{}, FaultReported),
		on(SectorFaulty{}, Faulty),
		on(SectorRemove{}, Removing),
	),
	Removing: planOne(
		on(SectorRemoved{}, Removed),
		on(SectorRemoveFailed{}, RemoveFailed),
	),
	RemoveFailed: planOne(
	// SectorRemove (global)
	),
	Faulty: planOne(
		on(SectorFaultReported{}, FaultReported),
	),
@ -133,7 +154,7 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto
	Removed: final,
}

func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(statemachine.Context, SectorInfo) error, error) {
func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(statemachine.Context, SectorInfo) error, uint64, error) {
	/////
	// First process all events

@ -176,11 +197,12 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta

	p := fsmPlanners[state.State]
	if p == nil {
		return nil, xerrors.Errorf("planner for state %s not found", state.State)
		return nil, 0, xerrors.Errorf("planner for state %s not found", state.State)
	}

	if err := p(events, state); err != nil {
		return nil, xerrors.Errorf("running planner for state %s failed: %w", state.State, err)
	processed, err := p(events, state)
	if err != nil {
		return nil, 0, xerrors.Errorf("running planner for state %s failed: %w", state.State, err)
	}

	/////
@ -188,47 +210,50 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta

	/*

	*   Empty <- incoming deals
	|   |
	|   v
	*<- WaitDeals <- incoming deals
	|   |
	|   v
	*<- Packing <- incoming committed capacity
	|   |
	|   v
	*<- PreCommit1 <--> SealPreCommit1Failed
	|   |       ^          ^^
	|   |       *----------++----\
	|   v       v          ||    |
	*<- PreCommit2 --------++--> SealPreCommit2Failed
	|   |                  ||
	|   v          /-------/|
	*   PreCommitting <-----+---> PreCommitFailed
	|   |                   |     ^
	|   v                   |     |
	*<- WaitSeed -----------+-----/
	|   |||  ^              |
	|   |||  \--------*-----/
	|   |||           |
	|   vvv      v----+----> ComputeProofFailed
	*<- Committing    |
	|   |        ^--> CommitFailed
	|   v             ^
	*<- CommitWait ---/
	|   |
	|   v
	|   FinalizeSector <--> FinalizeFailed
	|   |
	|   v
	*<- Proving
	|
	v
	FailedUnrecoverable
	*   Empty <- incoming deals
	|   |
	|   v
	*<- WaitDeals <- incoming deals
	|   |
	|   v
	*<- Packing <- incoming committed capacity
	|   |
	|   v
	*<- PreCommit1 <--> SealPreCommit1Failed
	|   |       ^          ^^
	|   |       *----------++----\
	|   v       v          ||    |
	*<- PreCommit2 --------++--> SealPreCommit2Failed
	|   |                  ||
	|   v          /-------/|
	*   PreCommitting <-----+---> PreCommitFailed
	|   |                   |     ^
	|   v                   |     |
	*<- WaitSeed -----------+-----/
	|   |||  ^              |
	|   |||  \--------*-----/
	|   |||           |
	|   vvv      v----+----> ComputeProofFailed
	*<- Committing    |
	|   |        ^--> CommitFailed
	|   v             ^
	|   SubmitCommit  |
	|   |             |
	|   v             |
	*<- CommitWait ---/
	|   |
	|   v
	|   FinalizeSector <--> FinalizeFailed
	|   |
	|   v
	*<- Proving
	|
	v
	FailedUnrecoverable

	UndefinedSectorState <- ¯\_(ツ)_/¯
	    |                     ^
	    *---------------------/
	UndefinedSectorState <- ¯\_(ツ)_/¯
	    |                     ^
	    *---------------------/

	*/

@ -241,51 +266,63 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta
	case WaitDeals:
		log.Infof("Waiting for deals %d", state.SectorNumber)
	case Packing:
		return m.handlePacking, nil
		return m.handlePacking, processed, nil
	case PreCommit1:
		return m.handlePreCommit1, nil
		return m.handlePreCommit1, processed, nil
	case PreCommit2:
		return m.handlePreCommit2, nil
		return m.handlePreCommit2, processed, nil
	case PreCommitting:
		return m.handlePreCommitting, nil
		return m.handlePreCommitting, processed, nil
	case PreCommitWait:
		return m.handlePreCommitWait, nil
		return m.handlePreCommitWait, processed, nil
	case WaitSeed:
		return m.handleWaitSeed, nil
		return m.handleWaitSeed, processed, nil
	case Committing:
		return m.handleCommitting, nil
		return m.handleCommitting, processed, nil
	case SubmitCommit:
		return m.handleSubmitCommit, processed, nil
	case CommitWait:
		return m.handleCommitWait, nil
		return m.handleCommitWait, processed, nil
	case FinalizeSector:
		return m.handleFinalizeSector, nil
		return m.handleFinalizeSector, processed, nil

	// Handled failure modes
	case SealPreCommit1Failed:
		return m.handleSealPrecommit1Failed, nil
		return m.handleSealPrecommit1Failed, processed, nil
	case SealPreCommit2Failed:
		return m.handleSealPrecommit2Failed, nil
		return m.handleSealPrecommit2Failed, processed, nil
	case PreCommitFailed:
		return m.handlePreCommitFailed, nil
		return m.handlePreCommitFailed, processed, nil
	case ComputeProofFailed:
		return m.handleComputeProofFailed, nil
		return m.handleComputeProofFailed, processed, nil
	case CommitFailed:
		return m.handleCommitFailed, nil
		return m.handleCommitFailed, processed, nil
	case FinalizeFailed:
		return m.handleFinalizeFailed, nil
		return m.handleFinalizeFailed, processed, nil
	case PackingFailed: // DEPRECATED: remove this for the next reset
		state.State = DealsExpired
		fallthrough
	case DealsExpired:
		return m.handleDealsExpired, processed, nil
	case RecoverDealIDs:
		return m.handleRecoverDealIDs, processed, nil

	// Post-seal
	case Proving:
		return m.handleProvingSector, nil
		return m.handleProvingSector, processed, nil
	case Removing:
		return m.handleRemoving, nil
		return m.handleRemoving, processed, nil
	case Removed:
		return nil, nil
		return nil, processed, nil

	case RemoveFailed:
		return m.handleRemoveFailed, processed, nil

	// Faults
	case Faulty:
		return m.handleFaulty, nil
		return m.handleFaulty, processed, nil
	case FaultReported:
		return m.handleFaultReported, nil
		return m.handleFaultReported, processed, nil

	// Fatal errors
	case UndefinedSectorState:
@ -296,28 +333,29 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta
		log.Errorf("unexpected sector update state: %s", state.State)
	}

	return nil, nil
	return nil, processed, nil
}

func planCommitting(events []statemachine.Event, state *SectorInfo) error {
	for _, event := range events {
func planCommitting(events []statemachine.Event, state *SectorInfo) (uint64, error) {
	for i, event := range events {
		switch e := event.User.(type) {
		case globalMutator:
			if e.applyGlobal(state) {
				return nil
				return uint64(i + 1), nil
			}
		case SectorCommitted: // the normal case
			e.apply(state)
			state.State = CommitWait
			state.State = SubmitCommit
		case SectorSeedReady: // seed changed :/
			if e.SeedEpoch == state.SeedEpoch && bytes.Equal(e.SeedValue, state.SeedValue) {
				log.Warnf("planCommitting: got SectorSeedReady, but the seed didn't change")
				continue // or it didn't!
			}

			log.Warnf("planCommitting: commit Seed changed")
			e.apply(state)
			state.State = Committing
			return nil
			return uint64(i + 1), nil
		case SectorComputeProofFailed:
			state.State = ComputeProofFailed
		case SectorSealPreCommit1Failed:
@ -327,10 +365,10 @@ func planCommitting(events []statemachine.Event, state *SectorInfo) error {
		case SectorRetryCommitWait:
			state.State = CommitWait
		default:
			return xerrors.Errorf("planCommitting got event of unknown type %T, events: %+v", event.User, events)
			return uint64(i), xerrors.Errorf("planCommitting got event of unknown type %T, events: %+v", event.User, events)
		}
	}
	return nil
	return uint64(len(events)), nil
}

func (m *Sealing) restartSectors(ctx context.Context) error {
@ -344,12 +382,33 @@ func (m *Sealing) restartSectors(ctx context.Context) error {
		return xerrors.Errorf("getting the sealing delay: %w", err)
	}

	m.unsealedInfoMap.lk.Lock()
	defer m.unsealedInfoMap.lk.Unlock()
	for _, sector := range trackedSectors {
		if err := m.sectors.Send(uint64(sector.SectorNumber), SectorRestart{}); err != nil {
			log.Errorf("restarting sector %d: %+v", sector.SectorNumber, err)
		}

		if sector.State == WaitDeals {

			// put the sector in the unsealedInfoMap
			if _, ok := m.unsealedInfoMap.infos[sector.SectorNumber]; ok {
				// something's funky here, but probably safe to move on
				log.Warnf("sector %v was already in the unsealedInfoMap when restarting", sector.SectorNumber)
			} else {
				ui := UnsealedSectorInfo{}
				for _, p := range sector.Pieces {
					if p.DealInfo != nil {
						ui.numDeals++
					}
					ui.stored += p.Piece.Size
					ui.pieceSizes = append(ui.pieceSizes, p.Piece.Size.Unpadded())
				}

				m.unsealedInfoMap.infos[sector.SectorNumber] = ui
			}

			// start a fresh timer for the sector
			if cfg.WaitDealsDelay > 0 {
				timer := time.NewTimer(cfg.WaitDealsDelay)
				go func() {
@ -371,31 +430,38 @@ func (m *Sealing) ForceSectorState(ctx context.Context, id abi.SectorNumber, sta
	return m.sectors.Send(id, SectorForceState{state})
}

func final(events []statemachine.Event, state *SectorInfo) error {
	return xerrors.Errorf("didn't expect any events in state %s, got %+v", state.State, events)
func final(events []statemachine.Event, state *SectorInfo) (uint64, error) {
	return 0, xerrors.Errorf("didn't expect any events in state %s, got %+v", state.State, events)
}

func on(mut mutator, next SectorState) func() (mutator, SectorState) {
	return func() (mutator, SectorState) {
		return mut, next
func on(mut mutator, next SectorState) func() (mutator, func(*SectorInfo) error) {
	return func() (mutator, func(*SectorInfo) error) {
		return mut, func(state *SectorInfo) error {
			state.State = next
			return nil
		}
	}
}

func planOne(ts ...func() (mutator, SectorState)) func(events []statemachine.Event, state *SectorInfo) error {
	return func(events []statemachine.Event, state *SectorInfo) error {
		if len(events) != 1 {
			for _, event := range events {
				if gm, ok := event.User.(globalMutator); ok {
					gm.applyGlobal(state)
					return nil
				}
func onReturning(mut mutator) func() (mutator, func(*SectorInfo) error) {
	return func() (mutator, func(*SectorInfo) error) {
		return mut, func(state *SectorInfo) error {
			if state.Return == "" {
				return xerrors.Errorf("return state not set")
			}
			return xerrors.Errorf("planner for state %s only has a plan for a single event only, got %+v", state.State, events)
		}

			state.State = SectorState(state.Return)
			state.Return = ""
			return nil
		}
	}
}

func planOne(ts ...func() (mut mutator, next func(*SectorInfo) error)) func(events []statemachine.Event, state *SectorInfo) (uint64, error) {
	return func(events []statemachine.Event, state *SectorInfo) (uint64, error) {
		if gm, ok := events[0].User.(globalMutator); ok {
			gm.applyGlobal(state)
			return nil
			return 1, nil
		}

		for _, t := range ts {
@ -410,15 +476,14 @@ func planOne(ts ...func() (mutator, SectorState)) func(events []statemachine.Eve
			}

			events[0].User.(mutator).apply(state)
			state.State = next
			return nil
			return 1, next(state)
		}

		_, ok := events[0].User.(Ignorable)
		if ok {
			return nil
			return 1, nil
		}

		return xerrors.Errorf("planner for state %s received unexpected event %T (%+v)", state.State, events[0].User, events[0])
		return 0, xerrors.Errorf("planner for state %s received unexpected event %T (%+v)", state.State, events[0].User, events[0])
	}
}
|
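The planner signature change above is the core of this hunk: planners now return how many events they consumed along with an error, instead of a bare error. A minimal self-contained sketch of that contract (toy types, not the real go-statemachine interfaces) shows why the count matters — the caller can keep the unconsumed tail queued and retry it later:

package main

import "fmt"

type Event struct{ User interface{} }

type SectorInfo struct{ State string }

// plan mirrors the (uint64, error) planner contract above: it reports how
// many events it fully processed, so the caller can retry the remainder.
func plan(events []Event, state *SectorInfo) (uint64, error) {
	for i, e := range events {
		s, ok := e.User.(string)
		if !ok {
			// i events were handled before the bad one.
			return uint64(i), fmt.Errorf("unknown event %T", e.User)
		}
		state.State = s
	}
	return uint64(len(events)), nil
}

func main() {
	st := &SectorInfo{}
	queue := []Event{{User: "Committing"}, {User: 42}, {User: "CommitWait"}}
	n, err := plan(queue, st)
	queue = queue[n:] // unconsumed events stay queued for the next call
	fmt.Println(n, err, st.State, len(queue))
}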
46 extern/storage-sealing/fsm_events.go vendored
@ -101,10 +101,6 @@ func (evt SectorPacked) apply(state *SectorInfo) {
	}
}

type SectorPackingFailed struct{ error }

func (evt SectorPackingFailed) apply(*SectorInfo) {}

type SectorPreCommit1 struct {
	PreCommit1Out storage.PreCommit1Out
	TicketValue   abi.SealRandomness
@ -191,13 +187,28 @@ type SectorCommitFailed struct{ error }
func (evt SectorCommitFailed) FormatError(xerrors.Printer) (next error) { return evt.error }
func (evt SectorCommitFailed) apply(*SectorInfo)                        {}

type SectorRetrySubmitCommit struct{}

func (evt SectorRetrySubmitCommit) apply(*SectorInfo) {}

type SectorDealsExpired struct{ error }

func (evt SectorDealsExpired) FormatError(xerrors.Printer) (next error) { return evt.error }
func (evt SectorDealsExpired) apply(*SectorInfo)                        {}

type SectorCommitted struct {
	Message cid.Cid
	Proof   []byte
	Proof []byte
}

func (evt SectorCommitted) apply(state *SectorInfo) {
	state.Proof = evt.Proof
}

type SectorCommitSubmitted struct {
	Message cid.Cid
}

func (evt SectorCommitSubmitted) apply(state *SectorInfo) {
	state.CommitMessage = &evt.Message
}

@ -256,6 +267,24 @@ type SectorRetryCommitWait struct{}

func (evt SectorRetryCommitWait) apply(state *SectorInfo) {}

type SectorInvalidDealIDs struct {
	Return ReturnState
}

func (evt SectorInvalidDealIDs) apply(state *SectorInfo) {
	state.Return = evt.Return
}

type SectorUpdateDealIDs struct {
	Updates map[int]abi.DealID
}

func (evt SectorUpdateDealIDs) apply(state *SectorInfo) {
	for i, id := range evt.Updates {
		state.Pieces[i].DealInfo.DealID = id
	}
}

// Faults

type SectorFaulty struct{}
@ -274,7 +303,10 @@ type SectorFaultedFinal struct{}

type SectorRemove struct{}

func (evt SectorRemove) apply(state *SectorInfo) {}
func (evt SectorRemove) applyGlobal(state *SectorInfo) bool {
	state.State = Removing
	return true
}

type SectorRemoved struct{}
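The SectorRemove change at the end of this file is the interesting one: swapping apply for applyGlobal moves the event out of the per-state planners and into the global pre-emption path that planCommitting and planOne check first. A sketch of the two interfaces as this diff uses them (interface names from the diff; the comments are interpretive):

// mutator is handled by whichever planner owns the current state.
type mutator interface {
	apply(state *SectorInfo)
}

// globalMutator short-circuits the planner: if applyGlobal returns true,
// the event has performed the transition itself, from any state. This is
// how SectorRemove now forces Removing regardless of where the sector was.
type globalMutator interface {
	applyGlobal(state *SectorInfo) bool
}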
15 extern/storage-sealing/fsm_test.go vendored
@ -16,7 +16,7 @@ func init() {
}

func (t *test) planSingle(evt interface{}) {
	_, err := t.s.plan([]statemachine.Event{{User: evt}}, t.state)
	_, _, err := t.s.plan([]statemachine.Event{{User: evt}}, t.state)
	require.NoError(t.t, err)
}

@ -62,6 +62,9 @@ func TestHappyPath(t *testing.T) {
	require.Equal(m.t, m.state.State, Committing)

	m.planSingle(SectorCommitted{})
	require.Equal(m.t, m.state.State, SubmitCommit)

	m.planSingle(SectorCommitSubmitted{})
	require.Equal(m.t, m.state.State, CommitWait)

	m.planSingle(SectorProving{})
@ -112,13 +115,16 @@ func TestSeedRevert(t *testing.T) {
	m.planSingle(SectorSeedReady{})
	require.Equal(m.t, m.state.State, Committing)

	_, err := m.s.plan([]statemachine.Event{{User: SectorSeedReady{SeedValue: nil, SeedEpoch: 5}}, {User: SectorCommitted{}}}, m.state)
	_, _, err := m.s.plan([]statemachine.Event{{User: SectorSeedReady{SeedValue: nil, SeedEpoch: 5}}, {User: SectorCommitted{}}}, m.state)
	require.NoError(t, err)
	require.Equal(m.t, m.state.State, Committing)

	// not changing the seed this time
	_, err = m.s.plan([]statemachine.Event{{User: SectorSeedReady{SeedValue: nil, SeedEpoch: 5}}, {User: SectorCommitted{}}}, m.state)
	_, _, err = m.s.plan([]statemachine.Event{{User: SectorSeedReady{SeedValue: nil, SeedEpoch: 5}}, {User: SectorCommitted{}}}, m.state)
	require.NoError(t, err)
	require.Equal(m.t, m.state.State, SubmitCommit)

	m.planSingle(SectorCommitSubmitted{})
	require.Equal(m.t, m.state.State, CommitWait)

	m.planSingle(SectorProving{})
@ -143,7 +149,8 @@ func TestPlanCommittingHandlesSectorCommitFailed(t *testing.T) {

	events := []statemachine.Event{{User: SectorCommitFailed{}}}

	require.NoError(t, planCommitting(events, m.state))
	_, err := planCommitting(events, m.state)
	require.NoError(t, err)

	require.Equal(t, CommitFailed, m.state.State)
}
14 extern/storage-sealing/sealing.go vendored
@ -148,7 +148,7 @@ func (m *Sealing) Stop(ctx context.Context) error {
	return m.sectors.Stop(ctx)
}
func (m *Sealing) AddPieceToAnySector(ctx context.Context, size abi.UnpaddedPieceSize, r io.Reader, d DealInfo) (abi.SectorNumber, abi.PaddedPieceSize, error) {
	log.Infof("Adding piece for deal %d", d.DealID)
	log.Infof("Adding piece for deal %d (publish msg: %s)", d.DealID, d.PublishCid)
	if (padreader.PaddedSize(uint64(size))) != size {
		return 0, 0, xerrors.Errorf("cannot allocate unpadded piece")
	}
@ -231,15 +231,23 @@ func (m *Sealing) Remove(ctx context.Context, sid abi.SectorNumber) error {

// Caller should NOT hold m.unsealedInfoMap.lk
func (m *Sealing) StartPacking(sectorID abi.SectorNumber) error {
	// locking here ensures that when the SectorStartPacking event is sent, the sector won't be picked up anywhere else
	m.unsealedInfoMap.lk.Lock()
	defer m.unsealedInfoMap.lk.Unlock()

	// cannot send SectorStartPacking to sectors that have already been packed, otherwise it will cause the state machine to exit
	if _, ok := m.unsealedInfoMap.infos[sectorID]; !ok {
		log.Warnf("StartPacking called for sector %v, but it is not in unsealedInfoMap.infos; it may already have been packed", sectorID)
		return nil
	}
	log.Infof("Starting packing sector %d", sectorID)
	err := m.sectors.Send(uint64(sectorID), SectorStartPacking{})
	if err != nil {
		return err
	}
	log.Infof("sent SectorStartPacking event for sector %d", sectorID)

	m.unsealedInfoMap.lk.Lock()
	delete(m.unsealedInfoMap.infos, sectorID)
	m.unsealedInfoMap.lk.Unlock()

	return nil
}
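The StartPacking rewrite above widens the lock: the membership check, the event send, and the map delete now all happen under unsealedInfoMap.lk, instead of locking only around the delete. A self-contained sketch (simplified stand-in types, not the package's real ones) of why that ordering matters:

package main

import (
	"fmt"
	"sync"
)

type sectorMap struct {
	lk    sync.Mutex
	infos map[uint64]struct{}
}

// startPacking checks, sends, and deletes under one lock, so a concurrent
// piece-adder cannot pick the sector up between the send and the delete,
// and a double call cannot send SectorStartPacking twice.
func (m *sectorMap) startPacking(id uint64, send func(uint64) error) error {
	m.lk.Lock()
	defer m.lk.Unlock()

	if _, ok := m.infos[id]; !ok {
		// already packed (or packing); sending again would confuse the FSM
		return nil
	}
	if err := send(id); err != nil {
		return err
	}
	delete(m.infos, id)
	return nil
}

func main() {
	m := &sectorMap{infos: map[uint64]struct{}{5: {}}}
	err := m.startPacking(5, func(id uint64) error {
		fmt.Println("packing sector", id)
		return nil
	})
	fmt.Println(err) // second call with the same id would be a no-op
}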
11 extern/storage-sealing/sector_state.go vendored
@ -10,12 +10,13 @@ const (
	WaitDeals     SectorState = "WaitDeals"     // waiting for more pieces (deals) to be added to the sector
	Packing       SectorState = "Packing"       // sector not in sealStore, and not on chain
	PreCommit1    SectorState = "PreCommit1"    // do PreCommit1
	PreCommit2    SectorState = "PreCommit2"    // do PreCommit1
	PreCommit2    SectorState = "PreCommit2"    // do PreCommit2
	PreCommitting SectorState = "PreCommitting" // on chain pre-commit
	PreCommitWait SectorState = "PreCommitWait" // waiting for precommit to land on chain
	WaitSeed      SectorState = "WaitSeed"      // waiting for seed
	Committing    SectorState = "Committing"
	CommitWait    SectorState = "CommitWait"    // waiting for message to land on chain
	Committing    SectorState = "Committing"    // compute PoRep
	SubmitCommit  SectorState = "SubmitCommit"  // send commit message to the chain
	CommitWait    SectorState = "CommitWait"    // wait for the commit message to land on chain
	FinalizeSector SectorState = "FinalizeSector"
	Proving        SectorState = "Proving"
	// error modes
@ -25,8 +26,10 @@ const (
	PreCommitFailed    SectorState = "PreCommitFailed"
	ComputeProofFailed SectorState = "ComputeProofFailed"
	CommitFailed       SectorState = "CommitFailed"
	PackingFailed      SectorState = "PackingFailed"
	PackingFailed      SectorState = "PackingFailed" // TODO: deprecated, remove
	FinalizeFailed     SectorState = "FinalizeFailed"
	DealsExpired       SectorState = "DealsExpired"
	RecoverDealIDs     SectorState = "RecoverDealIDs"

	Faulty        SectorState = "Faulty"        // sector is corrupted or gone for some reason
	FaultReported SectorState = "FaultReported" // sector has been declared as a fault on chain
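With SubmitCommit inserted between Committing and CommitWait, the happy path through these states reads, in order, as below. This is a sketch: the package does not export such a list, and the failure states above branch off at most of these steps.

// Assumes the SectorState constants above; illustrative only.
var happyPath = []SectorState{
	WaitDeals, Packing, PreCommit1, PreCommit2, PreCommitting,
	PreCommitWait, WaitSeed, Committing, SubmitCommit, CommitWait,
	FinalizeSector, Proving,
}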
141 extern/storage-sealing/states_failed.go vendored
@ -1,12 +1,18 @@
package sealing

import (
	"bytes"
	"time"

	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-statemachine"
	"github.com/filecoin-project/specs-actors/actors/abi"
	"github.com/filecoin-project/specs-actors/actors/builtin/market"
	"github.com/filecoin-project/specs-actors/actors/builtin/miner"
	"github.com/filecoin-project/specs-actors/actors/runtime/exitcode"

	"github.com/filecoin-project/lotus/extern/sector-storage/zerocomm"
)

const minRetryTime = 1 * time.Minute
@ -81,6 +87,11 @@ func (m *Sealing) handlePreCommitFailed(ctx statemachine.Context, sector SectorI
		return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("ticket expired error: %w", err)})
	case *ErrBadTicket:
		return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("bad expired: %w", err)})
	case *ErrInvalidDeals:
		log.Warnf("invalid deals in sector %d: %v", sector.SectorNumber, err)
		return ctx.Send(SectorInvalidDealIDs{Return: RetPreCommitFailed})
	case *ErrExpiredDeals:
		return ctx.Send(SectorDealsExpired{xerrors.Errorf("sector deals expired: %w", err)})
	case *ErrNoPrecommit:
		return ctx.Send(SectorRetryPreCommit{})
	case *ErrPrecommitOnChain:
@ -88,6 +99,7 @@ func (m *Sealing) handlePreCommitFailed(ctx statemachine.Context, sector SectorI
	case *ErrSectorNumberAllocated:
		log.Errorf("handlePreCommitFailed: sector number already allocated, not proceeding: %+v", err)
		// TODO: check if the sector is committed (not sure how we'd end up here)
		// TODO: check on-chain state, adjust local sector number counter to not give out allocated numbers
		return nil
	default:
		return xerrors.Errorf("checkPrecommit sanity check error: %w", err)
@ -157,7 +169,12 @@ func (m *Sealing) handleCommitFailed(ctx statemachine.Context, sector SectorInfo
	case *ErrExpiredTicket:
		return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("ticket expired error: %w", err)})
	case *ErrBadTicket:
		return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("bad expired: %w", err)})
		return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("bad ticket: %w", err)})
	case *ErrInvalidDeals:
		log.Warnf("invalid deals in sector %d: %v", sector.SectorNumber, err)
		return ctx.Send(SectorInvalidDealIDs{Return: RetCommitFailed})
	case *ErrExpiredDeals:
		return ctx.Send(SectorDealsExpired{xerrors.Errorf("sector deals expired: %w", err)})
	case nil:
		return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("no precommit: %w", err)})
	case *ErrPrecommitOnChain:
@ -192,6 +209,11 @@ func (m *Sealing) handleCommitFailed(ctx statemachine.Context, sector SectorInfo
		return ctx.Send(SectorRetryPreCommitWait{})
	case *ErrNoPrecommit:
		return ctx.Send(SectorRetryPreCommit{})
	case *ErrInvalidDeals:
		log.Warnf("invalid deals in sector %d: %v", sector.SectorNumber, err)
		return ctx.Send(SectorInvalidDealIDs{Return: RetCommitFailed})
	case *ErrExpiredDeals:
		return ctx.Send(SectorDealsExpired{xerrors.Errorf("sector deals expired: %w", err)})
	case *ErrCommitWaitFailed:
		if err := failedCooldown(ctx, sector); err != nil {
			return err
@ -221,3 +243,120 @@ func (m *Sealing) handleFinalizeFailed(ctx statemachine.Context, sector SectorIn

	return ctx.Send(SectorRetryFinalize{})
}

func (m *Sealing) handleRemoveFailed(ctx statemachine.Context, sector SectorInfo) error {
	if err := failedCooldown(ctx, sector); err != nil {
		return err
	}

	return ctx.Send(SectorRemove{})
}

func (m *Sealing) handleDealsExpired(ctx statemachine.Context, sector SectorInfo) error {
	// First make very sure the sector isn't committed
	si, err := m.api.StateSectorGetInfo(ctx.Context(), m.maddr, sector.SectorNumber, nil)
	if err != nil {
		return xerrors.Errorf("getting sector info: %w", err)
	}
	if si != nil {
		// TODO: this should never happen, but in case it does, try to go back to
		// the proving state after running some checks
		return xerrors.Errorf("sector is committed on-chain, but we're in DealsExpired")
	}

	if sector.PreCommitInfo == nil {
		// TODO: Create a separate state which will remove those pieces, and go back to PC1
		log.Errorf("non-precommitted sector with expired deals, can't recover from this yet")
	}

	// Not much to do here, we can't go back in time to commit this sector
	return ctx.Send(SectorRemove{})
}

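handleDealsExpired leads with an on-chain check before doing anything destructive. Distilled as a hypothetical helper (not in the diff) around the StateSectorGetInfo call it uses — the nil-info-with-nil-error case marks a sector that is absent from the sector set, as the handleCommitWait change further down also relies on:

// ifNotCommitted runs a destructive recovery step only when the sector is
// provably absent from the chain's sector set (hypothetical sketch).
func (m *Sealing) ifNotCommitted(ctx statemachine.Context, sector SectorInfo, then func() error) error {
	si, err := m.api.StateSectorGetInfo(ctx.Context(), m.maddr, sector.SectorNumber, nil)
	if err != nil {
		return xerrors.Errorf("getting sector info: %w", err)
	}
	if si != nil {
		return xerrors.Errorf("sector %d is committed on-chain, refusing destructive recovery", sector.SectorNumber)
	}
	return then()
}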
func (m *Sealing) handleRecoverDealIDs(ctx statemachine.Context, sector SectorInfo) error {
	tok, height, err := m.api.ChainHead(ctx.Context())
	if err != nil {
		return xerrors.Errorf("getting chain head: %w", err)
	}

	var toFix []int

	for i, p := range sector.Pieces {
		// if no deal is associated with the piece, ensure that we added it as
		// filler (i.e. ensure that it has a zero PieceCID)
		if p.DealInfo == nil {
			exp := zerocomm.ZeroPieceCommitment(p.Piece.Size.Unpadded())
			if !p.Piece.PieceCID.Equals(exp) {
				return xerrors.Errorf("sector %d piece %d had non-zero PieceCID %+v", sector.SectorNumber, i, p.Piece.PieceCID)
			}
			continue
		}

		proposal, err := m.api.StateMarketStorageDeal(ctx.Context(), p.DealInfo.DealID, tok)
		if err != nil {
			log.Warnf("getting deal %d for piece %d: %+v", p.DealInfo.DealID, i, err)
			toFix = append(toFix, i)
			continue
		}

		if proposal.Provider != m.maddr {
			log.Warnf("piece %d (of %d) of sector %d refers to deal %d with wrong provider: %s != %s", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, proposal.Provider, m.maddr)
			toFix = append(toFix, i)
			continue
		}

		if proposal.PieceCID != p.Piece.PieceCID {
			log.Warnf("piece %d (of %d) of sector %d refers to deal %d with wrong PieceCID: %x != %x", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, p.Piece.PieceCID, proposal.PieceCID)
			toFix = append(toFix, i)
			continue
		}

		if p.Piece.Size != proposal.PieceSize {
			log.Warnf("piece %d (of %d) of sector %d refers to deal %d with different size: %d != %d", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, p.Piece.Size, proposal.PieceSize)
			toFix = append(toFix, i)
			continue
		}

		if height >= proposal.StartEpoch {
			// TODO: check if we are in an early enough state (before precommit), try to remove the offending pieces
			// (tricky as we have to 'defragment' the sector while doing that, and update piece references for retrieval)
			return xerrors.Errorf("can't fix sector deals: piece %d (of %d) of sector %d refers to expired deal %d - should start at %d, head %d", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, proposal.StartEpoch, height)
		}
	}

	updates := map[int]abi.DealID{}
	for _, i := range toFix {
		p := sector.Pieces[i]

		if p.DealInfo.PublishCid == nil {
			// TODO: check if we are in an early enough state to try to remove this piece
			log.Errorf("can't fix sector deals: piece %d (of %d) of sector %d has nil DealInfo.PublishCid (refers to deal %d)", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID)
			// Not much to do here (and this can only happen for old spacerace sectors)
			return ctx.Send(SectorRemove{})
		}

		ml, err := m.api.StateSearchMsg(ctx.Context(), *p.DealInfo.PublishCid)
		if err != nil {
			return xerrors.Errorf("looking for publish deal message %s (sector %d, piece %d): %w", *p.DealInfo.PublishCid, sector.SectorNumber, i, err)
		}

		if ml.Receipt.ExitCode != exitcode.Ok {
			return xerrors.Errorf("looking for publish deal message %s (sector %d, piece %d): non-ok exit code: %s", *p.DealInfo.PublishCid, sector.SectorNumber, i, ml.Receipt.ExitCode)
		}

		var retval market.PublishStorageDealsReturn
		if err := retval.UnmarshalCBOR(bytes.NewReader(ml.Receipt.Return)); err != nil {
			return xerrors.Errorf("looking for publish deal message: unmarshaling message return: %w", err)
		}

		if len(retval.IDs) != 1 {
			// market currently only ever sends messages with 1 deal
			return xerrors.Errorf("can't recover dealIDs from publish deal message with more than 1 deal")
		}

		updates[i] = retval.IDs[0]
	}

	// Hand the recovered deal IDs back to the state machine
	return ctx.Send(SectorUpdateDealIDs{Updates: updates})
}
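The last loop above is the crux of deal ID recovery: the miner re-derives each lost deal ID from the receipt of the deal's publish message. Factored into a hedged sketch (a hypothetical helper; the diff inlines this logic), using the same specs-actors market types:

// dealIDFromPublishReturn decodes a PublishStorageDeals receipt's return
// value and takes its single deal ID, mirroring the checks above.
func dealIDFromPublishReturn(ret []byte) (abi.DealID, error) {
	var retval market.PublishStorageDealsReturn
	if err := retval.UnmarshalCBOR(bytes.NewReader(ret)); err != nil {
		return 0, xerrors.Errorf("unmarshaling publish return: %w", err)
	}
	if len(retval.IDs) != 1 {
		// the market is only ever sent single-deal publish messages here
		return 0, xerrors.Errorf("expected 1 deal, got %d", len(retval.IDs))
	}
	return retval.IDs[0], nil
}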
54 extern/storage-sealing/states_sealing.go vendored
@ -12,6 +12,7 @@ import (
	"github.com/filecoin-project/specs-actors/actors/builtin"
	"github.com/filecoin-project/specs-actors/actors/builtin/miner"
	"github.com/filecoin-project/specs-actors/actors/crypto"
	"github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
	"github.com/filecoin-project/specs-storage/storage"
)

@ -79,15 +80,16 @@ func (m *Sealing) getTicket(ctx statemachine.Context, sector SectorInfo) (abi.Se
}

func (m *Sealing) handlePreCommit1(ctx statemachine.Context, sector SectorInfo) error {
	if err := checkPieces(ctx.Context(), sector, m.api); err != nil { // Sanity check state
	if err := checkPieces(ctx.Context(), m.maddr, sector, m.api); err != nil { // Sanity check state
		switch err.(type) {
		case *ErrApi:
			log.Errorf("handlePreCommit1: api error, not proceeding: %+v", err)
			return nil
		case *ErrInvalidDeals:
			return ctx.Send(SectorPackingFailed{xerrors.Errorf("invalid dealIDs in sector: %w", err)})
			log.Warnf("invalid deals in sector %d: %v", sector.SectorNumber, err)
			return ctx.Send(SectorInvalidDealIDs{Return: RetPreCommit1})
		case *ErrExpiredDeals: // Probably not much we can do here, maybe re-pack the sector?
			return ctx.Send(SectorPackingFailed{xerrors.Errorf("expired dealIDs in sector: %w", err)})
			return ctx.Send(SectorDealsExpired{xerrors.Errorf("expired dealIDs in sector: %w", err)})
		default:
			return xerrors.Errorf("checkPieces sanity check error: %w", err)
		}
@ -155,6 +157,11 @@ func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInf
		return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("ticket expired: %w", err)})
	case *ErrBadTicket:
		return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("bad ticket: %w", err)})
	case *ErrInvalidDeals:
		log.Warnf("invalid deals in sector %d: %v", sector.SectorNumber, err)
		return ctx.Send(SectorInvalidDealIDs{Return: RetPreCommitting})
	case *ErrExpiredDeals:
		return ctx.Send(SectorDealsExpired{xerrors.Errorf("sector deals expired: %w", err)})
	case *ErrPrecommitOnChain:
		return ctx.Send(SectorPreCommitLanded{TipSet: tok}) // we re-did precommit
	case *ErrSectorNumberAllocated:
@ -226,11 +233,18 @@ func (m *Sealing) handlePreCommitWait(ctx statemachine.Context, sector SectorInf
		return ctx.Send(SectorChainPreCommitFailed{err})
	}

	if mw.Receipt.ExitCode != 0 {
	switch mw.Receipt.ExitCode {
	case exitcode.Ok:
		// this is what we expect
	case exitcode.SysErrOutOfGas:
		// gas estimator guessed a wrong number
		return ctx.Send(SectorRetryPreCommit{})
	default:
		log.Error("sector precommit failed: ", mw.Receipt.ExitCode)
		err := xerrors.Errorf("sector precommit failed: %d", mw.Receipt.ExitCode)
		return ctx.Send(SectorChainPreCommitFailed{err})
	}

	log.Info("precommit message landed on chain: ", sector.SectorNumber)

	return ctx.Send(SectorPreCommitLanded{TipSet: mw.TipSetTok})
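This receipt switch appears again almost verbatim in handleCommitWait below; only the retry event differs (SectorRetryPreCommit here, SectorRetrySubmitCommit there). A hypothetical distillation of the shared shape (not a helper the diff introduces):

// checkReceiptExit maps a message receipt's exit code onto the three
// outcomes both wait-handlers care about: proceed, resubmit, or fail.
func checkReceiptExit(code exitcode.ExitCode, retry, fail func() error) error {
	switch code {
	case exitcode.Ok:
		return nil // this is what we expect
	case exitcode.SysErrOutOfGas:
		// the gas estimator guessed wrong; resubmitting the message is far
		// cheaper than failing the whole sector
		return retry()
	default:
		return fail()
	}
}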
@ -326,21 +340,25 @@ func (m *Sealing) handleCommitting(ctx statemachine.Context, sector SectorInfo)
		return ctx.Send(SectorComputeProofFailed{xerrors.Errorf("computing seal proof failed(2): %w", err)})
	}

	return ctx.Send(SectorCommitted{
		Proof: proof,
	})
}

func (m *Sealing) handleSubmitCommit(ctx statemachine.Context, sector SectorInfo) error {
	tok, _, err := m.api.ChainHead(ctx.Context())
	if err != nil {
		log.Errorf("handleSubmitCommit: api error, not proceeding: %+v", err)
		return nil
	}

	if err := m.checkCommit(ctx.Context(), sector, proof, tok); err != nil {
	if err := m.checkCommit(ctx.Context(), sector, sector.Proof, tok); err != nil {
		return ctx.Send(SectorCommitFailed{xerrors.Errorf("commit check error: %w", err)})
	}

	// TODO: Consider splitting states and persist proof for faster recovery

	params := &miner.ProveCommitSectorParams{
		SectorNumber: sector.SectorNumber,
		Proof:        proof,
		Proof:        sector.Proof,
	}

	enc := new(bytes.Buffer)
@ -372,14 +390,13 @@ func (m *Sealing) handleCommitting(ctx statemachine.Context, sector SectorInfo)
		collateral = big.Zero()
	}

	// TODO: check seed / ticket are up to date
	// TODO: check seed / ticket / deals are up to date
	mcid, err := m.api.SendMsg(ctx.Context(), waddr, m.maddr, builtin.MethodsMiner.ProveCommitSector, collateral, m.feeCfg.MaxCommitGasFee, enc.Bytes())
	if err != nil {
		return ctx.Send(SectorCommitFailed{xerrors.Errorf("pushing message to mpool: %w", err)})
	}

	return ctx.Send(SectorCommitted{
		Proof: proof,
	return ctx.Send(SectorCommitSubmitted{
		Message: mcid,
	})
}
@ -395,13 +412,22 @@ func (m *Sealing) handleCommitWait(ctx statemachine.Context, sector SectorInfo)
		return ctx.Send(SectorCommitFailed{xerrors.Errorf("failed to wait for porep inclusion: %w", err)})
	}

	if mw.Receipt.ExitCode != 0 {
	switch mw.Receipt.ExitCode {
	case exitcode.Ok:
		// this is what we expect
	case exitcode.SysErrOutOfGas:
		// gas estimator guessed a wrong number
		return ctx.Send(SectorRetrySubmitCommit{})
	default:
		return ctx.Send(SectorCommitFailed{xerrors.Errorf("submitting sector proof failed (exit=%d, msg=%s) (t:%x; s:%x(%d); p:%x)", mw.Receipt.ExitCode, sector.CommitMessage, sector.TicketValue, sector.SeedValue, sector.SeedEpoch, sector.Proof)})
	}

	_, err = m.api.StateSectorGetInfo(ctx.Context(), m.maddr, sector.SectorNumber, mw.TipSetTok)
	si, err := m.api.StateSectorGetInfo(ctx.Context(), m.maddr, sector.SectorNumber, mw.TipSetTok)
	if err != nil {
		return ctx.Send(SectorCommitFailed{xerrors.Errorf("proof validation failed, sector not found in sector set after cron: %w", err)})
		return ctx.Send(SectorCommitFailed{xerrors.Errorf("proof validation failed, calling StateSectorGetInfo: %w", err)})
	}
	if si == nil {
		return ctx.Send(SectorCommitFailed{xerrors.Errorf("proof validation failed, sector not found in sector set after cron")})
	}

	return ctx.Send(SectorProving{})
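The removed TODO ("Consider splitting states and persist proof for faster recovery") is exactly what this hunk implements: Committing now ends by emitting SectorCommitted{Proof: proof}, whose apply stores the proof on the SectorInfo, and handleSubmitCommit reads sector.Proof instead of a local variable. A sketch of the recovery win (hypothetical resubmit helper; the params struct is the one from the diff):

// After a restart between Committing and SubmitCommit, the persisted
// SectorInfo already carries the proof, so the commit message can be
// rebuilt without recomputing PoRep.
func resubmitParams(sector SectorInfo) (*miner.ProveCommitSectorParams, error) {
	if len(sector.Proof) == 0 {
		return nil, xerrors.Errorf("no persisted proof; must return to Committing")
	}
	return &miner.ProveCommitSectorParams{
		SectorNumber: sector.SectorNumber,
		Proof:        sector.Proof,
	}, nil
}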
13 extern/storage-sealing/types.go vendored
@ -30,6 +30,7 @@ type Piece struct {

// DealInfo is a tuple of deal identity and its schedule
type DealInfo struct {
	PublishCid   *cid.Cid
	DealID       abi.DealID
	DealSchedule DealSchedule
	KeepUnsealed bool
@ -53,6 +54,15 @@ type Log struct {
	Kind      string
}

type ReturnState string

const (
	RetPreCommit1      = ReturnState(PreCommit1)
	RetPreCommitting   = ReturnState(PreCommitting)
	RetPreCommitFailed = ReturnState(PreCommitFailed)
	RetCommitFailed    = ReturnState(CommitFailed)
)

type SectorInfo struct {
	State        SectorState
	SectorNumber abi.SectorNumber
@ -90,6 +100,9 @@ type SectorInfo struct {
	// Faults
	FaultReportMsg *cid.Cid

	// Recovery
	Return ReturnState

	// Debug
	LastErr string
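The Return field closes the loop on RecoverDealIDs: whichever state detects bad deal IDs stamps its own identity into Return via SectorInvalidDealIDs, and onReturning sends the sector back there once SectorUpdateDealIDs has been applied. A sketch of the round trip inside this package (identifiers from the diff; the function body is the one onReturning builds):

// 1. A check fails in, say, PreCommit1:
//        ctx.Send(SectorInvalidDealIDs{Return: RetPreCommit1})
//    SectorInvalidDealIDs.apply records state.Return = "PreCommit1" and
//    the FSM moves to RecoverDealIDs.
// 2. handleRecoverDealIDs repairs the IDs and sends SectorUpdateDealIDs.
// 3. The RecoverDealIDs planner, built with onReturning, then restores
//    the origin state:
func returnToOrigin(state *SectorInfo) error {
	if state.Return == "" {
		return xerrors.Errorf("return state not set")
	}
	state.State = SectorState(state.Return)
	state.Return = ""
	return nil
}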
4 extern/storage-sealing/upgrade_queue.go vendored
@ -72,6 +72,10 @@ func (m *Sealing) tryUpgradeSector(ctx context.Context, params *miner.SectorPreC
		log.Errorf("error calling StateSectorGetInfo for replaced sector: %+v", err)
		return big.Zero()
	}
	if ri == nil {
		log.Errorf("couldn't find sector info for sector to replace: %+v", replace)
		return big.Zero()
	}

	if params.Expiration < ri.Expiration {
		// TODO: Some limit on this
1 extern/test-vectors vendored Submodule
@ -0,0 +1 @@
Subproject commit 9806d09b005dbaa0d08a6944aca67dd5ad2cd3b3
27 go.mod
@ -2,20 +2,17 @@ module github.com/filecoin-project/lotus

go 1.14

replace github.com/supranational/blst => github.com/supranational/blst v0.1.2-alpha.1
replace github.com/supranational/blst => github.com/filecoin-project/blst v0.1.2-adx

require (
	contrib.go.opencensus.io/exporter/jaeger v0.1.0
	contrib.go.opencensus.io/exporter/prometheus v0.1.0
	github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 // indirect
	github.com/BurntSushi/toml v0.3.1
	github.com/GeertJohan/go.rice v1.0.0
	github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee
	github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect
	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d
	github.com/buger/goterm v0.0.0-20200322175922-2f3e71b85129
	github.com/coreos/go-systemd/v22 v22.0.0
	github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
	github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e
	github.com/dgraph-io/badger/v2 v2.0.3
	github.com/docker/go-units v0.4.0
@ -26,13 +23,12 @@ require (
	github.com/filecoin-project/chain-validation v0.0.6-0.20200813000554-40c22fe26eef
	github.com/filecoin-project/filecoin-ffi v0.30.4-0.20200716204036-cddc56607e1d
	github.com/filecoin-project/go-address v0.0.3
	github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20200731171407-e559a0579161 // indirect
	github.com/filecoin-project/go-bitfield v0.2.0
	github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2
	github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03
	github.com/filecoin-project/go-data-transfer v0.6.2
	github.com/filecoin-project/go-data-transfer v0.6.3
	github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f
	github.com/filecoin-project/go-fil-markets v0.5.7
	github.com/filecoin-project/go-fil-markets v0.5.8
	github.com/filecoin-project/go-jsonrpc v0.1.2-0.20200822201400-474f4fdccc52
	github.com/filecoin-project/go-multistore v0.0.3
	github.com/filecoin-project/go-padreader v0.0.0-20200210211231-548257017ca6
@ -40,12 +36,12 @@ require (
	github.com/filecoin-project/go-statemachine v0.0.0-20200813232949-df9b130df370
	github.com/filecoin-project/go-statestore v0.1.0
	github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b
	github.com/filecoin-project/sector-storage v0.0.0-20200810171746-eac70842d8e0 // indirect
	github.com/filecoin-project/specs-actors v0.9.3
	github.com/filecoin-project/specs-storage v0.1.1-0.20200730063404-f7db367e9401
	github.com/filecoin-project/statediff v0.0.1
	github.com/filecoin-project/test-vectors v0.0.0-20200902131127-9806d09b005d
	github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1
	github.com/go-kit/kit v0.10.0
	github.com/go-ole/go-ole v1.2.4 // indirect
	github.com/google/uuid v1.1.1
	github.com/gorilla/mux v1.7.4
	github.com/gorilla/websocket v1.4.2
@ -64,7 +60,7 @@ require (
	github.com/ipfs/go-ds-measure v0.1.0
	github.com/ipfs/go-filestore v1.0.0
	github.com/ipfs/go-fs-lock v0.0.6
	github.com/ipfs/go-graphsync v0.1.1
	github.com/ipfs/go-graphsync v0.1.2
	github.com/ipfs/go-ipfs-blockstore v1.0.1
	github.com/ipfs/go-ipfs-chunker v0.0.5
	github.com/ipfs/go-ipfs-ds-help v1.0.0
@ -82,7 +78,7 @@ require (
	github.com/ipfs/go-unixfs v0.2.4
	github.com/ipfs/interface-go-ipfs-core v0.2.3
	github.com/ipld/go-car v0.1.1-0.20200526133713-1c7508d55aae
	github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e
	github.com/ipld/go-ipld-prime v0.0.4-0.20200828224805-5ff8c8b0b6ef
	github.com/kelseyhightower/envconfig v1.4.0
	github.com/lib/pq v1.7.0
	github.com/libp2p/go-eventbus v0.2.1
@ -94,7 +90,7 @@ require (
	github.com/libp2p/go-libp2p-mplex v0.2.4
	github.com/libp2p/go-libp2p-noise v0.1.1
	github.com/libp2p/go-libp2p-peerstore v0.2.6
	github.com/libp2p/go-libp2p-pubsub v0.3.5
	github.com/libp2p/go-libp2p-pubsub v0.3.6-0.20200901174250-06a12f17b7de
	github.com/libp2p/go-libp2p-quic-transport v0.8.0
	github.com/libp2p/go-libp2p-record v0.1.3
	github.com/libp2p/go-libp2p-routing-helpers v0.2.3
@ -102,7 +98,6 @@ require (
	github.com/libp2p/go-libp2p-tls v0.1.3
	github.com/libp2p/go-libp2p-yamux v0.2.8
	github.com/libp2p/go-maddr-filter v0.1.0
	github.com/mattn/go-isatty v0.0.12 // indirect
	github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1
	github.com/mitchellh/go-homedir v1.1.0
	github.com/multiformats/go-base32 v0.0.3
@ -112,7 +107,6 @@ require (
	github.com/multiformats/go-multihash v0.0.14
	github.com/opentracing/opentracing-go v1.2.0
	github.com/raulk/clock v1.1.0
	github.com/stretchr/objx v0.2.0 // indirect
	github.com/stretchr/testify v1.6.1
	github.com/supranational/blst v0.1.1
	github.com/syndtr/goleveldb v1.0.0
@ -123,7 +117,6 @@ require (
	github.com/whyrusleeping/pubsub v0.0.0-20131020042734-02de8aa2db3d
	github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542
	go.opencensus.io v0.22.4
	go.uber.org/dig v1.8.0 // indirect
	go.uber.org/fx v1.9.0
	go.uber.org/multierr v1.5.0
	go.uber.org/zap v1.15.0
@ -131,9 +124,7 @@ require (
	golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980
	golang.org/x/time v0.0.0-20191024005414-555d28b269f0
	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1
	google.golang.org/api v0.25.0 // indirect
	gotest.tools v2.2.0+incompatible
	launchpad.net/gocheck v0.0.0-20140225173054-000000000087 // indirect
)

replace github.com/golangci/golangci-lint => github.com/golangci/golangci-lint v1.18.0
@ -141,3 +132,5 @@ replace github.com/golangci/golangci-lint => github.com/golangci/golangci-lint v

replace github.com/filecoin-project/filecoin-ffi => ./extern/filecoin-ffi

replace github.com/dgraph-io/badger/v2 => github.com/dgraph-io/badger/v2 v2.0.1-rc1.0.20200716180832-3ab515320794

replace github.com/filecoin-project/test-vectors => ./extern/test-vectors
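The require/replace pair added here is the standard pattern for consuming a corpus via a git submodule: the require line pins a published pseudo-version for anyone importing lotus as a library, while the replace points local builds at ./extern/test-vectors, so checking out a different submodule commit takes effect without editing go.mod. The two relevant lines, side by side (both taken verbatim from the diff above):

require github.com/filecoin-project/test-vectors v0.0.0-20200902131127-9806d09b005d

replace github.com/filecoin-project/test-vectors => ./extern/test-vectors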
92
go.sum
92
go.sum
@ -35,8 +35,6 @@ git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGy
|
||||
github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
|
||||
github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9 h1:HD8gA2tkByhMAwYaFAX9w2l7vxvBQ5NMoxDrkhqhtn4=
|
||||
github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
|
||||
github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M=
|
||||
github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
@ -67,11 +65,9 @@ github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia
|
||||
github.com/akavel/rsrc v0.8.0 h1:zjWn7ukO9Kc5Q62DOJCcxGpXC18RawVtYAGdz2aLlfw=
|
||||
github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E=
|
||||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
@ -168,8 +164,6 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
|
||||
github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4=
|
||||
github.com/davidlazar/go-crypto v0.0.0-20190912175916-7055855a373f h1:BOaYiTvg8p9vBUXpklC22XSK/mifLF7lG9jtmYYi3Tc=
|
||||
github.com/davidlazar/go-crypto v0.0.0-20190912175916-7055855a373f/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4=
|
||||
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU=
|
||||
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U=
|
||||
github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e h1:lj77EKYUpYXTd8CD/+QMIf8b6OIOTsfEBSXiAzuEHTU=
|
||||
github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e/go.mod h1:3ZQK6DMPSz/QZ73jlWxBtUhNA8xZx7LzUFSq/OfP8vk=
|
||||
github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ=
|
||||
@ -220,8 +214,9 @@ github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5/go.mod h1:Jp
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/fatih/color v1.8.0 h1:5bzFgL+oy7JITMTxUPJ00n7VxmYd/PdMp5mHFX40/RY=
|
||||
github.com/fatih/color v1.8.0/go.mod h1:3l45GVGkyrnYNl9HoIjnp2NnNWvh6hLAqD8yTfGjnw8=
|
||||
github.com/fd/go-nat v1.0.0 h1:DPyQ97sxA9ThrWYRPcWUz/z9TnpTIGRYODIQc/dy64M=
|
||||
github.com/fd/go-nat v1.0.0/go.mod h1:BTBu/CKvMmOMUPkKVef1pngt2WFH/lg7E6yQnulfp6E=
|
||||
github.com/filecoin-project/blst v0.1.2-adx h1:qyirtiGFTN/C17y4xlCFAblgw2OXhW8+wtnLwV27/cM=
|
||||
github.com/filecoin-project/blst v0.1.2-adx/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
|
||||
github.com/filecoin-project/chain-validation v0.0.6-0.20200813000554-40c22fe26eef h1:MtQRSnJLsQOOlmsd/Ua5KWXimpxcaa715h6FUh/eJPY=
|
||||
github.com/filecoin-project/chain-validation v0.0.6-0.20200813000554-40c22fe26eef/go.mod h1:SMj5VK1pYgqC8FXVEtOBRTc+9AIrYu+C+K3tAXi2Rk8=
|
||||
github.com/filecoin-project/go-address v0.0.0-20200107215422-da8eea2842b5/go.mod h1:SAOwJoakQ8EPjwNIsiakIQKsoKdkcbx8U3IapgCg9R0=
|
||||
@ -235,7 +230,8 @@ github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20200731171407-e559a0579161
|
||||
github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20200731171407-e559a0579161/go.mod h1:vgmwKBkx+ca5OIeEvstiQgzAZnb7R6QaqE1oEDSqa6g=
|
||||
github.com/filecoin-project/go-bitfield v0.0.0-20200416002808-b3ee67ec9060/go.mod h1:iodsLxOFZnqKtjj2zkgqzoGNrv6vUqj69AT/J8DKXEw=
|
||||
github.com/filecoin-project/go-bitfield v0.0.1/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY=
|
||||
github.com/filecoin-project/go-bitfield v0.1.2 h1:TjLregCoyP1/5lm7WCM0axyV1myIHwbjGa21skuu5tk=
|
||||
github.com/filecoin-project/go-bitfield v0.0.3/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY=
|
||||
github.com/filecoin-project/go-bitfield v0.0.4-0.20200703174658-f4a5758051a1/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY=
|
||||
github.com/filecoin-project/go-bitfield v0.1.2/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM=
|
||||
github.com/filecoin-project/go-bitfield v0.2.0 h1:gCtLcjskIPtdg4NfN7gQZSQF9yrBQ7mkT0qCJxzGI2Q=
|
||||
github.com/filecoin-project/go-bitfield v0.2.0/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM=
|
||||
@ -243,21 +239,27 @@ github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 h1:a
|
||||
github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg=
|
||||
github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus=
|
||||
github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ=
|
||||
github.com/filecoin-project/go-data-transfer v0.6.2 h1:IgbkwcHoyWGglzfsY7P9L1GapzoiLNKuzfZY2bxER8E=
|
||||
github.com/filecoin-project/go-data-transfer v0.6.2/go.mod h1:uRYBRKVBVM12CSusBtVrzDHkVw/3DKZpkxKJVP1Ydas=
|
||||
github.com/filecoin-project/go-data-transfer v0.6.1/go.mod h1:uRYBRKVBVM12CSusBtVrzDHkVw/3DKZpkxKJVP1Ydas=
|
||||
github.com/filecoin-project/go-data-transfer v0.6.3 h1:7TLwm8nuodHYD/uiwJjKc/PGRR+LwqM8jmlZqgWuUfY=
|
||||
github.com/filecoin-project/go-data-transfer v0.6.3/go.mod h1:PmBKVXkhh67/tnEdJXQwDHl5mT+7Tbcwe1NPninqhnM=
|
||||
github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5/go.mod h1:JbkIgFF/Z9BDlvrJO1FuKkaWsH673/UdFaiVS6uIHlA=
|
||||
github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f h1:GxJzR3oRIMTPtpZ0b7QF8FKPK6/iPAc7trhlL5k/g+s=
|
||||
github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ=
|
||||
github.com/filecoin-project/go-fil-markets v0.5.7 h1:kzyMHqez8ssxchj5s9M1hkC3CTwRGh2MeglJGfUksQU=
|
||||
github.com/filecoin-project/go-fil-markets v0.5.7/go.mod h1:KnvFG3kSQ77vKYSY/QdrXET81wVCBByHXjG7AyxnbUw=
|
||||
github.com/filecoin-project/go-fil-markets v0.5.6-0.20200814234959-80b1788108ac/go.mod h1:umicPCaN99ysHTiYOmwhuLxTFbOwcsI+mdw/t96vvM4=
|
||||
github.com/filecoin-project/go-fil-markets v0.5.8 h1:uwl0QNUVmmSlUQfxshpj21Dmhh6WKTQNhnb1GMfdp18=
|
||||
github.com/filecoin-project/go-fil-markets v0.5.8/go.mod h1:6ZX1vbZbnukbVQ8tCB/MmEizuW/bmRX7SpGAltU3KVg=
|
||||
github.com/filecoin-project/go-jsonrpc v0.1.2-0.20200817153016-2ea5cbaf5ec0/go.mod h1:XBBpuKIMaXIIzeqzO1iucq4GvbF8CxmXRFoezRh+Cx4=
|
||||
github.com/filecoin-project/go-jsonrpc v0.1.2-0.20200822201400-474f4fdccc52 h1:FXtCp0ybqdQL9knb3OGDpkNTaBbPxgkqPeWKotUwkH0=
|
||||
github.com/filecoin-project/go-jsonrpc v0.1.2-0.20200822201400-474f4fdccc52/go.mod h1:XBBpuKIMaXIIzeqzO1iucq4GvbF8CxmXRFoezRh+Cx4=
|
||||
github.com/filecoin-project/go-multistore v0.0.3 h1:vaRBY4YiA2UZFPK57RNuewypB8u0DzzQwqsL0XarpnI=
|
||||
github.com/filecoin-project/go-multistore v0.0.3/go.mod h1:kaNqCC4IhU4B1uyr7YWFHd23TL4KM32aChS0jNkyUvQ=
|
||||
github.com/filecoin-project/go-padreader v0.0.0-20200210211231-548257017ca6 h1:92PET+sx1Hb4W/8CgFwGuxaKbttwY+UNspYZTvXY0vs=
|
||||
github.com/filecoin-project/go-padreader v0.0.0-20200210211231-548257017ca6/go.mod h1:0HgYnrkeSU4lu1p+LEOeDpFsNBssa0OGGriWdA4hvaE=
|
||||
github.com/filecoin-project/go-paramfetch v0.0.1/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc=
|
||||
github.com/filecoin-project/go-paramfetch v0.0.2-0.20200218225740-47c639bab663/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc=
|
||||
github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261 h1:A256QonvzRaknIIAuWhe/M2dpV2otzs3NBhi5TWa/UA=
|
||||
github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc=
|
||||
github.com/filecoin-project/go-statemachine v0.0.0-20200226041606-2074af6d51d9/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig=
|
||||
github.com/filecoin-project/go-statemachine v0.0.0-20200714194326-a77c3ae20989/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig=
|
||||
github.com/filecoin-project/go-statemachine v0.0.0-20200813232949-df9b130df370 h1:Jbburj7Ih2iaJ/o5Q9A+EAeTabME6YII7FLi9SKUf5c=
|
||||
github.com/filecoin-project/go-statemachine v0.0.0-20200813232949-df9b130df370/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig=
|
||||
@ -265,23 +267,27 @@ github.com/filecoin-project/go-statestore v0.1.0 h1:t56reH59843TwXHkMcwyuayStBIi
|
||||
github.com/filecoin-project/go-statestore v0.1.0/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI=
|
||||
github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b h1:fkRZSPrYpk42PV3/lIXiL0LHetxde7vyYYvSsttQtfg=
|
||||
github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b/go.mod h1:Q0GQOBtKf1oE10eSXSlhN45kDBdGvEcVOqMiffqX+N8=
|
||||
github.com/filecoin-project/lotus v0.4.3-0.20200820203717-d1718369a182/go.mod h1:biFZPQ/YyQGfkHUmHMiaNf2hnD6zm1+OAXPQYQ61Zkg=
|
||||
github.com/filecoin-project/lotus v0.5.8-0.20200902130912-0962292f920e/go.mod h1:OkZ5aUqs+fFnJOq9243WJDsTa9c3/Ae67NIAwVhAB+0=
|
||||
github.com/filecoin-project/sector-storage v0.0.0-20200712023225-1d67dcfa3c15/go.mod h1:salgVdX7qeXFo/xaiEQE29J4pPkjn71T0kt0n+VDBzo=
|
||||
github.com/filecoin-project/sector-storage v0.0.0-20200730050024-3ee28c3b6d9a/go.mod h1:oOawOl9Yk+qeytLzzIryjI8iRbqo+qzS6EEeElP4PWA=
|
||||
github.com/filecoin-project/sector-storage v0.0.0-20200810171746-eac70842d8e0 h1:E1fZ27fhKK05bhZItfTwqr1i05vXnEZJznQFEYwEEUU=
|
||||
github.com/filecoin-project/sector-storage v0.0.0-20200810171746-eac70842d8e0/go.mod h1:oOawOl9Yk+qeytLzzIryjI8iRbqo+qzS6EEeElP4PWA=
|
||||
github.com/filecoin-project/specs-actors v0.0.0-20200210130641-2d1fbd8672cf/go.mod h1:xtDZUB6pe4Pksa/bAJbJ693OilaC5Wbot9jMhLm3cZA=
|
||||
github.com/filecoin-project/specs-actors v0.3.0/go.mod h1:nQYnFbQ7Y0bHZyq6HDEuVlCPR+U3z5Q3wMOQ+2aiV+Y=
|
||||
github.com/filecoin-project/specs-actors v0.6.1/go.mod h1:dRdy3cURykh2R8O/DKqy8olScl70rmIS7GrB4hB1IDY=
|
||||
github.com/filecoin-project/specs-actors v0.7.3-0.20200716231407-60a2ae96d2e6/go.mod h1:JOMUa7EijvpOO4ofD1yeHNmqohkmmnhTvz/IpB6so4c=
|
||||
github.com/filecoin-project/specs-actors v0.8.2/go.mod h1:Q3ACV5kBLvqPaYbthc/J1lGMJ5OwogmD9pzdtPRMdCw=
|
||||
github.com/filecoin-project/specs-actors v0.8.7-0.20200811203034-272d022c1923 h1:+H4IG4OjTThljPkMH1ZpynxCulNdx4amEeHoP2GdQJI=
|
||||
github.com/filecoin-project/specs-actors v0.8.7-0.20200811203034-272d022c1923/go.mod h1:hukRu6vKQrrS7Nt+fC/ql4PqWLSfmAWNshD/VDtARZU=
|
||||
github.com/filecoin-project/specs-actors v0.9.2 h1:0JG0QLHw8pO6BPqPRe9eQxQW60biHAQsx1rlQ9QbzZ0=
|
||||
github.com/filecoin-project/specs-actors v0.9.2/go.mod h1:YasnVUOUha0DN5wB+twl+V8LlDKVNknRG00kTJpsfFA=
|
||||
github.com/filecoin-project/specs-actors v0.9.3 h1:Fi75G/UQ7R4eiIwnN+S6bBQ9LqKivyJdw62jJzTi6aE=
|
||||
github.com/filecoin-project/specs-actors v0.9.3/go.mod h1:YasnVUOUha0DN5wB+twl+V8LlDKVNknRG00kTJpsfFA=
|
||||
github.com/filecoin-project/specs-storage v0.1.1-0.20200622113353-88a9704877ea h1:iixjULRQFPn7Q9KlIqfwLJnlAXO10bbkI+xy5GKGdLY=
|
||||
github.com/filecoin-project/specs-storage v0.1.1-0.20200622113353-88a9704877ea/go.mod h1:Pr5ntAaxsh+sLG/LYiL4tKzvA83Vk5vLODYhfNwOg7k=
|
||||
github.com/filecoin-project/specs-storage v0.1.1-0.20200730063404-f7db367e9401 h1:jLzN1hwO5WpKPu8ASbW8fs1FUCsOWNvoBXzQhv+8/E8=
|
||||
github.com/filecoin-project/specs-storage v0.1.1-0.20200730063404-f7db367e9401/go.mod h1:Pr5ntAaxsh+sLG/LYiL4tKzvA83Vk5vLODYhfNwOg7k=
|
||||
github.com/filecoin-project/statediff v0.0.1 h1:lym6d5wNnzr+5Uc/6RRWx1hgwb+tCKn2mFIK0Eb1Q18=
|
||||
github.com/filecoin-project/statediff v0.0.1/go.mod h1:qNWauolLFEzOiA4LNWermBRVNbaZHfPcPevumZeh+hE=
|
||||
github.com/filecoin-project/storage-fsm v0.0.0-20200805013058-9d9ea4e6331f/go.mod h1:1CGbd11KkHuyWPT+xwwCol1zl/jnlpiKD2L4fzKxaiI=
|
||||
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
|
||||
github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6 h1:u/UEqS66A5ckRmS4yNpjmVH56sVtS/RfclBAYocb4as=
|
||||
github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ=
|
||||
@ -289,7 +295,6 @@ github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJn
|
||||
github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
|
||||
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
|
||||
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
|
||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
@ -345,7 +350,6 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.3 h1:GV+pQPG/EUUbkh47niozDcADz6go/dUwhVzdUQHIVRw=
|
||||
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc=
|
||||
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
|
||||
@ -398,7 +402,6 @@ github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORR
|
||||
github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f h1:KMlcu9X58lhTA/KrfX8Bi1LQSO4pzoVjTiL3h4Jk+Zk=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||
github.com/gorilla/handlers v1.4.2 h1:0QniY0USkHQ1RGCLfKxeNHK9bkDHGRYGNDFBCS+YARg=
|
||||
github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
|
||||
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
@ -477,6 +480,7 @@ github.com/ipfs/go-bitswap v0.0.9/go.mod h1:kAPf5qgn2W2DrgAcscZ3HrM9qh4pH+X8Fkk3
|
||||
github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0=
|
||||
github.com/ipfs/go-bitswap v0.1.2/go.mod h1:qxSWS4NXGs7jQ6zQvoPY3+NmOfHHG47mhkiLzBpJQIs=
|
||||
github.com/ipfs/go-bitswap v0.1.8/go.mod h1:TOWoxllhccevbWFUR2N7B1MTSVVge1s6XSMiCSA4MzM=
|
||||
github.com/ipfs/go-bitswap v0.2.8/go.mod h1:2Yjog0GMdH8+AsxkE0DI9D2mANaUTxbVVav0pPoZoug=
|
||||
github.com/ipfs/go-bitswap v0.2.20 h1:Zfi5jDUoqxDThORUznqdeL77DdGniAzlccNJ4vr+Itc=
|
||||
github.com/ipfs/go-bitswap v0.2.20/go.mod h1:C7TwBgHnu89Q8sHsTJP7IhUqF9XYLe71P4tT5adgmYo=
|
||||
github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc=
|
||||
@ -531,19 +535,20 @@ github.com/ipfs/go-ds-measure v0.1.0 h1:vE4TyY4aeLeVgnnPBC5QzKIjKrqzha0NCujTfgvV
|
||||
github.com/ipfs/go-ds-measure v0.1.0/go.mod h1:1nDiFrhLlwArTME1Ees2XaBOl49OoCgd2A3f8EchMSY=
|
||||
github.com/ipfs/go-filestore v1.0.0 h1:QR7ekKH+q2AGiWDc7W2Q0qHuYSRZGUJqUn0GsegEPb0=
|
||||
github.com/ipfs/go-filestore v1.0.0/go.mod h1:/XOCuNtIe2f1YPbiXdYvD0BKLA0JR1MgPiFOdcuu9SM=
|
||||
github.com/ipfs/go-fs-lock v0.0.1/go.mod h1:DNBekbboPKcxs1aukPSaOtFA3QfSdi5C855v0i9XJ8Y=
|
||||
github.com/ipfs/go-fs-lock v0.0.6 h1:sn3TWwNVQqSeNjlWy6zQ1uUGAZrV3hPOyEA6y1/N2a0=
|
||||
github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28L7zESmM=
|
||||
github.com/ipfs/go-graphsync v0.1.0 h1:RjLk7ha1tJtDXktqoxOjhvx4lDuzzIU+xQ+PEi74r3s=
|
||||
github.com/ipfs/go-graphsync v0.1.0/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE=
|
||||
github.com/ipfs/go-graphsync v0.1.1 h1:bFDAYS0Z48yd8ROPI6f/zIVmJxaDLA6m8cVuJPKC5fE=
|
||||
github.com/ipfs/go-graphsync v0.1.1/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE=
|
||||
github.com/ipfs/go-graphsync v0.1.2 h1:25Ll9kIXCE+DY0dicvfS3KMw+U5sd01b/FJbA7KAbhg=
|
||||
github.com/ipfs/go-graphsync v0.1.2/go.mod h1:sLXVXm1OxtE2XYPw62MuXCdAuNwkAdsbnfrmos5odbA=
|
||||
github.com/ipfs/go-hamt-ipld v0.0.15-0.20200131012125-dd88a59d3f2e/go.mod h1:9aQJu/i/TaRDW6jqB5U217dLIDopn50wxLdHXM2CTfE=
|
||||
github.com/ipfs/go-hamt-ipld v0.0.15-0.20200204200533-99b8553ef242/go.mod h1:kq3Pi+UP3oHhAdKexE+kHHYRKMoFNuGero0R7q3hWGg=
|
||||
github.com/ipfs/go-hamt-ipld v0.1.1 h1:0IQdvwnAAUKmDE+PMJa5y1QiwOPHpI9+eAbQEEEYthk=
|
||||
github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk=
|
||||
github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08=
github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw=
github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ=
github.com/ipfs/go-ipfs-blockstore v1.0.0 h1:pmFp5sFYsYVvMOp9X01AK3s85usVcLvkBTRsN6SnfUA=
github.com/ipfs/go-ipfs-blockstore v1.0.0/go.mod h1:knLVdhVU9L7CC4T+T4nvGdeUIPAXlnd9zmXfp+9MIjU=
github.com/ipfs/go-ipfs-blockstore v1.0.1 h1:fnuVj4XdZp4yExhd0CnUwAiMNJHiPnfInhiuwz4lW1w=
github.com/ipfs/go-ipfs-blockstore v1.0.1/go.mod h1:MGNZlHNEnR4KGgPHM3/k8lBySIOK2Ve+0KjZubKlaOE=
@ -618,7 +623,6 @@ github.com/ipfs/go-log/v2 v2.1.2-0.20200626104915-0016c0b4b3e4/go.mod h1:2v2nsGf
github.com/ipfs/go-merkledag v0.0.3/go.mod h1:Oc5kIXLHokkE1hWGMBHw+oxehkAaTOqtEb7Zbh6BhLA=
github.com/ipfs/go-merkledag v0.0.6/go.mod h1:QYPdnlvkOg7GnQRofu9XZimC5ZW5Wi3bKys/4GQQfto=
github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk=
github.com/ipfs/go-merkledag v0.3.1 h1:3UqWINBEr3/N+r6OwgFXAddDP/8zpQX/8J7IGVOCqRQ=
github.com/ipfs/go-merkledag v0.3.1/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M=
github.com/ipfs/go-merkledag v0.3.2 h1:MRqj40QkrWkvPswXs4EfSslhZ4RVPRbxwX11js0t1xY=
github.com/ipfs/go-merkledag v0.3.2/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M=
@ -632,7 +636,6 @@ github.com/ipfs/go-peertaskqueue v0.1.0/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3
github.com/ipfs/go-peertaskqueue v0.1.1/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U=
github.com/ipfs/go-peertaskqueue v0.2.0 h1:2cSr7exUGKYyDeUyQ7P/nHPs9P7Ht/B+ROrpN1EJOjc=
github.com/ipfs/go-peertaskqueue v0.2.0/go.mod h1:5/eNrBEbtSKWCG+kQK8K8fGNixoYUnr+P7jivavs9lY=
github.com/ipfs/go-todocounter v0.0.1 h1:kITWA5ZcQZfrUnDNkRn04Xzh0YFaDFXsoO2A81Eb6Lw=
github.com/ipfs/go-todocounter v0.0.1/go.mod h1:l5aErvQc8qKE2r7NDMjmq5UNAvuZy0rC8BHOplkWvZ4=
github.com/ipfs/go-unixfs v0.0.4/go.mod h1:eIo/p9ADu/MFOuyxzwU+Th8D6xoxU//r590vUpWyfz8=
github.com/ipfs/go-unixfs v0.2.1/go.mod h1:IwAAgul1UQIcNZzKPYZWOCijryFBeCV79cNubPzol+k=
@ -648,14 +651,15 @@ github.com/ipfs/iptb-plugins v0.2.1 h1:au4HWn9/pRPbkxA08pDx2oRAs4cnbgQWgV0teYXuu
github.com/ipfs/iptb-plugins v0.2.1/go.mod h1:QXMbtIWZ+jRsW8a4h13qAKU7jcM7qaittO8wOsTP0Rs=
github.com/ipld/go-car v0.1.1-0.20200526133713-1c7508d55aae h1:OV9dxl8iPMCOD8Vi/hvFwRh3JWPXqmkYSVxWr9JnEzM=
github.com/ipld/go-car v0.1.1-0.20200526133713-1c7508d55aae/go.mod h1:2mvxpu4dKRnuH3mj5u6KW/tmRSCcXvy/KYiJ4nC6h4c=
github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e h1:ZISbJlM0urTANR9KRfRaqlBmyOj5uUtxs2r4Up9IXsA=
github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e/go.mod h1:uVIwe/u0H4VdKv3kaN1ck7uCb6yD9cFLS9/ELyXbsw8=
github.com/ipld/go-ipld-prime-proto v0.0.0-20200428191222-c1ffdadc01e1 h1:K1Ysr7kgIlo7YQkPqdkA6H7BVdIugvuAz7OQUTJxLdE=
github.com/ipld/go-ipld-prime v0.0.4-0.20200828224805-5ff8c8b0b6ef h1:/yPelt/0CuzZsmRkYzBBnJ499JnAOGaIaAXHujx96ic=
github.com/ipld/go-ipld-prime v0.0.4-0.20200828224805-5ff8c8b0b6ef/go.mod h1:uVIwe/u0H4VdKv3kaN1ck7uCb6yD9cFLS9/ELyXbsw8=
github.com/ipld/go-ipld-prime-proto v0.0.0-20200428191222-c1ffdadc01e1/go.mod h1:OAV6xBmuTLsPZ+epzKkPB1e25FHk/vCtyatkdHcArLs=
github.com/ipld/go-ipld-prime-proto v0.0.0-20200828231332-ae0aea07222b h1:ZtlW6pubN17TDaStlxgrwEXXwwUfJaXu9RobwczXato=
github.com/ipld/go-ipld-prime-proto v0.0.0-20200828231332-ae0aea07222b/go.mod h1:OAV6xBmuTLsPZ+epzKkPB1e25FHk/vCtyatkdHcArLs=
github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 h1:QG4CGBqCeuBo6aZlGAamSkxWdgWfZGeE49eUOWJPA4c=
github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52/go.mod h1:fdg+/X9Gg4AsAIzWpEHwnqd+QY3b7lajxyjE1m4hkq4=
github.com/jackpal/gateway v1.0.4/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA=
github.com/jackpal/gateway v1.0.5 h1:qzXWUJfuMdlLMtt0a3Dgt+xkWQiA5itDEITVJtuSwMc=
github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA=
github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
@ -758,6 +762,7 @@ github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qD
github.com/libp2p/go-libp2p v0.8.3/go.mod h1:EsH1A+8yoWK+L4iKcbPYu6MPluZ+CHWI9El8cTaefiM=
github.com/libp2p/go-libp2p v0.9.2/go.mod h1:cunHNLDVus66Ct9iXXcjKRLdmHdFdHVe1TAnbubJQqQ=
github.com/libp2p/go-libp2p v0.10.0/go.mod h1:yBJNpb+mGJdgrwbKAKrhPU0u3ogyNFTfjJ6bdM+Q/G8=
github.com/libp2p/go-libp2p v0.10.3/go.mod h1:0ER6iPSaPeQjryNgOnm9bLNpMJCYmuw54xJXsVR17eE=
github.com/libp2p/go-libp2p v0.11.0 h1:jb5mqdqYEBAybTEhD8io43Cz5LzVKuWxOK7znSN69jE=
github.com/libp2p/go-libp2p v0.11.0/go.mod h1:3/ogJDXsbbepEfqtZKBR/DedzxJXCeK17t2Z9RE9bEE=
github.com/libp2p/go-libp2p-autonat v0.0.2/go.mod h1:fs71q5Xk+pdnKU014o2iq1RhMs9/PMaG5zXRFNnIIT4=
@ -819,7 +824,6 @@ github.com/libp2p/go-libp2p-core v0.6.1 h1:XS+Goh+QegCDojUZp00CaPMfiEADCrLjNZskW
github.com/libp2p/go-libp2p-core v0.6.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8=
github.com/libp2p/go-libp2p-crypto v0.0.1/go.mod h1:yJkNyDmO341d5wwXxDUGO0LykUVT72ImHNUqh5D/dBE=
github.com/libp2p/go-libp2p-crypto v0.0.2/go.mod h1:eETI5OUfBnvARGOHrJz2eWNyTUxEGZnBxMcbUjfIj4I=
github.com/libp2p/go-libp2p-crypto v0.1.0 h1:k9MFy+o2zGDNGsaoZl0MA3iZ75qXxr9OOoAZF+sD5OQ=
github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI=
github.com/libp2p/go-libp2p-daemon v0.2.2/go.mod h1:kyrpsLB2JeNYR2rvXSVWyY0iZuRIMhqzWR3im9BV6NQ=
github.com/libp2p/go-libp2p-discovery v0.0.1/go.mod h1:ZkkF9xIFRLA1xCc7bstYFkd80gBGK8Fc1JqGoU2i+zI=
@ -867,7 +871,6 @@ github.com/libp2p/go-libp2p-noise v0.1.1 h1:vqYQWvnIcHpIoWJKC7Al4D6Hgj0H012TuXRh
github.com/libp2p/go-libp2p-noise v0.1.1/go.mod h1:QDFLdKX7nluB7DEnlVPbz7xlLHdwHFA9HiohJRr3vwM=
github.com/libp2p/go-libp2p-peer v0.0.1/go.mod h1:nXQvOBbwVqoP+T5Y5nCjeH4sP9IX/J0AMzcDUVruVoo=
github.com/libp2p/go-libp2p-peer v0.1.1/go.mod h1:jkF12jGB4Gk/IOo+yomm+7oLWxF278F7UnrYUQ1Q8es=
github.com/libp2p/go-libp2p-peer v0.2.0 h1:EQ8kMjaCUwt/Y5uLgjT8iY2qg0mGUT0N1zUjer50DsY=
github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY=
github.com/libp2p/go-libp2p-peerstore v0.0.1/go.mod h1:RabLyPVJLuNQ+GFyoEkfi8H4Ti6k/HtZJ7YKgtSq+20=
github.com/libp2p/go-libp2p-peerstore v0.0.6/go.mod h1:RabLyPVJLuNQ+GFyoEkfi8H4Ti6k/HtZJ7YKgtSq+20=
@ -887,10 +890,12 @@ github.com/libp2p/go-libp2p-protocol v0.0.1/go.mod h1:Af9n4PiruirSDjHycM1QuiMi/1
github.com/libp2p/go-libp2p-protocol v0.1.0/go.mod h1:KQPHpAabB57XQxGrXCNvbL6UEXfQqUgC/1adR2Xtflk=
github.com/libp2p/go-libp2p-pubsub v0.1.1/go.mod h1:ZwlKzRSe1eGvSIdU5bD7+8RZN/Uzw0t1Bp9R1znpR/Q=
github.com/libp2p/go-libp2p-pubsub v0.3.2-0.20200527132641-c0712c6e92cf/go.mod h1:TxPOBuo1FPdsTjFnv+FGZbNbWYsp74Culx+4ViQpato=
github.com/libp2p/go-libp2p-pubsub v0.3.5 h1:iF75GWpcxKEUQU8tTkgLy69qIQvfhL+t6U6ndQrB6ho=
github.com/libp2p/go-libp2p-pubsub v0.3.5/go.mod h1:DTMSVmZZfXodB/pvdTGrY2eHPZ9W2ev7hzTH83OKHrI=
github.com/libp2p/go-libp2p-pubsub v0.3.5-0.20200820194335-bfc96c2cd081/go.mod h1:DTMSVmZZfXodB/pvdTGrY2eHPZ9W2ev7hzTH83OKHrI=
github.com/libp2p/go-libp2p-pubsub v0.3.6-0.20200901174250-06a12f17b7de h1:Dl0B0x6u+OSKXAa1DeB6xHFsUOBAhjrXJ10zykVSN6Q=
github.com/libp2p/go-libp2p-pubsub v0.3.6-0.20200901174250-06a12f17b7de/go.mod h1:DTMSVmZZfXodB/pvdTGrY2eHPZ9W2ev7hzTH83OKHrI=
github.com/libp2p/go-libp2p-quic-transport v0.1.1/go.mod h1:wqG/jzhF3Pu2NrhJEvE+IE0NTHNXslOPn9JQzyCAxzU=
github.com/libp2p/go-libp2p-quic-transport v0.5.0/go.mod h1:IEcuC5MLxvZ5KuHKjRu+dr3LjCT1Be3rcD/4d8JrX8M=
github.com/libp2p/go-libp2p-quic-transport v0.7.1/go.mod h1:TD31to4E5exogR/GWHClXCfkktigjAl5rXSt7HoxNvY=
github.com/libp2p/go-libp2p-quic-transport v0.8.0 h1:mHA94K2+TD0e9XtjWx/P5jGGZn0GdQ4OFYwNllagv4E=
github.com/libp2p/go-libp2p-quic-transport v0.8.0/go.mod h1:F2FG/6Bzz0U6essUVxDzE0s9CrY4XGLbl7QEmDNvU7A=
github.com/libp2p/go-libp2p-record v0.0.1/go.mod h1:grzqg263Rug/sRex85QrDOLntdFAymLDLm7lxMgU79Q=
@ -908,7 +913,6 @@ github.com/libp2p/go-libp2p-secio v0.0.3/go.mod h1:hS7HQ00MgLhRO/Wyu1bTX6ctJKhVp
github.com/libp2p/go-libp2p-secio v0.1.0/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8=
github.com/libp2p/go-libp2p-secio v0.2.0/go.mod h1:2JdZepB8J5V9mBp79BmwsaPQhRPNN2NrnB2lKQcdy6g=
github.com/libp2p/go-libp2p-secio v0.2.1/go.mod h1:cWtZpILJqkqrSkiYcDBh5lA3wbT2Q+hz3rJQq3iftD8=
github.com/libp2p/go-libp2p-secio v0.2.2 h1:rLLPvShPQAcY6eNurKNZq3eZjPWfU9kXF2eI9jIYdrg=
github.com/libp2p/go-libp2p-secio v0.2.2/go.mod h1:wP3bS+m5AUnFA+OFO7Er03uO1mncHG0uVwGrwvjYlNY=
github.com/libp2p/go-libp2p-swarm v0.0.1/go.mod h1:mh+KZxkbd3lQnveQ3j2q60BM1Cw2mX36XXQqwfPOShs=
github.com/libp2p/go-libp2p-swarm v0.0.6/go.mod h1:s5GZvzg9xXe8sbeESuFpjt8CJPTCa8mhEusweJqyFy8=
@ -979,13 +983,11 @@ github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO
github.com/libp2p/go-openssl v0.0.5/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc=
github.com/libp2p/go-openssl v0.0.7 h1:eCAzdLejcNVBzP/iZM9vqHnQm+XyCEbSSIheIPRGNsw=
github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc=
github.com/libp2p/go-reuseport v0.0.1 h1:7PhkfH73VXfPJYKQ6JwS5I/eVcoyYi9IMNGc6FWpFLw=
github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA=
github.com/libp2p/go-reuseport v0.0.2 h1:XSG94b1FJfGA01BUrT82imejHQyTxO4jEWqheyCXYvU=
github.com/libp2p/go-reuseport v0.0.2/go.mod h1:SPD+5RwGC7rcnzngoYC86GjPzjSywuQyMVAheVBD9nQ=
github.com/libp2p/go-reuseport-transport v0.0.1/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs=
github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs=
github.com/libp2p/go-reuseport-transport v0.0.3 h1:zzOeXnTooCkRvoH+bSXEfXhn76+LAiwoneM0gnXjF2M=
github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2Wb5JSyHNncjf1Oi2dEbzM=
github.com/libp2p/go-reuseport-transport v0.0.4 h1:OZGz0RB620QDGpv300n1zaOcKGGAoGVf8h9txtt/1uM=
github.com/libp2p/go-reuseport-transport v0.0.4/go.mod h1:trPa7r/7TJK/d+0hdBLOCGvpQQVOU74OXbNCIMkufGw=
@ -1002,7 +1004,6 @@ github.com/libp2p/go-tcp-transport v0.0.1/go.mod h1:mnjg0o0O5TmXUaUIanYPUqkW4+u6
github.com/libp2p/go-tcp-transport v0.0.4/go.mod h1:+E8HvC8ezEVOxIo3V5vCK9l1y/19K427vCzQ+xHKH/o=
github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc=
github.com/libp2p/go-tcp-transport v0.1.1/go.mod h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY=
github.com/libp2p/go-tcp-transport v0.2.0 h1:YoThc549fzmNJIh7XjHVtMIFaEDRtIrtWciG5LyYAPo=
github.com/libp2p/go-tcp-transport v0.2.0/go.mod h1:vX2U0CnWimU4h0SGSEsg++AzvBcroCGYw28kh94oLe0=
github.com/libp2p/go-tcp-transport v0.2.1 h1:ExZiVQV+h+qL16fzCWtd1HSzPsqWottJ8KXwWaVi8Ns=
github.com/libp2p/go-tcp-transport v0.2.1/go.mod h1:zskiJ70MEfWz2MKxvFB/Pv+tPIB1PpPUrHIWQ8aFw7M=
@ -1029,6 +1030,7 @@ github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-b
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/lucas-clemente/quic-go v0.11.2/go.mod h1:PpMmPfPKO9nKJ/psF49ESTAGQSdfXxlg1otPbEB2nOw=
github.com/lucas-clemente/quic-go v0.16.0/go.mod h1:I0+fcNTdb9eS1ZcjQZbDVPGchJ86chcIxPALn9lEJqE=
github.com/lucas-clemente/quic-go v0.17.3/go.mod h1:I0+fcNTdb9eS1ZcjQZbDVPGchJ86chcIxPALn9lEJqE=
github.com/lucas-clemente/quic-go v0.18.0 h1:JhQDdqxdwdmGdKsKgXi1+coHRoGhvU6z0rNzOJqZ/4o=
github.com/lucas-clemente/quic-go v0.18.0/go.mod h1:yXttHsSNxQi8AWijC/vLP+OJczXqzHSOcJrM5ITUlCg=
github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg=
@ -1040,7 +1042,6 @@ github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN
github.com/marten-seemann/qpack v0.1.0/go.mod h1:LFt1NU/Ptjip0C2CPkhimBz5CGE3WGDAUWqna+CNTrI=
github.com/marten-seemann/qpack v0.2.0/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc=
github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk=
github.com/marten-seemann/qtls v0.9.1 h1:O0YKQxNVPaiFgMng0suWEOY2Sb4LT2sRn9Qimq3Z1IQ=
github.com/marten-seemann/qtls v0.9.1/go.mod h1:T1MmAdDPyISzxlK6kjRr0pcZFBVd1OZbBb/j3cvzHhk=
github.com/marten-seemann/qtls v0.10.0 h1:ECsuYUKalRL240rRD4Ri33ISb7kAQ3qGDlrrl55b2pc=
github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs=
@ -1076,6 +1077,7 @@ github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3N
github.com/miekg/dns v1.1.4/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
github.com/miekg/dns v1.1.30/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g=
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
@ -1115,7 +1117,6 @@ github.com/multiformats/go-multiaddr v0.1.0/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lg
github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo=
github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4=
github.com/multiformats/go-multiaddr v0.2.1/go.mod h1:s/Apk6IyxfvMjDafnhJgJ3/46z7tZ04iMk5wP4QMGGE=
github.com/multiformats/go-multiaddr v0.2.2 h1:XZLDTszBIJe6m0zF6ITBrEcZR73OPUhCBBS9rYAuUzI=
github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u0xW5UouOmQQrn6a3Y=
github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4fJxp6ggJGteB8HQTI=
github.com/multiformats/go-multiaddr v0.3.1 h1:1bxa+W7j9wZKTZREySx1vPMs2TqrYWjVZ7zE6/XLG1I=
@ -1135,7 +1136,6 @@ github.com/multiformats/go-multiaddr-net v0.1.1/go.mod h1:5JNbcfBOP4dnhoZOv10JJV
github.com/multiformats/go-multiaddr-net v0.1.2/go.mod h1:QsWt3XK/3hwvNxZJp92iMQKME1qHfpYmyIjFVsSOY6Y=
github.com/multiformats/go-multiaddr-net v0.1.3/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA=
github.com/multiformats/go-multiaddr-net v0.1.4/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA=
github.com/multiformats/go-multiaddr-net v0.1.5 h1:QoRKvu0xHN1FCFJcMQLbG/yQE2z441L5urvG3+qyz7g=
github.com/multiformats/go-multiaddr-net v0.1.5/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA=
github.com/multiformats/go-multiaddr-net v0.2.0 h1:MSXRGN0mFymt6B1yo/6BPnIRpLPEnKgQNvVfCX5VDJk=
github.com/multiformats/go-multiaddr-net v0.2.0/go.mod h1:gGdH3UXny6U3cKKYCvpXI5rnK7YaOIEOPVDI9tsJbEA=
@ -1187,7 +1187,6 @@ github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
@ -1195,7 +1194,6 @@ github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg=
github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
@ -1372,8 +1370,6 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/supranational/blst v0.1.2-alpha.1 h1:v0UqVlvbRNZIaSeMPr+T01kvTUq1h0EZuZ6gnDR1Mlg=
github.com/supranational/blst v0.1.2-alpha.1/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
@ -1448,7 +1444,12 @@ github.com/whyrusleeping/pubsub v0.0.0-20131020042734-02de8aa2db3d/go.mod h1:g7c
github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee h1:lYbXeSvJi5zk5GLKVuid9TVjS9a0OmLIDKTfoZBL6Ow=
github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1:m2aV4LZI4Aez7dP5PMyVKEHhUyEJ/RjmPEDOpDvudHg=
github.com/whyrusleeping/yamux v1.1.5/go.mod h1:E8LnQQ8HKx5KD29HZFUwM1PxCOdPRzGwur1mcYhXcD8=
github.com/willscott/go-cmp v0.5.2-0.20200812183318-8affb9542345 h1:IJVAwIctqDFOrO0C2qzksXmANviyHJzrklU27e1ltzE=
github.com/willscott/go-cmp v0.5.2-0.20200812183318-8affb9542345/go.mod h1:D7hA8H5pyQx7Y5Em7IWx1R4vNJzfon3gpG9nxjkITjQ=
github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xlab/c-for-go v0.0.0-20200718154222-87b0065af829 h1:wb7xrDzfkLgPHsSEBm+VSx6aDdi64VtV0xvP0E6j8bk=
github.com/xlab/c-for-go v0.0.0-20200718154222-87b0065af829/go.mod h1:h/1PEBwj7Ym/8kOuMWvO2ujZ6Lt+TMbySEXNhjjR87I=
@ -1508,6 +1509,8 @@ go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM=
go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
go4.org v0.0.0-20190218023631-ce4c26f7be8e/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
go4.org v0.0.0-20190313082347-94abd6928b1d/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
go4.org v0.0.0-20200411211856-f5505b9728dd h1:BNJlw5kRTzdmyfh5U8F93HA2OwkP7ZGwA51eJ/0wKOU=
go4.org v0.0.0-20200411211856-f5505b9728dd/go.mod h1:CIiUVy99QCPfoE13bO4EZaz5GZMZXMSBGhxRdsvzbkg=
golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
@ -1532,6 +1535,7 @@ golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@ -1539,7 +1543,6 @@ golang.org/x/crypto v0.0.0-20200317142112-1b76d66859c6/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200427165652-729f1e841bcc/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@ -1689,6 +1692,7 @@ golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -1750,6 +1754,7 @@ golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200108195415-316d2f248479/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
@ -1760,8 +1765,9 @@ golang.org/x/tools v0.0.0-20200216192241-b320d3a0f5a2/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200318150045-ba25ddc85566/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200711155855-7342f9734a7d h1:F3OmlXCzYtG9YE6tXDnUOlJBzVzHF8EcmZ1yTJlcgIk=
golang.org/x/tools v0.0.0-20200711155855-7342f9734a7d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3 h1:r3P/5xOq/dK1991B65Oy6E1fRF/2d/fSYZJ/fXGVfJc=
golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

@ -97,8 +97,13 @@ func (n *ProviderNodeAdapter) PublishDeals(ctx context.Context, deal storagemark
}

func (n *ProviderNodeAdapter) OnDealComplete(ctx context.Context, deal storagemarket.MinerDeal, pieceSize abi.UnpaddedPieceSize, pieceData io.Reader) (*storagemarket.PackingResult, error) {
    if deal.PublishCid == nil {
        return nil, xerrors.Errorf("deal.PublishCid can't be nil")
    }

    p, offset, err := n.secb.AddPiece(ctx, pieceSize, pieceData, sealing.DealInfo{
        DealID: deal.DealID,
        DealID:       deal.DealID,
        PublishCid:   deal.PublishCid,
        DealSchedule: sealing.DealSchedule{
            StartEpoch: deal.ClientDealProposal.Proposal.StartEpoch,
            EndEpoch:   deal.ClientDealProposal.Proposal.EndEpoch,
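For context, the hunk above gates piece packing on the deal's publish message CID being known before anything is handed to the sealing subsystem. A minimal self-contained sketch of the same guard pattern; `DealInfo` here is a hypothetical stand-in, not the sealing package's type:

```go
package main

import (
	"errors"
	"fmt"
)

// DealInfo is an illustrative stand-in for the sealing package's deal metadata.
type DealInfo struct {
	DealID     uint64
	PublishCid *string // stand-in for *cid.Cid
}

// packDeal rejects deals whose publish message has not been recorded yet,
// mirroring the nil check added to OnDealComplete above.
func packDeal(d DealInfo) error {
	if d.PublishCid == nil {
		return errors.New("deal.PublishCid can't be nil")
	}
	fmt.Printf("packing deal %d published in %s\n", d.DealID, *d.PublishCid)
	return nil
}

func main() {
	c := "bafy..."
	_ = packDeal(DealInfo{DealID: 1, PublishCid: &c})
	fmt.Println(packDeal(DealInfo{DealID: 2})) // deal.PublishCid can't be nil
}
```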
@ -372,7 +377,7 @@ func (n *ProviderNodeAdapter) GetChainHead(ctx context.Context) (shared.TipSetTo
}

func (n *ProviderNodeAdapter) WaitForMessage(ctx context.Context, mcid cid.Cid, cb func(code exitcode.ExitCode, bytes []byte, err error) error) error {
    receipt, err := n.StateWaitMsg(ctx, mcid, build.MessageConfidence)
    receipt, err := n.StateWaitMsg(ctx, mcid, 2*build.MessageConfidence)
    if err != nil {
        return cb(0, nil, err)
    }
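The change above doubles the confidence interval the adapter waits before reporting a message as landed. A minimal sketch of the same wait-then-callback shape, assuming a hypothetical `waiter` interface (`StateWaitMsg` and `MessageConfidence` are Lotus names; everything else is illustrative):

```go
package main

import "fmt"

const messageConfidence = 5 // stand-in for build.MessageConfidence

// waiter abstracts the node API the adapter calls into.
type waiter interface {
	waitMsg(cid string, confidence uint64) (exitCode int, ret []byte, err error)
}

// waitForMessage waits for twice the usual confidence before invoking cb,
// trading latency for resilience against small chain reorgs.
func waitForMessage(w waiter, cid string, cb func(code int, ret []byte, err error) error) error {
	code, ret, err := w.waitMsg(cid, 2*messageConfidence)
	if err != nil {
		return cb(0, nil, err)
	}
	return cb(code, ret, nil)
}

type fakeWaiter struct{}

func (fakeWaiter) waitMsg(cid string, confidence uint64) (int, []byte, error) {
	fmt.Printf("waiting %d epochs on top of %s\n", confidence, cid)
	return 0, nil, nil
}

func main() {
	_ = waitForMessage(fakeWaiter{}, "bafy...", func(code int, ret []byte, err error) error {
		fmt.Println("message landed with exit code", code)
		return err
	})
}
```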
@ -19,6 +19,7 @@ var (
    Commit, _       = tag.NewKey("commit")
    PeerID, _       = tag.NewKey("peer_id")
    FailureType, _  = tag.NewKey("failure_type")
    Local, _        = tag.NewKey("local")
    MessageFrom, _  = tag.NewKey("message_from")
    MessageTo, _    = tag.NewKey("message_to")
    MessageNonce, _ = tag.NewKey("message_nonce")
@ -30,7 +31,7 @@ var (
    LotusInfo             = stats.Int64("info", "Arbitrary counter to tag lotus info to", stats.UnitDimensionless)
    ChainNodeHeight       = stats.Int64("chain/node_height", "Current Height of the node", stats.UnitDimensionless)
    ChainNodeWorkerHeight = stats.Int64("chain/node_worker_height", "Current Height of workers on the node", stats.UnitDimensionless)
    MessagePublished      = stats.Int64("message/pubished", "Counter for total locally published messages", stats.UnitDimensionless)
    MessagePublished      = stats.Int64("message/published", "Counter for total locally published messages", stats.UnitDimensionless)
    MessageReceived       = stats.Int64("message/received", "Counter for total received messages", stats.UnitDimensionless)
    MessageValidationFailure = stats.Int64("message/failure", "Counter for message validation failures", stats.UnitDimensionless)
    MessageValidationSuccess = stats.Int64("message/success", "Counter for message validation successes", stats.UnitDimensionless)
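As a usage sketch (not part of this diff): the new `Local` tag key and the renamed `message/published` measure are recorded through the standard OpenCensus calls, roughly as follows; the variable names are illustrative stand-ins for the package-level keys and measures above:

```go
package main

import (
	"context"

	"go.opencensus.io/stats"
	"go.opencensus.io/tag"
)

var (
	// Stand-ins for the keys/measures defined in the metrics package above.
	local, _         = tag.NewKey("local")
	messagePublished = stats.Int64("message/published", "Counter for total locally published messages", stats.UnitDimensionless)
)

func recordPublish(ctx context.Context) {
	// Attach the new "local" tag, then bump the counter; any views
	// registered for the measure aggregate these recordings.
	ctx, _ = tag.New(ctx, tag.Insert(local, "true"))
	stats.Record(ctx, messagePublished.M(1))
}

func main() {
	recordPublish(context.Background())
}
```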
@ -82,6 +83,10 @@ var (
        Measure:     BlockValidationDurationMilliseconds,
        Aggregation: defaultMillisecondsDistribution,
    }
    MessagePublishedView = &view.View{
        Measure:     MessagePublished,
        Aggregation: view.Count(),
    }
    MessageReceivedView = &view.View{
        Measure:     MessageReceived,
        Aggregation: view.Count(),
@ -89,7 +94,7 @@ var (
    MessageValidationFailureView = &view.View{
        Measure:     MessageValidationFailure,
        Aggregation: view.Count(),
        TagKeys:     []tag.Key{FailureType},
        TagKeys:     []tag.Key{FailureType, Local},
    }
    MessageValidationSuccessView = &view.View{
        Measure:     MessageValidationSuccess,
@ -99,6 +104,34 @@ var (
        Measure:     PeerCount,
        Aggregation: view.LastValue(),
    }
    PubsubPublishMessageView = &view.View{
        Measure:     PubsubPublishMessage,
        Aggregation: view.Count(),
    }
    PubsubDeliverMessageView = &view.View{
        Measure:     PubsubDeliverMessage,
        Aggregation: view.Count(),
    }
    PubsubRejectMessageView = &view.View{
        Measure:     PubsubRejectMessage,
        Aggregation: view.Count(),
    }
    PubsubDuplicateMessageView = &view.View{
        Measure:     PubsubDuplicateMessage,
        Aggregation: view.Count(),
    }
    PubsubRecvRPCView = &view.View{
        Measure:     PubsubRecvRPC,
        Aggregation: view.Count(),
    }
    PubsubSendRPCView = &view.View{
        Measure:     PubsubSendRPC,
        Aggregation: view.Count(),
    }
    PubsubDropRPCView = &view.View{
        Measure:     PubsubDropRPC,
        Aggregation: view.Count(),
    }
)

// DefaultViews is an array of OpenCensus views for metric gathering purposes
@ -110,10 +143,18 @@ var DefaultViews = append([]*view.View{
    BlockValidationFailureView,
    BlockValidationSuccessView,
    BlockValidationDurationView,
    MessagePublishedView,
    MessageReceivedView,
    MessageValidationFailureView,
    MessageValidationSuccessView,
    PeerCountView,
    PubsubPublishMessageView,
    PubsubDeliverMessageView,
    PubsubRejectMessageView,
    PubsubDuplicateMessageView,
    PubsubRecvRPCView,
    PubsubSendRPCView,
    PubsubDropRPCView,
},
    rpcmetrics.DefaultViews...)
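A consuming process registers these views once at startup so an exporter (Prometheus, for example) can aggregate the new pubsub counters; a minimal sketch against the OpenCensus API, with the metrics import path shown as an assumption:

```go
package main

import (
	"log"

	"go.opencensus.io/stats/view"

	"github.com/filecoin-project/lotus/metrics" // assumed import path
)

func main() {
	// Register the default Lotus views (now including the pubsub views).
	if err := view.Register(metrics.DefaultViews...); err != nil {
		log.Fatalf("registering views: %v", err)
	}
}
```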
@ -37,7 +37,7 @@ const (
)

// returns a callback reporting whether we mined a block in this round
type waitFunc func(ctx context.Context, baseTime uint64) (func(bool, error), abi.ChainEpoch, error)
type waitFunc func(ctx context.Context, baseTime uint64) (func(bool, abi.ChainEpoch, error), abi.ChainEpoch, error)

func randTimeOffset(width time.Duration) time.Duration {
    buf := make([]byte, 8)
@ -57,7 +57,7 @@ func NewMiner(api api.FullNode, epp gen.WinningPoStProver, addr address.Address,
        api:     api,
        epp:     epp,
        address: addr,
        waitFunc: func(ctx context.Context, baseTime uint64) (func(bool, error), abi.ChainEpoch, error) {
        waitFunc: func(ctx context.Context, baseTime uint64) (func(bool, abi.ChainEpoch, error), abi.ChainEpoch, error) {
            // Wait around for half the block time in case other parents come in
            deadline := baseTime + build.PropagationDelaySecs
            baseT := time.Unix(int64(deadline), 0)
@ -66,7 +66,7 @@ func NewMiner(api api.FullNode, epp gen.WinningPoStProver, addr address.Address,

            build.Clock.Sleep(build.Clock.Until(baseT))

            return func(bool, error) {}, 0, nil
            return func(bool, abi.ChainEpoch, error) {}, 0, nil
        },

        sf: sf,
@ -160,7 +160,7 @@ func (m *Miner) mine(ctx context.Context) {
    }

    var base *MiningBase
    var onDone func(bool, error)
    var onDone func(bool, abi.ChainEpoch, error)
    var injectNulls abi.ChainEpoch

    for {
@ -176,7 +176,7 @@ func (m *Miner) mine(ctx context.Context) {
            break
        }
        if base != nil {
            onDone(false, nil)
            onDone(false, 0, nil)
        }

        // TODO: need to change the orchestration here. the problem is that
@ -215,12 +215,16 @@ func (m *Miner) mine(ctx context.Context) {
        if err != nil {
            log.Errorf("mining block failed: %+v", err)
            m.niceSleep(time.Second)
            onDone(false, err)
            onDone(false, 0, err)
            continue
        }
        lastBase = *base

        onDone(b != nil, nil)
        var h abi.ChainEpoch
        if b != nil {
            h = b.Header.Height
        }
        onDone(b != nil, h, nil)

        if b != nil {
            journal.J.RecordEvent(m.evtTypes[evtTypeBlockMined], func() interface{} {
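The signature change threads the mined block's epoch through the round-completion callback. A minimal sketch of a test-style `waitFunc` honoring the new shape, with types simplified (`chainEpoch` is a hypothetical stand-in for `abi.ChainEpoch`):

```go
package main

import (
	"context"
	"fmt"
)

type chainEpoch int64 // stand-in for abi.ChainEpoch

// waitFunc mirrors the new miner callback shape: the returned function now
// also receives the epoch of the block that was (or was not) mined.
type waitFunc func(ctx context.Context, baseTime uint64) (func(mined bool, epoch chainEpoch, err error), chainEpoch, error)

// testWaitFunc skips the propagation-delay sleep and just records outcomes,
// the sort of stub a test harness might install instead of the default.
func testWaitFunc(ctx context.Context, baseTime uint64) (func(bool, chainEpoch, error), chainEpoch, error) {
	report := func(mined bool, epoch chainEpoch, err error) {
		fmt.Printf("round done: mined=%v epoch=%d err=%v\n", mined, epoch, err)
	}
	return report, 0, nil
}

func main() {
	var wf waitFunc = testWaitFunc
	onDone, _, _ := wf(context.Background(), 0)
	onDone(true, 42, nil)
}
```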