Merge branch 'raulk/itests-refactor-kit' into refactor/itest-multisig
commit f1bf080e04

@@ -133,6 +133,9 @@ jobs:
       deadline-test:
         type: string
         default: "0"
+      proofs-log-test:
+        type: string
+        default: "0"
       test-suite-name:
         type: string
         default: unit

@@ -167,6 +170,7 @@ jobs:
       environment:
         LOTUS_TEST_WINDOW_POST: << parameters.winpost-test >>
         LOTUS_TEST_DEADLINE_TOGGLING: << parameters.deadline-test >>
+        TEST_RUSTPROOFS_LOGS: << parameters.proofs-log-test >>
         SKIP_CONFORMANCE: "1"
       command: |
         mkdir -p /tmp/test-reports/<< parameters.test-suite-name >>

@@ -212,6 +216,8 @@ jobs:
     <<: *test
   test-terminate:
     <<: *test
+  check-proofs-multicore-sdr:
+    <<: *test
   test-conformance:
     description: |
       Run tests using a corpus of interoperable test vectors for Filecoin

@@ -815,6 +821,12 @@ workflows:
           tags:
             only:
               - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+      - check-proofs-multicore-sdr:
+          codecov-upload: true
+          go-test-flags: "-run=TestMulticoreSDR"
+          test-suite-name: multicore-sdr-check
+          packages: "./extern/sector-storage/ffiwrapper"
+          proofs-log-test: "1"
       - test-conformance:
           test-suite-name: conformance
           packages: "./conformance"

CHANGELOG.md (137 changed lines)
@@ -70,143 +70,6 @@ This is an optional Lotus release that introduces various improvements to the se
 - fix health report (https://github.com/filecoin-project/lotus/pull/6011)
 - fix(ci): Use recent ubuntu LTS release; Update release params ((https://github.com/filecoin-project/lotus/pull/6011))
 
-# 1.9.0-rc4 / 2021-05-13
-
-This is an optional Lotus release that introduces various improvements to the sealing, mining, and deal-making processes.
-
-## Highlights
-
-- OpenRPC Support (https://github.com/filecoin-project/lotus/pull/5843)
-- Take latency into account when making interactive deals (https://github.com/filecoin-project/lotus/pull/5876)
-- Update go-commp-utils for >10x faster client commp calculation (https://github.com/filecoin-project/lotus/pull/5892)
-- add `lotus client cancel-retrieval` cmd to lotus CLI (https://github.com/filecoin-project/lotus/pull/5871)
-- add `inspect-deal` command to `lotus client` (https://github.com/filecoin-project/lotus/pull/5833)
-- Local retrieval support (https://github.com/filecoin-project/lotus/pull/5917)
-- go-fil-markets v1.1.9 -> v1.2.5
-  - For a detailed changelog see https://github.com/filecoin-project/go-fil-markets/blob/master/CHANGELOG.md
-- rust-fil-proofs v5.4.1 -> v7.0.1
-  - For a detailed changelog see https://github.com/filecoin-project/rust-fil-proofs/blob/master/CHANGELOG.md
-
-## Changes
-- storagefsm: Apply global events even in broken states (https://github.com/filecoin-project/lotus/pull/5962)
-- Default the AlwaysKeepUnsealedCopy flag to true (https://github.com/filecoin-project/lotus/pull/5743)
-- splitstore: compact hotstore prior to garbage collection (https://github.com/filecoin-project/lotus/pull/5778)
-- ipfs-force bootstrapper update (https://github.com/filecoin-project/lotus/pull/5799)
-- better logging when unsealing fails (https://github.com/filecoin-project/lotus/pull/5851)
-- perf: add cache for gas permium estimation (https://github.com/filecoin-project/lotus/pull/5709)
-- backupds: Compact log on restart (https://github.com/filecoin-project/lotus/pull/5875)
-- backupds: Improve truncated log handling (https://github.com/filecoin-project/lotus/pull/5891)
-- State CLI improvements (State CLI improvements)
-- API proxy struct codegen (https://github.com/filecoin-project/lotus/pull/5854)
-- move DI stuff for paychmgr into modules (https://github.com/filecoin-project/lotus/pull/5791)
-- Implement Event observer and Settings for 3rd party dep injection (https://github.com/filecoin-project/lotus/pull/5693)
-- Export developer and network commands for consumption by derivatives of Lotus (https://github.com/filecoin-project/lotus/pull/5864)
-- mock sealer: Simulate randomness sideeffects (https://github.com/filecoin-project/lotus/pull/5805)
-- localstorage: Demote reservation stat error to debug (https://github.com/filecoin-project/lotus/pull/5976)
-- shed command to unpack miner info dumps (https://github.com/filecoin-project/lotus/pull/5800)
-- Add two utils to Lotus-shed (https://github.com/filecoin-project/lotus/pull/5867)
-- add shed election estimate command (https://github.com/filecoin-project/lotus/pull/5092)
-- Add --actor flag in lotus-shed sectors terminate (https://github.com/filecoin-project/lotus/pull/5819)
-- Move lotus mpool clear to lotus-shed (https://github.com/filecoin-project/lotus/pull/5900)
-- Centralize everything on ipfs/go-log/v2 (https://github.com/filecoin-project/lotus/pull/5974)
-- expose NextID from nice market actor interface (https://github.com/filecoin-project/lotus/pull/5850)
-- add available options for perm on error (https://github.com/filecoin-project/lotus/pull/5814)
-- API docs clarification: Document StateSearchMsg replaced message behavior (https://github.com/filecoin-project/lotus/pull/5838)
-- api: Document StateReplay replaced message behavior (https://github.com/filecoin-project/lotus/pull/5840)
-- add godocs to miner objects (https://github.com/filecoin-project/lotus/pull/2184)
-- Add description to the client deal CLI command (https://github.com/filecoin-project/lotus/pull/5999)
-- lint: don't skip builtin (https://github.com/filecoin-project/lotus/pull/5881)
-- use deal duration from actors (https://github.com/filecoin-project/lotus/pull/5270)
-- remote calc winningpost proof (https://github.com/filecoin-project/lotus/pull/5884)
-- packer: other network images (https://github.com/filecoin-project/lotus/pull/5930)
-- Convert the chainstore lock to RW (https://github.com/filecoin-project/lotus/pull/5971)
-- Remove CachedBlockstore (https://github.com/filecoin-project/lotus/pull/5972)
-- remove messagepool CapGasFee duplicate code (https://github.com/filecoin-project/lotus/pull/5992)
-- Add a mining-heartbeat INFO line at every epoch (https://github.com/filecoin-project/lotus/pull/6183)
-- chore(ci): Enable build on RC tags (https://github.com/filecoin-project/lotus/pull/6245)
-- Upgrade nerpa to actor v4 and bump the version to rc4 (https://github.com/filecoin-project/lotus/pull/6249)
-## Fixes
-- return buffers after canceling badger operation (https://github.com/filecoin-project/lotus/pull/5796)
-- avoid holding a lock while calling the View callback (https://github.com/filecoin-project/lotus/pull/5792)
-- storagefsm: Trigger input processing when below limits (https://github.com/filecoin-project/lotus/pull/5801)
-- After importing a previously deleted key, be able to delete it again (https://github.com/filecoin-project/lotus/pull/4653)
-- fix StateManager.Replay on reward actor (https://github.com/filecoin-project/lotus/pull/5804)
-- make sure atomic 64bit fields are 64bit aligned (https://github.com/filecoin-project/lotus/pull/5794)
-- Import secp sigs in paych tests (https://github.com/filecoin-project/lotus/pull/5879)
-- fix ci build-macos (https://github.com/filecoin-project/lotus/pull/5934)
-- Fix creation of remainder account when it's not a multisig (https://github.com/filecoin-project/lotus/pull/5807)
-- Fix fallback chainstore (https://github.com/filecoin-project/lotus/pull/6003)
-- fix 4857: show help for set-addrs (https://github.com/filecoin-project/lotus/pull/5943)
-- fix health report (https://github.com/filecoin-project/lotus/pull/6011)
-
-
-# 1.9.0-rc2 / 2021-04-30
-
-This is an optional Lotus release that introduces various improvements to the sealing, mining, and deal-making processes.
-
-## Highlights
-
-- OpenRPC Support (https://github.com/filecoin-project/lotus/pull/5843)
-- Take latency into account when making interactive deals (https://github.com/filecoin-project/lotus/pull/5876)
-- Update go-commp-utils for >10x faster client commp calculation (https://github.com/filecoin-project/lotus/pull/5892)
-- add `lotus client cancel-retrieval` cmd to lotus CLI (https://github.com/filecoin-project/lotus/pull/5871)
-- add `inspect-deal` command to `lotus client` (https://github.com/filecoin-project/lotus/pull/5833)
-- Local retrieval support (https://github.com/filecoin-project/lotus/pull/5917)
-- go-fil-markets v1.1.9 -> v1.2.5
-  - For a detailed changelog see https://github.com/filecoin-project/go-fil-markets/blob/master/CHANGELOG.md
-- rust-fil-proofs v5.4.1 -> v7
-  - For a detailed changelog see https://github.com/filecoin-project/rust-fil-proofs/blob/master/CHANGELOG.md
-
-## Changes
-- storagefsm: Apply global events even in broken states (https://github.com/filecoin-project/lotus/pull/5962)
-- Default the AlwaysKeepUnsealedCopy flag to true (https://github.com/filecoin-project/lotus/pull/5743)
-- splitstore: compact hotstore prior to garbage collection (https://github.com/filecoin-project/lotus/pull/5778)
-- ipfs-force bootstrapper update (https://github.com/filecoin-project/lotus/pull/5799)
-- better logging when unsealing fails (https://github.com/filecoin-project/lotus/pull/5851)
-- perf: add cache for gas permium estimation (https://github.com/filecoin-project/lotus/pull/5709)
-- backupds: Compact log on restart (https://github.com/filecoin-project/lotus/pull/5875)
-- backupds: Improve truncated log handling (https://github.com/filecoin-project/lotus/pull/5891)
-- State CLI improvements (State CLI improvements)
-- API proxy struct codegen (https://github.com/filecoin-project/lotus/pull/5854)
-- move DI stuff for paychmgr into modules (https://github.com/filecoin-project/lotus/pull/5791)
-- Implement Event observer and Settings for 3rd party dep injection (https://github.com/filecoin-project/lotus/pull/5693)
-- Export developer and network commands for consumption by derivatives of Lotus (https://github.com/filecoin-project/lotus/pull/5864)
-- mock sealer: Simulate randomness sideeffects (https://github.com/filecoin-project/lotus/pull/5805)
-- localstorage: Demote reservation stat error to debug (https://github.com/filecoin-project/lotus/pull/5976)
-- shed command to unpack miner info dumps (https://github.com/filecoin-project/lotus/pull/5800)
-- Add two utils to Lotus-shed (https://github.com/filecoin-project/lotus/pull/5867)
-- add shed election estimate command (https://github.com/filecoin-project/lotus/pull/5092)
-- Add --actor flag in lotus-shed sectors terminate (https://github.com/filecoin-project/lotus/pull/5819)
-- Move lotus mpool clear to lotus-shed (https://github.com/filecoin-project/lotus/pull/5900)
-- Centralize everything on ipfs/go-log/v2 (https://github.com/filecoin-project/lotus/pull/5974)
-- expose NextID from nice market actor interface (https://github.com/filecoin-project/lotus/pull/5850)
-- add available options for perm on error (https://github.com/filecoin-project/lotus/pull/5814)
-- API docs clarification: Document StateSearchMsg replaced message behavior (https://github.com/filecoin-project/lotus/pull/5838)
-- api: Document StateReplay replaced message behavior (https://github.com/filecoin-project/lotus/pull/5840)
-- add godocs to miner objects (https://github.com/filecoin-project/lotus/pull/2184)
-- Add description to the client deal CLI command (https://github.com/filecoin-project/lotus/pull/5999)
-- lint: don't skip builtin (https://github.com/filecoin-project/lotus/pull/5881)
-- use deal duration from actors (https://github.com/filecoin-project/lotus/pull/5270)
-- remote calc winningpost proof (https://github.com/filecoin-project/lotus/pull/5884)
-- packer: other network images (https://github.com/filecoin-project/lotus/pull/5930)
-- Convert the chainstore lock to RW (https://github.com/filecoin-project/lotus/pull/5971)
-- Remove CachedBlockstore (https://github.com/filecoin-project/lotus/pull/5972)
-- remove messagepool CapGasFee duplicate code (https://github.com/filecoin-project/lotus/pull/5992)
-
-## Fixes
-- return buffers after canceling badger operation (https://github.com/filecoin-project/lotus/pull/5796)
-- avoid holding a lock while calling the View callback (https://github.com/filecoin-project/lotus/pull/5792)
-- storagefsm: Trigger input processing when below limits (https://github.com/filecoin-project/lotus/pull/5801)
-- After importing a previously deleted key, be able to delete it again (https://github.com/filecoin-project/lotus/pull/4653)
-- fix StateManager.Replay on reward actor (https://github.com/filecoin-project/lotus/pull/5804)
-- make sure atomic 64bit fields are 64bit aligned (https://github.com/filecoin-project/lotus/pull/5794)
-- Import secp sigs in paych tests (https://github.com/filecoin-project/lotus/pull/5879)
-- fix ci build-macos (https://github.com/filecoin-project/lotus/pull/5934)
-- Fix creation of remainder account when it's not a multisig (https://github.com/filecoin-project/lotus/pull/5807)
-- Fix fallback chainstore (https://github.com/filecoin-project/lotus/pull/6003)
-- fix 4857: show help for set-addrs (https://github.com/filecoin-project/lotus/pull/5943)
-- fix health report (https://github.com/filecoin-project/lotus/pull/6011)
-
 # 1.8.0 / 2021-04-05
 
 This is a mandatory release of Lotus that upgrades the network to version 12, which introduces various performance improvements to the cron processing of the power actor. The network will upgrade at height 712320, which is 2021-04-29T06:00:00Z.

@@ -629,7 +629,7 @@ type FullNode interface {
     // proposal. This method of approval can be used to ensure you only approve
     // exactly the transaction you think you are.
     // It takes the following params: <multisig address>, <proposed message ID>, <proposer address>, <recipient address>, <value to transfer>,
-    // <sender address of the approve msg>, <method to call in the proposed message>, <params to include in the proposed message>
+    // <sender address of the approve msg>, <method to call in the approved message>, <params to include in the proposed message>
     MsigApproveTxnHash(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) //perm:sign
 
     // MsigCancel cancels a previously-proposed multisig message
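
For orientation, a minimal sketch of a caller of this API is shown below. The client handle, addresses, and method number are hypothetical placeholders (they are not taken from the commit); the argument order simply follows the doc comment above.

    // Hypothetical sketch: approve a pending multisig transaction by hash.
    // "api" is assumed to be a connected api.FullNode client; the addresses,
    // txid and params would come from the original proposal.
    mcid, err := api.MsigApproveTxnHash(ctx,
        msigAddr,        // multisig address
        txid,            // proposed message ID
        proposerAddr,    // proposer address
        recipientAddr,   // recipient address
        types.NewInt(0), // value to transfer
        senderAddr,      // sender address of the approve msg
        uint64(0),       // method to call in the approved message (0 = Send)
        nil,             // params to include in the proposed message
    )
    if err != nil {
        return xerrors.Errorf("approving multisig transaction: %w", err)
    }
    fmt.Println("approve message CID:", mcid)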

@@ -45,7 +45,8 @@ const UpgradeNorwegianHeight = 114000
 
 const UpgradeTurboHeight = 193789
 
-const UpgradeHyperdriveHeight = 9999999
+// 2021-06-11T14:30:00Z
+const UpgradeHyperdriveHeight = 321519
 
 func init() {
     policy.SetConsensusMinerMinPower(abi.NewStoragePower(32 << 30))

@@ -3,6 +3,8 @@ package policy
 import (
     "sort"
 
+    "github.com/filecoin-project/go-state-types/big"
+
     "github.com/filecoin-project/go-state-types/abi"
     "github.com/filecoin-project/go-state-types/network"
     "github.com/filecoin-project/lotus/chain/actors"

@@ -367,3 +369,31 @@ func GetDeclarationsMax(nwVer network.Version) int {
         panic("unsupported network version")
     }
 }
+
+func AggregateNetworkFee(nwVer network.Version, aggregateSize int, baseFee abi.TokenAmount) abi.TokenAmount {
+    switch actors.VersionForNetwork(nwVer) {
+
+    case actors.Version0:
+
+        return big.Zero()
+
+    case actors.Version2:
+
+        return big.Zero()
+
+    case actors.Version3:
+
+        return big.Zero()
+
+    case actors.Version4:
+
+        return big.Zero()
+
+    case actors.Version5:
+
+        return miner5.AggregateNetworkFee(aggregateSize, baseFee)
+
+    default:
+        panic("unsupported network version")
+    }
+}
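
As a rough usage sketch (all values below are placeholders, not taken from the commit): a caller that wants the aggregation fee for a batch of proofs passes the epoch's network version, the number of aggregated proofs, and the current base fee.

    // Hypothetical call site for the new policy helper.
    nv := network.Version13
    baseFee := abi.NewTokenAmount(100) // placeholder base fee in attoFIL
    fee := policy.AggregateNetworkFee(nv, 10, baseFee)
    fmt.Println("aggregate network fee:", fee)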

@@ -3,6 +3,8 @@ package policy
 import (
     "sort"
 
+    "github.com/filecoin-project/go-state-types/big"
+
     "github.com/filecoin-project/go-state-types/abi"
     "github.com/filecoin-project/go-state-types/network"
     "github.com/filecoin-project/lotus/chain/actors"

@@ -246,3 +248,18 @@ func GetDeclarationsMax(nwVer network.Version) int {
         panic("unsupported network version")
     }
 }
+
+func AggregateNetworkFee(nwVer network.Version, aggregateSize int, baseFee abi.TokenAmount) abi.TokenAmount {
+    switch actors.VersionForNetwork(nwVer) {
+{{range .versions}}
+    case actors.Version{{.}}:
+{{if (le . 4)}}
+        return big.Zero()
+{{else}}
+        return miner{{.}}.AggregateNetworkFee(aggregateSize, baseFee)
+{{end}}
+{{end}}
+    default:
+        panic("unsupported network version")
+    }
+}

@@ -19,18 +19,18 @@ import (
 var baseFeeUpperBoundFactor = types.NewInt(10)
 
 // CheckMessages performs a set of logic checks for a list of messages, prior to submitting it to the mpool
-func (mp *MessagePool) CheckMessages(protos []*api.MessagePrototype) ([][]api.MessageCheckStatus, error) {
+func (mp *MessagePool) CheckMessages(ctx context.Context, protos []*api.MessagePrototype) ([][]api.MessageCheckStatus, error) {
     flex := make([]bool, len(protos))
     msgs := make([]*types.Message, len(protos))
     for i, p := range protos {
         flex[i] = !p.ValidNonce
         msgs[i] = &p.Message
     }
-    return mp.checkMessages(msgs, false, flex)
+    return mp.checkMessages(ctx, msgs, false, flex)
 }
 
 // CheckPendingMessages performs a set of logical sets for all messages pending from a given actor
-func (mp *MessagePool) CheckPendingMessages(from address.Address) ([][]api.MessageCheckStatus, error) {
+func (mp *MessagePool) CheckPendingMessages(ctx context.Context, from address.Address) ([][]api.MessageCheckStatus, error) {
     var msgs []*types.Message
     mp.lk.Lock()
     mset, ok := mp.pending[from]

@@ -49,12 +49,12 @@ func (mp *MessagePool) CheckPendingMessages(from address.Address) ([][]api.Messa
         return msgs[i].Nonce < msgs[j].Nonce
     })
 
-    return mp.checkMessages(msgs, true, nil)
+    return mp.checkMessages(ctx, msgs, true, nil)
 }
 
 // CheckReplaceMessages performs a set of logical checks for related messages while performing a
 // replacement.
-func (mp *MessagePool) CheckReplaceMessages(replace []*types.Message) ([][]api.MessageCheckStatus, error) {
+func (mp *MessagePool) CheckReplaceMessages(ctx context.Context, replace []*types.Message) ([][]api.MessageCheckStatus, error) {
     msgMap := make(map[address.Address]map[uint64]*types.Message)
     count := 0
 

@@ -94,12 +94,12 @@ func (mp *MessagePool) CheckReplaceMessages(replace []*types.Message) ([][]api.M
         start = end
     }
 
-    return mp.checkMessages(msgs, true, nil)
+    return mp.checkMessages(ctx, msgs, true, nil)
 }
 
 // flexibleNonces should be either nil or of len(msgs), it signifies that message at given index
 // has non-determied nonce at this point
-func (mp *MessagePool) checkMessages(msgs []*types.Message, interned bool, flexibleNonces []bool) (result [][]api.MessageCheckStatus, err error) {
+func (mp *MessagePool) checkMessages(ctx context.Context, msgs []*types.Message, interned bool, flexibleNonces []bool) (result [][]api.MessageCheckStatus, err error) {
     if mp.api.IsLite() {
         return nil, nil
     }

@@ -160,7 +160,7 @@ func (mp *MessagePool) checkMessages(msgs []*types.Message, interned bool, flexi
         } else {
             mp.lk.Unlock()
 
-            stateNonce, err := mp.getStateNonce(m.From, curTs)
+            stateNonce, err := mp.getStateNonce(ctx, m.From, curTs)
             if err != nil {
                 check.OK = false
                 check.Err = fmt.Sprintf("error retrieving state nonce: %s", err.Error())

@@ -193,7 +193,7 @@ func (mp *MessagePool) checkMessages(msgs []*types.Message, interned bool, flexi
 
             balance, ok := balances[m.From]
             if !ok {
-                balance, err = mp.getStateBalance(m.From, curTs)
+                balance, err = mp.getStateBalance(ctx, m.From, curTs)
                 if err != nil {
                     check.OK = false
                     check.Err = fmt.Sprintf("error retrieving state balance: %s", err)

@@ -34,6 +34,7 @@ import (
     "github.com/filecoin-project/lotus/chain/vm"
     "github.com/filecoin-project/lotus/journal"
     "github.com/filecoin-project/lotus/lib/sigs"
+    "github.com/filecoin-project/lotus/metrics"
     "github.com/filecoin-project/lotus/node/modules/dtypes"
 
     "github.com/raulk/clock"

@@ -577,7 +578,7 @@ func (mp *MessagePool) addLocal(ctx context.Context, m *types.SignedMessage) err
     return nil
 }
 
-// verifyMsgBeforeAdd verifies that the message meets the minimum criteria for block inclusio
+// verifyMsgBeforeAdd verifies that the message meets the minimum criteria for block inclusion
 // and whether the message has enough funds to be included in the next 20 blocks.
 // If the message is not valid for block inclusion, it returns an error.
 // For local messages, if the message can be included in the next 20 blocks, it returns true to

@@ -631,6 +632,9 @@ func (mp *MessagePool) verifyMsgBeforeAdd(m *types.SignedMessage, curTs *types.T
 }
 
 func (mp *MessagePool) Push(ctx context.Context, m *types.SignedMessage) (cid.Cid, error) {
+    done := metrics.Timer(ctx, metrics.MpoolPushDuration)
+    defer done()
+
     err := mp.checkMessage(m)
     if err != nil {
         return cid.Undef, err

@@ -697,6 +701,9 @@ func (mp *MessagePool) checkMessage(m *types.SignedMessage) error {
 }
 
 func (mp *MessagePool) Add(ctx context.Context, m *types.SignedMessage) error {
+    done := metrics.Timer(ctx, metrics.MpoolAddDuration)
+    defer done()
+
     err := mp.checkMessage(m)
     if err != nil {
         return err

@@ -752,7 +759,7 @@ func (mp *MessagePool) VerifyMsgSig(m *types.SignedMessage) error {
 }
 
 func (mp *MessagePool) checkBalance(ctx context.Context, m *types.SignedMessage, curTs *types.TipSet) error {
-    balance, err := mp.getStateBalance(m.Message.From, curTs)
+    balance, err := mp.getStateBalance(ctx, m.Message.From, curTs)
     if err != nil {
         return xerrors.Errorf("failed to check sender balance: %s: %w", err, ErrSoftValidationFailure)
     }

@@ -785,7 +792,10 @@ func (mp *MessagePool) checkBalance(ctx context.Context, m *types.SignedMessage,
 }
 
 func (mp *MessagePool) addTs(ctx context.Context, m *types.SignedMessage, curTs *types.TipSet, local, untrusted bool) (bool, error) {
-    snonce, err := mp.getStateNonce(m.Message.From, curTs)
+    done := metrics.Timer(ctx, metrics.MpoolAddTsDuration)
+    defer done()
+
+    snonce, err := mp.getStateNonce(ctx, m.Message.From, curTs)
     if err != nil {
         return false, xerrors.Errorf("failed to look up actor state nonce: %s: %w", err, ErrSoftValidationFailure)
     }

@@ -833,7 +843,7 @@ func (mp *MessagePool) addLoaded(ctx context.Context, m *types.SignedMessage) er
         return xerrors.Errorf("current tipset not loaded")
     }
 
-    snonce, err := mp.getStateNonce(m.Message.From, curTs)
+    snonce, err := mp.getStateNonce(ctx, m.Message.From, curTs)
     if err != nil {
         return xerrors.Errorf("failed to look up actor state nonce: %s: %w", err, ErrSoftValidationFailure)
     }

@@ -885,7 +895,7 @@ func (mp *MessagePool) addLocked(ctx context.Context, m *types.SignedMessage, st
     }
 
     if !ok {
-        nonce, err := mp.getStateNonce(m.Message.From, mp.curTs)
+        nonce, err := mp.getStateNonce(ctx, m.Message.From, mp.curTs)
         if err != nil {
             return xerrors.Errorf("failed to get initial actor nonce: %w", err)
         }

@@ -946,7 +956,7 @@ func (mp *MessagePool) GetActor(_ context.Context, addr address.Address, _ types
 }
 
 func (mp *MessagePool) getNonceLocked(ctx context.Context, addr address.Address, curTs *types.TipSet) (uint64, error) {
-    stateNonce, err := mp.getStateNonce(addr, curTs) // sanity check
+    stateNonce, err := mp.getStateNonce(ctx, addr, curTs) // sanity check
     if err != nil {
         return 0, err
     }

@@ -970,7 +980,10 @@ func (mp *MessagePool) getNonceLocked(ctx context.Context, addr address.Address,
     return stateNonce, nil
 }
 
-func (mp *MessagePool) getStateNonce(addr address.Address, ts *types.TipSet) (uint64, error) {
+func (mp *MessagePool) getStateNonce(ctx context.Context, addr address.Address, ts *types.TipSet) (uint64, error) {
+    done := metrics.Timer(ctx, metrics.MpoolGetNonceDuration)
+    defer done()
+
     act, err := mp.api.GetActorAfter(addr, ts)
     if err != nil {
         return 0, err

@@ -979,7 +992,10 @@ func (mp *MessagePool) getStateNonce(addr address.Address, ts *types.TipSet) (ui
     return act.Nonce, nil
 }
 
-func (mp *MessagePool) getStateBalance(addr address.Address, ts *types.TipSet) (types.BigInt, error) {
+func (mp *MessagePool) getStateBalance(ctx context.Context, addr address.Address, ts *types.TipSet) (types.BigInt, error) {
+    done := metrics.Timer(ctx, metrics.MpoolGetBalanceDuration)
+    defer done()
+
     act, err := mp.api.GetActorAfter(addr, ts)
     if err != nil {
         return types.EmptyInt, err
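
The `done := metrics.Timer(ctx, ...); defer done()` pairing above recurs in Push, Add, addTs, getStateNonce and getStateBalance. A plausible reading of that helper is sketched below; this is an assumption about its behaviour for illustration, not the actual lotus implementation.

    // Assumed shape of the timing helper used in these hunks: capture a start
    // time and return a stop function that records the elapsed duration
    // against the supplied OpenCensus measure when invoked (hence the defer).
    func Timer(ctx context.Context, m *stats.Float64Measure) func() {
        start := time.Now()
        return func() {
            stats.Record(ctx, m.M(float64(time.Since(start).Milliseconds())))
        }
    }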

@@ -507,6 +507,12 @@ func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubs
         return mv.validateLocalMessage(ctx, msg)
     }
 
+    start := time.Now()
+    defer func() {
+        ms := time.Now().Sub(start).Microseconds()
+        stats.Record(ctx, metrics.MessageValidationDuration.M(float64(ms)/1000))
+    }()
+
     stats.Record(ctx, metrics.MessageReceived.M(1))
     m, err := types.DecodeSignedMessage(msg.Message.GetData())
     if err != nil {

@@ -538,6 +544,12 @@ func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubs
             return pubsub.ValidationReject
         }
     }
+
+    ctx, _ = tag.New(
+        ctx,
+        tag.Upsert(metrics.MsgValid, "true"),
+    )
+
     stats.Record(ctx, metrics.MessageValidationSuccess.M(1))
     return pubsub.ValidationAccept
 }

@@ -547,6 +559,13 @@ func (mv *MessageValidator) validateLocalMessage(ctx context.Context, msg *pubsu
         ctx,
         tag.Upsert(metrics.Local, "true"),
     )
+
+    start := time.Now()
+    defer func() {
+        ms := time.Now().Sub(start).Microseconds()
+        stats.Record(ctx, metrics.MessageValidationDuration.M(float64(ms)/1000))
+    }()
+
     // do some lightweight validation
     stats.Record(ctx, metrics.MessagePublished.M(1))
 

@@ -581,6 +600,11 @@ func (mv *MessageValidator) validateLocalMessage(ctx context.Context, msg *pubsu
         return pubsub.ValidationIgnore
     }
+
+    ctx, _ = tag.New(
+        ctx,
+        tag.Upsert(metrics.MsgValid, "true"),
+    )
 
     stats.Record(ctx, metrics.MessageValidationSuccess.M(1))
     return pubsub.ValidationAccept
 }

@@ -536,7 +536,7 @@ var ChainListCmd = &cli.Command{
     Aliases: []string{"love"},
     Usage:   "View a segment of the chain",
     Flags: []cli.Flag{
-        &cli.Uint64Flag{Name: "height"},
+        &cli.Uint64Flag{Name: "height", DefaultText: "current head"},
         &cli.IntFlag{Name: "count", Value: 30},
         &cli.StringFlag{
             Name: "format",

cmd/lotus-shed/export-car.go (new file, 103 lines)
@@ -0,0 +1,103 @@
package main

import (
    "fmt"
    "io"
    "os"

    "github.com/ipfs/go-blockservice"
    "github.com/ipfs/go-cid"
    offline "github.com/ipfs/go-ipfs-exchange-offline"
    format "github.com/ipfs/go-ipld-format"
    "github.com/ipfs/go-merkledag"
    "github.com/ipld/go-car"
    "github.com/urfave/cli/v2"
    "golang.org/x/xerrors"

    lcli "github.com/filecoin-project/lotus/cli"
    "github.com/filecoin-project/lotus/node/repo"
)

func carWalkFunc(nd format.Node) (out []*format.Link, err error) {
    for _, link := range nd.Links() {
        if link.Cid.Prefix().Codec == cid.FilCommitmentSealed || link.Cid.Prefix().Codec == cid.FilCommitmentUnsealed {
            continue
        }
        out = append(out, link)
    }
    return out, nil
}

var exportCarCmd = &cli.Command{
    Name:        "export-car",
    Description: "Export a car from repo (requires node to be offline)",
    Flags: []cli.Flag{
        &cli.StringFlag{
            Name:  "repo",
            Value: "~/.lotus",
        },
    },
    Action: func(cctx *cli.Context) error {
        if cctx.Args().Len() != 2 {
            return lcli.ShowHelp(cctx, fmt.Errorf("must specify file name and object"))
        }

        outfile := cctx.Args().First()
        var roots []cid.Cid
        for _, arg := range cctx.Args().Tail() {
            c, err := cid.Decode(arg)
            if err != nil {
                return err
            }
            roots = append(roots, c)
        }

        ctx := lcli.ReqContext(cctx)

        r, err := repo.NewFS(cctx.String("repo"))
        if err != nil {
            return xerrors.Errorf("opening fs repo: %w", err)
        }

        exists, err := r.Exists()
        if err != nil {
            return err
        }
        if !exists {
            return xerrors.Errorf("lotus repo doesn't exist")
        }

        lr, err := r.Lock(repo.FullNode)
        if err != nil {
            return err
        }
        defer lr.Close() //nolint:errcheck

        fi, err := os.Create(outfile)
        if err != nil {
            return xerrors.Errorf("opening the output file: %w", err)
        }

        defer fi.Close() //nolint:errcheck

        bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore)
        if err != nil {
            return fmt.Errorf("failed to open blockstore: %w", err)
        }

        defer func() {
            if c, ok := bs.(io.Closer); ok {
                if err := c.Close(); err != nil {
                    log.Warnf("failed to close blockstore: %s", err)
                }
            }
        }()

        dag := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs)))
        err = car.WriteCarWithWalker(ctx, dag, roots, fi, carWalkFunc)
        if err != nil {
            return err
        }
        return nil
    },
}
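
For context, a hypothetical invocation of the new command would look like `lotus-shed export-car --repo ~/.lotus /tmp/export.car <root-cid>`. Per the code above, it takes exactly two arguments (the output file and the object CID to export), requires the repo lock (so the node must be offline), and walks the DAG with carWalkFunc, which deliberately skips FilCommitmentSealed/FilCommitmentUnsealed links so sector commitment data is not pulled into the CAR.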

@@ -43,6 +43,7 @@ func main() {
         minerCmd,
         mpoolStatsCmd,
         exportChainCmd,
+        exportCarCmd,
         consensusCmd,
         storageStatsCmd,
         syncCmd,

@@ -58,6 +59,7 @@ func main() {
         signaturesCmd,
         actorCmd,
         minerTypesCmd,
+        minerMultisigsCmd,
     }
 
     app := &cli.App{

cmd/lotus-shed/miner-multisig.go (new file, 388 lines)
@@ -0,0 +1,388 @@
package main

import (
    "bytes"
    "fmt"
    "strconv"

    "github.com/filecoin-project/go-state-types/abi"
    miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"

    msig5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/multisig"

    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/go-state-types/big"
    "github.com/filecoin-project/lotus/build"
    "github.com/filecoin-project/lotus/chain/actors"
    "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
    "github.com/filecoin-project/lotus/chain/types"
    lcli "github.com/filecoin-project/lotus/cli"
    "github.com/urfave/cli/v2"
    "golang.org/x/xerrors"
)

var minerMultisigsCmd = &cli.Command{
    Name:        "miner-multisig",
    Description: "a collection of utilities for using multisigs as owner addresses of miners",
    Subcommands: []*cli.Command{
        mmProposeWithdrawBalance,
        mmApproveWithdrawBalance,
        mmProposeChangeOwner,
        mmApproveChangeOwner,
    },
    Flags: []cli.Flag{
        &cli.StringFlag{
            Name:     "from",
            Usage:    "specify address to send message from",
            Required: true,
        },
        &cli.StringFlag{
            Name:     "multisig",
            Usage:    "specify multisig that will receive the message",
            Required: true,
        },
        &cli.StringFlag{
            Name:     "miner",
            Usage:    "specify miner being acted upon",
            Required: true,
        },
    },
}

var mmProposeWithdrawBalance = &cli.Command{
    Name:      "propose-withdraw",
    Usage:     "Propose to withdraw FIL from the miner",
    ArgsUsage: "[amount]",
    Action: func(cctx *cli.Context) error {
        if !cctx.Args().Present() {
            return fmt.Errorf("must pass amount to withdraw")
        }

        api, closer, err := lcli.GetFullNodeAPI(cctx)
        if err != nil {
            return err
        }
        defer closer()

        ctx := lcli.ReqContext(cctx)

        multisigAddr, sender, minerAddr, err := getInputs(cctx)
        if err != nil {
            return err
        }

        val, err := types.ParseFIL(cctx.Args().First())
        if err != nil {
            return err
        }

        sp, err := actors.SerializeParams(&miner5.WithdrawBalanceParams{
            AmountRequested: abi.TokenAmount(val),
        })
        if err != nil {
            return err
        }

        pcid, err := api.MsigPropose(ctx, multisigAddr, minerAddr, big.Zero(), sender, uint64(miner.Methods.WithdrawBalance), sp)
        if err != nil {
            return xerrors.Errorf("proposing message: %w", err)
        }

        fmt.Fprintln(cctx.App.Writer, "Propose Message CID:", pcid)

        // wait for it to get mined into a block
        wait, err := api.StateWaitMsg(ctx, pcid, build.MessageConfidence)
        if err != nil {
            return err
        }

        // check it executed successfully
        if wait.Receipt.ExitCode != 0 {
            fmt.Fprintln(cctx.App.Writer, "Propose owner change tx failed!")
            return err
        }

        var retval msig5.ProposeReturn
        if err := retval.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)); err != nil {
            return fmt.Errorf("failed to unmarshal propose return value: %w", err)
        }

        fmt.Printf("Transaction ID: %d\n", retval.TxnID)
        if retval.Applied {
            fmt.Printf("Transaction was executed during propose\n")
            fmt.Printf("Exit Code: %d\n", retval.Code)
            fmt.Printf("Return Value: %x\n", retval.Ret)
        }

        return nil
    },
}

var mmApproveWithdrawBalance = &cli.Command{
    Name:      "approve-withdraw",
    Usage:     "Approve to withdraw FIL from the miner",
    ArgsUsage: "[amount txnId proposer]",
    Action: func(cctx *cli.Context) error {
        if cctx.NArg() != 3 {
            return fmt.Errorf("must pass amount, txn Id, and proposer address")
        }

        api, closer, err := lcli.GetFullNodeAPI(cctx)
        if err != nil {
            return err
        }
        defer closer()

        ctx := lcli.ReqContext(cctx)

        multisigAddr, sender, minerAddr, err := getInputs(cctx)
        if err != nil {
            return err
        }

        val, err := types.ParseFIL(cctx.Args().First())
        if err != nil {
            return err
        }

        sp, err := actors.SerializeParams(&miner5.WithdrawBalanceParams{
            AmountRequested: abi.TokenAmount(val),
        })
        if err != nil {
            return err
        }

        txid, err := strconv.ParseUint(cctx.Args().Get(1), 10, 64)
        if err != nil {
            return err
        }

        proposer, err := address.NewFromString(cctx.Args().Get(2))
        if err != nil {
            return err
        }

        acid, err := api.MsigApproveTxnHash(ctx, multisigAddr, txid, proposer, minerAddr, big.Zero(), sender, uint64(miner.Methods.WithdrawBalance), sp)
        if err != nil {
            return xerrors.Errorf("approving message: %w", err)
        }

        fmt.Fprintln(cctx.App.Writer, "Approve Message CID:", acid)

        // wait for it to get mined into a block
        wait, err := api.StateWaitMsg(ctx, acid, build.MessageConfidence)
        if err != nil {
            return err
        }

        // check it executed successfully
        if wait.Receipt.ExitCode != 0 {
            fmt.Fprintln(cctx.App.Writer, "Approve owner change tx failed!")
            return err
        }

        var retval msig5.ApproveReturn
        if err := retval.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)); err != nil {
            return fmt.Errorf("failed to unmarshal approve return value: %w", err)
        }

        if retval.Applied {
            fmt.Printf("Transaction was executed with the approve\n")
            fmt.Printf("Exit Code: %d\n", retval.Code)
            fmt.Printf("Return Value: %x\n", retval.Ret)
        } else {
            fmt.Println("Transaction was approved, but not executed")
        }
        return nil
    },
}

var mmProposeChangeOwner = &cli.Command{
    Name:      "propose-change-owner",
    Usage:     "Propose an owner address change",
    ArgsUsage: "[newOwner]",
    Action: func(cctx *cli.Context) error {
        if !cctx.Args().Present() {
            return fmt.Errorf("must pass new owner address")
        }

        api, closer, err := lcli.GetFullNodeAPI(cctx)
        if err != nil {
            return err
        }
        defer closer()

        ctx := lcli.ReqContext(cctx)

        multisigAddr, sender, minerAddr, err := getInputs(cctx)
        if err != nil {
            return err
        }

        na, err := address.NewFromString(cctx.Args().First())
        if err != nil {
            return err
        }

        newAddr, err := api.StateLookupID(ctx, na, types.EmptyTSK)
        if err != nil {
            return err
        }

        mi, err := api.StateMinerInfo(ctx, minerAddr, types.EmptyTSK)
        if err != nil {
            return err
        }

        if mi.Owner == newAddr {
            return fmt.Errorf("owner address already set to %s", na)
        }

        sp, err := actors.SerializeParams(&newAddr)
        if err != nil {
            return xerrors.Errorf("serializing params: %w", err)
        }

        pcid, err := api.MsigPropose(ctx, multisigAddr, minerAddr, big.Zero(), sender, uint64(miner.Methods.ChangeOwnerAddress), sp)
        if err != nil {
            return xerrors.Errorf("proposing message: %w", err)
        }

        fmt.Fprintln(cctx.App.Writer, "Propose Message CID:", pcid)

        // wait for it to get mined into a block
        wait, err := api.StateWaitMsg(ctx, pcid, build.MessageConfidence)
        if err != nil {
            return err
        }

        // check it executed successfully
        if wait.Receipt.ExitCode != 0 {
            fmt.Fprintln(cctx.App.Writer, "Propose owner change tx failed!")
            return err
        }

        var retval msig5.ProposeReturn
        if err := retval.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)); err != nil {
            return fmt.Errorf("failed to unmarshal propose return value: %w", err)
        }

        fmt.Printf("Transaction ID: %d\n", retval.TxnID)
        if retval.Applied {
            fmt.Printf("Transaction was executed during propose\n")
            fmt.Printf("Exit Code: %d\n", retval.Code)
            fmt.Printf("Return Value: %x\n", retval.Ret)
        }
        return nil
    },
}

var mmApproveChangeOwner = &cli.Command{
    Name:      "approve-change-owner",
    Usage:     "Approve an owner address change",
    ArgsUsage: "[newOwner txnId proposer]",
    Action: func(cctx *cli.Context) error {
        if cctx.NArg() != 3 {
            return fmt.Errorf("must pass new owner address, txn Id, and proposer address")
        }

        api, closer, err := lcli.GetFullNodeAPI(cctx)
        if err != nil {
            return err
        }
        defer closer()

        ctx := lcli.ReqContext(cctx)

        multisigAddr, sender, minerAddr, err := getInputs(cctx)
        if err != nil {
            return err
        }

        na, err := address.NewFromString(cctx.Args().First())
        if err != nil {
            return err
        }

        newAddr, err := api.StateLookupID(ctx, na, types.EmptyTSK)
        if err != nil {
            return err
        }

        txid, err := strconv.ParseUint(cctx.Args().Get(1), 10, 64)
        if err != nil {
            return err
        }

        proposer, err := address.NewFromString(cctx.Args().Get(2))
        if err != nil {
            return err
        }

        mi, err := api.StateMinerInfo(ctx, minerAddr, types.EmptyTSK)
        if err != nil {
            return err
        }

        if mi.Owner == newAddr {
            return fmt.Errorf("owner address already set to %s", na)
        }

        sp, err := actors.SerializeParams(&newAddr)
        if err != nil {
            return xerrors.Errorf("serializing params: %w", err)
        }

        acid, err := api.MsigApproveTxnHash(ctx, multisigAddr, txid, proposer, minerAddr, big.Zero(), sender, uint64(miner.Methods.ChangeOwnerAddress), sp)
        if err != nil {
            return xerrors.Errorf("approving message: %w", err)
        }

        fmt.Fprintln(cctx.App.Writer, "Approve Message CID:", acid)

        // wait for it to get mined into a block
        wait, err := api.StateWaitMsg(ctx, acid, build.MessageConfidence)
        if err != nil {
            return err
        }

        // check it executed successfully
        if wait.Receipt.ExitCode != 0 {
            fmt.Fprintln(cctx.App.Writer, "Approve owner change tx failed!")
            return err
        }

        var retval msig5.ApproveReturn
        if err := retval.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)); err != nil {
            return fmt.Errorf("failed to unmarshal approve return value: %w", err)
        }

        if retval.Applied {
            fmt.Printf("Transaction was executed with the approve\n")
            fmt.Printf("Exit Code: %d\n", retval.Code)
            fmt.Printf("Return Value: %x\n", retval.Ret)
        } else {
            fmt.Println("Transaction was approved, but not executed")
        }
        return nil
    },
}

func getInputs(cctx *cli.Context) (address.Address, address.Address, address.Address, error) {
    multisigAddr, err := address.NewFromString(cctx.String("multisig"))
    if err != nil {
        return address.Undef, address.Undef, address.Undef, err
    }

    sender, err := address.NewFromString(cctx.String("from"))
    if err != nil {
        return address.Undef, address.Undef, address.Undef, err
    }

    minerAddr, err := address.NewFromString(cctx.String("miner"))
    if err != nil {
        return address.Undef, address.Undef, address.Undef, err
    }

    return multisigAddr, sender, minerAddr, nil
}
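
As a usage illustration (addresses, amounts and IDs below are hypothetical), a withdrawal driven by an owner multisig is a two-step flow: one signer runs `lotus-shed miner-multisig propose-withdraw --from <signerAddr> --multisig <msigAddr> --miner <minerId> 1fil`, which prints the propose message CID and the transaction ID; another signer then runs `lotus-shed miner-multisig approve-withdraw --from <otherSignerAddr> --multisig <msigAddr> --miner <minerId> 1fil <txnId> <proposerAddr>`. Because approval goes through MsigApproveTxnHash, the amount and proposer supplied to approve-withdraw must match the original proposal exactly or the parameter hash will not line up. The change-owner pair (propose-change-owner / approve-change-owner) follows the same pattern with a new owner address in place of the amount.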

@@ -295,6 +295,7 @@ var stateList = []stateMeta{
     {col: color.FgYellow, state: sealing.PreCommitBatchWait},
     {col: color.FgYellow, state: sealing.WaitSeed},
     {col: color.FgYellow, state: sealing.Committing},
+    {col: color.FgYellow, state: sealing.CommitFinalize},
     {col: color.FgYellow, state: sealing.SubmitCommit},
     {col: color.FgYellow, state: sealing.CommitWait},
     {col: color.FgYellow, state: sealing.SubmitCommitAggregate},

@@ -315,6 +316,7 @@ var stateList = []stateMeta{
     {col: color.FgRed, state: sealing.PreCommitFailed},
     {col: color.FgRed, state: sealing.ComputeProofFailed},
     {col: color.FgRed, state: sealing.CommitFailed},
+    {col: color.FgRed, state: sealing.CommitFinalizeFailed},
     {col: color.FgRed, state: sealing.PackingFailed},
     {col: color.FgRed, state: sealing.FinalizeFailed},
     {col: color.FgRed, state: sealing.Faulty},

@@ -2505,7 +2505,7 @@ using both transaction ID and a hash of the parameters used in the
 proposal. This method of approval can be used to ensure you only approve
 exactly the transaction you think you are.
 It takes the following params: <multisig address>, <proposed message ID>, <proposer address>, <recipient address>, <value to transfer>,
-<sender address of the approve msg>, <method to call in the proposed message>, <params to include in the proposed message>
+<sender address of the approve msg>, <method to call in the approved message>, <params to include in the proposed message>
 
 
 Perms: sign

@@ -25,7 +25,6 @@ We're happy to announce Lotus X.Y.Z...
 First steps:
 
 - [ ] Fork a new branch (`release/vX.Y.Z`) from `master` and make any further release related changes to this branch. If any "non-trivial" changes get added to the release, uncheck all the checkboxes and return to this stage.
-- [ ] Prep the changelog using `scripts/mkreleaselog`, and add it to `CHANGELOG.md`
 - [ ] Bump the version in `version.go` in the `master` branch to `vX.(Y+1).0-dev`.
 
 Prepping an RC:
@ -93,7 +92,7 @@ Testing an RC:
|
|||||||
- [ ] Final preparation
|
- [ ] Final preparation
|
||||||
- [ ] Verify that version string in [`version.go`](https://github.com/ipfs/go-ipfs/tree/master/version.go) has been updated.
|
- [ ] Verify that version string in [`version.go`](https://github.com/ipfs/go-ipfs/tree/master/version.go) has been updated.
|
||||||
- [ ] Ensure that [CHANGELOG.md](https://github.com/filecoin-project/lotus/blob/master/CHANGELOG.md) is up to date
|
- [ ] Ensure that [CHANGELOG.md](https://github.com/filecoin-project/lotus/blob/master/CHANGELOG.md) is up to date
|
||||||
- [ ] Ensure that [README.md](https://github.com/filecoin-project/lotus/blob/master/README.md) is up to date
|
- [ ] Prep the changelog using `scripts/mkreleaselog`, and add it to `CHANGELOG.md`
|
||||||
- [ ] Merge `release-vX.Y.Z` into the `releases` branch.
|
- [ ] Merge `release-vX.Y.Z` into the `releases` branch.
|
||||||
- [ ] Tag this merge commit (on the `releases` branch) with `vX.Y.Z`
|
- [ ] Tag this merge commit (on the `releases` branch) with `vX.Y.Z`
|
||||||
- [ ] Cut the release [here](https://github.com/filecoin-project/lotus/releases/new?prerelease=true&target=releases).
|
- [ ] Cut the release [here](https://github.com/filecoin-project/lotus/releases/new?prerelease=true&target=releases).
|
||||||
|
extern/filecoin-ffi (vendored submodule, 2 changes)
@@ -1 +1 @@
-Subproject commit 8b97bd8230b77bd32f4f27e4766a6d8a03b4e801
+Subproject commit d2e3aa7d61501d69bed6e898de13d1312b021e62
extern/sector-storage/ffiwrapper/sealer_test.go (vendored, 85 changes)
@@ -31,6 +31,7 @@ import (
 	"github.com/filecoin-project/specs-storage/storage"
 
 	ffi "github.com/filecoin-project/filecoin-ffi"
+	"github.com/filecoin-project/filecoin-ffi/generated"
 
 	"github.com/filecoin-project/lotus/chain/actors/policy"
 	"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper/basicfs"
@@ -853,3 +854,87 @@ func TestAddPiece512MPadded(t *testing.T) {
 
 	require.Equal(t, "baga6ea4seaqonenxyku4o7hr5xkzbqsceipf6xgli3on54beqbk6k246sbooobq", c.PieceCID.String())
 }
+
+func setupLogger(t *testing.T) *bytes.Buffer {
+	_ = os.Setenv("RUST_LOG", "info")
+
+	var bb bytes.Buffer
+	r, w, err := os.Pipe()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	go func() {
+		_, _ = io.Copy(&bb, r)
+		runtime.KeepAlive(w)
+	}()
+
+	resp := generated.FilInitLogFd(int32(w.Fd()))
+	resp.Deref()
+
+	defer generated.FilDestroyInitLogFdResponse(resp)
+
+	if resp.StatusCode != generated.FCPResponseStatusFCPNoError {
+		t.Fatal(generated.RawString(resp.ErrorMsg).Copy())
+	}
+
+	return &bb
+}
+
+func TestMulticoreSDR(t *testing.T) {
+	if os.Getenv("TEST_RUSTPROOFS_LOGS") != "1" {
+		t.Skip("skipping test without TEST_RUSTPROOFS_LOGS=1")
+	}
+
+	rustLogger := setupLogger(t)
+
+	getGrothParamFileAndVerifyingKeys(sectorSize)
+
+	dir, err := ioutil.TempDir("", "sbtest")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	miner := abi.ActorID(123)
+
+	sp := &basicfs.Provider{
+		Root: dir,
+	}
+	sb, err := New(sp)
+	if err != nil {
+		t.Fatalf("%+v", err)
+	}
+
+	cleanup := func() {
+		if t.Failed() {
+			fmt.Printf("not removing %s\n", dir)
+			return
+		}
+		if err := os.RemoveAll(dir); err != nil {
+			t.Error(err)
+		}
+	}
+	defer cleanup()
+
+	si := storage.SectorRef{
+		ID:        abi.SectorID{Miner: miner, Number: 1},
+		ProofType: sealProofType,
+	}
+
+	s := seal{ref: si}
+
+	// check multicore
+	_ = os.Setenv("FIL_PROOFS_USE_MULTICORE_SDR", "1")
+	rustLogger.Reset()
+	s.precommit(t, sb, si, func() {})
+
+	ok := false
+	for _, s := range strings.Split(rustLogger.String(), "\n") {
+		if strings.Contains(s, "create_label::multi") {
+			ok = true
+			break
+		}
+	}
+
+	require.True(t, ok)
+}
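Note on running the new check locally: TestMulticoreSDR is opt-in and skips itself unless TEST_RUSTPROOFS_LOGS=1 is set; it then forces FIL_PROOFS_USE_MULTICORE_SDR=1, runs a precommit, and asserts that the captured rust-fil-proofs output mentions create_label::multi. Something like `TEST_RUSTPROOFS_LOGS=1 go test -run TestMulticoreSDR ./extern/sector-storage/ffiwrapper/` should exercise it, assuming the Groth parameters for the test sector size are already fetched.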
extern/storage-sealing/commit_batch.go (vendored, 137 changes)
@@ -7,6 +7,10 @@ import (
 	"sync"
 	"time"
 
+	"github.com/filecoin-project/go-state-types/network"
+
+	"github.com/filecoin-project/lotus/chain/actors"
+
 	"github.com/ipfs/go-cid"
 	"golang.org/x/xerrors"
 
@@ -23,23 +27,28 @@ import (
 	"github.com/filecoin-project/lotus/chain/actors/policy"
 	"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
 	"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
+	"github.com/filecoin-project/lotus/node/config"
 )
 
 const arp = abi.RegisteredAggregationProof_SnarkPackV1
 
+//go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_commit_batcher.go -package=mocks . CommitBatcherApi
+
 type CommitBatcherApi interface {
 	SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error)
 	StateMinerInfo(context.Context, address.Address, TipSetToken) (miner.MinerInfo, error)
 	ChainHead(ctx context.Context) (TipSetToken, abi.ChainEpoch, error)
+	ChainBaseFee(context.Context, TipSetToken) (abi.TokenAmount, error)
 
 	StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok TipSetToken) (*miner.SectorPreCommitOnChainInfo, error)
 	StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, TipSetToken) (big.Int, error)
+	StateNetworkVersion(ctx context.Context, tok TipSetToken) (network.Version, error)
 }
 
 type AggregateInput struct {
-	spt   abi.RegisteredSealProof
-	info  proof5.AggregateSealVerifyInfo
-	proof []byte
+	Spt   abi.RegisteredSealProof
+	Info  proof5.AggregateSealVerifyInfo
+	Proof []byte
 }
 
 type CommitBatcher struct {
@@ -47,20 +56,20 @@ type CommitBatcher struct {
 	maddr     address.Address
 	mctx      context.Context
 	addrSel   AddrSel
-	feeCfg    FeeConfig
+	feeCfg    config.MinerFeeConfig
 	getConfig GetSealingConfigFunc
 	prover    ffiwrapper.Prover
 
-	deadlines map[abi.SectorNumber]time.Time
+	cutoffs map[abi.SectorNumber]time.Time
 	todo      map[abi.SectorNumber]AggregateInput
 	waiting   map[abi.SectorNumber][]chan sealiface.CommitBatchRes
 
 	notify, stop, stopped chan struct{}
 	force                 chan chan []sealiface.CommitBatchRes
 	lk                    sync.Mutex
 }
 
-func NewCommitBatcher(mctx context.Context, maddr address.Address, api CommitBatcherApi, addrSel AddrSel, feeCfg FeeConfig, getConfig GetSealingConfigFunc, prov ffiwrapper.Prover) *CommitBatcher {
+func NewCommitBatcher(mctx context.Context, maddr address.Address, api CommitBatcherApi, addrSel AddrSel, feeCfg config.MinerFeeConfig, getConfig GetSealingConfigFunc, prov ffiwrapper.Prover) *CommitBatcher {
 	b := &CommitBatcher{
 		api:       api,
 		maddr:     maddr,
@@ -70,9 +79,9 @@ func NewCommitBatcher(mctx context.Context, maddr address.Address, api CommitBat
 		getConfig: getConfig,
 		prover:    prov,
 
-		deadlines: map[abi.SectorNumber]time.Time{},
+		cutoffs: map[abi.SectorNumber]time.Time{},
 		todo:      map[abi.SectorNumber]AggregateInput{},
 		waiting:   map[abi.SectorNumber][]chan sealiface.CommitBatchRes{},
 
 		notify: make(chan struct{}, 1),
 		force:  make(chan chan []sealiface.CommitBatchRes),
@@ -132,30 +141,30 @@ func (b *CommitBatcher) batchWait(maxWait, slack time.Duration) <-chan time.Time
 		return nil
 	}
 
-	var deadline time.Time
+	var cutoff time.Time
 	for sn := range b.todo {
-		sectorDeadline := b.deadlines[sn]
-		if deadline.IsZero() || (!sectorDeadline.IsZero() && sectorDeadline.Before(deadline)) {
-			deadline = sectorDeadline
+		sectorCutoff := b.cutoffs[sn]
+		if cutoff.IsZero() || (!sectorCutoff.IsZero() && sectorCutoff.Before(cutoff)) {
+			cutoff = sectorCutoff
 		}
 	}
 	for sn := range b.waiting {
-		sectorDeadline := b.deadlines[sn]
-		if deadline.IsZero() || (!sectorDeadline.IsZero() && sectorDeadline.Before(deadline)) {
-			deadline = sectorDeadline
+		sectorCutoff := b.cutoffs[sn]
+		if cutoff.IsZero() || (!sectorCutoff.IsZero() && sectorCutoff.Before(cutoff)) {
+			cutoff = sectorCutoff
 		}
 	}
 
-	if deadline.IsZero() {
+	if cutoff.IsZero() {
 		return time.After(maxWait)
 	}
 
-	deadline = deadline.Add(-slack)
-	if deadline.Before(now) {
+	cutoff = cutoff.Add(-slack)
+	if cutoff.Before(now) {
 		return time.After(time.Nanosecond) // can't return 0
 	}
 
-	wait := deadline.Sub(now)
+	wait := cutoff.Sub(now)
 	if wait > maxWait {
 		wait = maxWait
 	}
@@ -208,7 +217,7 @@ func (b *CommitBatcher) maybeStartBatch(notif, after bool) ([]sealiface.CommitBa
 
 			delete(b.waiting, sn)
 			delete(b.todo, sn)
-			delete(b.deadlines, sn)
+			delete(b.cutoffs, sn)
 		}
 	}
 
@@ -239,6 +248,8 @@ func (b *CommitBatcher) processBatch(cfg sealiface.Config) ([]sealiface.CommitBa
 			break
 		}
 
+		res.Sectors = append(res.Sectors, id)
+
 		sc, err := b.getSectorCollateral(id, tok)
 		if err != nil {
 			res.FailedSectors[id] = err.Error()
@@ -247,9 +258,8 @@
 
 		collateral = big.Add(collateral, sc)
 
-		res.Sectors = append(res.Sectors, id)
 		params.SectorNumbers.Set(uint64(id))
-		infos = append(infos, p.info)
+		infos = append(infos, p.Info)
 	}
 
 	sort.Slice(infos, func(i, j int) bool {
@@ -257,7 +267,7 @@
 	})
 
 	for _, info := range infos {
-		proofs = append(proofs, b.todo[info.Number].proof)
+		proofs = append(proofs, b.todo[info.Number].Proof)
 	}
 
 	mid, err := address.IDFromAddress(b.maddr)
@@ -267,7 +277,7 @@
 
 	params.AggregateProof, err = b.prover.AggregateSealProofs(proof5.AggregateSealVerifyProofAndInfos{
 		Miner:          abi.ActorID(mid),
-		SealProof:      b.todo[infos[0].Number].spt,
+		SealProof:      b.todo[infos[0].Number].Spt,
 		AggregateProof: arp,
 		Infos:          infos,
 	}, proofs)
@@ -285,14 +295,29 @@
 		return []sealiface.CommitBatchRes{res}, xerrors.Errorf("couldn't get miner info: %w", err)
 	}
 
-	goodFunds := big.Add(b.feeCfg.MaxCommitGasFee, collateral)
+	maxFee := b.feeCfg.MaxCommitBatchGasFee.FeeForSectors(len(infos))
+
+	bf, err := b.api.ChainBaseFee(b.mctx, tok)
+	if err != nil {
+		return []sealiface.CommitBatchRes{res}, xerrors.Errorf("couldn't get base fee: %w", err)
+	}
+
+	nv, err := b.api.StateNetworkVersion(b.mctx, tok)
+	if err != nil {
+		log.Errorf("getting network version: %s", err)
+		return []sealiface.CommitBatchRes{res}, xerrors.Errorf("getting network version: %s", err)
+	}
+
+	aggFee := policy.AggregateNetworkFee(nv, len(infos), bf)
+
+	goodFunds := big.Add(maxFee, big.Add(collateral, aggFee))
 
 	from, _, err := b.addrSel(b.mctx, mi, api.CommitAddr, goodFunds, collateral)
 	if err != nil {
 		return []sealiface.CommitBatchRes{res}, xerrors.Errorf("no good address found: %w", err)
 	}
 
-	mcid, err := b.api.SendMsg(b.mctx, from, b.maddr, miner.Methods.ProveCommitAggregate, collateral, b.feeCfg.MaxCommitGasFee, enc.Bytes())
+	mcid, err := b.api.SendMsg(b.mctx, from, b.maddr, miner.Methods.ProveCommitAggregate, collateral, maxFee, enc.Bytes())
 	if err != nil {
 		return []sealiface.CommitBatchRes{res}, xerrors.Errorf("sending message failed: %w", err)
 	}
@@ -340,7 +365,7 @@ func (b *CommitBatcher) processSingle(mi miner.MinerInfo, sn abi.SectorNumber, i
 	enc := new(bytes.Buffer)
 	params := &miner.ProveCommitSectorParams{
 		SectorNumber: sn,
-		Proof:        info.proof,
+		Proof:        info.Proof,
 	}
 
 	if err := params.MarshalCBOR(enc); err != nil {
@@ -352,14 +377,14 @@
 		return cid.Undef, err
 	}
 
-	goodFunds := big.Add(collateral, b.feeCfg.MaxCommitGasFee)
+	goodFunds := big.Add(collateral, big.Int(b.feeCfg.MaxCommitGasFee))
 
 	from, _, err := b.addrSel(b.mctx, mi, api.CommitAddr, goodFunds, collateral)
 	if err != nil {
 		return cid.Undef, xerrors.Errorf("no good address to send commit message from: %w", err)
 	}
 
-	mcid, err := b.api.SendMsg(b.mctx, from, b.maddr, miner.Methods.ProveCommitSector, collateral, b.feeCfg.MaxCommitGasFee, enc.Bytes())
+	mcid, err := b.api.SendMsg(b.mctx, from, b.maddr, miner.Methods.ProveCommitSector, collateral, big.Int(b.feeCfg.MaxCommitGasFee), enc.Bytes())
 	if err != nil {
 		return cid.Undef, xerrors.Errorf("pushing message to mpool: %w", err)
 	}
@@ -369,16 +394,15 @@
 
 // register commit, wait for batch message, return message CID
 func (b *CommitBatcher) AddCommit(ctx context.Context, s SectorInfo, in AggregateInput) (res sealiface.CommitBatchRes, err error) {
-	_, curEpoch, err := b.api.ChainHead(b.mctx)
-	if err != nil {
-		log.Errorf("getting chain head: %s", err)
-		return sealiface.CommitBatchRes{}, nil
-	}
-
 	sn := s.SectorNumber
 
+	cu, err := b.getCommitCutoff(s)
+	if err != nil {
+		return sealiface.CommitBatchRes{}, err
+	}
+
 	b.lk.Lock()
-	b.deadlines[sn] = getSectorDeadline(curEpoch, s)
+	b.cutoffs[sn] = cu
 	b.todo[sn] = in
 
 	sent := make(chan sealiface.CommitBatchRes, 1)
@@ -426,7 +450,7 @@ func (b *CommitBatcher) Pending(ctx context.Context) ([]abi.SectorID, error) {
 	for _, s := range b.todo {
 		res = append(res, abi.SectorID{
 			Miner:  abi.ActorID(mid),
-			Number: s.info.Number,
+			Number: s.Info.Number,
 		})
 	}
 
@@ -452,24 +476,43 @@ func (b *CommitBatcher) Stop(ctx context.Context) error {
 	}
 }
 
-func getSectorDeadline(curEpoch abi.ChainEpoch, si SectorInfo) time.Time {
-	deadlineEpoch := si.TicketEpoch + policy.MaxPreCommitRandomnessLookback
+// TODO: If this returned epochs, it would make testing much easier
+func (b *CommitBatcher) getCommitCutoff(si SectorInfo) (time.Time, error) {
+	tok, curEpoch, err := b.api.ChainHead(b.mctx)
+	if err != nil {
+		return time.Now(), xerrors.Errorf("getting chain head: %s", err)
+	}
+
+	nv, err := b.api.StateNetworkVersion(b.mctx, tok)
+	if err != nil {
+		log.Errorf("getting network version: %s", err)
+		return time.Now(), xerrors.Errorf("getting network version: %s", err)
+	}
+
+	pci, err := b.api.StateSectorPreCommitInfo(b.mctx, b.maddr, si.SectorNumber, tok)
+	if err != nil {
+		log.Errorf("getting precommit info: %s", err)
+		return time.Now(), err
+	}
+
+	cutoffEpoch := pci.PreCommitEpoch + policy.GetMaxProveCommitDuration(actors.VersionForNetwork(nv), si.SectorType)
+
 	for _, p := range si.Pieces {
 		if p.DealInfo == nil {
 			continue
 		}
 
 		startEpoch := p.DealInfo.DealSchedule.StartEpoch
-		if startEpoch < deadlineEpoch {
-			deadlineEpoch = startEpoch
+		if startEpoch < cutoffEpoch {
+			cutoffEpoch = startEpoch
 		}
 	}
 
-	if deadlineEpoch <= curEpoch {
-		return time.Now()
+	if cutoffEpoch <= curEpoch {
+		return time.Now(), nil
 	}
 
-	return time.Now().Add(time.Duration(deadlineEpoch-curEpoch) * time.Duration(build.BlockDelaySecs) * time.Second)
+	return time.Now().Add(time.Duration(cutoffEpoch-curEpoch) * time.Duration(build.BlockDelaySecs) * time.Second), nil
 }
 
 func (b *CommitBatcher) getSectorCollateral(sn abi.SectorNumber, tok TipSetToken) (abi.TokenAmount, error) {
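The batcher now derives each sector's commit cutoff from on-chain pre-commit info instead of the ticket epoch, and caps it by the earliest deal start epoch among the sector's pieces. A minimal, self-contained sketch of that calculation (hypothetical helper, plain int64 epochs and a hard-coded 30-second block time standing in for abi.ChainEpoch and build.BlockDelaySecs):

```go
package main

import (
	"fmt"
	"time"
)

// commitCutoff mirrors the shape of getCommitCutoff above: the cutoff epoch is
// the pre-commit epoch plus the maximum prove-commit duration, pulled earlier
// by any deal that starts before that, then converted to wall-clock time.
func commitCutoff(curEpoch, preCommitEpoch, maxProveCommitDuration int64, dealStartEpochs []int64) time.Time {
	cutoffEpoch := preCommitEpoch + maxProveCommitDuration
	for _, start := range dealStartEpochs {
		if start < cutoffEpoch {
			cutoffEpoch = start
		}
	}
	if cutoffEpoch <= curEpoch {
		return time.Now() // already due, flush as soon as possible
	}
	const blockDelaySecs = 30 // assumption: mainnet block time
	return time.Now().Add(time.Duration(cutoffEpoch-curEpoch) * blockDelaySecs * time.Second)
}

func main() {
	// Pre-committed at epoch 900 with a 1000-epoch prove-commit window,
	// but a deal starting at epoch 1500 pulls the cutoff in.
	fmt.Println(commitCutoff(1000, 900, 1000, []int64{1500}))
}
```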
extern/storage-sealing/commit_batch_test.go (vendored, new file, 299 lines)
@@ -0,0 +1,299 @@
package sealing_test

import (
	"bytes"
	"context"
	"sort"
	"sync"
	"testing"
	"time"

	"github.com/golang/mock/gomock"
	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
	"github.com/filecoin-project/go-state-types/network"
	miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
	proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
	"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
	"github.com/filecoin-project/lotus/extern/storage-sealing/mocks"
	"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
)

func TestCommitBatcher(t *testing.T) {
	t0123, err := address.NewFromString("t0123")
	require.NoError(t, err)

	ctx := context.Background()

	as := func(ctx context.Context, mi miner.MinerInfo, use api.AddrUse, goodFunds, minFunds abi.TokenAmount) (address.Address, abi.TokenAmount, error) {
		return t0123, big.Zero(), nil
	}

	maxBatch := miner5.MaxAggregatedSectors
	minBatch := miner5.MinAggregatedSectors

	cfg := func() (sealiface.Config, error) {
		return sealiface.Config{
			MaxWaitDealsSectors:       2,
			MaxSealingSectors:         0,
			MaxSealingSectorsForDeals: 0,
			WaitDealsDelay:            time.Hour * 6,
			AlwaysKeepUnsealedCopy:    true,

			BatchPreCommits:     true,
			MinPreCommitBatch:   1,
			MaxPreCommitBatch:   miner5.PreCommitSectorBatchMaxSize,
			PreCommitBatchWait:  24 * time.Hour,
			PreCommitBatchSlack: 3 * time.Hour,

			AggregateCommits: true,
			MinCommitBatch:   minBatch,
			MaxCommitBatch:   maxBatch,
			CommitBatchWait:  24 * time.Hour,
			CommitBatchSlack: 1 * time.Hour,

			TerminateBatchMin:  1,
			TerminateBatchMax:  100,
			TerminateBatchWait: 5 * time.Minute,
		}, nil
	}

	type promise func(t *testing.T)
	type action func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *sealing.CommitBatcher) promise

	actions := func(as ...action) action {
		return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *sealing.CommitBatcher) promise {
			var ps []promise
			for _, a := range as {
				p := a(t, s, pcb)
				if p != nil {
					ps = append(ps, p)
				}
			}

			if len(ps) > 0 {
				return func(t *testing.T) {
					for _, p := range ps {
						p(t)
					}
				}
			}
			return nil
		}
	}

	addSector := func(sn abi.SectorNumber) action {
		return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *sealing.CommitBatcher) promise {
			var pcres sealiface.CommitBatchRes
			var pcerr error
			done := sync.Mutex{}
			done.Lock()

			si := sealing.SectorInfo{
				SectorNumber: sn,
			}

			s.EXPECT().ChainHead(gomock.Any()).Return(nil, abi.ChainEpoch(1), nil)
			s.EXPECT().StateNetworkVersion(gomock.Any(), gomock.Any()).Return(network.Version13, nil)
			s.EXPECT().StateSectorPreCommitInfo(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&miner.SectorPreCommitOnChainInfo{
				PreCommitDeposit: big.Zero(),
			}, nil)

			go func() {
				defer done.Unlock()
				pcres, pcerr = pcb.AddCommit(ctx, si, sealing.AggregateInput{
					Info: proof5.AggregateSealVerifyInfo{
						Number: sn,
					},
				})
			}()

			return func(t *testing.T) {
				done.Lock()
				require.NoError(t, pcerr)
				require.Empty(t, pcres.Error)
				require.Contains(t, pcres.Sectors, si.SectorNumber)
			}
		}
	}

	addSectors := func(sectors []abi.SectorNumber) action {
		as := make([]action, len(sectors))
		for i, sector := range sectors {
			as[i] = addSector(sector)
		}
		return actions(as...)
	}

	waitPending := func(n int) action {
		return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *sealing.CommitBatcher) promise {
			require.Eventually(t, func() bool {
				p, err := pcb.Pending(ctx)
				require.NoError(t, err)
				return len(p) == n
			}, time.Second*5, 10*time.Millisecond)

			return nil
		}
	}

	expectSend := func(expect []abi.SectorNumber) action {
		return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *sealing.CommitBatcher) promise {
			s.EXPECT().StateMinerInfo(gomock.Any(), gomock.Any(), gomock.Any()).Return(miner.MinerInfo{Owner: t0123, Worker: t0123}, nil)

			ti := len(expect)
			batch := false
			if ti >= minBatch {
				batch = true
				ti = 1
			}
			s.EXPECT().ChainHead(gomock.Any()).Return(nil, abi.ChainEpoch(1), nil)
			s.EXPECT().StateSectorPreCommitInfo(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&miner.SectorPreCommitOnChainInfo{
				PreCommitDeposit: big.Zero(),
			}, nil).Times(len(expect))
			s.EXPECT().StateMinerInitialPledgeCollateral(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(big.Zero(), nil).Times(len(expect))
			if batch {
				s.EXPECT().StateNetworkVersion(gomock.Any(), gomock.Any()).Return(network.Version13, nil)
				s.EXPECT().ChainBaseFee(gomock.Any(), gomock.Any()).Return(big.NewInt(2000), nil)
			}

			s.EXPECT().SendMsg(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), funMatcher(func(i interface{}) bool {
				b := i.([]byte)
				if batch {
					var params miner5.ProveCommitAggregateParams
					require.NoError(t, params.UnmarshalCBOR(bytes.NewReader(b)))
					for _, number := range expect {
						set, err := params.SectorNumbers.IsSet(uint64(number))
						require.NoError(t, err)
						require.True(t, set)
					}
				} else {
					var params miner5.ProveCommitSectorParams
					require.NoError(t, params.UnmarshalCBOR(bytes.NewReader(b)))
				}
				return true
			})).Times(ti)
			return nil
		}
	}

	flush := func(expect []abi.SectorNumber) action {
		return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *sealing.CommitBatcher) promise {
			_ = expectSend(expect)(t, s, pcb)

			batch := len(expect) >= minBatch

			r, err := pcb.Flush(ctx)
			require.NoError(t, err)
			if batch {
				require.Len(t, r, 1)
				require.Empty(t, r[0].Error)
				sort.Slice(r[0].Sectors, func(i, j int) bool {
					return r[0].Sectors[i] < r[0].Sectors[j]
				})
				require.Equal(t, expect, r[0].Sectors)
			} else {
				require.Len(t, r, len(expect))
				for _, res := range r {
					require.Len(t, res.Sectors, 1)
					require.Empty(t, res.Error)
				}
				sort.Slice(r, func(i, j int) bool {
					return r[i].Sectors[0] < r[j].Sectors[0]
				})
				for i, res := range r {
					require.Equal(t, abi.SectorNumber(i), res.Sectors[0])
				}
			}

			return nil
		}
	}

	getSectors := func(n int) []abi.SectorNumber {
		out := make([]abi.SectorNumber, n)
		for i := range out {
			out[i] = abi.SectorNumber(i)
		}
		return out
	}

	tcs := map[string]struct {
		actions []action
	}{
		"addSingle": {
			actions: []action{
				addSector(0),
				waitPending(1),
				flush([]abi.SectorNumber{0}),
			},
		},
		"addTwo": {
			actions: []action{
				addSectors(getSectors(2)),
				waitPending(2),
				flush(getSectors(2)),
			},
		},
		"addAte": {
			actions: []action{
				addSectors(getSectors(8)),
				waitPending(8),
				flush(getSectors(8)),
			},
		},
		"addMax": {
			actions: []action{
				expectSend(getSectors(maxBatch)),
				addSectors(getSectors(maxBatch)),
			},
		},
	}

	for name, tc := range tcs {
		tc := tc

		t.Run(name, func(t *testing.T) {
			// create go mock controller here
			mockCtrl := gomock.NewController(t)
			// when test is done, assert expectations on all mock objects.
			defer mockCtrl.Finish()

			// create them mocks
			pcapi := mocks.NewMockCommitBatcherApi(mockCtrl)

			pcb := sealing.NewCommitBatcher(ctx, t0123, pcapi, as, fc, cfg, &fakeProver{})

			var promises []promise

			for _, a := range tc.actions {
				p := a(t, pcapi, pcb)
				if p != nil {
					promises = append(promises, p)
				}
			}

			for _, p := range promises {
				p(t)
			}

			err := pcb.Stop(ctx)
			require.NoError(t, err)
		})
	}
}

type fakeProver struct{}

func (f fakeProver) AggregateSealProofs(aggregateInfo proof5.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) {
	return []byte("Trust me, I'm a proof"), nil
}

var _ ffiwrapper.Prover = &fakeProver{}
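The SendMsg expectation above relies on a funMatcher helper that is not part of this file. A plausible sketch of what such a helper looks like, assuming it is simply a gomock.Matcher that delegates to a closure (the real definition lives elsewhere in the package and may differ):

```go
package sealing_test

import "github.com/golang/mock/gomock"

// funMatcher adapts a plain predicate to gomock's Matcher interface so the
// test can decode and inspect the CBOR-encoded message parameters.
type funMatcher func(interface{}) bool

func (m funMatcher) Matches(x interface{}) bool { return m(x) }

func (m funMatcher) String() string { return "fun matcher" }

var _ gomock.Matcher = funMatcher(nil)
```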
extern/storage-sealing/fsm.go (vendored, 17 changes)
@@ -103,6 +103,10 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto
 		on(SectorChainPreCommitFailed{}, PreCommitFailed),
 	),
 	Committing: planCommitting,
+	CommitFinalize: planOne(
+		on(SectorFinalized{}, SubmitCommit),
+		on(SectorFinalizeFailed{}, CommitFinalizeFailed),
+	),
 	SubmitCommit: planOne(
 		on(SectorCommitSubmitted{}, CommitWait),
 		on(SectorSubmitCommitAggregate{}, SubmitCommitAggregate),
@@ -151,6 +155,9 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto
 		on(SectorRetryComputeProof{}, Committing),
 		on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed),
 	),
+	CommitFinalizeFailed: planOne(
+		on(SectorRetryFinalize{}, CommitFinalizeFailed),
+	),
 	CommitFailed: planOne(
 		on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed),
 		on(SectorRetryWaitSeed{}, WaitSeed),
@@ -379,6 +386,8 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta
 		fallthrough
 	case CommitWait:
 		return m.handleCommitWait, processed, nil
+	case CommitFinalize:
+		fallthrough
 	case FinalizeSector:
 		return m.handleFinalizeSector, processed, nil
 
@@ -393,6 +402,8 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta
 		return m.handleComputeProofFailed, processed, nil
 	case CommitFailed:
 		return m.handleCommitFailed, processed, nil
+	case CommitFinalizeFailed:
+		fallthrough
 	case FinalizeFailed:
 		return m.handleFinalizeFailed, processed, nil
 	case PackingFailed: // DEPRECATED: remove this for the next reset
@@ -482,6 +493,9 @@ func planCommitting(events []statemachine.Event, state *SectorInfo) (uint64, err
 		case SectorCommitted: // the normal case
 			e.apply(state)
 			state.State = SubmitCommit
+		case SectorProofReady: // early finalize
+			e.apply(state)
+			state.State = CommitFinalize
 		case SectorSeedReady: // seed changed :/
 			if e.SeedEpoch == state.SeedEpoch && bytes.Equal(e.SeedValue, state.SeedValue) {
 				log.Warnf("planCommitting: got SectorSeedReady, but the seed didn't change")
@@ -508,6 +522,8 @@ func planCommitting(events []statemachine.Event, state *SectorInfo) (uint64, err
 }
 
 func (m *Sealing) restartSectors(ctx context.Context) error {
+	defer m.startupWait.Done()
+
 	trackedSectors, err := m.ListSectors()
 	if err != nil {
 		log.Errorf("loading sector list: %+v", err)
@@ -525,6 +541,7 @@ func (m *Sealing) restartSectors(ctx context.Context) error {
 }
 
 func (m *Sealing) ForceSectorState(ctx context.Context, id abi.SectorNumber, state SectorState) error {
+	m.startupWait.Wait()
 	return m.sectors.Send(id, SectorForceState{state})
 }
 
extern/storage-sealing/fsm_events.go (vendored, 9 changes)
@@ -245,6 +245,15 @@ func (evt SectorCommitted) apply(state *SectorInfo) {
 	state.Proof = evt.Proof
 }
 
+// like SectorCommitted, but finalizes before sending the proof to the chain
+type SectorProofReady struct {
+	Proof []byte
+}
+
+func (evt SectorProofReady) apply(state *SectorInfo) {
+	state.Proof = evt.Proof
+}
+
 type SectorSubmitCommitAggregate struct{}
 
 func (evt SectorSubmitCommitAggregate) apply(*SectorInfo) {}
extern/storage-sealing/fsm_test.go (vendored, 67 changes)
@@ -87,6 +87,73 @@ func TestHappyPath(t *testing.T) {
 	}
 }
 
+func TestHappyPathFinalizeEarly(t *testing.T) {
+	var notif []struct{ before, after SectorInfo }
+	ma, _ := address.NewIDAddress(55151)
+	m := test{
+		s: &Sealing{
+			maddr: ma,
+			stats: SectorStats{
+				bySector: map[abi.SectorID]statSectorState{},
+			},
+			notifee: func(before, after SectorInfo) {
+				notif = append(notif, struct{ before, after SectorInfo }{before, after})
+			},
+		},
+		t:     t,
+		state: &SectorInfo{State: Packing},
+	}
+
+	m.planSingle(SectorPacked{})
+	require.Equal(m.t, m.state.State, GetTicket)
+
+	m.planSingle(SectorTicket{})
+	require.Equal(m.t, m.state.State, PreCommit1)
+
+	m.planSingle(SectorPreCommit1{})
+	require.Equal(m.t, m.state.State, PreCommit2)
+
+	m.planSingle(SectorPreCommit2{})
+	require.Equal(m.t, m.state.State, PreCommitting)
+
+	m.planSingle(SectorPreCommitted{})
+	require.Equal(m.t, m.state.State, PreCommitWait)
+
+	m.planSingle(SectorPreCommitLanded{})
+	require.Equal(m.t, m.state.State, WaitSeed)
+
+	m.planSingle(SectorSeedReady{})
+	require.Equal(m.t, m.state.State, Committing)
+
+	m.planSingle(SectorProofReady{})
+	require.Equal(m.t, m.state.State, CommitFinalize)
+
+	m.planSingle(SectorFinalized{})
+	require.Equal(m.t, m.state.State, SubmitCommit)
+
+	m.planSingle(SectorSubmitCommitAggregate{})
+	require.Equal(m.t, m.state.State, SubmitCommitAggregate)
+
+	m.planSingle(SectorCommitAggregateSent{})
+	require.Equal(m.t, m.state.State, CommitWait)
+
+	m.planSingle(SectorProving{})
+	require.Equal(m.t, m.state.State, FinalizeSector)
+
+	m.planSingle(SectorFinalized{})
+	require.Equal(m.t, m.state.State, Proving)
+
+	expected := []SectorState{Packing, GetTicket, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, WaitSeed, Committing, CommitFinalize, SubmitCommit, SubmitCommitAggregate, CommitWait, FinalizeSector, Proving}
+	for i, n := range notif {
+		if n.before.State != expected[i] {
+			t.Fatalf("expected before state: %s, got: %s", expected[i], n.before.State)
+		}
+		if n.after.State != expected[i+1] {
+			t.Fatalf("expected after state: %s, got: %s", expected[i+1], n.after.State)
+		}
+	}
+}
+
 func TestSeedRevert(t *testing.T) {
 	ma, _ := address.NewIDAddress(55151)
 	m := test{
extern/storage-sealing/garbage.go (vendored, 2 changes)
@@ -9,6 +9,8 @@ import (
 )
 
 func (m *Sealing) PledgeSector(ctx context.Context) (storage.SectorRef, error) {
+	m.startupWait.Wait()
+
 	m.inputLk.Lock()
 	defer m.inputLk.Unlock()
 
extern/storage-sealing/input.go (vendored, 3 changes)
@@ -394,6 +394,7 @@ func (m *Sealing) updateInput(ctx context.Context, sp abi.RegisteredSealProof) e
 }
 
 func (m *Sealing) tryCreateDealSector(ctx context.Context, sp abi.RegisteredSealProof) error {
+	m.startupWait.Wait()
 	if m.creating != nil {
 		return nil // new sector is being created right now
 	}
@@ -446,7 +447,9 @@ func (m *Sealing) createSector(ctx context.Context, cfg sealiface.Config, sp abi
 }
 
 func (m *Sealing) StartPacking(sid abi.SectorNumber) error {
+	m.startupWait.Wait()
+
 	log.Infow("starting to seal deal sector", "sector", sid, "trigger", "user")
 
 	return m.sectors.Send(uint64(sid), SectorStartPacking{})
 }
 
extern/storage-sealing/mocks/mock_commit_batcher.go (vendored, new file, 149 lines)
@@ -0,0 +1,149 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/filecoin-project/lotus/extern/storage-sealing (interfaces: CommitBatcherApi)

// Package mocks is a generated GoMock package.
package mocks

import (
	context "context"
	reflect "reflect"

	address "github.com/filecoin-project/go-address"
	abi "github.com/filecoin-project/go-state-types/abi"
	big "github.com/filecoin-project/go-state-types/big"
	network "github.com/filecoin-project/go-state-types/network"
	miner "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
	miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
	gomock "github.com/golang/mock/gomock"
	cid "github.com/ipfs/go-cid"
)

// MockCommitBatcherApi is a mock of CommitBatcherApi interface.
type MockCommitBatcherApi struct {
	ctrl     *gomock.Controller
	recorder *MockCommitBatcherApiMockRecorder
}

// MockCommitBatcherApiMockRecorder is the mock recorder for MockCommitBatcherApi.
type MockCommitBatcherApiMockRecorder struct {
	mock *MockCommitBatcherApi
}

// NewMockCommitBatcherApi creates a new mock instance.
func NewMockCommitBatcherApi(ctrl *gomock.Controller) *MockCommitBatcherApi {
	mock := &MockCommitBatcherApi{ctrl: ctrl}
	mock.recorder = &MockCommitBatcherApiMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockCommitBatcherApi) EXPECT() *MockCommitBatcherApiMockRecorder {
	return m.recorder
}

// ChainBaseFee mocks base method.
func (m *MockCommitBatcherApi) ChainBaseFee(arg0 context.Context, arg1 sealing.TipSetToken) (big.Int, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ChainBaseFee", arg0, arg1)
	ret0, _ := ret[0].(big.Int)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ChainBaseFee indicates an expected call of ChainBaseFee.
func (mr *MockCommitBatcherApiMockRecorder) ChainBaseFee(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainBaseFee", reflect.TypeOf((*MockCommitBatcherApi)(nil).ChainBaseFee), arg0, arg1)
}

// ChainHead mocks base method.
func (m *MockCommitBatcherApi) ChainHead(arg0 context.Context) (sealing.TipSetToken, abi.ChainEpoch, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ChainHead", arg0)
	ret0, _ := ret[0].(sealing.TipSetToken)
	ret1, _ := ret[1].(abi.ChainEpoch)
	ret2, _ := ret[2].(error)
	return ret0, ret1, ret2
}

// ChainHead indicates an expected call of ChainHead.
func (mr *MockCommitBatcherApiMockRecorder) ChainHead(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainHead", reflect.TypeOf((*MockCommitBatcherApi)(nil).ChainHead), arg0)
}

// SendMsg mocks base method.
func (m *MockCommitBatcherApi) SendMsg(arg0 context.Context, arg1, arg2 address.Address, arg3 abi.MethodNum, arg4, arg5 big.Int, arg6 []byte) (cid.Cid, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "SendMsg", arg0, arg1, arg2, arg3, arg4, arg5, arg6)
	ret0, _ := ret[0].(cid.Cid)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// SendMsg indicates an expected call of SendMsg.
func (mr *MockCommitBatcherApiMockRecorder) SendMsg(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockCommitBatcherApi)(nil).SendMsg), arg0, arg1, arg2, arg3, arg4, arg5, arg6)
}

// StateMinerInfo mocks base method.
func (m *MockCommitBatcherApi) StateMinerInfo(arg0 context.Context, arg1 address.Address, arg2 sealing.TipSetToken) (miner.MinerInfo, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateMinerInfo", arg0, arg1, arg2)
	ret0, _ := ret[0].(miner.MinerInfo)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StateMinerInfo indicates an expected call of StateMinerInfo.
func (mr *MockCommitBatcherApiMockRecorder) StateMinerInfo(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerInfo", reflect.TypeOf((*MockCommitBatcherApi)(nil).StateMinerInfo), arg0, arg1, arg2)
}

// StateMinerInitialPledgeCollateral mocks base method.
func (m *MockCommitBatcherApi) StateMinerInitialPledgeCollateral(arg0 context.Context, arg1 address.Address, arg2 miner0.SectorPreCommitInfo, arg3 sealing.TipSetToken) (big.Int, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateMinerInitialPledgeCollateral", arg0, arg1, arg2, arg3)
	ret0, _ := ret[0].(big.Int)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StateMinerInitialPledgeCollateral indicates an expected call of StateMinerInitialPledgeCollateral.
func (mr *MockCommitBatcherApiMockRecorder) StateMinerInitialPledgeCollateral(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerInitialPledgeCollateral", reflect.TypeOf((*MockCommitBatcherApi)(nil).StateMinerInitialPledgeCollateral), arg0, arg1, arg2, arg3)
}

// StateNetworkVersion mocks base method.
func (m *MockCommitBatcherApi) StateNetworkVersion(arg0 context.Context, arg1 sealing.TipSetToken) (network.Version, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateNetworkVersion", arg0, arg1)
	ret0, _ := ret[0].(network.Version)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StateNetworkVersion indicates an expected call of StateNetworkVersion.
func (mr *MockCommitBatcherApiMockRecorder) StateNetworkVersion(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateNetworkVersion", reflect.TypeOf((*MockCommitBatcherApi)(nil).StateNetworkVersion), arg0, arg1)
}

// StateSectorPreCommitInfo mocks base method.
func (m *MockCommitBatcherApi) StateSectorPreCommitInfo(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 sealing.TipSetToken) (*miner.SectorPreCommitOnChainInfo, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateSectorPreCommitInfo", arg0, arg1, arg2, arg3)
	ret0, _ := ret[0].(*miner.SectorPreCommitOnChainInfo)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StateSectorPreCommitInfo indicates an expected call of StateSectorPreCommitInfo.
func (mr *MockCommitBatcherApiMockRecorder) StateSectorPreCommitInfo(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorPreCommitInfo", reflect.TypeOf((*MockCommitBatcherApi)(nil).StateSectorPreCommitInfo), arg0, arg1, arg2, arg3)
}
extern/storage-sealing/mocks/mock_precommit_batcher.go (vendored, new file, 87 lines)
@@ -0,0 +1,87 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/filecoin-project/lotus/extern/storage-sealing (interfaces: PreCommitBatcherApi)

// Package mocks is a generated GoMock package.
package mocks

import (
	context "context"
	reflect "reflect"

	address "github.com/filecoin-project/go-address"
	abi "github.com/filecoin-project/go-state-types/abi"
	big "github.com/filecoin-project/go-state-types/big"
	miner "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
	gomock "github.com/golang/mock/gomock"
	cid "github.com/ipfs/go-cid"
)

// MockPreCommitBatcherApi is a mock of PreCommitBatcherApi interface.
type MockPreCommitBatcherApi struct {
	ctrl     *gomock.Controller
	recorder *MockPreCommitBatcherApiMockRecorder
}

// MockPreCommitBatcherApiMockRecorder is the mock recorder for MockPreCommitBatcherApi.
type MockPreCommitBatcherApiMockRecorder struct {
	mock *MockPreCommitBatcherApi
}

// NewMockPreCommitBatcherApi creates a new mock instance.
func NewMockPreCommitBatcherApi(ctrl *gomock.Controller) *MockPreCommitBatcherApi {
	mock := &MockPreCommitBatcherApi{ctrl: ctrl}
	mock.recorder = &MockPreCommitBatcherApiMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockPreCommitBatcherApi) EXPECT() *MockPreCommitBatcherApiMockRecorder {
	return m.recorder
}

// ChainHead mocks base method.
func (m *MockPreCommitBatcherApi) ChainHead(arg0 context.Context) (sealing.TipSetToken, abi.ChainEpoch, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ChainHead", arg0)
	ret0, _ := ret[0].(sealing.TipSetToken)
	ret1, _ := ret[1].(abi.ChainEpoch)
	ret2, _ := ret[2].(error)
	return ret0, ret1, ret2
}

// ChainHead indicates an expected call of ChainHead.
func (mr *MockPreCommitBatcherApiMockRecorder) ChainHead(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainHead", reflect.TypeOf((*MockPreCommitBatcherApi)(nil).ChainHead), arg0)
}

// SendMsg mocks base method.
func (m *MockPreCommitBatcherApi) SendMsg(arg0 context.Context, arg1, arg2 address.Address, arg3 abi.MethodNum, arg4, arg5 big.Int, arg6 []byte) (cid.Cid, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "SendMsg", arg0, arg1, arg2, arg3, arg4, arg5, arg6)
	ret0, _ := ret[0].(cid.Cid)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// SendMsg indicates an expected call of SendMsg.
func (mr *MockPreCommitBatcherApiMockRecorder) SendMsg(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockPreCommitBatcherApi)(nil).SendMsg), arg0, arg1, arg2, arg3, arg4, arg5, arg6)
}

// StateMinerInfo mocks base method.
func (m *MockPreCommitBatcherApi) StateMinerInfo(arg0 context.Context, arg1 address.Address, arg2 sealing.TipSetToken) (miner.MinerInfo, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateMinerInfo", arg0, arg1, arg2)
	ret0, _ := ret[0].(miner.MinerInfo)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StateMinerInfo indicates an expected call of StateMinerInfo.
func (mr *MockPreCommitBatcherApiMockRecorder) StateMinerInfo(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerInfo", reflect.TypeOf((*MockPreCommitBatcherApi)(nil).StateMinerInfo), arg0, arg1, arg2)
}
extern/storage-sealing/precommit_batch.go (vendored, 76 lines changed)

@@ -7,6 +7,9 @@ import (
 	"sync"
 	"time"
 
+	"github.com/filecoin-project/lotus/build"
+	"github.com/filecoin-project/lotus/chain/actors/policy"
+
 	"github.com/ipfs/go-cid"
 	"golang.org/x/xerrors"
 
@@ -19,8 +22,11 @@ import (
 	"github.com/filecoin-project/lotus/api"
 	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
 	"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
+	"github.com/filecoin-project/lotus/node/config"
 )
 
+//go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_precommit_batcher.go -package=mocks . PreCommitBatcherApi
+
 type PreCommitBatcherApi interface {
 	SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error)
 	StateMinerInfo(context.Context, address.Address, TipSetToken) (miner.MinerInfo, error)
@@ -37,19 +43,19 @@ type PreCommitBatcher struct {
 	maddr     address.Address
 	mctx      context.Context
 	addrSel   AddrSel
-	feeCfg    FeeConfig
+	feeCfg    config.MinerFeeConfig
 	getConfig GetSealingConfigFunc
 
-	deadlines map[abi.SectorNumber]time.Time
+	cutoffs map[abi.SectorNumber]time.Time
 	todo    map[abi.SectorNumber]*preCommitEntry
 	waiting map[abi.SectorNumber][]chan sealiface.PreCommitBatchRes
 
 	notify, stop, stopped chan struct{}
 	force                 chan chan []sealiface.PreCommitBatchRes
 	lk                    sync.Mutex
 }
 
-func NewPreCommitBatcher(mctx context.Context, maddr address.Address, api PreCommitBatcherApi, addrSel AddrSel, feeCfg FeeConfig, getConfig GetSealingConfigFunc) *PreCommitBatcher {
+func NewPreCommitBatcher(mctx context.Context, maddr address.Address, api PreCommitBatcherApi, addrSel AddrSel, feeCfg config.MinerFeeConfig, getConfig GetSealingConfigFunc) *PreCommitBatcher {
 	b := &PreCommitBatcher{
 		api:   api,
 		maddr: maddr,
@@ -58,9 +64,9 @@ func NewPreCommitBatcher(mctx context.Context, maddr address.Address, api PreCom
 		feeCfg:    feeCfg,
 		getConfig: getConfig,
 
-		deadlines: map[abi.SectorNumber]time.Time{},
+		cutoffs: map[abi.SectorNumber]time.Time{},
 		todo:    map[abi.SectorNumber]*preCommitEntry{},
 		waiting: map[abi.SectorNumber][]chan sealiface.PreCommitBatchRes{},
 
 		notify: make(chan struct{}, 1),
 		force:  make(chan chan []sealiface.PreCommitBatchRes),
@@ -120,30 +126,30 @@ func (b *PreCommitBatcher) batchWait(maxWait, slack time.Duration) <-chan time.T
 		return nil
 	}
 
-	var deadline time.Time
+	var cutoff time.Time
 	for sn := range b.todo {
-		sectorDeadline := b.deadlines[sn]
-		if deadline.IsZero() || (!sectorDeadline.IsZero() && sectorDeadline.Before(deadline)) {
-			deadline = sectorDeadline
+		sectorCutoff := b.cutoffs[sn]
+		if cutoff.IsZero() || (!sectorCutoff.IsZero() && sectorCutoff.Before(cutoff)) {
+			cutoff = sectorCutoff
 		}
 	}
 	for sn := range b.waiting {
-		sectorDeadline := b.deadlines[sn]
-		if deadline.IsZero() || (!sectorDeadline.IsZero() && sectorDeadline.Before(deadline)) {
-			deadline = sectorDeadline
+		sectorCutoff := b.cutoffs[sn]
+		if cutoff.IsZero() || (!sectorCutoff.IsZero() && sectorCutoff.Before(cutoff)) {
+			cutoff = sectorCutoff
 		}
 	}
 
-	if deadline.IsZero() {
+	if cutoff.IsZero() {
 		return time.After(maxWait)
 	}
 
-	deadline = deadline.Add(-slack)
-	if deadline.Before(now) {
+	cutoff = cutoff.Add(-slack)
+	if cutoff.Before(now) {
 		return time.After(time.Nanosecond) // can't return 0
 	}
 
-	wait := deadline.Sub(now)
+	wait := cutoff.Sub(now)
 	if wait > maxWait {
 		wait = maxWait
 	}
@@ -191,7 +197,7 @@ func (b *PreCommitBatcher) maybeStartBatch(notif, after bool) ([]sealiface.PreCo
 
 		delete(b.waiting, sn)
 		delete(b.todo, sn)
-		delete(b.deadlines, sn)
+		delete(b.cutoffs, sn)
 	}
 }
 
@@ -224,21 +230,22 @@ func (b *PreCommitBatcher) processBatch(cfg sealiface.Config) ([]sealiface.PreCo
 		return []sealiface.PreCommitBatchRes{res}, xerrors.Errorf("couldn't get miner info: %w", err)
 	}
 
-	goodFunds := big.Add(deposit, b.feeCfg.MaxPreCommitGasFee)
+	maxFee := b.feeCfg.MaxPreCommitBatchGasFee.FeeForSectors(len(params.Sectors))
+	goodFunds := big.Add(deposit, maxFee)
 
 	from, _, err := b.addrSel(b.mctx, mi, api.PreCommitAddr, goodFunds, deposit)
 	if err != nil {
 		return []sealiface.PreCommitBatchRes{res}, xerrors.Errorf("no good address found: %w", err)
 	}
 
-	mcid, err := b.api.SendMsg(b.mctx, from, b.maddr, miner.Methods.PreCommitSectorBatch, deposit, b.feeCfg.MaxPreCommitGasFee, enc.Bytes())
+	mcid, err := b.api.SendMsg(b.mctx, from, b.maddr, miner.Methods.PreCommitSectorBatch, deposit, maxFee, enc.Bytes())
 	if err != nil {
 		return []sealiface.PreCommitBatchRes{res}, xerrors.Errorf("sending message failed: %w", err)
 	}
 
 	res.Msg = &mcid
 
-	log.Infow("Sent ProveCommitAggregate message", "cid", mcid, "from", from, "sectors", len(b.todo))
+	log.Infow("Sent PreCommitSectorBatch message", "cid", mcid, "from", from, "sectors", len(b.todo))
 
 	return []sealiface.PreCommitBatchRes{res}, nil
 }
@@ -254,7 +261,7 @@ func (b *PreCommitBatcher) AddPreCommit(ctx context.Context, s SectorInfo, depos
 	sn := s.SectorNumber
 
 	b.lk.Lock()
-	b.deadlines[sn] = getSectorDeadline(curEpoch, s)
+	b.cutoffs[sn] = getPreCommitCutoff(curEpoch, s)
 	b.todo[sn] = &preCommitEntry{
 		deposit: deposit,
 		pci:     in,
@@ -330,3 +337,24 @@ func (b *PreCommitBatcher) Stop(ctx context.Context) error {
 		return ctx.Err()
 	}
 }
+
+// TODO: If this returned epochs, it would make testing much easier
+func getPreCommitCutoff(curEpoch abi.ChainEpoch, si SectorInfo) time.Time {
+	cutoffEpoch := si.TicketEpoch + policy.MaxPreCommitRandomnessLookback
+	for _, p := range si.Pieces {
+		if p.DealInfo == nil {
+			continue
+		}
+
+		startEpoch := p.DealInfo.DealSchedule.StartEpoch
+		if startEpoch < cutoffEpoch {
+			cutoffEpoch = startEpoch
+		}
+	}
+
+	if cutoffEpoch <= curEpoch {
+		return time.Now()
+	}
+
+	return time.Now().Add(time.Duration(cutoffEpoch-curEpoch) * time.Duration(build.BlockDelaySecs) * time.Second)
+}
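
processBatch now caps the message fee with MaxPreCommitBatchGasFee.FeeForSectors rather than the flat MaxPreCommitGasFee. The FeeForSectors implementation is not part of this diff; the sketch below shows the scaling it is assumed to apply (a base cap plus a per-sector increment), matching the Base/PerSector fixture used by the new test further down:

package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
)

// feeForSectors mirrors what config.BatchFeeConfig.FeeForSectors is assumed
// to compute: Base + PerSector * number of sectors in the batch.
func feeForSectors(base, perSector abi.TokenAmount, nSectors int) abi.TokenAmount {
	return big.Add(base, big.Mul(big.NewInt(int64(nSectors)), perSector))
}

func main() {
	// In arbitrary units: with Base = 3 and PerSector = 1, a batch of
	// 10 sectors is capped at 13.
	fmt.Println(feeForSectors(abi.NewTokenAmount(3), abi.NewTokenAmount(1), 10))
}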
extern/storage-sealing/precommit_batch_test.go (vendored, new file, 258 lines)

@@ -0,0 +1,258 @@
package sealing_test

import (
	"bytes"
	"context"
	"sort"
	"sync"
	"testing"
	"time"

	"github.com/golang/mock/gomock"
	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
	miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
	"github.com/filecoin-project/lotus/chain/types"
	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
	"github.com/filecoin-project/lotus/extern/storage-sealing/mocks"
	"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
	"github.com/filecoin-project/lotus/node/config"
	miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
)

var fc = config.MinerFeeConfig{
	MaxPreCommitGasFee:      types.FIL(types.FromFil(1)),
	MaxCommitGasFee:         types.FIL(types.FromFil(1)),
	MaxTerminateGasFee:      types.FIL(types.FromFil(1)),
	MaxPreCommitBatchGasFee: config.BatchFeeConfig{Base: types.FIL(types.FromFil(3)), PerSector: types.FIL(types.FromFil(1))},
	MaxCommitBatchGasFee:    config.BatchFeeConfig{Base: types.FIL(types.FromFil(3)), PerSector: types.FIL(types.FromFil(1))},
}

func TestPrecommitBatcher(t *testing.T) {
	t0123, err := address.NewFromString("t0123")
	require.NoError(t, err)

	ctx := context.Background()

	as := func(ctx context.Context, mi miner.MinerInfo, use api.AddrUse, goodFunds, minFunds abi.TokenAmount) (address.Address, abi.TokenAmount, error) {
		return t0123, big.Zero(), nil
	}

	maxBatch := miner5.PreCommitSectorBatchMaxSize

	cfg := func() (sealiface.Config, error) {
		return sealiface.Config{
			MaxWaitDealsSectors:       2,
			MaxSealingSectors:         0,
			MaxSealingSectorsForDeals: 0,
			WaitDealsDelay:            time.Hour * 6,
			AlwaysKeepUnsealedCopy:    true,

			BatchPreCommits:     true,
			MinPreCommitBatch:   1,
			MaxPreCommitBatch:   maxBatch,
			PreCommitBatchWait:  24 * time.Hour,
			PreCommitBatchSlack: 3 * time.Hour,

			AggregateCommits: true,
			MinCommitBatch:   miner5.MinAggregatedSectors,
			MaxCommitBatch:   miner5.MaxAggregatedSectors,
			CommitBatchWait:  24 * time.Hour,
			CommitBatchSlack: 1 * time.Hour,

			TerminateBatchMin:  1,
			TerminateBatchMax:  100,
			TerminateBatchWait: 5 * time.Minute,
		}, nil
	}

	type promise func(t *testing.T)
	type action func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *sealing.PreCommitBatcher) promise

	actions := func(as ...action) action {
		return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *sealing.PreCommitBatcher) promise {
			var ps []promise
			for _, a := range as {
				p := a(t, s, pcb)
				if p != nil {
					ps = append(ps, p)
				}
			}

			if len(ps) > 0 {
				return func(t *testing.T) {
					for _, p := range ps {
						p(t)
					}
				}
			}
			return nil
		}
	}

	addSector := func(sn abi.SectorNumber) action {
		return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *sealing.PreCommitBatcher) promise {
			var pcres sealiface.PreCommitBatchRes
			var pcerr error
			done := sync.Mutex{}
			done.Lock()

			si := sealing.SectorInfo{
				SectorNumber: sn,
			}

			s.EXPECT().ChainHead(gomock.Any()).Return(nil, abi.ChainEpoch(1), nil)

			go func() {
				defer done.Unlock()
				pcres, pcerr = pcb.AddPreCommit(ctx, si, big.Zero(), &miner0.SectorPreCommitInfo{
					SectorNumber: si.SectorNumber,
					SealedCID:    fakePieceCid(t),
					DealIDs:      nil,
					Expiration:   0,
				})
			}()

			return func(t *testing.T) {
				done.Lock()
				require.NoError(t, pcerr)
				require.Empty(t, pcres.Error)
				require.Contains(t, pcres.Sectors, si.SectorNumber)
			}
		}
	}

	addSectors := func(sectors []abi.SectorNumber) action {
		as := make([]action, len(sectors))
		for i, sector := range sectors {
			as[i] = addSector(sector)
		}
		return actions(as...)
	}

	waitPending := func(n int) action {
		return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *sealing.PreCommitBatcher) promise {
			require.Eventually(t, func() bool {
				p, err := pcb.Pending(ctx)
				require.NoError(t, err)
				return len(p) == n
			}, time.Second*5, 10*time.Millisecond)

			return nil
		}
	}

	expectSend := func(expect []abi.SectorNumber) action {
		return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *sealing.PreCommitBatcher) promise {
			s.EXPECT().StateMinerInfo(gomock.Any(), gomock.Any(), gomock.Any()).Return(miner.MinerInfo{Owner: t0123, Worker: t0123}, nil)
			s.EXPECT().SendMsg(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), funMatcher(func(i interface{}) bool {
				b := i.([]byte)
				var params miner5.PreCommitSectorBatchParams
				require.NoError(t, params.UnmarshalCBOR(bytes.NewReader(b)))
				for s, number := range expect {
					require.Equal(t, number, params.Sectors[s].SectorNumber)
				}
				return true
			}))
			return nil
		}
	}

	flush := func(expect []abi.SectorNumber) action {
		return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *sealing.PreCommitBatcher) promise {
			_ = expectSend(expect)(t, s, pcb)

			r, err := pcb.Flush(ctx)
			require.NoError(t, err)
			require.Len(t, r, 1)
			require.Empty(t, r[0].Error)
			sort.Slice(r[0].Sectors, func(i, j int) bool {
				return r[0].Sectors[i] < r[0].Sectors[j]
			})
			require.Equal(t, expect, r[0].Sectors)

			return nil
		}
	}

	getSectors := func(n int) []abi.SectorNumber {
		out := make([]abi.SectorNumber, n)
		for i := range out {
			out[i] = abi.SectorNumber(i)
		}
		return out
	}

	tcs := map[string]struct {
		actions []action
	}{
		"addSingle": {
			actions: []action{
				addSector(0),
				waitPending(1),
				flush([]abi.SectorNumber{0}),
			},
		},
		"addTwo": {
			actions: []action{
				addSectors(getSectors(2)),
				waitPending(2),
				flush(getSectors(2)),
			},
		},
		"addMax": {
			actions: []action{
				expectSend(getSectors(maxBatch)),
				addSectors(getSectors(maxBatch)),
			},
		},
	}

	for name, tc := range tcs {
		tc := tc

		t.Run(name, func(t *testing.T) {
			// create go mock controller here
			mockCtrl := gomock.NewController(t)
			// when test is done, assert expectations on all mock objects.
			defer mockCtrl.Finish()

			// create them mocks
			pcapi := mocks.NewMockPreCommitBatcherApi(mockCtrl)

			pcb := sealing.NewPreCommitBatcher(ctx, t0123, pcapi, as, fc, cfg)

			var promises []promise

			for _, a := range tc.actions {
				p := a(t, pcapi, pcb)
				if p != nil {
					promises = append(promises, p)
				}
			}

			for _, p := range promises {
				p(t)
			}

			err := pcb.Stop(ctx)
			require.NoError(t, err)
		})
	}
}

type funMatcher func(interface{}) bool

func (funMatcher) Matches(interface{}) bool {
	return true
}

func (funMatcher) String() string {
	return "fun"
}
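
The funMatcher helper at the end of the test satisfies gomock's Matcher interface, which is what lets an arbitrary predicate (here, CBOR-decoding the batch params and checking sector numbers) stand in for an expected SendMsg argument. For reference, the interface it implements, as defined by github.com/golang/mock/gomock (not part of this diff):

// Matcher is the gomock interface a custom argument matcher must satisfy.
type Matcher interface {
	Matches(x interface{}) bool // reports whether x is a match
	String() string             // describes the matcher in failure output
}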
extern/storage-sealing/sealiface/config.go (vendored, 2 lines changed)

@@ -18,6 +18,8 @@ type Config struct {
 
 	AlwaysKeepUnsealedCopy bool
 
+	FinalizeEarly bool
+
 	BatchPreCommits   bool
 	MaxPreCommitBatch int
 	MinPreCommitBatch int
extern/storage-sealing/sealing.go (vendored, 19 lines changed)

@@ -28,6 +28,7 @@ import (
 	sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
 	"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
 	"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
+	"github.com/filecoin-project/lotus/node/config"
 )
 
 const SectorStorePrefix = "/sectors"
@@ -66,6 +67,7 @@ type SealingAPI interface {
 	StateMinerPartitions(ctx context.Context, m address.Address, dlIdx uint64, tok TipSetToken) ([]api.Partition, error)
 	SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error)
 	ChainHead(ctx context.Context) (TipSetToken, abi.ChainEpoch, error)
+	ChainBaseFee(context.Context, TipSetToken) (abi.TokenAmount, error)
 	ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error)
 	ChainGetRandomnessFromBeacon(ctx context.Context, tok TipSetToken, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error)
 	ChainGetRandomnessFromTickets(ctx context.Context, tok TipSetToken, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error)
@@ -78,9 +80,11 @@ type AddrSel func(ctx context.Context, mi miner.MinerInfo, use api.AddrUse, good
 
 type Sealing struct {
 	api    SealingAPI
-	feeCfg FeeConfig
+	feeCfg config.MinerFeeConfig
 	events Events
 
+	startupWait sync.WaitGroup
+
 	maddr address.Address
 
 	sealer  sectorstorage.SectorManager
@@ -112,12 +116,6 @@ type Sealing struct {
 	dealInfo *CurrentDealInfoManager
 }
 
-type FeeConfig struct {
-	MaxPreCommitGasFee abi.TokenAmount
-	MaxCommitGasFee    abi.TokenAmount
-	MaxTerminateGasFee abi.TokenAmount
-}
-
 type openSector struct {
 	used abi.UnpaddedPieceSize // change to bitfield/rle when AddPiece gains offset support to better fill sectors
 
@@ -134,7 +132,7 @@ type pendingPiece struct {
 	accepted func(abi.SectorNumber, abi.UnpaddedPieceSize, error)
 }
 
-func New(api SealingAPI, fc FeeConfig, events Events, maddr address.Address, ds datastore.Batching, sealer sectorstorage.SectorManager, sc SectorIDCounter, verif ffiwrapper.Verifier, prov ffiwrapper.Prover, pcp PreCommitPolicy, gc GetSealingConfigFunc, notifee SectorStateNotifee, as AddrSel) *Sealing {
+func New(api SealingAPI, fc config.MinerFeeConfig, events Events, maddr address.Address, ds datastore.Batching, sealer sectorstorage.SectorManager, sc SectorIDCounter, verif ffiwrapper.Verifier, prov ffiwrapper.Prover, pcp PreCommitPolicy, gc GetSealingConfigFunc, notifee SectorStateNotifee, as AddrSel) *Sealing {
 	s := &Sealing{
 		api:    api,
 		feeCfg: fc,
@@ -166,6 +164,7 @@ func New(api SealingAPI, fc FeeConfig, events Events, maddr address.Address, ds
 			bySector: map[abi.SectorID]statSectorState{},
 		},
 	}
+	s.startupWait.Add(1)
 
 	s.sectors = statemachine.New(namespace.Wrap(ds, datastore.NewKey(SectorStorePrefix)), s, SectorInfo{})
 
@@ -193,10 +192,14 @@ func (m *Sealing) Stop(ctx context.Context) error {
 }
 
 func (m *Sealing) Remove(ctx context.Context, sid abi.SectorNumber) error {
+	m.startupWait.Wait()
+
 	return m.sectors.Send(uint64(sid), SectorRemove{})
 }
 
 func (m *Sealing) Terminate(ctx context.Context, sid abi.SectorNumber) error {
+	m.startupWait.Wait()
+
 	return m.sectors.Send(uint64(sid), SectorTerminate{})
 }
 
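
The new startupWait group makes Remove and Terminate block until startup work has finished. New adds one unit to the group; the matching Done is presumably signalled once existing sectors have been restarted, which is not part of this hunk. The gating pattern in isolation, as a standalone sketch rather than Lotus code:

package main

import (
	"fmt"
	"sync"
	"time"
)

// sealingStub shows the startupWait pattern: callers block until startup completes.
type sealingStub struct {
	startupWait sync.WaitGroup
}

func newSealingStub() *sealingStub {
	s := &sealingStub{}
	s.startupWait.Add(1)
	go func() {
		time.Sleep(10 * time.Millisecond) // stand-in for restarting sectors at startup
		s.startupWait.Done()
	}()
	return s
}

func (s *sealingStub) Remove() {
	s.startupWait.Wait() // same gate as Sealing.Remove / Sealing.Terminate above
	fmt.Println("removal can proceed")
}

func main() {
	newSealingStub().Remove()
}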
extern/storage-sealing/sector_state.go (vendored, 10 lines changed)

@@ -17,6 +17,8 @@ var ExistSectorStateList = map[SectorState]struct{}{
 	PreCommitBatchWait: {},
 	WaitSeed:           {},
 	Committing:         {},
+	CommitFinalize:       {},
+	CommitFinalizeFailed: {},
 	SubmitCommit:          {},
 	CommitWait:            {},
 	SubmitCommitAggregate: {},
@@ -63,8 +65,10 @@ const (
 	SubmitPreCommitBatch SectorState = "SubmitPreCommitBatch"
 	PreCommitBatchWait   SectorState = "PreCommitBatchWait"
 
 	WaitSeed   SectorState = "WaitSeed"   // waiting for seed
 	Committing SectorState = "Committing" // compute PoRep
+	CommitFinalize       SectorState = "CommitFinalize" // cleanup sector metadata before submitting the proof (early finalize)
+	CommitFinalizeFailed SectorState = "CommitFinalizeFailed"
 
 	// single commit
 	SubmitCommit SectorState = "SubmitCommit" // send commit message to the chain
@@ -106,7 +110,7 @@ func toStatState(st SectorState) statSectorState {
 	switch st {
 	case UndefinedSectorState, Empty, WaitDeals, AddPiece:
 		return sstStaging
-	case Packing, GetTicket, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, SubmitPreCommitBatch, PreCommitBatchWait, WaitSeed, Committing, SubmitCommit, CommitWait, SubmitCommitAggregate, CommitAggregateWait, FinalizeSector:
+	case Packing, GetTicket, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, SubmitPreCommitBatch, PreCommitBatchWait, WaitSeed, Committing, CommitFinalize, SubmitCommit, CommitWait, SubmitCommitAggregate, CommitAggregateWait, FinalizeSector:
 		return sstSealing
 	case Proving, Removed, Removing, Terminating, TerminateWait, TerminateFinality, TerminateFailed:
 		return sstProving
extern/storage-sealing/states_sealing.go (vendored, 39 lines changed)

@@ -334,7 +334,7 @@ func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInf
 		return nil
 	}
 
-	goodFunds := big.Add(deposit, m.feeCfg.MaxPreCommitGasFee)
+	goodFunds := big.Add(deposit, big.Int(m.feeCfg.MaxPreCommitGasFee))
 
 	from, _, err := m.addrSel(ctx.Context(), mi, api.PreCommitAddr, goodFunds, deposit)
 	if err != nil {
@@ -342,7 +342,7 @@ func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInf
 	}
 
 	log.Infof("submitting precommit for sector %d (deposit: %s): ", sector.SectorNumber, deposit)
-	mcid, err := m.api.SendMsg(ctx.Context(), from, m.maddr, miner.Methods.PreCommitSector, deposit, m.feeCfg.MaxPreCommitGasFee, enc.Bytes())
+	mcid, err := m.api.SendMsg(ctx.Context(), from, m.maddr, miner.Methods.PreCommitSector, deposit, big.Int(m.feeCfg.MaxPreCommitGasFee), enc.Bytes())
 	if err != nil {
 		if params.ReplaceCapacity {
 			m.remarkForUpgrade(params.ReplaceSectorNumber)
@@ -478,6 +478,11 @@ func (m *Sealing) handleCommitting(ctx statemachine.Context, sector SectorInfo)
 		}
 	}
 
+	cfg, err := m.getConfig()
+	if err != nil {
+		return xerrors.Errorf("getting config: %w", err)
+	}
+
 	log.Info("scheduling seal proof computation...")
 
 	log.Infof("KOMIT %d %x(%d); %x(%d); %v; r:%x; d:%x", sector.SectorNumber, sector.TicketValue, sector.TicketEpoch, sector.SeedValue, sector.SeedEpoch, sector.pieceInfos(), sector.CommR, sector.CommD)
@@ -500,6 +505,24 @@ func (m *Sealing) handleCommitting(ctx statemachine.Context, sector SectorInfo)
 		return ctx.Send(SectorComputeProofFailed{xerrors.Errorf("computing seal proof failed(2): %w", err)})
 	}
 
+	{
+		tok, _, err := m.api.ChainHead(ctx.Context())
+		if err != nil {
+			log.Errorf("handleCommitting: api error, not proceeding: %+v", err)
+			return nil
+		}
+
+		if err := m.checkCommit(ctx.Context(), sector, proof, tok); err != nil {
+			return ctx.Send(SectorComputeProofFailed{xerrors.Errorf("commit check error: %w", err)})
+		}
+	}
+
+	if cfg.FinalizeEarly {
+		return ctx.Send(SectorProofReady{
+			Proof: proof,
+		})
+	}
+
 	return ctx.Send(SectorCommitted{
 		Proof: proof,
 	})
@@ -524,7 +547,7 @@ func (m *Sealing) handleSubmitCommit(ctx statemachine.Context, sector SectorInfo
 
 	tok, _, err := m.api.ChainHead(ctx.Context())
 	if err != nil {
-		log.Errorf("handleCommitting: api error, not proceeding: %+v", err)
+		log.Errorf("handleSubmitCommit: api error, not proceeding: %+v", err)
 		return nil
 	}
 
@@ -566,7 +589,7 @@ func (m *Sealing) handleSubmitCommit(ctx statemachine.Context, sector SectorInfo
 		collateral = big.Zero()
 	}
 
-	goodFunds := big.Add(collateral, m.feeCfg.MaxCommitGasFee)
+	goodFunds := big.Add(collateral, big.Int(m.feeCfg.MaxCommitGasFee))
 
 	from, _, err := m.addrSel(ctx.Context(), mi, api.CommitAddr, goodFunds, collateral)
 	if err != nil {
@@ -574,7 +597,7 @@ func (m *Sealing) handleSubmitCommit(ctx statemachine.Context, sector SectorInfo
 	}
 
 	// TODO: check seed / ticket / deals are up to date
-	mcid, err := m.api.SendMsg(ctx.Context(), from, m.maddr, miner.Methods.ProveCommitSector, collateral, m.feeCfg.MaxCommitGasFee, enc.Bytes())
+	mcid, err := m.api.SendMsg(ctx.Context(), from, m.maddr, miner.Methods.ProveCommitSector, collateral, big.Int(m.feeCfg.MaxCommitGasFee), enc.Bytes())
 	if err != nil {
 		return ctx.Send(SectorCommitFailed{xerrors.Errorf("pushing message to mpool: %w", err)})
 	}
@@ -590,15 +613,15 @@ func (m *Sealing) handleSubmitCommitAggregate(ctx statemachine.Context, sector S
 	}
 
 	res, err := m.commiter.AddCommit(ctx.Context(), sector, AggregateInput{
-		info: proof.AggregateSealVerifyInfo{
+		Info: proof.AggregateSealVerifyInfo{
 			Number:                sector.SectorNumber,
 			Randomness:            sector.TicketValue,
 			InteractiveRandomness: sector.SeedValue,
 			SealedCID:             *sector.CommR,
 			UnsealedCID:           *sector.CommD,
 		},
-		proof: sector.Proof, // todo: this correct??
-		spt:   sector.SectorType,
+		Proof: sector.Proof, // todo: this correct??
+		Spt:   sector.SectorType,
 	})
 	if err != nil {
 		return ctx.Send(SectorCommitFailed{xerrors.Errorf("queuing commit for aggregation failed: %w", err)})
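
The new big.Int(...) casts appear because the fee caps now come from config.MinerFeeConfig, whose fields are types.FIL, a formatting-oriented wrapper around the same big-integer type, while big.Add and SendMsg expect a plain abi.TokenAmount. A small sketch of the conversion, assuming types.FIL converts directly to big.Int as the casts in this diff imply:

package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/big"

	"github.com/filecoin-project/lotus/chain/types"
)

func main() {
	fee := types.FIL(types.FromFil(1)) // fee cap carried as types.FIL for display
	deposit := types.FromFil(5)        // plain big integer (attoFIL)

	// Same shape as: goodFunds := big.Add(deposit, big.Int(m.feeCfg.MaxPreCommitGasFee))
	goodFunds := big.Add(deposit, big.Int(fee))
	fmt.Println(goodFunds) // 6 FIL, printed in attoFIL
}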
extern/storage-sealing/terminate_batch.go (vendored, 9 lines changed)

@@ -19,6 +19,7 @@ import (
 
 	"github.com/filecoin-project/lotus/api"
 	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+	"github.com/filecoin-project/lotus/node/config"
 )
 
 type TerminateBatcherApi interface {
@@ -34,7 +35,7 @@ type TerminateBatcher struct {
 	maddr     address.Address
 	mctx      context.Context
 	addrSel   AddrSel
-	feeCfg    FeeConfig
+	feeCfg    config.MinerFeeConfig
 	getConfig GetSealingConfigFunc
 
 	todo map[SectorLocation]*bitfield.BitField // MinerSectorLocation -> BitField
@@ -46,7 +47,7 @@ type TerminateBatcher struct {
 	lk sync.Mutex
 }
 
-func NewTerminationBatcher(mctx context.Context, maddr address.Address, api TerminateBatcherApi, addrSel AddrSel, feeCfg FeeConfig, getConfig GetSealingConfigFunc) *TerminateBatcher {
+func NewTerminationBatcher(mctx context.Context, maddr address.Address, api TerminateBatcherApi, addrSel AddrSel, feeCfg config.MinerFeeConfig, getConfig GetSealingConfigFunc) *TerminateBatcher {
 	b := &TerminateBatcher{
 		api:   api,
 		maddr: maddr,
@@ -214,12 +215,12 @@ func (b *TerminateBatcher) processBatch(notif, after bool) (*cid.Cid, error) {
 		return nil, xerrors.Errorf("couldn't get miner info: %w", err)
 	}
 
-	from, _, err := b.addrSel(b.mctx, mi, api.TerminateSectorsAddr, b.feeCfg.MaxTerminateGasFee, b.feeCfg.MaxTerminateGasFee)
+	from, _, err := b.addrSel(b.mctx, mi, api.TerminateSectorsAddr, big.Int(b.feeCfg.MaxTerminateGasFee), big.Int(b.feeCfg.MaxTerminateGasFee))
 	if err != nil {
 		return nil, xerrors.Errorf("no good address found: %w", err)
 	}
 
-	mcid, err := b.api.SendMsg(b.mctx, from, b.maddr, miner.Methods.TerminateSectors, big.Zero(), b.feeCfg.MaxTerminateGasFee, enc.Bytes())
+	mcid, err := b.api.SendMsg(b.mctx, from, b.maddr, miner.Methods.TerminateSectors, big.Zero(), big.Int(b.feeCfg.MaxTerminateGasFee), enc.Bytes())
 	if err != nil {
 		return nil, xerrors.Errorf("sending message failed: %w", err)
 	}
go.mod (4 lines changed)

@@ -39,7 +39,7 @@ require (
 	github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec
 	github.com/filecoin-project/go-multistore v0.0.3
 	github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20
-	github.com/filecoin-project/go-paramfetch v0.0.2-0.20210330140417-936748d3f5ec
+	github.com/filecoin-project/go-paramfetch v0.0.2-0.20210614165157-25a6c7769498
 	github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48
 	github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe
 	github.com/filecoin-project/go-statestore v0.1.1
@@ -48,7 +48,7 @@ require (
 	github.com/filecoin-project/specs-actors/v2 v2.3.5
 	github.com/filecoin-project/specs-actors/v3 v3.1.1
 	github.com/filecoin-project/specs-actors/v4 v4.0.1
-	github.com/filecoin-project/specs-actors/v5 v5.0.0-20210602024058-0c296bb386bf
+	github.com/filecoin-project/specs-actors/v5 v5.0.0-20210609212542-73e0409ac77c
 	github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506
 	github.com/filecoin-project/test-vectors/schema v0.0.5
 	github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1
go.sum (10 lines changed)

@@ -298,8 +298,8 @@ github.com/filecoin-project/go-multistore v0.0.3 h1:vaRBY4YiA2UZFPK57RNuewypB8u0
 github.com/filecoin-project/go-multistore v0.0.3/go.mod h1:kaNqCC4IhU4B1uyr7YWFHd23TL4KM32aChS0jNkyUvQ=
 github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20 h1:+/4aUeUoKr6AKfPE3mBhXA5spIV6UcKdTYDPNU2Tdmg=
 github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20/go.mod h1:mPn+LRRd5gEKNAtc+r3ScpW2JRU/pj4NBKdADYWHiak=
-github.com/filecoin-project/go-paramfetch v0.0.2-0.20210330140417-936748d3f5ec h1:gExwWUiT1TcARkxGneS4nvp9C+wBsKU0bFdg7qFpNco=
-github.com/filecoin-project/go-paramfetch v0.0.2-0.20210330140417-936748d3f5ec/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc=
+github.com/filecoin-project/go-paramfetch v0.0.2-0.20210614165157-25a6c7769498 h1:G10ezOvpH1CLXQ19EA9VWNwyL0mg536ujSayjV0yg0k=
+github.com/filecoin-project/go-paramfetch v0.0.2-0.20210614165157-25a6c7769498/go.mod h1:1FH85P8U+DUEmWk1Jkw3Bw7FrwTVUNHk/95PSPG+dts=
 github.com/filecoin-project/go-state-types v0.0.0-20200903145444-247639ffa6ad/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I=
 github.com/filecoin-project/go-state-types v0.0.0-20200904021452-1883f36ca2f4/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I=
 github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
@@ -331,8 +331,8 @@ github.com/filecoin-project/specs-actors/v4 v4.0.0/go.mod h1:TkHXf/l7Wyw4ZejyXIP
 github.com/filecoin-project/specs-actors/v4 v4.0.1 h1:AiWrtvJZ63MHGe6rn7tPu4nSUY8bA1KDNszqJaD5+Fg=
 github.com/filecoin-project/specs-actors/v4 v4.0.1/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng=
 github.com/filecoin-project/specs-actors/v5 v5.0.0-20210512015452-4fe3889fff57/go.mod h1:283yBMMUSDB2abcjP/hhrwTkhb9h3sfM6KGrep/ZlBI=
-github.com/filecoin-project/specs-actors/v5 v5.0.0-20210602024058-0c296bb386bf h1:xt9A1omyhSDbQvpVk7Na1J15a/n8y0y4GQDLeiWLpFs=
-github.com/filecoin-project/specs-actors/v5 v5.0.0-20210602024058-0c296bb386bf/go.mod h1:b/btpRl84Q9SeDKlyIoORBQwe2OTmq14POrYrVvBWCM=
+github.com/filecoin-project/specs-actors/v5 v5.0.0-20210609212542-73e0409ac77c h1:GnDJ6q3QEm2ytTKjPFQSvczAltgCSb3j9F1FeynwvPA=
+github.com/filecoin-project/specs-actors/v5 v5.0.0-20210609212542-73e0409ac77c/go.mod h1:b/btpRl84Q9SeDKlyIoORBQwe2OTmq14POrYrVvBWCM=
 github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 h1:Ur/l2+6qN+lQiqjozWWc5p9UDaAMDZKTlDS98oRnlIw=
 github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g=
 github.com/filecoin-project/test-vectors/schema v0.0.5 h1:w3zHQhzM4pYxJDl21avXjOKBLF8egrvwUwjpT8TquDg=
@@ -705,6 +705,8 @@ github.com/ipfs/go-log/v2 v2.1.2-0.20200626104915-0016c0b4b3e4/go.mod h1:2v2nsGf
 github.com/ipfs/go-log/v2 v2.1.2/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM=
 github.com/ipfs/go-log/v2 v2.1.3 h1:1iS3IU7aXRlbgUpN8yTTpJ53NXYjAe37vcI5+5nYrzk=
 github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g=
+github.com/ipfs/go-log/v2 v2.1.3 h1:1iS3IU7aXRlbgUpN8yTTpJ53NXYjAe37vcI5+5nYrzk=
+github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g=
 github.com/ipfs/go-merkledag v0.0.3/go.mod h1:Oc5kIXLHokkE1hWGMBHw+oxehkAaTOqtEb7Zbh6BhLA=
 github.com/ipfs/go-merkledag v0.0.6/go.mod h1:QYPdnlvkOg7GnQRofu9XZimC5ZW5Wi3bKys/4GQQfto=
 github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk=
@@ -10,16 +10,15 @@ import (
 	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/go-state-types/big"
 	"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
-	"github.com/filecoin-project/lotus/itests/kit"
+	"github.com/filecoin-project/lotus/itests/kit2"
 	"github.com/filecoin-project/lotus/markets/storageadapter"
 	"github.com/filecoin-project/lotus/node"
-	"github.com/filecoin-project/lotus/node/impl"
 	"github.com/filecoin-project/lotus/node/modules/dtypes"
 	"github.com/stretchr/testify/require"
 )
 
 func TestBatchDealInput(t *testing.T) {
-	kit.QuietMiningLogs()
+	kit2.QuietMiningLogs()
 
 	var (
 		blockTime = 10 * time.Millisecond
@@ -32,50 +31,40 @@ func TestBatchDealInput(t *testing.T) {
 
 	run := func(piece, deals, expectSectors int) func(t *testing.T) {
 		return func(t *testing.T) {
+			ctx := context.Background()
+
 			publishPeriod := 10 * time.Second
 			maxDealsPerMsg := uint64(deals)
 
 			// Set max deals per publish deals message to maxDealsPerMsg
-			minerDef := []kit.StorageMiner{{
-				Full: 0,
-				Opts: node.Options(
-					node.Override(
-						new(*storageadapter.DealPublisher),
-						storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
-							Period:         publishPeriod,
-							MaxDealsPerMsg: maxDealsPerMsg,
-						})),
-					node.Override(new(dtypes.GetSealingConfigFunc), func() (dtypes.GetSealingConfigFunc, error) {
-						return func() (sealiface.Config, error) {
-							return sealiface.Config{
-								MaxWaitDealsSectors:       2,
-								MaxSealingSectors:         1,
-								MaxSealingSectorsForDeals: 3,
-								AlwaysKeepUnsealedCopy:    true,
-								WaitDealsDelay:            time.Hour,
-							}, nil
-						}, nil
-					}),
-				),
-				Preseal: kit.PresealGenesis,
-			}}
-
-			// Create a connect client and miner node
-			n, sn := kit.MockMinerBuilder(t, kit.OneFull, minerDef)
-			client := n[0].FullNode.(*impl.FullNodeAPI)
-			miner := sn[0]
-
-			blockMiner := kit.ConnectAndStartMining(t, blockTime, miner, client)
-			t.Cleanup(blockMiner.Stop)
-
-			dh := kit.NewDealHarness(t, client, miner)
-			ctx := context.Background()
+			opts := kit2.ConstructorOpts(node.Options(
+				node.Override(
+					new(*storageadapter.DealPublisher),
+					storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
+						Period:         publishPeriod,
+						MaxDealsPerMsg: maxDealsPerMsg,
+					})),
+				node.Override(new(dtypes.GetSealingConfigFunc), func() (dtypes.GetSealingConfigFunc, error) {
+					return func() (sealiface.Config, error) {
+						return sealiface.Config{
+							MaxWaitDealsSectors:       2,
+							MaxSealingSectors:         1,
+							MaxSealingSectorsForDeals: 3,
+							AlwaysKeepUnsealedCopy:    true,
+							WaitDealsDelay:            time.Hour,
+						}, nil
+					}, nil
+				}),
+			))
+			client, miner, ens := kit2.EnsembleMinimal(t, kit2.MockProofs(), opts)
+			ens.InterconnectAll().BeginMining(blockTime)
+			dh := kit2.NewDealHarness(t, client, miner)
 
 			err := miner.MarketSetAsk(ctx, big.Zero(), big.Zero(), 200, 128, 32<<30)
 			require.NoError(t, err)
 
 			checkNoPadding := func() {
-				sl, err := sn[0].SectorsList(ctx)
+				sl, err := miner.SectorsList(ctx)
 				require.NoError(t, err)
 
 				sort.Slice(sl, func(i, j int) bool {
@@ -83,7 +72,7 @@ func TestBatchDealInput(t *testing.T) {
 				})
 
 				for _, snum := range sl {
-					si, err := sn[0].SectorsStatus(ctx, snum, false)
+					si, err := miner.SectorsStatus(ctx, snum, false)
 					require.NoError(t, err)
 
 					// fmt.Printf("S %d: %+v %s\n", snum, si.Deals, si.State)
@@ -98,7 +87,7 @@ func TestBatchDealInput(t *testing.T) {
 
 			// Starts a deal and waits until it's published
 			runDealTillSeal := func(rseed int) {
-				res, _, _, err := kit.CreateImportFile(ctx, client, rseed, piece)
+				res, _, _, err := kit2.CreateImportFile(ctx, client, rseed, piece)
 				require.NoError(t, err)
 
 				deal := dh.StartDeal(ctx, res.Root, false, dealStartEpoch)
@@ -122,7 +111,7 @@ func TestBatchDealInput(t *testing.T) {
 
 			checkNoPadding()
 
-			sl, err := sn[0].SectorsList(ctx)
+			sl, err := miner.SectorsList(ctx)
 			require.NoError(t, err)
 			require.Equal(t, len(sl), expectSectors)
 		}
@@ -3,21 +3,19 @@ package itests
 import (
 	"context"
 	"fmt"
-	"sync/atomic"
 	"testing"
 	"time"
 
-	"github.com/filecoin-project/lotus/itests/kit"
+	"github.com/filecoin-project/lotus/itests/kit2"
 	"github.com/stretchr/testify/require"
 
 	"github.com/filecoin-project/go-state-types/abi"
 
 	"github.com/filecoin-project/lotus/chain/types"
-	"github.com/filecoin-project/lotus/node/impl"
 )
 
 func TestCCUpgrade(t *testing.T) {
-	kit.QuietMiningLogs()
+	kit2.QuietMiningLogs()
 
 	for _, height := range []abi.ChainEpoch{
 		-1, // before
@@ -27,60 +25,33 @@ func TestCCUpgrade(t *testing.T) {
 	} {
 		height := height // make linters happy by copying
 		t.Run(fmt.Sprintf("upgrade-%d", height), func(t *testing.T) {
-			runTestCCUpgrade(t, kit.MockMinerBuilder, 5*time.Millisecond, height)
+			runTestCCUpgrade(t, height)
 		})
 	}
 }
 
-func runTestCCUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Duration, upgradeHeight abi.ChainEpoch) {
+func runTestCCUpgrade(t *testing.T, upgradeHeight abi.ChainEpoch) {
 	ctx := context.Background()
-	n, sn := b(t, []kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(upgradeHeight)}, kit.OneMiner)
-	client := n[0].FullNode.(*impl.FullNodeAPI)
-	miner := sn[0]
-
-	addrinfo, err := client.NetAddrsListen(ctx)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if err := miner.NetConnect(ctx, addrinfo); err != nil {
-		t.Fatal(err)
-	}
-	time.Sleep(time.Second)
-
-	mine := int64(1)
-	done := make(chan struct{})
-	go func() {
-		defer close(done)
-		for atomic.LoadInt64(&mine) == 1 {
-			time.Sleep(blocktime)
-			if err := sn[0].MineOne(ctx, kit.MineNext); err != nil {
-				t.Error(err)
-			}
-		}
-	}()
+	blockTime := 5 * time.Millisecond
+
+	opts := kit2.ConstructorOpts(kit2.LatestActorsAt(upgradeHeight))
+	client, miner, ens := kit2.EnsembleMinimal(t, kit2.MockProofs(), opts)
+	ens.InterconnectAll().BeginMining(blockTime)
 
 	maddr, err := miner.ActorAddress(ctx)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	CC := abi.SectorNumber(kit.GenesisPreseals + 1)
+	CC := abi.SectorNumber(kit2.DefaultPresealsPerBootstrapMiner + 1)
 	Upgraded := CC + 1
 
-	kit.PledgeSectors(t, ctx, miner, 1, 0, nil)
+	miner.PledgeSectors(ctx, 1, 0, nil)
 
 	sl, err := miner.SectorsList(ctx)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if len(sl) != 1 {
-		t.Fatal("expected 1 sector")
-	}
-
-	if sl[0] != CC {
-		t.Fatal("bad")
-	}
+	require.NoError(t, err)
+	require.Len(t, sl, 1, "expected 1 sector")
+	require.Equal(t, CC, sl[0], "unexpected sector number")
 
 	{
 		si, err := client.StateSectorGetInfo(ctx, maddr, CC, types.EmptyTSK)
@@ -88,13 +59,12 @@ func runTestCCUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Duration, u
 		require.Less(t, 50000, int(si.Expiration))
 	}
 
-	if err := miner.SectorMarkForUpgrade(ctx, sl[0]); err != nil {
-		t.Fatal(err)
-	}
+	err = miner.SectorMarkForUpgrade(ctx, sl[0])
+	require.NoError(t, err)
 
-	dh := kit.NewDealHarness(t, client, miner)
+	dh := kit2.NewDealHarness(t, client, miner)
 
-	dh.MakeFullDeal(context.Background(), 6, false, false, 0)
+	dh.MakeOnlineDeal(context.Background(), 6, false, 0)
 
 	// Validate upgrade
 
@@ -123,10 +93,6 @@ func runTestCCUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Duration, u
 		}
 		t.Log("waiting for sector to expire")
 		// wait one deadline per loop.
-		time.Sleep(time.Duration(dlInfo.WPoStChallengeWindow) * blocktime)
+		time.Sleep(time.Duration(dlInfo.WPoStChallengeWindow) * blockTime)
 	}
 
-	fmt.Println("shutting down mining")
-	atomic.AddInt64(&mine, -1)
-	<-done
 }
@@ -3,6 +3,7 @@ package kit
 import (
 	"fmt"
 	"os"
+	"strings"
 
 	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/lotus/build"
@@ -11,6 +12,11 @@ import (
 )
 
 func init() {
+	bin := os.Args[0]
+	if !strings.HasSuffix(bin, ".test") {
+		panic("package itests/kit must only be imported from tests")
+	}
+
 	_ = logging.SetLogLevel("*", "INFO")
 
 	policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
@@ -22,4 +28,5 @@ func init() {
 		panic(fmt.Sprintf("failed to set BELLMAN_NO_GPU env variable: %s", err))
 	}
 	build.InsecurePoStValidation = true
+
 }
@ -13,6 +13,7 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/exitcode"
+"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/go-storedcounter"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/v1api"
@ -114,15 +115,26 @@ type Ensemble struct {
}
}

-// NewEnsemble instantiates a new blank Ensemble. This enables you to
+// NewEnsemble instantiates a new blank Ensemble.
-// programmatically
func NewEnsemble(t *testing.T, opts ...EnsembleOpt) *Ensemble {
options := DefaultEnsembleOpts
for _, o := range opts {
err := o(&options)
require.NoError(t, err)
}
-return &Ensemble{t: t, options: &options}
+n := &Ensemble{t: t, options: &options}

+// add accounts from ensemble options to genesis.
+for _, acc := range options.accounts {
+n.genesis.accounts = append(n.genesis.accounts, genesis.Actor{
+Type: genesis.TAccount,
+Balance: acc.initialBalance,
+Meta: (&genesis.AccountMeta{Owner: acc.key.Address}).ActorMeta(),
+})
+}

+return n
}

// FullNode enrolls a new full node.
@ -135,8 +147,7 @@ func (n *Ensemble) FullNode(full *TestFullNode, opts ...NodeOpt) *Ensemble {

var key *wallet.Key
if !n.bootstrapped && !options.balance.IsZero() {
-// create a key+ddress, and assign it some FIL.
+// create a key+address, and assign it some FIL; this will be set as the default wallet.
-// this will be set as the default wallet.
var err error
key, err = wallet.GenerateKey(types.KTBLS)
require.NoError(n.t, err)
@ -594,12 +605,22 @@ func (n *Ensemble) BeginMining(blocktime time.Duration, miners ...*TestMiner) []
}

func (n *Ensemble) generateGenesis() *genesis.Template {
+var verifRoot = gen.DefaultVerifregRootkeyActor
+if k := n.options.verifiedRoot.key; k != nil {
+verifRoot = genesis.Actor{
+Type: genesis.TAccount,
+Balance: n.options.verifiedRoot.initialBalance,
+Meta: (&genesis.AccountMeta{Owner: k.Address}).ActorMeta(),
+}
+}

templ := &genesis.Template{
+NetworkVersion: network.Version0,
Accounts: n.genesis.accounts,
Miners: n.genesis.miners,
NetworkName: "test",
-Timestamp: uint64(time.Now().Unix() - 10000), // some time sufficiently far in the past
+Timestamp: uint64(time.Now().Unix() - int64(n.options.pastOffset.Seconds())),
-VerifregRootKey: gen.DefaultVerifregRootkeyActor,
+VerifregRootKey: verifRoot,
RemainderAccount: gen.DefaultRemainderAccountActor,
}

@ -4,18 +4,26 @@ import (
"time"

"github.com/filecoin-project/go-state-types/abi"
+"github.com/filecoin-project/lotus/chain/wallet"
)

type EnsembleOpt func(opts *ensembleOpts) error

+type genesisAccount struct {
+key *wallet.Key
+initialBalance abi.TokenAmount
+}

type ensembleOpts struct {
pastOffset time.Duration
proofType abi.RegisteredSealProof
-mockProofs bool
+verifiedRoot genesisAccount
+accounts []genesisAccount
+mockProofs bool
}

var DefaultEnsembleOpts = ensembleOpts{
-pastOffset: 10000 * time.Second,
+pastOffset: 100000 * time.Second, // time sufficiently in the past to trigger catch-up mining.
proofType: abi.RegisteredSealProof_StackedDrg2KiBV1,
}

@ -33,3 +41,24 @@ func MockProofs() EnsembleOpt {
return nil
}
}

+// RootVerifier specifies the key to be enlisted as the verified registry root,
+// as well as the initial balance to be attributed during genesis.
+func RootVerifier(key *wallet.Key, balance abi.TokenAmount) EnsembleOpt {
+return func(opts *ensembleOpts) error {
+opts.verifiedRoot.key = key
+opts.verifiedRoot.initialBalance = balance
+return nil
+}
+}

+// Account sets up an account at genesis with the specified key and balance.
+func Account(key *wallet.Key, balance abi.TokenAmount) EnsembleOpt {
+return func(opts *ensembleOpts) error {
+opts.accounts = append(opts.accounts, genesisAccount{
+key: key,
+initialBalance: balance,
+})
+return nil
+}
+}
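Note (not part of the diff): the Account and RootVerifier options above only register keys and balances for genesis; a test still has to build an ensemble with them. A minimal sketch of the intended usage, assuming the kit2.EnsembleMinimal helper used later in this changeset:

package itests

import (
	"testing"
	"time"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/chain/wallet"
	"github.com/filecoin-project/lotus/itests/kit2"
	"github.com/stretchr/testify/require"
)

func TestGenesisAccountsSketch(t *testing.T) {
	// generate keys to be enlisted at genesis.
	rootKey, err := wallet.GenerateKey(types.KTSecp256k1)
	require.NoError(t, err)
	verifierKey, err := wallet.GenerateKey(types.KTSecp256k1)
	require.NoError(t, err)

	// enlist rootKey as the verified registry root, and fund verifierKey's account.
	_, _, ens := kit2.EnsembleMinimal(t, kit2.MockProofs(),
		kit2.RootVerifier(rootKey, abi.NewTokenAmount(1e18)),
		kit2.Account(verifierKey, abi.NewTokenAmount(1e18)))
	ens.InterconnectAll().BeginMining(5 * time.Millisecond)
}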
@ -8,7 +8,7 @@ import (
func QuietMiningLogs() {
lotuslog.SetupLogLevels()

-_ = logging.SetLogLevel("miner", "ERROR")
+_ = logging.SetLogLevel("miner", "ERROR") // set this to INFO to watch mining happen.
_ = logging.SetLogLevel("chainstore", "ERROR")
_ = logging.SetLogLevel("chain", "ERROR")
_ = logging.SetLogLevel("sub", "ERROR")
@ -1,36 +1,64 @@
package kit2

import (
+"context"

"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/chain/stmgr"
+"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/node"
+"github.com/ipfs/go-cid"
)

+// DefaultTestUpgradeSchedule
+var DefaultTestUpgradeSchedule = stmgr.UpgradeSchedule{{
+Network: network.Version9,
+Height: 1,
+Migration: stmgr.UpgradeActorsV2,
+}, {
+Network: network.Version10,
+Height: 2,
+Migration: stmgr.UpgradeActorsV3,
+}, {
+Network: network.Version12,
+Height: 3,
+Migration: stmgr.UpgradeActorsV4,
+}, {
+Network: network.Version13,
+Height: 4,
+Migration: stmgr.UpgradeActorsV5,
+}}

func LatestActorsAt(upgradeHeight abi.ChainEpoch) node.Option {
// Attention: Update this when introducing new actor versions or your tests will be sad
return NetworkUpgradeAt(network.Version13, upgradeHeight)
}

+// InstantaneousNetworkVersion starts the network instantaneously at the
+// specified version in height 1.
+func InstantaneousNetworkVersion(version network.Version) node.Option {
+// composes all migration functions
+var mf stmgr.MigrationFunc = func(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor, oldState cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (newState cid.Cid, err error) {
+var state = oldState
+for _, u := range DefaultTestUpgradeSchedule {
+if u.Network > version {
+break
+}
+state, err = u.Migration(ctx, sm, cache, cb, state, height, ts)
+if err != nil {
+return cid.Undef, err
+}
+}
+return state, nil
+}
+return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{
+{Network: version, Height: 1, Migration: mf},
+})
+}

func NetworkUpgradeAt(version network.Version, upgradeHeight abi.ChainEpoch) node.Option {
-fullSchedule := stmgr.UpgradeSchedule{{
+fullSchedule := stmgr.UpgradeSchedule{}
-// prepare for upgrade.
-Network: network.Version9,
-Height: 1,
-Migration: stmgr.UpgradeActorsV2,
-}, {
-Network: network.Version10,
-Height: 2,
-Migration: stmgr.UpgradeActorsV3,
-}, {
-Network: network.Version12,
-Height: 3,
-Migration: stmgr.UpgradeActorsV4,
-}, {
-Network: network.Version13,
-Height: 4,
-Migration: stmgr.UpgradeActorsV5,
-}}

schedule := stmgr.UpgradeSchedule{}
for _, upgrade := range fullSchedule {
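Note (not part of the diff): InstantaneousNetworkVersion above composes the test upgrade schedule into a single height-1 migration. A hedged sketch of how a test could opt into it, assuming the ConstructorOpts and EnsembleMinimal helpers used elsewhere in this changeset:

package itests

import (
	"testing"
	"time"

	"github.com/filecoin-project/go-state-types/network"
	"github.com/filecoin-project/lotus/itests/kit2"
)

func TestInstantaneousNVSketch(t *testing.T) {
	// start the mock network already migrated to actors v5 (network.Version13).
	opts := kit2.ConstructorOpts(kit2.InstantaneousNetworkVersion(network.Version13))
	_, _, ens := kit2.EnsembleMinimal(t, kit2.MockProofs(), opts)
	ens.InterconnectAll().BeginMining(5 * time.Millisecond)
}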
@ -2,14 +2,13 @@ package itests

import (
"context"
-"fmt"
"testing"
"time"

"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
-"github.com/filecoin-project/lotus/itests/kit"
"github.com/ipfs/go-cid"
+"github.com/stretchr/testify/require"

"github.com/filecoin-project/go-address"
cbor "github.com/ipfs/go-ipld-cbor"
@ -24,69 +23,52 @@ import (
"github.com/filecoin-project/lotus/chain/events"
"github.com/filecoin-project/lotus/chain/events/state"
"github.com/filecoin-project/lotus/chain/types"
+"github.com/filecoin-project/lotus/itests/kit2"
)

func TestPaymentChannelsAPI(t *testing.T) {
-kit.QuietMiningLogs()
+kit2.QuietMiningLogs()

ctx := context.Background()
-n, sn := kit.MockMinerBuilder(t, kit.TwoFull, kit.OneMiner)
+blockTime := 5 * time.Millisecond

-paymentCreator := n[0]
+var (
-paymentReceiver := n[1]
+paymentCreator kit2.TestFullNode
-miner := sn[0]
+paymentReceiver kit2.TestFullNode
+miner kit2.TestMiner
+)

-// get everyone connected
+ens := kit2.NewEnsemble(t, kit2.MockProofs()).
-addrs, err := paymentCreator.NetAddrsListen(ctx)
+FullNode(&paymentCreator).
-if err != nil {
+FullNode(&paymentReceiver).
-t.Fatal(err)
+Miner(&miner, &paymentCreator).
-}
+Start().
+InterconnectAll()
-if err := paymentReceiver.NetConnect(ctx, addrs); err != nil {
+bms := ens.BeginMining(blockTime)
-t.Fatal(err)
+bm := bms[0]
-}

-if err := miner.NetConnect(ctx, addrs); err != nil {
-t.Fatal(err)
-}

-// start mining blocks
-bm := kit.NewBlockMiner(t, miner)
-bm.MineBlocks(ctx, 5*time.Millisecond)
-t.Cleanup(bm.Stop)

// send some funds to register the receiver
receiverAddr, err := paymentReceiver.WalletNew(ctx, types.KTSecp256k1)
-if err != nil {
+require.NoError(t, err)
-t.Fatal(err)
-}

-kit.SendFunds(ctx, t, paymentCreator, receiverAddr, abi.NewTokenAmount(1e18))
+kit2.SendFunds(ctx, t, paymentCreator, receiverAddr, abi.NewTokenAmount(1e18))

// setup the payment channel
createrAddr, err := paymentCreator.WalletDefaultAddress(ctx)
-if err != nil {
+require.NoError(t, err)
-t.Fatal(err)
-}

channelAmt := int64(7000)
channelInfo, err := paymentCreator.PaychGet(ctx, createrAddr, receiverAddr, abi.NewTokenAmount(channelAmt))
-if err != nil {
+require.NoError(t, err)
-t.Fatal(err)
-}

channel, err := paymentCreator.PaychGetWaitReady(ctx, channelInfo.WaitSentinel)
-if err != nil {
+require.NoError(t, err)
-t.Fatal(err)
-}

// allocate three lanes
var lanes []uint64
for i := 0; i < 3; i++ {
lane, err := paymentCreator.PaychAllocateLane(ctx, channel)
-if err != nil {
+require.NoError(t, err)
-t.Fatal(err)
-}
lanes = append(lanes, lane)
}

@ -95,45 +77,28 @@ func TestPaymentChannelsAPI(t *testing.T) {
// supersedes the voucher with a value of 1000
for _, lane := range lanes {
vouch1, err := paymentCreator.PaychVoucherCreate(ctx, channel, abi.NewTokenAmount(1000), lane)
-if err != nil {
+require.NoError(t, err)
-t.Fatal(err)
+require.NotNil(t, vouch1.Voucher, "Not enough funds to create voucher: missing %d", vouch1.Shortfall)
-}
-if vouch1.Voucher == nil {
-t.Fatal(fmt.Errorf("Not enough funds to create voucher: missing %d", vouch1.Shortfall))
-}
vouch2, err := paymentCreator.PaychVoucherCreate(ctx, channel, abi.NewTokenAmount(2000), lane)
-if err != nil {
+require.NoError(t, err)
-t.Fatal(err)
+require.NotNil(t, vouch2.Voucher, "Not enough funds to create voucher: missing %d", vouch2.Shortfall)
-}
-if vouch2.Voucher == nil {
-t.Fatal(fmt.Errorf("Not enough funds to create voucher: missing %d", vouch2.Shortfall))
-}
delta1, err := paymentReceiver.PaychVoucherAdd(ctx, channel, vouch1.Voucher, nil, abi.NewTokenAmount(1000))
-if err != nil {
+require.NoError(t, err)
-t.Fatal(err)
+require.EqualValues(t, abi.NewTokenAmount(1000), delta1, "voucher didn't have the right amount")
-}
-if !delta1.Equals(abi.NewTokenAmount(1000)) {
-t.Fatal("voucher didn't have the right amount")
-}
delta2, err := paymentReceiver.PaychVoucherAdd(ctx, channel, vouch2.Voucher, nil, abi.NewTokenAmount(1000))
-if err != nil {
+require.NoError(t, err)
-t.Fatal(err)
+require.EqualValues(t, abi.NewTokenAmount(1000), delta2, "voucher didn't have the right amount")
-}
-if !delta2.Equals(abi.NewTokenAmount(1000)) {
-t.Fatal("voucher didn't have the right amount")
-}
}

// settle the payment channel
settleMsgCid, err := paymentCreator.PaychSettle(ctx, channel)
-if err != nil {
+require.NoError(t, err)
-t.Fatal(err)
-}

res := waitForMessage(ctx, t, paymentCreator, settleMsgCid, time.Second*10, "settle")
-if res.Receipt.ExitCode != 0 {
+require.EqualValues(t, 0, res.Receipt.ExitCode, "Unable to settle payment channel")
-t.Fatal("Unable to settle payment channel")
-}

creatorStore := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(paymentCreator)))

@ -170,9 +135,7 @@ func TestPaymentChannelsAPI(t *testing.T) {
}, int(build.MessageConfidence)+1, build.Finality, func(oldTs, newTs *types.TipSet) (bool, events.StateChange, error) {
return preds.OnPaymentChannelActorChanged(channel, preds.OnToSendAmountChanges())(ctx, oldTs.Key(), newTs.Key())
})
-if err != nil {
+require.NoError(t, err)
-t.Fatal(err)
-}

select {
case <-finished:
@ -182,75 +145,49 @@ func TestPaymentChannelsAPI(t *testing.T) {

// Create a new voucher now that some vouchers have already been submitted
vouchRes, err := paymentCreator.PaychVoucherCreate(ctx, channel, abi.NewTokenAmount(1000), 3)
-if err != nil {
+require.NoError(t, err)
-t.Fatal(err)
+require.NotNil(t, vouchRes.Voucher, "Not enough funds to create voucher: missing %d", vouchRes.Shortfall)
-}
-if vouchRes.Voucher == nil {
-t.Fatal(fmt.Errorf("Not enough funds to create voucher: missing %d", vouchRes.Shortfall))
-}
vdelta, err := paymentReceiver.PaychVoucherAdd(ctx, channel, vouchRes.Voucher, nil, abi.NewTokenAmount(1000))
-if err != nil {
+require.NoError(t, err)
-t.Fatal(err)
+require.EqualValues(t, abi.NewTokenAmount(1000), vdelta, "voucher didn't have the right amount")
-}
-if !vdelta.Equals(abi.NewTokenAmount(1000)) {
-t.Fatal("voucher didn't have the right amount")
-}

// Create a new voucher whose value would exceed the channel balance
excessAmt := abi.NewTokenAmount(1000)
vouchRes, err = paymentCreator.PaychVoucherCreate(ctx, channel, excessAmt, 4)
-if err != nil {
+require.NoError(t, err)
-t.Fatal(err)
+require.Nil(t, vouchRes.Voucher, "Expected not to be able to create voucher whose value would exceed channel balance")
-}
+require.EqualValues(t, excessAmt, vouchRes.Shortfall, "Expected voucher shortfall of %d, got %d", excessAmt, vouchRes.Shortfall)
-if vouchRes.Voucher != nil {
-t.Fatal("Expected not to be able to create voucher whose value would exceed channel balance")
-}
-if !vouchRes.Shortfall.Equals(excessAmt) {
-t.Fatal(fmt.Errorf("Expected voucher shortfall of %d, got %d", excessAmt, vouchRes.Shortfall))
-}

// Add a voucher whose value would exceed the channel balance
vouch := &paych.SignedVoucher{ChannelAddr: channel, Amount: excessAmt, Lane: 4, Nonce: 1}
vb, err := vouch.SigningBytes()
-if err != nil {
+require.NoError(t, err)
-t.Fatal(err)
-}
sig, err := paymentCreator.WalletSign(ctx, createrAddr, vb)
-if err != nil {
+require.NoError(t, err)
-t.Fatal(err)
-}
vouch.Signature = sig
_, err = paymentReceiver.PaychVoucherAdd(ctx, channel, vouch, nil, abi.NewTokenAmount(1000))
-if err == nil {
+require.Errorf(t, err, "Expected shortfall error of %d", excessAmt)
-t.Fatal(fmt.Errorf("Expected shortfall error of %d", excessAmt))
-}

// wait for the settlement period to pass before collecting
waitForBlocks(ctx, t, bm, paymentReceiver, receiverAddr, policy.PaychSettleDelay)

creatorPreCollectBalance, err := paymentCreator.WalletBalance(ctx, createrAddr)
-if err != nil {
+require.NoError(t, err)
-t.Fatal(err)
-}

// collect funds (from receiver, though either party can do it)
collectMsg, err := paymentReceiver.PaychCollect(ctx, channel)
-if err != nil {
+require.NoError(t, err)
-t.Fatal(err)
-}
res, err = paymentReceiver.StateWaitMsg(ctx, collectMsg, 3, api.LookbackNoLimit, true)
-if err != nil {
+require.NoError(t, err)
-t.Fatal(err)
+require.EqualValues(t, 0, res.Receipt.ExitCode, "unable to collect on payment channel")
-}
-if res.Receipt.ExitCode != 0 {
-t.Fatal("unable to collect on payment channel")
-}

// Finally, check the balance for the creator
currentCreatorBalance, err := paymentCreator.WalletBalance(ctx, createrAddr)
-if err != nil {
+require.NoError(t, err)
-t.Fatal(err)
-}

// The highest nonce voucher that the creator sent on each lane is 2000
totalVouchers := int64(len(lanes) * 2000)
@ -260,15 +197,10 @@ func TestPaymentChannelsAPI(t *testing.T) {
// channel amount - total voucher value
expectedRefund := channelAmt - totalVouchers
delta := big.Sub(currentCreatorBalance, creatorPreCollectBalance)
-if !delta.Equals(abi.NewTokenAmount(expectedRefund)) {
+require.EqualValues(t, abi.NewTokenAmount(expectedRefund), delta, "did not send correct funds from creator: expected %d, got %d", expectedRefund, delta)
-t.Fatalf("did not send correct funds from creator: expected %d, got %d", expectedRefund, delta)
-}

-// shut down mining
-bm.Stop()
}

-func waitForBlocks(ctx context.Context, t *testing.T, bm *kit.BlockMiner, paymentReceiver kit.TestFullNode, receiverAddr address.Address, count int) {
+func waitForBlocks(ctx context.Context, t *testing.T, bm *kit2.BlockMiner, paymentReceiver kit2.TestFullNode, receiverAddr address.Address, count int) {
// We need to add null blocks in batches, if we add too many the chain can't sync
batchSize := 60
for i := 0; i < count; i += batchSize {
@ -286,30 +218,23 @@ func waitForBlocks(ctx context.Context, t *testing.T, bm *kit.BlockMiner, paymen
From: receiverAddr,
Value: types.NewInt(0),
}, nil)
-if err != nil {
+require.NoError(t, err)
-t.Fatal(err)
-}

_, err = paymentReceiver.StateWaitMsg(ctx, m.Cid(), 1, api.LookbackNoLimit, true)
-if err != nil {
+require.NoError(t, err)
-t.Fatal(err)
-}
}
}

-func waitForMessage(ctx context.Context, t *testing.T, paymentCreator kit.TestFullNode, msgCid cid.Cid, duration time.Duration, desc string) *api.MsgLookup {
+func waitForMessage(ctx context.Context, t *testing.T, paymentCreator kit2.TestFullNode, msgCid cid.Cid, duration time.Duration, desc string) *api.MsgLookup {
ctx, cancel := context.WithTimeout(ctx, duration)
defer cancel()

-fmt.Println("Waiting for", desc)
+t.Log("Waiting for", desc)

res, err := paymentCreator.StateWaitMsg(ctx, msgCid, 1, api.LookbackNoLimit, true)
-if err != nil {
+require.NoError(t, err)
-fmt.Println("Error waiting for", desc, err)
+require.EqualValues(t, 0, res.Receipt.ExitCode, "did not successfully send %s", desc)
-t.Fatal(err)
-}
+t.Log("Confirmed", desc)
-if res.Receipt.ExitCode != 0 {
-t.Fatalf("did not successfully send %s", desc)
-}
-fmt.Println("Confirmed", desc)
return res
}
@ -11,7 +11,7 @@ import (
"time"

"github.com/filecoin-project/lotus/cli"
-"github.com/filecoin-project/lotus/itests/kit"
+"github.com/filecoin-project/lotus/itests/kit2"

"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
@ -37,18 +37,19 @@ func init() {
// commands
func TestPaymentChannelsBasic(t *testing.T) {
_ = os.Setenv("BELLMAN_NO_GPU", "1")
-kit.QuietMiningLogs()
+kit2.QuietMiningLogs()

blocktime := 5 * time.Millisecond
ctx := context.Background()
-nodes, addrs := kit.StartTwoNodesOneMiner(ctx, t, blocktime)
-paymentCreator := nodes[0]
+var (
-paymentReceiver := nodes[1]
+paymentCreator kit2.TestFullNode
-creatorAddr := addrs[0]
+paymentReceiver kit2.TestFullNode
-receiverAddr := addrs[1]
+)
+creatorAddr, receiverAddr := startPaychCreatorReceiverMiner(ctx, t, &paymentCreator, &paymentReceiver, blocktime)

// Create mock CLI
-mockCLI := kit.NewMockCLI(ctx, t, cli.Commands)
+mockCLI := kit2.NewMockCLI(ctx, t, cli.Commands)
creatorCLI := mockCLI.Client(paymentCreator.ListenAddr)
receiverCLI := mockCLI.Client(paymentReceiver.ListenAddr)

@ -70,12 +71,16 @@ func TestPaymentChannelsBasic(t *testing.T) {
// creator: paych settle <channel>
creatorCLI.RunCmd("paych", "settle", chAddr.String())

+t.Log("wait for chain to reach settle height")

// Wait for the chain to reach the settle height
chState := getPaychState(ctx, t, paymentReceiver, chAddr)
sa, err := chState.SettlingAt()
require.NoError(t, err)
waitForHeight(ctx, t, paymentReceiver, sa)

+t.Log("settle height reached")

// receiver: paych collect <channel>
receiverCLI.RunCmd("paych", "collect", chAddr.String())
}
@ -89,17 +94,18 @@ type voucherSpec struct {
// TestPaymentChannelStatus tests the payment channel status CLI command
func TestPaymentChannelStatus(t *testing.T) {
_ = os.Setenv("BELLMAN_NO_GPU", "1")
-kit.QuietMiningLogs()
+kit2.QuietMiningLogs()

blocktime := 5 * time.Millisecond
ctx := context.Background()
-nodes, addrs := kit.StartTwoNodesOneMiner(ctx, t, blocktime)
+var (
-paymentCreator := nodes[0]
+paymentCreator kit2.TestFullNode
-creatorAddr := addrs[0]
+paymentReceiver kit2.TestFullNode
-receiverAddr := addrs[1]
+)
+creatorAddr, receiverAddr := startPaychCreatorReceiverMiner(ctx, t, &paymentCreator, &paymentReceiver, blocktime)

// Create mock CLI
-mockCLI := kit.NewMockCLI(ctx, t, cli.Commands)
+mockCLI := kit2.NewMockCLI(ctx, t, cli.Commands)
creatorCLI := mockCLI.Client(paymentCreator.ListenAddr)

// creator: paych status-by-from-to <creator> <receiver>
@ -168,18 +174,18 @@ func TestPaymentChannelStatus(t *testing.T) {
// channel voucher commands
func TestPaymentChannelVouchers(t *testing.T) {
_ = os.Setenv("BELLMAN_NO_GPU", "1")
-kit.QuietMiningLogs()
+kit2.QuietMiningLogs()

blocktime := 5 * time.Millisecond
ctx := context.Background()
-nodes, addrs := kit.StartTwoNodesOneMiner(ctx, t, blocktime)
+var (
-paymentCreator := nodes[0]
+paymentCreator kit2.TestFullNode
-paymentReceiver := nodes[1]
+paymentReceiver kit2.TestFullNode
-creatorAddr := addrs[0]
+)
-receiverAddr := addrs[1]
+creatorAddr, receiverAddr := startPaychCreatorReceiverMiner(ctx, t, &paymentCreator, &paymentReceiver, blocktime)

// Create mock CLI
-mockCLI := kit.NewMockCLI(ctx, t, cli.Commands)
+mockCLI := kit2.NewMockCLI(ctx, t, cli.Commands)
creatorCLI := mockCLI.Client(paymentCreator.ListenAddr)
receiverCLI := mockCLI.Client(paymentReceiver.ListenAddr)

@ -300,17 +306,18 @@ func TestPaymentChannelVouchers(t *testing.T) {
// is greater than what's left in the channel, voucher create fails
func TestPaymentChannelVoucherCreateShortfall(t *testing.T) {
_ = os.Setenv("BELLMAN_NO_GPU", "1")
-kit.QuietMiningLogs()
+kit2.QuietMiningLogs()

blocktime := 5 * time.Millisecond
ctx := context.Background()
-nodes, addrs := kit.StartTwoNodesOneMiner(ctx, t, blocktime)
+var (
-paymentCreator := nodes[0]
+paymentCreator kit2.TestFullNode
-creatorAddr := addrs[0]
+paymentReceiver kit2.TestFullNode
-receiverAddr := addrs[1]
+)
+creatorAddr, receiverAddr := startPaychCreatorReceiverMiner(ctx, t, &paymentCreator, &paymentReceiver, blocktime)

// Create mock CLI
-mockCLI := kit.NewMockCLI(ctx, t, cli.Commands)
+mockCLI := kit2.NewMockCLI(ctx, t, cli.Commands)
creatorCLI := mockCLI.Client(paymentCreator.ListenAddr)

// creator: paych add-funds <creator> <receiver> <amount>
@ -378,7 +385,7 @@ func checkVoucherOutput(t *testing.T, list string, vouchers []voucherSpec) {
}

// waitForHeight waits for the node to reach the given chain epoch
-func waitForHeight(ctx context.Context, t *testing.T, node kit.TestFullNode, height abi.ChainEpoch) {
+func waitForHeight(ctx context.Context, t *testing.T, node kit2.TestFullNode, height abi.ChainEpoch) {
atHeight := make(chan struct{})
chainEvents := events.NewEvents(ctx, node)
err := chainEvents.ChainAt(func(ctx context.Context, ts *types.TipSet, curH abi.ChainEpoch) error {
@ -396,7 +403,7 @@ func waitForHeight(ctx context.Context, t *testing.T, node kit.TestFullNode, hei
}

// getPaychState gets the state of the payment channel with the given address
-func getPaychState(ctx context.Context, t *testing.T, node kit.TestFullNode, chAddr address.Address) paych.State {
+func getPaychState(ctx context.Context, t *testing.T, node kit2.TestFullNode, chAddr address.Address) paych.State {
act, err := node.StateGetActor(ctx, chAddr, types.EmptyTSK)
require.NoError(t, err)

@ -406,3 +413,25 @@ func getPaychState(ctx context.Context, t *testing.T, node kit.TestFullNode, chA

return chState
}

+func startPaychCreatorReceiverMiner(ctx context.Context, t *testing.T, paymentCreator *kit2.TestFullNode, paymentReceiver *kit2.TestFullNode, blocktime time.Duration) (address.Address, address.Address) {
+var miner kit2.TestMiner
+opts := kit2.ThroughRPC()
+kit2.NewEnsemble(t, kit2.MockProofs()).
+FullNode(paymentCreator, opts).
+FullNode(paymentReceiver, opts).
+Miner(&miner, paymentCreator).
+Start().
+InterconnectAll().
+BeginMining(blocktime)

+// Send some funds to the second node
+receiverAddr, err := paymentReceiver.WalletDefaultAddress(ctx)
+require.NoError(t, err)
+kit2.SendFunds(ctx, t, *paymentCreator, receiverAddr, abi.NewTokenAmount(1e18))

+// Get the first node's address
+creatorAddr, err := paymentCreator.WalletDefaultAddress(ctx)
+require.NoError(t, err)
+return creatorAddr, receiverAddr
+}
@ -10,15 +10,14 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/build"
-"github.com/filecoin-project/lotus/itests/kit"
+"github.com/filecoin-project/lotus/itests/kit2"
bminer "github.com/filecoin-project/lotus/miner"
-"github.com/filecoin-project/lotus/node/impl"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func TestSDRUpgrade(t *testing.T) {
-kit.QuietMiningLogs()
+kit2.QuietMiningLogs()

// oldDelay := policy.GetPreCommitChallengeDelay()
// policy.SetPreCommitChallengeDelay(5)
@ -31,18 +30,10 @@ func TestSDRUpgrade(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

-n, sn := kit.MockMinerBuilder(t, []kit.FullNodeOpts{kit.FullNodeWithSDRAt(500, 1000)}, kit.OneMiner)
+opts := kit2.ConstructorOpts(kit2.SDRUpgradeAt(500, 1000))
-client := n[0].FullNode.(*impl.FullNodeAPI)
+client, miner, ens := kit2.EnsembleMinimal(t, kit2.MockProofs(), opts)
-miner := sn[0]
+ens.InterconnectAll()

-addrinfo, err := client.NetAddrsListen(ctx)
-if err != nil {
-t.Fatal(err)
-}

-if err := miner.NetConnect(ctx, addrinfo); err != nil {
-t.Fatal(err)
-}
build.Clock.Sleep(time.Second)

pledge := make(chan struct{})
@ -53,7 +44,7 @@ func TestSDRUpgrade(t *testing.T) {
round := 0
for atomic.LoadInt64(&mine) != 0 {
build.Clock.Sleep(blocktime)
-if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
+if err := miner.MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {

}}); err != nil {
t.Error(err)
@ -88,7 +79,7 @@ func TestSDRUpgrade(t *testing.T) {
}()

// before.
-kit.PledgeSectors(t, ctx, miner, 9, 0, pledge)
+miner.PledgeSectors(ctx, 9, 0, pledge)

s, err := miner.SectorsList(ctx)
require.NoError(t, err)
@ -2,7 +2,6 @@ package itests

import (
"context"
-"fmt"
"testing"
"time"

@ -11,24 +10,23 @@ import (
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/stmgr"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
-"github.com/filecoin-project/lotus/itests/kit"
+"github.com/filecoin-project/lotus/itests/kit2"
"github.com/filecoin-project/lotus/node"
-"github.com/filecoin-project/lotus/node/impl"
"github.com/stretchr/testify/require"
)

func TestTapeFix(t *testing.T) {
-kit.QuietMiningLogs()
+kit2.QuietMiningLogs()

var blocktime = 2 * time.Millisecond

// The "before" case is disabled, because we need the builder to mock 32 GiB sectors to accurately repro this case
// TODO: Make the mock sector size configurable and reenable this
// t.Run("before", func(t *testing.T) { testTapeFix(t, b, blocktime, false) })
-t.Run("after", func(t *testing.T) { testTapeFix(t, kit.MockMinerBuilder, blocktime, true) })
+t.Run("after", func(t *testing.T) { testTapeFix(t, blocktime, true) })
}

-func testTapeFix(t *testing.T, b kit.APIBuilder, blocktime time.Duration, after bool) {
+func testTapeFix(t *testing.T, blocktime time.Duration, after bool) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

@ -44,46 +42,14 @@ func testTapeFix(t *testing.T, b kit.APIBuilder, blocktime time.Duration, after
})
}

-n, sn := b(t, []kit.FullNodeOpts{{Opts: func(_ []kit.TestFullNode) node.Option {
+nopts := kit2.ConstructorOpts(node.Override(new(stmgr.UpgradeSchedule), upgradeSchedule))
-return node.Override(new(stmgr.UpgradeSchedule), upgradeSchedule)
+_, miner, ens := kit2.EnsembleMinimal(t, kit2.MockProofs(), nopts)
-}}}, kit.OneMiner)
+ens.InterconnectAll().BeginMining(blocktime)

-client := n[0].FullNode.(*impl.FullNodeAPI)
-miner := sn[0]

-addrinfo, err := client.NetAddrsListen(ctx)
-if err != nil {
-t.Fatal(err)
-}

-if err := miner.NetConnect(ctx, addrinfo); err != nil {
-t.Fatal(err)
-}
-build.Clock.Sleep(time.Second)

-done := make(chan struct{})
-go func() {
-defer close(done)
-for ctx.Err() == nil {
-build.Clock.Sleep(blocktime)
-if err := sn[0].MineOne(ctx, kit.MineNext); err != nil {
-if ctx.Err() != nil {
-// context was canceled, ignore the error.
-return
-}
-t.Error(err)
-}
-}
-}()
-defer func() {
-cancel()
-<-done
-}()

sid, err := miner.PledgeSector(ctx)
require.NoError(t, err)

-fmt.Printf("All sectors is fsm\n")
+t.Log("All sectors is fsm")

// If before, we expect the precommit to fail
successState := api.SectorState(sealing.CommitFailed)
@ -101,6 +67,6 @@ func testTapeFix(t *testing.T, b kit.APIBuilder, blocktime time.Duration, after
}
require.NotEqual(t, failureState, st.State)
build.Clock.Sleep(100 * time.Millisecond)
-fmt.Println("WaitSeal")
+t.Log("WaitSeal")
}
}
@ -2,122 +2,127 @@ package itests

import (
"context"
+"fmt"
"strings"
"testing"
"time"

+"github.com/filecoin-project/go-state-types/abi"
+"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/network"
-"github.com/filecoin-project/lotus/itests/kit"
+"github.com/filecoin-project/lotus/chain/wallet"
+verifreg4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/verifreg"
+"github.com/stretchr/testify/require"

lapi "github.com/filecoin-project/lotus/api"

"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
-"github.com/filecoin-project/lotus/node/impl"
-verifreg4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/verifreg"

-"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/chain/types"
+"github.com/filecoin-project/lotus/itests/kit2"
+"github.com/filecoin-project/lotus/node/impl"
)

func TestVerifiedClientTopUp(t *testing.T) {
+blockTime := 100 * time.Millisecond

test := func(nv network.Version, shouldWork bool) func(*testing.T) {
return func(t *testing.T) {
-nodes, miners := kit.MockMinerBuilder(t, []kit.FullNodeOpts{kit.FullNodeWithNetworkUpgradeAt(nv, -1)}, kit.OneMiner)
+rootKey, err := wallet.GenerateKey(types.KTSecp256k1)
-api := nodes[0].FullNode.(*impl.FullNodeAPI)
+require.NoError(t, err)

+verifierKey, err := wallet.GenerateKey(types.KTSecp256k1)
+require.NoError(t, err)

+verifiedClientKey, err := wallet.GenerateKey(types.KTBLS)
+require.NoError(t, err)

+bal, err := types.ParseFIL("100fil")
+require.NoError(t, err)

+node, _, ens := kit2.EnsembleMinimal(t, kit2.MockProofs(),
+kit2.RootVerifier(rootKey, abi.NewTokenAmount(bal.Int64())),
+kit2.Account(verifierKey, abi.NewTokenAmount(bal.Int64())), // assign some balance to the verifier so they can send an AddClient message.
+kit2.ConstructorOpts(kit2.InstantaneousNetworkVersion(nv)))

+ens.InterconnectAll().BeginMining(blockTime)

+api := node.FullNode.(*impl.FullNodeAPI)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

-//Get VRH
+// get VRH
vrh, err := api.StateVerifiedRegistryRootKey(ctx, types.TipSetKey{})
-if err != nil {
+fmt.Println(vrh.String())
-t.Fatal(err)
+require.NoError(t, err)
-}

-//Add verifier
+// import the root key.
-verifier, err := api.WalletDefaultAddress(ctx)
+rootAddr, err := api.WalletImport(ctx, &rootKey.KeyInfo)
-if err != nil {
+require.NoError(t, err)
-t.Fatal(err)
-}
+// import the verifier's key.
+verifierAddr, err := api.WalletImport(ctx, &verifierKey.KeyInfo)
+require.NoError(t, err)

+// import the verified client's key.
+verifiedClientAddr, err := api.WalletImport(ctx, &verifiedClientKey.KeyInfo)
+require.NoError(t, err)

+params, err := actors.SerializeParams(&verifreg4.AddVerifierParams{Address: verifierAddr, Allowance: big.NewInt(100000000000)})
+require.NoError(t, err)

-params, err := actors.SerializeParams(&verifreg4.AddVerifierParams{Address: verifier, Allowance: big.NewInt(100000000000)})
-if err != nil {
-t.Fatal(err)
-}
msg := &types.Message{
+From: rootAddr,
To: verifreg.Address,
-From: vrh,
Method: verifreg.Methods.AddVerifier,
Params: params,
Value: big.Zero(),
}

-bm := kit.NewBlockMiner(t, miners[0])
-bm.MineBlocks(ctx, 100*time.Millisecond)
-t.Cleanup(bm.Stop)

sm, err := api.MpoolPushMessage(ctx, msg, nil)
-if err != nil {
+require.NoError(t, err, "AddVerifier failed")
-t.Fatal("AddVerifier failed: ", err)
-}
res, err := api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
-if err != nil {
+require.NoError(t, err)
-t.Fatal(err)
+require.EqualValues(t, 0, res.Receipt.ExitCode)
-}
-if res.Receipt.ExitCode != 0 {
-t.Fatal("did not successfully send message")
-}

-//Assign datacap to a client
+// assign datacap to a client
datacap := big.NewInt(10000)
-clientAddress, err := api.WalletNew(ctx, types.KTBLS)
-if err != nil {
-t.Fatal(err)
-}

-params, err = actors.SerializeParams(&verifreg4.AddVerifiedClientParams{Address: clientAddress, Allowance: datacap})
+params, err = actors.SerializeParams(&verifreg4.AddVerifiedClientParams{Address: verifiedClientAddr, Allowance: datacap})
-if err != nil {
+require.NoError(t, err)
-t.Fatal(err)
-}

msg = &types.Message{
+From: verifierAddr,
To: verifreg.Address,
-From: verifier,
Method: verifreg.Methods.AddVerifiedClient,
Params: params,
Value: big.Zero(),
}

sm, err = api.MpoolPushMessage(ctx, msg, nil)
-if err != nil {
+require.NoError(t, err)
-t.Fatal("AddVerifiedClient faield: ", err)
-}
+res, err = api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
-res, err = api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
+require.NoError(t, err)
-if err != nil {
+require.EqualValues(t, 0, res.Receipt.ExitCode)
-t.Fatal(err)
-}
+// check datacap balance
-if res.Receipt.ExitCode != 0 {
+dcap, err := api.StateVerifiedClientStatus(ctx, verifiedClientAddr, types.EmptyTSK)
-t.Fatal("did not successfully send message")
+require.NoError(t, err)
-}

-//check datacap balance
-dcap, err := api.StateVerifiedClientStatus(ctx, clientAddress, types.EmptyTSK)
-if err != nil {
-t.Fatal(err)
-}
if !dcap.Equals(datacap) {
t.Fatal("")
}

-//try to assign datacap to the same client should fail for actor v4 and below
+// try to assign datacap to the same client should fail for actor v4 and below
-params, err = actors.SerializeParams(&verifreg4.AddVerifiedClientParams{Address: clientAddress, Allowance: datacap})
+params, err = actors.SerializeParams(&verifreg4.AddVerifiedClientParams{Address: verifiedClientAddr, Allowance: datacap})
if err != nil {
t.Fatal(err)
}

msg = &types.Message{
+From: verifierAddr,
To: verifreg.Address,
-From: verifier,
Method: verifreg.Methods.AddVerifiedClient,
Params: params,
Value: big.Zero(),
@@ -38,6 +38,7 @@ var (
 	MessageTo, _    = tag.NewKey("message_to")
 	MessageNonce, _ = tag.NewKey("message_nonce")
 	ReceivedFrom, _ = tag.NewKey("received_from")
+	MsgValid, _     = tag.NewKey("message_valid")
 	Endpoint, _     = tag.NewKey("endpoint")
 	APIInterface, _ = tag.NewKey("api") // to distinguish between gateway api and full node api endpoint calls

@@ -61,6 +62,12 @@ var (
 	MessageReceived          = stats.Int64("message/received", "Counter for total received messages", stats.UnitDimensionless)
 	MessageValidationFailure = stats.Int64("message/failure", "Counter for message validation failures", stats.UnitDimensionless)
 	MessageValidationSuccess = stats.Int64("message/success", "Counter for message validation successes", stats.UnitDimensionless)
+	MessageValidationDuration = stats.Float64("message/validation_ms", "Duration of message validation", stats.UnitMilliseconds)
+	MpoolGetNonceDuration     = stats.Float64("mpool/getnonce_ms", "Duration of getStateNonce in mpool", stats.UnitMilliseconds)
+	MpoolGetBalanceDuration   = stats.Float64("mpool/getbalance_ms", "Duration of getStateBalance in mpool", stats.UnitMilliseconds)
+	MpoolAddTsDuration        = stats.Float64("mpool/addts_ms", "Duration of addTs in mpool", stats.UnitMilliseconds)
+	MpoolAddDuration          = stats.Float64("mpool/add_ms", "Duration of Add in mpool", stats.UnitMilliseconds)
+	MpoolPushDuration         = stats.Float64("mpool/push_ms", "Duration of Push in mpool", stats.UnitMilliseconds)
 	BlockPublished           = stats.Int64("block/published", "Counter for total locally published blocks", stats.UnitDimensionless)
 	BlockReceived            = stats.Int64("block/received", "Counter for total received blocks", stats.UnitDimensionless)
 	BlockValidationFailure   = stats.Int64("block/failure", "Counter for block validation failures", stats.UnitDimensionless)
@@ -170,6 +177,31 @@ var (
 		Measure:     MessageValidationSuccess,
 		Aggregation: view.Count(),
 	}
+	MessageValidationDurationView = &view.View{
+		Measure:     MessageValidationDuration,
+		Aggregation: defaultMillisecondsDistribution,
+		TagKeys:     []tag.Key{MsgValid, Local},
+	}
+	MpoolGetNonceDurationView = &view.View{
+		Measure:     MpoolGetNonceDuration,
+		Aggregation: defaultMillisecondsDistribution,
+	}
+	MpoolGetBalanceDurationView = &view.View{
+		Measure:     MpoolGetBalanceDuration,
+		Aggregation: defaultMillisecondsDistribution,
+	}
+	MpoolAddTsDurationView = &view.View{
+		Measure:     MpoolAddTsDuration,
+		Aggregation: defaultMillisecondsDistribution,
+	}
+	MpoolAddDurationView = &view.View{
+		Measure:     MpoolAddDuration,
+		Aggregation: defaultMillisecondsDistribution,
+	}
+	MpoolPushDurationView = &view.View{
+		Measure:     MpoolPushDuration,
+		Aggregation: defaultMillisecondsDistribution,
+	}
 	PeerCountView = &view.View{
 		Measure:     PeerCount,
 		Aggregation: view.LastValue(),
@@ -313,6 +345,12 @@ var ChainNodeViews = append([]*view.View{
 	MessageReceivedView,
 	MessageValidationFailureView,
 	MessageValidationSuccessView,
+	MessageValidationDurationView,
+	MpoolGetNonceDurationView,
+	MpoolGetBalanceDurationView,
+	MpoolAddTsDurationView,
+	MpoolAddDurationView,
+	MpoolPushDurationView,
 	PubsubPublishMessageView,
 	PubsubDeliverMessageView,
 	PubsubRejectMessageView,
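The new measures are only useful once something records them. The sketch below shows one way the message-validation duration and the new `message_valid` tag could be recorded with OpenCensus; the wrapper function, the callback, and the `local` flag are illustrative assumptions, not code from this commit.

```go
package example

import (
	"context"
	"strconv"
	"time"

	"go.opencensus.io/stats"
	"go.opencensus.io/tag"

	"github.com/filecoin-project/lotus/metrics"
)

// recordValidation times a validation callback and records the result using
// the measure and tag keys referenced in the diff above (MessageValidationDuration,
// MsgValid, Local). The surrounding shape is an assumption for illustration.
func recordValidation(ctx context.Context, local bool, validate func() error) error {
	start := time.Now()
	err := validate()

	ctx, _ = tag.New(ctx,
		tag.Upsert(metrics.MsgValid, strconv.FormatBool(err == nil)),
		tag.Upsert(metrics.Local, strconv.FormatBool(local)),
	)
	stats.Record(ctx, metrics.MessageValidationDuration.M(float64(time.Since(start).Milliseconds())))
	return err
}
```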
@@ -6,6 +6,8 @@ import (

 	"github.com/ipfs/go-cid"

+	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/go-state-types/big"
 	miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"

 	"github.com/filecoin-project/lotus/chain/types"
@@ -84,6 +86,9 @@ type SealingConfig struct {

 	AlwaysKeepUnsealedCopy bool

+	// Run sector finalization before submitting sector proof to the chain
+	FinalizeEarly bool
+
 	// enable / disable precommit batching (takes effect after nv13)
 	BatchPreCommits bool
 	// maximum precommit batch size - batches will be sent immediately above this size
@@ -91,7 +96,7 @@ type SealingConfig struct {
 	MinPreCommitBatch int
 	// how long to wait before submitting a batch after crossing the minimum batch size
 	PreCommitBatchWait Duration
-	// time buffer for forceful batch submission before sectors in batch would start expiring
+	// time buffer for forceful batch submission before sectors/deal in batch would start expiring
 	PreCommitBatchSlack Duration

 	// enable / disable commit aggregation (takes effect after nv13)
@@ -101,7 +106,7 @@ type SealingConfig struct {
 	MaxCommitBatch int
 	// how long to wait before submitting a batch after crossing the minimum batch size
 	CommitBatchWait Duration
-	// time buffer for forceful batch submission before sectors in batch would start expiring
+	// time buffer for forceful batch submission before sectors/deals in batch would start expiring
 	CommitBatchSlack Duration

 	TerminateBatchMax uint64
@@ -114,9 +119,23 @@ type SealingConfig struct {
 	// todo TargetSectors - stop auto-pleding new sectors after this many sectors are sealed, default CC upgrade for deals sectors if above
 }

+type BatchFeeConfig struct {
+	Base      types.FIL
+	PerSector types.FIL
+}
+
+func (b *BatchFeeConfig) FeeForSectors(nSectors int) abi.TokenAmount {
+	return big.Add(big.Int(b.Base), big.Mul(big.NewInt(int64(nSectors)), big.Int(b.PerSector)))
+}
+
 type MinerFeeConfig struct {
 	MaxPreCommitGasFee types.FIL
 	MaxCommitGasFee    types.FIL

+	// maxBatchFee = maxBase + maxPerSector * nSectors
+	MaxPreCommitBatchGasFee BatchFeeConfig
+	MaxCommitBatchGasFee    BatchFeeConfig
+
 	MaxTerminateGasFee  types.FIL
 	MaxWindowPoStGasFee types.FIL
 	MaxPublishDealsFee  types.FIL
|
|||||||
MaxSealingSectorsForDeals: 0,
|
MaxSealingSectorsForDeals: 0,
|
||||||
WaitDealsDelay: Duration(time.Hour * 6),
|
WaitDealsDelay: Duration(time.Hour * 6),
|
||||||
AlwaysKeepUnsealedCopy: true,
|
AlwaysKeepUnsealedCopy: true,
|
||||||
|
FinalizeEarly: false,
|
||||||
|
|
||||||
BatchPreCommits: true,
|
BatchPreCommits: true,
|
||||||
MinPreCommitBatch: 1, // we must have at least one proof to aggregate
|
MinPreCommitBatch: 1, // we must have at least one precommit to batch
|
||||||
MaxPreCommitBatch: miner5.PreCommitSectorBatchMaxSize, //
|
MaxPreCommitBatch: miner5.PreCommitSectorBatchMaxSize, // up to 256 sectors
|
||||||
PreCommitBatchWait: Duration(24 * time.Hour), // this can be up to 6 days
|
PreCommitBatchWait: Duration(24 * time.Hour), // this should be less than 31.5 hours, which is the expiration of a precommit ticket
|
||||||
PreCommitBatchSlack: Duration(3 * time.Hour),
|
PreCommitBatchSlack: Duration(3 * time.Hour), // time buffer for forceful batch submission before sectors/deals in batch would start expiring, higher value will lower the chances for message fail due to expiration
|
||||||
|
|
||||||
AggregateCommits: true,
|
AggregateCommits: true,
|
||||||
MinCommitBatch: miner5.MinAggregatedSectors, // we must have at least four proofs to aggregate
|
MinCommitBatch: miner5.MinAggregatedSectors, // per FIP13, we must have at least four proofs to aggregate, where 4 is the cross over point where aggregation wins out on single provecommit gas costs
|
||||||
MaxCommitBatch: miner5.MaxAggregatedSectors, // this is the maximum aggregation per FIP13
|
MaxCommitBatch: miner5.MaxAggregatedSectors, // maximum 819 sectors, this is the maximum aggregation per FIP13
|
||||||
CommitBatchWait: Duration(24 * time.Hour), // this can be up to 6 days
|
CommitBatchWait: Duration(24 * time.Hour), // this can be up to 30 days
|
||||||
CommitBatchSlack: Duration(1 * time.Hour),
|
CommitBatchSlack: Duration(1 * time.Hour), // time buffer for forceful batch submission before sectors/deals in batch would start expiring, higher value will lower the chances for message fail due to expiration
|
||||||
|
|
||||||
TerminateBatchMin: 1,
|
TerminateBatchMin: 1,
|
||||||
TerminateBatchMax: 100,
|
TerminateBatchMax: 100,
|
||||||
@ -309,8 +329,18 @@ func DefaultStorageMiner() *StorageMiner {
|
|||||||
},
|
},
|
||||||
|
|
||||||
Fees: MinerFeeConfig{
|
Fees: MinerFeeConfig{
|
||||||
MaxPreCommitGasFee: types.MustParseFIL("0.025"),
|
MaxPreCommitGasFee: types.MustParseFIL("0.025"),
|
||||||
MaxCommitGasFee: types.MustParseFIL("0.05"),
|
MaxCommitGasFee: types.MustParseFIL("0.05"),
|
||||||
|
|
||||||
|
MaxPreCommitBatchGasFee: BatchFeeConfig{
|
||||||
|
Base: types.MustParseFIL("0.025"), // TODO: update before v1.10.0
|
||||||
|
PerSector: types.MustParseFIL("0.025"), // TODO: update before v1.10.0
|
||||||
|
},
|
||||||
|
MaxCommitBatchGasFee: BatchFeeConfig{
|
||||||
|
Base: types.MustParseFIL("0.05"), // TODO: update before v1.10.0
|
||||||
|
PerSector: types.MustParseFIL("0.05"), // TODO: update before v1.10.0
|
||||||
|
},
|
||||||
|
|
||||||
MaxTerminateGasFee: types.MustParseFIL("0.5"),
|
MaxTerminateGasFee: types.MustParseFIL("0.5"),
|
||||||
MaxWindowPoStGasFee: types.MustParseFIL("5"),
|
MaxWindowPoStGasFee: types.MustParseFIL("5"),
|
||||||
MaxPublishDealsFee: types.MustParseFIL("0.05"),
|
MaxPublishDealsFee: types.MustParseFIL("0.05"),
|
||||||
|
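Putting the new `BatchFeeConfig` and the defaults above together, the per-batch fee cap follows the formula from the earlier hunk, `maxBatchFee = maxBase + maxPerSector * nSectors`. A small worked example, assuming the `node/config` package path and using only the types and values shown in this diff:

```go
package main

import (
	"fmt"

	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/node/config"
)

// With the default precommit batch fees above (Base 0.025 FIL, PerSector
// 0.025 FIL), a 10-sector batch is capped at 0.025 + 10*0.025 = 0.275 FIL.
func main() {
	fees := config.BatchFeeConfig{
		Base:      types.MustParseFIL("0.025"),
		PerSector: types.MustParseFIL("0.025"),
	}
	cap := fees.FeeForSectors(10) // abi.TokenAmount, in attoFIL
	fmt.Println(types.FIL(cap))   // prints 0.275 FIL
}
```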
@@ -226,15 +226,15 @@ func (a *MpoolAPI) MpoolBatchPushMessage(ctx context.Context, msgs []*types.Mess
 }

 func (a *MpoolAPI) MpoolCheckMessages(ctx context.Context, protos []*api.MessagePrototype) ([][]api.MessageCheckStatus, error) {
-	return a.Mpool.CheckMessages(protos)
+	return a.Mpool.CheckMessages(ctx, protos)
 }

 func (a *MpoolAPI) MpoolCheckPendingMessages(ctx context.Context, from address.Address) ([][]api.MessageCheckStatus, error) {
-	return a.Mpool.CheckPendingMessages(from)
+	return a.Mpool.CheckPendingMessages(ctx, from)
 }

 func (a *MpoolAPI) MpoolCheckReplaceMessages(ctx context.Context, msgs []*types.Message) ([][]api.MessageCheckStatus, error) {
-	return a.Mpool.CheckReplaceMessages(msgs)
+	return a.Mpool.CheckReplaceMessages(ctx, msgs)
 }

 func (a *MpoolAPI) MpoolGetNonce(ctx context.Context, addr address.Address) (uint64, error) {
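These mpool check helpers now thread the caller's context through to the message pool. A hedged usage sketch from the client side (the helper and its printing are illustrative, not part of this diff):

```go
package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-address"

	lapi "github.com/filecoin-project/lotus/api"
)

// printPendingChecks runs the pending-message checks for an address and dumps
// each result; the caller's context is now passed down to the message pool.
func printPendingChecks(ctx context.Context, node lapi.FullNode, from address.Address) error {
	checks, err := node.MpoolCheckPendingMessages(ctx, from)
	if err != nil {
		return err
	}
	for _, msgChecks := range checks {
		for _, c := range msgChecks {
			fmt.Printf("%+v\n", c)
		}
	}
	return nil
}
```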
@@ -834,6 +834,7 @@ func NewSetSealConfigFunc(r repo.LockedRepo) (dtypes.SetSealingConfigFunc, error
 			MaxSealingSectorsForDeals: cfg.MaxSealingSectorsForDeals,
 			WaitDealsDelay:            config.Duration(cfg.WaitDealsDelay),
 			AlwaysKeepUnsealedCopy:    cfg.AlwaysKeepUnsealedCopy,
+			FinalizeEarly:             cfg.FinalizeEarly,

 			BatchPreCommits:   cfg.BatchPreCommits,
 			MinPreCommitBatch: cfg.MinPreCommitBatch,
@@ -865,6 +866,7 @@ func NewGetSealConfigFunc(r repo.LockedRepo) (dtypes.GetSealingConfigFunc, error
 			MaxSealingSectorsForDeals: cfg.Sealing.MaxSealingSectorsForDeals,
 			WaitDealsDelay:            time.Duration(cfg.Sealing.WaitDealsDelay),
 			AlwaysKeepUnsealedCopy:    cfg.Sealing.AlwaysKeepUnsealedCopy,
+			FinalizeEarly:             cfg.Sealing.FinalizeEarly,

 			BatchPreCommits:   cfg.Sealing.BatchPreCommits,
 			MinPreCommitBatch: cfg.Sealing.MinPreCommitBatch,
@@ -360,6 +360,20 @@ func (s SealingAPIAdapter) ChainHead(ctx context.Context) (sealing.TipSetToken,
 	return head.Key().Bytes(), head.Height(), nil
 }

+func (s SealingAPIAdapter) ChainBaseFee(ctx context.Context, tok sealing.TipSetToken) (abi.TokenAmount, error) {
+	tsk, err := types.TipSetKeyFromBytes(tok)
+	if err != nil {
+		return big.Zero(), err
+	}
+
+	ts, err := s.delegate.ChainGetTipSet(ctx, tsk)
+	if err != nil {
+		return big.Zero(), err
+	}
+
+	return ts.Blocks()[0].ParentBaseFee, nil
+}
+
 func (s SealingAPIAdapter) ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) {
 	return s.delegate.ChainGetMessage(ctx, mc)
 }
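ChainBaseFee gives the sealing code a way to inspect the current network base fee, for example before deciding when to flush a batch. A minimal sketch of a caller, assuming only the method added above; the narrow interface and the cutoff are hypothetical:

```go
package example

import (
	"context"

	"github.com/filecoin-project/go-state-types/abi"
)

// baseFeeSource is a hypothetical narrow interface exposing just the method
// added in this diff (TipSetToken is a []byte underneath).
type baseFeeSource interface {
	ChainBaseFee(ctx context.Context, tok []byte) (abi.TokenAmount, error)
}

// cheapEnoughToSubmit reports whether the current base fee is below a
// caller-supplied cutoff, e.g. to prefer flushing an aggregate while gas is cheap.
func cheapEnoughToSubmit(ctx context.Context, api baseFeeSource, tok []byte, cutoff abi.TokenAmount) (bool, error) {
	bf, err := api.ChainBaseFee(ctx, tok)
	if err != nil {
		return false, err
	}
	return bf.LessThan(cutoff), nil
}
```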
@@ -171,12 +171,6 @@ func (m *Miner) Run(ctx context.Context) error {
 		return xerrors.Errorf("getting miner info: %w", err)
 	}

-	fc := sealing.FeeConfig{
-		MaxPreCommitGasFee: abi.TokenAmount(m.feeCfg.MaxPreCommitGasFee),
-		MaxCommitGasFee:    abi.TokenAmount(m.feeCfg.MaxCommitGasFee),
-		MaxTerminateGasFee: abi.TokenAmount(m.feeCfg.MaxTerminateGasFee),
-	}
-
 	var (
 		// consumer of chain head changes.
 		evts = events.NewEvents(ctx, m.api)
@@ -205,7 +199,7 @@ func (m *Miner) Run(ctx context.Context) error {
 	)

 	// Instantiate the sealing FSM.
-	m.sealing = sealing.New(adaptedAPI, fc, evtsAdapter, m.maddr, m.ds, m.sealer, m.sc, m.verif, m.prover, &pcp, cfg, m.handleSealingNotifications, as)
+	m.sealing = sealing.New(adaptedAPI, m.feeCfg, evtsAdapter, m.maddr, m.ds, m.sealer, m.sc, m.verif, m.prover, &pcp, cfg, m.handleSealingNotifications, as)

 	// Run the sealing FSM.
 	go m.sealing.Run(ctx) //nolint:errcheck // logged intside the function