resolve conflicts

This commit is contained in:
commit 31e6fb154e
137 CHANGELOG.md
@@ -70,143 +70,6 @@ This is an optional Lotus release that introduces various improvements to the sealing, mining, and deal-making processes.
- fix health report (https://github.com/filecoin-project/lotus/pull/6011)
- fix(ci): Use recent ubuntu LTS release; Update release params (https://github.com/filecoin-project/lotus/pull/6011)

# 1.9.0-rc4 / 2021-05-13

This is an optional Lotus release that introduces various improvements to the sealing, mining, and deal-making processes.

## Highlights

- OpenRPC Support (https://github.com/filecoin-project/lotus/pull/5843)
- Take latency into account when making interactive deals (https://github.com/filecoin-project/lotus/pull/5876)
- Update go-commp-utils for >10x faster client commp calculation (https://github.com/filecoin-project/lotus/pull/5892)
- add `lotus client cancel-retrieval` cmd to lotus CLI (https://github.com/filecoin-project/lotus/pull/5871)
- add `inspect-deal` command to `lotus client` (https://github.com/filecoin-project/lotus/pull/5833)
- Local retrieval support (https://github.com/filecoin-project/lotus/pull/5917)
- go-fil-markets v1.1.9 -> v1.2.5
  - For a detailed changelog see https://github.com/filecoin-project/go-fil-markets/blob/master/CHANGELOG.md
- rust-fil-proofs v5.4.1 -> v7.0.1
  - For a detailed changelog see https://github.com/filecoin-project/rust-fil-proofs/blob/master/CHANGELOG.md

## Changes

- storagefsm: Apply global events even in broken states (https://github.com/filecoin-project/lotus/pull/5962)
- Default the AlwaysKeepUnsealedCopy flag to true (https://github.com/filecoin-project/lotus/pull/5743)
- splitstore: compact hotstore prior to garbage collection (https://github.com/filecoin-project/lotus/pull/5778)
- ipfs-force bootstrapper update (https://github.com/filecoin-project/lotus/pull/5799)
- better logging when unsealing fails (https://github.com/filecoin-project/lotus/pull/5851)
- perf: add cache for gas premium estimation (https://github.com/filecoin-project/lotus/pull/5709)
- backupds: Compact log on restart (https://github.com/filecoin-project/lotus/pull/5875)
- backupds: Improve truncated log handling (https://github.com/filecoin-project/lotus/pull/5891)
- State CLI improvements
- API proxy struct codegen (https://github.com/filecoin-project/lotus/pull/5854)
- move DI stuff for paychmgr into modules (https://github.com/filecoin-project/lotus/pull/5791)
- Implement Event observer and Settings for 3rd party dep injection (https://github.com/filecoin-project/lotus/pull/5693)
- Export developer and network commands for consumption by derivatives of Lotus (https://github.com/filecoin-project/lotus/pull/5864)
- mock sealer: Simulate randomness side effects (https://github.com/filecoin-project/lotus/pull/5805)
- localstorage: Demote reservation stat error to debug (https://github.com/filecoin-project/lotus/pull/5976)
- shed command to unpack miner info dumps (https://github.com/filecoin-project/lotus/pull/5800)
- Add two utils to Lotus-shed (https://github.com/filecoin-project/lotus/pull/5867)
- add shed election estimate command (https://github.com/filecoin-project/lotus/pull/5092)
- Add --actor flag in lotus-shed sectors terminate (https://github.com/filecoin-project/lotus/pull/5819)
- Move lotus mpool clear to lotus-shed (https://github.com/filecoin-project/lotus/pull/5900)
- Centralize everything on ipfs/go-log/v2 (https://github.com/filecoin-project/lotus/pull/5974)
- expose NextID from nice market actor interface (https://github.com/filecoin-project/lotus/pull/5850)
- add available options for perm on error (https://github.com/filecoin-project/lotus/pull/5814)
- API docs clarification: Document StateSearchMsg replaced message behavior (https://github.com/filecoin-project/lotus/pull/5838)
- api: Document StateReplay replaced message behavior (https://github.com/filecoin-project/lotus/pull/5840)
- add godocs to miner objects (https://github.com/filecoin-project/lotus/pull/2184)
- Add description to the client deal CLI command (https://github.com/filecoin-project/lotus/pull/5999)
- lint: don't skip builtin (https://github.com/filecoin-project/lotus/pull/5881)
- use deal duration from actors (https://github.com/filecoin-project/lotus/pull/5270)
- remote calc winningpost proof (https://github.com/filecoin-project/lotus/pull/5884)
- packer: other network images (https://github.com/filecoin-project/lotus/pull/5930)
- Convert the chainstore lock to RW (https://github.com/filecoin-project/lotus/pull/5971)
- Remove CachedBlockstore (https://github.com/filecoin-project/lotus/pull/5972)
- remove messagepool CapGasFee duplicate code (https://github.com/filecoin-project/lotus/pull/5992)
- Add a mining-heartbeat INFO line at every epoch (https://github.com/filecoin-project/lotus/pull/6183)
- chore(ci): Enable build on RC tags (https://github.com/filecoin-project/lotus/pull/6245)
- Upgrade nerpa to actor v4 and bump the version to rc4 (https://github.com/filecoin-project/lotus/pull/6249)

## Fixes

- return buffers after canceling badger operation (https://github.com/filecoin-project/lotus/pull/5796)
- avoid holding a lock while calling the View callback (https://github.com/filecoin-project/lotus/pull/5792)
- storagefsm: Trigger input processing when below limits (https://github.com/filecoin-project/lotus/pull/5801)
- After importing a previously deleted key, be able to delete it again (https://github.com/filecoin-project/lotus/pull/4653)
- fix StateManager.Replay on reward actor (https://github.com/filecoin-project/lotus/pull/5804)
- make sure atomic 64bit fields are 64bit aligned (https://github.com/filecoin-project/lotus/pull/5794)
- Import secp sigs in paych tests (https://github.com/filecoin-project/lotus/pull/5879)
- fix ci build-macos (https://github.com/filecoin-project/lotus/pull/5934)
- Fix creation of remainder account when it's not a multisig (https://github.com/filecoin-project/lotus/pull/5807)
- Fix fallback chainstore (https://github.com/filecoin-project/lotus/pull/6003)
- fix 4857: show help for set-addrs (https://github.com/filecoin-project/lotus/pull/5943)
- fix health report (https://github.com/filecoin-project/lotus/pull/6011)

# 1.9.0-rc2 / 2021-04-30

This is an optional Lotus release that introduces various improvements to the sealing, mining, and deal-making processes.

## Highlights

- OpenRPC Support (https://github.com/filecoin-project/lotus/pull/5843)
- Take latency into account when making interactive deals (https://github.com/filecoin-project/lotus/pull/5876)
- Update go-commp-utils for >10x faster client commp calculation (https://github.com/filecoin-project/lotus/pull/5892)
- add `lotus client cancel-retrieval` cmd to lotus CLI (https://github.com/filecoin-project/lotus/pull/5871)
- add `inspect-deal` command to `lotus client` (https://github.com/filecoin-project/lotus/pull/5833)
- Local retrieval support (https://github.com/filecoin-project/lotus/pull/5917)
- go-fil-markets v1.1.9 -> v1.2.5
  - For a detailed changelog see https://github.com/filecoin-project/go-fil-markets/blob/master/CHANGELOG.md
- rust-fil-proofs v5.4.1 -> v7
  - For a detailed changelog see https://github.com/filecoin-project/rust-fil-proofs/blob/master/CHANGELOG.md

## Changes

- storagefsm: Apply global events even in broken states (https://github.com/filecoin-project/lotus/pull/5962)
- Default the AlwaysKeepUnsealedCopy flag to true (https://github.com/filecoin-project/lotus/pull/5743)
- splitstore: compact hotstore prior to garbage collection (https://github.com/filecoin-project/lotus/pull/5778)
- ipfs-force bootstrapper update (https://github.com/filecoin-project/lotus/pull/5799)
- better logging when unsealing fails (https://github.com/filecoin-project/lotus/pull/5851)
- perf: add cache for gas premium estimation (https://github.com/filecoin-project/lotus/pull/5709)
- backupds: Compact log on restart (https://github.com/filecoin-project/lotus/pull/5875)
- backupds: Improve truncated log handling (https://github.com/filecoin-project/lotus/pull/5891)
- State CLI improvements
- API proxy struct codegen (https://github.com/filecoin-project/lotus/pull/5854)
- move DI stuff for paychmgr into modules (https://github.com/filecoin-project/lotus/pull/5791)
- Implement Event observer and Settings for 3rd party dep injection (https://github.com/filecoin-project/lotus/pull/5693)
- Export developer and network commands for consumption by derivatives of Lotus (https://github.com/filecoin-project/lotus/pull/5864)
- mock sealer: Simulate randomness side effects (https://github.com/filecoin-project/lotus/pull/5805)
- localstorage: Demote reservation stat error to debug (https://github.com/filecoin-project/lotus/pull/5976)
- shed command to unpack miner info dumps (https://github.com/filecoin-project/lotus/pull/5800)
- Add two utils to Lotus-shed (https://github.com/filecoin-project/lotus/pull/5867)
- add shed election estimate command (https://github.com/filecoin-project/lotus/pull/5092)
- Add --actor flag in lotus-shed sectors terminate (https://github.com/filecoin-project/lotus/pull/5819)
- Move lotus mpool clear to lotus-shed (https://github.com/filecoin-project/lotus/pull/5900)
- Centralize everything on ipfs/go-log/v2 (https://github.com/filecoin-project/lotus/pull/5974)
- expose NextID from nice market actor interface (https://github.com/filecoin-project/lotus/pull/5850)
- add available options for perm on error (https://github.com/filecoin-project/lotus/pull/5814)
- API docs clarification: Document StateSearchMsg replaced message behavior (https://github.com/filecoin-project/lotus/pull/5838)
- api: Document StateReplay replaced message behavior (https://github.com/filecoin-project/lotus/pull/5840)
- add godocs to miner objects (https://github.com/filecoin-project/lotus/pull/2184)
- Add description to the client deal CLI command (https://github.com/filecoin-project/lotus/pull/5999)
- lint: don't skip builtin (https://github.com/filecoin-project/lotus/pull/5881)
- use deal duration from actors (https://github.com/filecoin-project/lotus/pull/5270)
- remote calc winningpost proof (https://github.com/filecoin-project/lotus/pull/5884)
- packer: other network images (https://github.com/filecoin-project/lotus/pull/5930)
- Convert the chainstore lock to RW (https://github.com/filecoin-project/lotus/pull/5971)
- Remove CachedBlockstore (https://github.com/filecoin-project/lotus/pull/5972)
- remove messagepool CapGasFee duplicate code (https://github.com/filecoin-project/lotus/pull/5992)

## Fixes

- return buffers after canceling badger operation (https://github.com/filecoin-project/lotus/pull/5796)
- avoid holding a lock while calling the View callback (https://github.com/filecoin-project/lotus/pull/5792)
- storagefsm: Trigger input processing when below limits (https://github.com/filecoin-project/lotus/pull/5801)
- After importing a previously deleted key, be able to delete it again (https://github.com/filecoin-project/lotus/pull/4653)
- fix StateManager.Replay on reward actor (https://github.com/filecoin-project/lotus/pull/5804)
- make sure atomic 64bit fields are 64bit aligned (https://github.com/filecoin-project/lotus/pull/5794)
- Import secp sigs in paych tests (https://github.com/filecoin-project/lotus/pull/5879)
- fix ci build-macos (https://github.com/filecoin-project/lotus/pull/5934)
- Fix creation of remainder account when it's not a multisig (https://github.com/filecoin-project/lotus/pull/5807)
- Fix fallback chainstore (https://github.com/filecoin-project/lotus/pull/6003)
- fix 4857: show help for set-addrs (https://github.com/filecoin-project/lotus/pull/5943)
- fix health report (https://github.com/filecoin-project/lotus/pull/6011)

# 1.8.0 / 2021-04-05

This is a mandatory release of Lotus that upgrades the network to version 12, which introduces various performance improvements to the cron processing of the power actor. The network will upgrade at height 712320, which is 2021-04-29T06:00:00Z.

@@ -1,61 +0,0 @@
package test

import (
	"context"
	"fmt"
	"sync/atomic"
	"testing"
	"time"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/lotus/miner"
)

// BlockMiner mines blocks on a background goroutine at a fixed cadence until
// stopped.
type BlockMiner struct {
	ctx       context.Context
	t         *testing.T
	miner     TestStorageNode
	blocktime time.Duration
	mine      int64 // 1 while mining should continue; decremented by Stop
	nulls     int64 // pending null rounds to inject before the next block
	done      chan struct{}
}

func NewBlockMiner(ctx context.Context, t *testing.T, miner TestStorageNode, blocktime time.Duration) *BlockMiner {
	return &BlockMiner{
		ctx:       ctx,
		t:         t,
		miner:     miner,
		blocktime: blocktime,
		mine:      int64(1),
		done:      make(chan struct{}),
	}
}

func (bm *BlockMiner) MineBlocks() {
	time.Sleep(time.Second)
	go func() {
		defer close(bm.done)
		for atomic.LoadInt64(&bm.mine) == 1 {
			select {
			case <-bm.ctx.Done():
				return
			case <-time.After(bm.blocktime):
			}

			// Drain any pending null rounds and inject them into this round.
			nulls := atomic.SwapInt64(&bm.nulls, 0)
			if err := bm.miner.MineOne(bm.ctx, miner.MineReq{
				InjectNulls: abi.ChainEpoch(nulls),
				Done:        func(bool, abi.ChainEpoch, error) {},
			}); err != nil {
				bm.t.Error(err)
			}
		}
	}()
}

func (bm *BlockMiner) Stop() {
	atomic.AddInt64(&bm.mine, -1)
	fmt.Println("shutting down mining")
	<-bm.done
}
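A minimal usage sketch of the helper above, matching how the scaffolding in the deals file below drives it (the blocktime value here is illustrative):

	bm := NewBlockMiner(ctx, t, miner, 50*time.Millisecond)
	bm.MineBlocks() // mine a block roughly every blocktime in the background
	defer bm.Stop() // signal the loop to stop and wait for it to exit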
@@ -1,703 +0,0 @@
package test

import (
	"bytes"
	"context"
	"fmt"
	"io/ioutil"
	"math/rand"
	"os"
	"path/filepath"
	"sort"
	"testing"
	"time"

	"github.com/ipfs/go-cid"
	files "github.com/ipfs/go-ipfs-files"
	"github.com/ipld/go-car"
	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/go-fil-markets/storagemarket"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/actors/builtin/market"
	"github.com/filecoin-project/lotus/chain/types"
	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
	"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
	"github.com/filecoin-project/lotus/markets/storageadapter"
	"github.com/filecoin-project/lotus/node"
	"github.com/filecoin-project/lotus/node/impl"
	"github.com/filecoin-project/lotus/node/modules/dtypes"
	market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
	ipld "github.com/ipfs/go-ipld-format"
	dag "github.com/ipfs/go-merkledag"
	dstest "github.com/ipfs/go-merkledag/test"
	unixfile "github.com/ipfs/go-unixfs/file"
)

func TestDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, carExport, fastRet bool, startEpoch abi.ChainEpoch) {
	s := setupOneClientOneMiner(t, b, blocktime)
	defer s.blockMiner.Stop()

	MakeDeal(t, s.ctx, 6, s.client, s.miner, carExport, fastRet, startEpoch)
}

func TestDoubleDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
	s := setupOneClientOneMiner(t, b, blocktime)
	defer s.blockMiner.Stop()

	MakeDeal(t, s.ctx, 6, s.client, s.miner, false, false, startEpoch)
	MakeDeal(t, s.ctx, 7, s.client, s.miner, false, false, startEpoch)
}

func MakeDeal(t *testing.T, ctx context.Context, rseed int, client api.FullNode, miner TestStorageNode, carExport, fastRet bool, startEpoch abi.ChainEpoch) {
	res, data, err := CreateClientFile(ctx, client, rseed, 0)
	if err != nil {
		t.Fatal(err)
	}

	fcid := res.Root
	fmt.Println("FILE CID: ", fcid)

	deal := startDeal(t, ctx, miner, client, fcid, fastRet, startEpoch)

	// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
	time.Sleep(time.Second)
	waitDealSealed(t, ctx, miner, client, deal, false, false, nil)

	// Retrieval
	info, err := client.ClientGetDealInfo(ctx, *deal)
	require.NoError(t, err)

	testRetrieval(t, ctx, client, fcid, &info.PieceCID, carExport, data)
}

func CreateClientFile(ctx context.Context, client api.FullNode, rseed, size int) (*api.ImportRes, []byte, error) {
	data, path, err := createRandomFile(rseed, size)
	if err != nil {
		return nil, nil, err
	}

	res, err := client.ClientImport(ctx, api.FileRef{Path: path})
	if err != nil {
		return nil, nil, err
	}
	return res, data, nil
}

func createRandomFile(rseed, size int) ([]byte, string, error) {
	if size == 0 {
		size = 1600
	}
	data := make([]byte, size)
	rand.New(rand.NewSource(int64(rseed))).Read(data)

	dir, err := ioutil.TempDir(os.TempDir(), "test-make-deal-")
	if err != nil {
		return nil, "", err
	}

	path := filepath.Join(dir, "sourcefile.dat")
	err = ioutil.WriteFile(path, data, 0644)
	if err != nil {
		return nil, "", err
	}

	return data, path, nil
}

func TestPublishDealsBatching(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
	publishPeriod := 10 * time.Second
	maxDealsPerMsg := uint64(2)

	// Set max deals per publish deals message to 2
	minerDef := []StorageMiner{{
		Full: 0,
		Opts: node.Override(
			new(*storageadapter.DealPublisher),
			storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
				Period:         publishPeriod,
				MaxDealsPerMsg: maxDealsPerMsg,
			})),
		Preseal: PresealGenesis,
	}}

	// Create a connected client and miner node
	n, sn := b(t, OneFull, minerDef)
	client := n[0].FullNode.(*impl.FullNodeAPI)
	miner := sn[0]
	s := connectAndStartMining(t, b, blocktime, client, miner)
	defer s.blockMiner.Stop()

	// Starts a deal and waits until it's published
	runDealTillPublish := func(rseed int) {
		res, _, err := CreateClientFile(s.ctx, s.client, rseed, 0)
		require.NoError(t, err)

		upds, err := client.ClientGetDealUpdates(s.ctx)
		require.NoError(t, err)

		startDeal(t, s.ctx, s.miner, s.client, res.Root, false, startEpoch)

		// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
		time.Sleep(time.Second)

		done := make(chan struct{})
		go func() {
			for upd := range upds {
				if upd.DataRef.Root == res.Root && upd.State == storagemarket.StorageDealAwaitingPreCommit {
					done <- struct{}{}
				}
			}
		}()
		<-done
	}

	// Run three deals in parallel
	done := make(chan struct{}, maxDealsPerMsg+1)
	for rseed := 1; rseed <= 3; rseed++ {
		rseed := rseed
		go func() {
			runDealTillPublish(rseed)
			done <- struct{}{}
		}()
	}

	// Wait for two of the deals to be published
	for i := 0; i < int(maxDealsPerMsg); i++ {
		<-done
	}

	// Expect a single PublishStorageDeals message that includes the first two deals
	msgCids, err := s.client.StateListMessages(s.ctx, &api.MessageMatch{To: market.Address}, types.EmptyTSK, 1)
	require.NoError(t, err)
	count := 0
	for _, msgCid := range msgCids {
		msg, err := s.client.ChainGetMessage(s.ctx, msgCid)
		require.NoError(t, err)

		if msg.Method == market.Methods.PublishStorageDeals {
			count++
			var pubDealsParams market2.PublishStorageDealsParams
			err = pubDealsParams.UnmarshalCBOR(bytes.NewReader(msg.Params))
			require.NoError(t, err)
			require.Len(t, pubDealsParams.Deals, int(maxDealsPerMsg))
		}
	}
	require.Equal(t, 1, count)

	// The third deal should be published once the publish period expires.
	// Allow a little padding as it takes a moment for the state change to
	// be noticed by the client.
	padding := 10 * time.Second
	select {
	case <-time.After(publishPeriod + padding):
		require.Fail(t, "Expected 3rd deal to be published once publish period elapsed")
	case <-done: // Success
	}
}

func TestBatchDealInput(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
	run := func(piece, deals, expectSectors int) func(t *testing.T) {
		return func(t *testing.T) {
			publishPeriod := 10 * time.Second
			maxDealsPerMsg := uint64(deals)

			// Set max deals per publish deals message to maxDealsPerMsg
			minerDef := []StorageMiner{{
				Full: 0,
				Opts: node.Options(
					node.Override(
						new(*storageadapter.DealPublisher),
						storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
							Period:         publishPeriod,
							MaxDealsPerMsg: maxDealsPerMsg,
						})),
					node.Override(new(dtypes.GetSealingConfigFunc), func() (dtypes.GetSealingConfigFunc, error) {
						return func() (sealiface.Config, error) {
							return sealiface.Config{
								MaxWaitDealsSectors:       2,
								MaxSealingSectors:         1,
								MaxSealingSectorsForDeals: 3,
								AlwaysKeepUnsealedCopy:    true,
								WaitDealsDelay:            time.Hour,
							}, nil
						}, nil
					}),
				),
				Preseal: PresealGenesis,
			}}

			// Create a connected client and miner node
			n, sn := b(t, OneFull, minerDef)
			client := n[0].FullNode.(*impl.FullNodeAPI)
			miner := sn[0]
			s := connectAndStartMining(t, b, blocktime, client, miner)
			defer s.blockMiner.Stop()

			err := miner.MarketSetAsk(s.ctx, big.Zero(), big.Zero(), 200, 128, 32<<30)
			require.NoError(t, err)

			checkNoPadding := func() {
				sl, err := sn[0].SectorsList(s.ctx)
				require.NoError(t, err)

				sort.Slice(sl, func(i, j int) bool {
					return sl[i] < sl[j]
				})

				for _, snum := range sl {
					si, err := sn[0].SectorsStatus(s.ctx, snum, false)
					require.NoError(t, err)

					// fmt.Printf("S %d: %+v %s\n", snum, si.Deals, si.State)

					for _, deal := range si.Deals {
						if deal == 0 {
							fmt.Printf("sector %d had a padding piece!\n", snum)
						}
					}
				}
			}

			// Starts a deal and waits until it's sealed
			runDealTillSeal := func(rseed int) {
				res, _, err := CreateClientFile(s.ctx, s.client, rseed, piece)
				require.NoError(t, err)

				dc := startDeal(t, s.ctx, s.miner, s.client, res.Root, false, startEpoch)
				waitDealSealed(t, s.ctx, s.miner, s.client, dc, false, true, checkNoPadding)
			}

			// Run maxDealsPerMsg deals in parallel
			done := make(chan struct{}, maxDealsPerMsg)
			for rseed := 0; rseed < int(maxDealsPerMsg); rseed++ {
				rseed := rseed
				go func() {
					runDealTillSeal(rseed)
					done <- struct{}{}
				}()
			}

			// Wait for maxDealsPerMsg of the deals to be sealed
			for i := 0; i < int(maxDealsPerMsg); i++ {
				<-done
			}

			checkNoPadding()

			sl, err := sn[0].SectorsList(s.ctx)
			require.NoError(t, err)
			require.Equal(t, len(sl), expectSectors)
		}
	}

	t.Run("4-p1600B", run(1600, 4, 4))
	t.Run("4-p513B", run(513, 4, 2))
	if !testing.Short() {
		t.Run("32-p257B", run(257, 32, 8))
		t.Run("32-p10B", run(10, 32, 2))

		// fixme: this appears to break data-transfer / markets in some really creative ways
		//t.Run("128-p10B", run(10, 128, 8))
	}
}

func TestFastRetrievalDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
	s := setupOneClientOneMiner(t, b, blocktime)
	defer s.blockMiner.Stop()

	data := make([]byte, 1600)
	rand.New(rand.NewSource(int64(8))).Read(data)

	r := bytes.NewReader(data)
	fcid, err := s.client.ClientImportLocal(s.ctx, r)
	if err != nil {
		t.Fatal(err)
	}

	fmt.Println("FILE CID: ", fcid)

	deal := startDeal(t, s.ctx, s.miner, s.client, fcid, true, startEpoch)

	waitDealPublished(t, s.ctx, s.miner, deal)
	fmt.Println("deal published, retrieving")
	// Retrieval
	info, err := s.client.ClientGetDealInfo(s.ctx, *deal)
	require.NoError(t, err)

	testRetrieval(t, s.ctx, s.client, fcid, &info.PieceCID, false, data)
}

func TestSecondDealRetrieval(t *testing.T, b APIBuilder, blocktime time.Duration) {
	s := setupOneClientOneMiner(t, b, blocktime)
	defer s.blockMiner.Stop()

	{
		data1 := make([]byte, 800)
		rand.New(rand.NewSource(int64(3))).Read(data1)
		r := bytes.NewReader(data1)

		fcid1, err := s.client.ClientImportLocal(s.ctx, r)
		if err != nil {
			t.Fatal(err)
		}

		data2 := make([]byte, 800)
		rand.New(rand.NewSource(int64(9))).Read(data2)
		r2 := bytes.NewReader(data2)

		fcid2, err := s.client.ClientImportLocal(s.ctx, r2)
		if err != nil {
			t.Fatal(err)
		}

		deal1 := startDeal(t, s.ctx, s.miner, s.client, fcid1, true, 0)

		// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
		time.Sleep(time.Second)
		waitDealSealed(t, s.ctx, s.miner, s.client, deal1, true, false, nil)

		deal2 := startDeal(t, s.ctx, s.miner, s.client, fcid2, true, 0)

		time.Sleep(time.Second)
		waitDealSealed(t, s.ctx, s.miner, s.client, deal2, false, false, nil)

		// Retrieval
		info, err := s.client.ClientGetDealInfo(s.ctx, *deal2)
		require.NoError(t, err)

		rf, _ := s.miner.SectorsRefs(s.ctx)
		fmt.Printf("refs: %+v\n", rf)

		testRetrieval(t, s.ctx, s.client, fcid2, &info.PieceCID, false, data2)
	}
}

func TestZeroPricePerByteRetrievalDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
	s := setupOneClientOneMiner(t, b, blocktime)
	defer s.blockMiner.Stop()

	// Set price-per-byte to zero
	ask, err := s.miner.MarketGetRetrievalAsk(s.ctx)
	require.NoError(t, err)

	ask.PricePerByte = abi.NewTokenAmount(0)
	err = s.miner.MarketSetRetrievalAsk(s.ctx, ask)
	require.NoError(t, err)

	MakeDeal(t, s.ctx, 6, s.client, s.miner, false, false, startEpoch)
}

func TestOfflineDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch, fastRet bool) {
	s := setupOneClientOneMiner(t, b, blocktime)
	defer s.blockMiner.Stop()

	// Create a random file
	data, path, err := createRandomFile(1, 0)
	require.NoError(t, err)

	// Import the file on the client
	importRes, err := s.client.ClientImport(s.ctx, api.FileRef{Path: path})
	require.NoError(t, err)

	// Get the piece size and commP
	fcid := importRes.Root
	pieceInfo, err := s.client.ClientDealPieceCID(s.ctx, fcid)
	require.NoError(t, err)
	fmt.Println("FILE CID: ", fcid)

	// Create a storage deal with the miner
	maddr, err := s.miner.ActorAddress(s.ctx)
	require.NoError(t, err)

	addr, err := s.client.WalletDefaultAddress(s.ctx)
	require.NoError(t, err)

	// Manual storage deal (offline deal)
	dataRef := &storagemarket.DataRef{
		TransferType: storagemarket.TTManual,
		Root:         fcid,
		PieceCid:     &pieceInfo.PieceCID,
		PieceSize:    pieceInfo.PieceSize.Unpadded(),
	}

	proposalCid, err := s.client.ClientStartDeal(s.ctx, &api.StartDealParams{
		Data:              dataRef,
		Wallet:            addr,
		Miner:             maddr,
		EpochPrice:        types.NewInt(1000000),
		DealStartEpoch:    startEpoch,
		MinBlocksDuration: uint64(build.MinDealDuration),
		FastRetrieval:     fastRet,
	})
	require.NoError(t, err)

	// Wait for the deal to reach StorageDealCheckForAcceptance on the client
	cd, err := s.client.ClientGetDealInfo(s.ctx, *proposalCid)
	require.NoError(t, err)
	require.Eventually(t, func() bool {
		cd, _ := s.client.ClientGetDealInfo(s.ctx, *proposalCid)
		return cd.State == storagemarket.StorageDealCheckForAcceptance
	}, 30*time.Second, 1*time.Second, "actual deal status is %s", storagemarket.DealStates[cd.State])

	// Create a CAR file from the raw file
	carFileDir, err := ioutil.TempDir(os.TempDir(), "test-make-deal-car")
	require.NoError(t, err)
	carFilePath := filepath.Join(carFileDir, "out.car")
	err = s.client.ClientGenCar(s.ctx, api.FileRef{Path: path}, carFilePath)
	require.NoError(t, err)

	// Import the CAR file on the miner - this is the equivalent to
	// transferring the file across the wire in a normal (non-offline) deal
	err = s.miner.DealsImportData(s.ctx, *proposalCid, carFilePath)
	require.NoError(t, err)

	// Wait for the deal to be published
	waitDealPublished(t, s.ctx, s.miner, proposalCid)

	t.Logf("deal published, retrieving")

	// Retrieve the deal
	testRetrieval(t, s.ctx, s.client, fcid, &pieceInfo.PieceCID, false, data)
}

func startDeal(t *testing.T, ctx context.Context, miner TestStorageNode, client api.FullNode, fcid cid.Cid, fastRet bool, startEpoch abi.ChainEpoch) *cid.Cid {
	maddr, err := miner.ActorAddress(ctx)
	if err != nil {
		t.Fatal(err)
	}

	addr, err := client.WalletDefaultAddress(ctx)
	if err != nil {
		t.Fatal(err)
	}
	deal, err := client.ClientStartDeal(ctx, &api.StartDealParams{
		Data: &storagemarket.DataRef{
			TransferType: storagemarket.TTGraphsync,
			Root:         fcid,
		},
		Wallet:            addr,
		Miner:             maddr,
		EpochPrice:        types.NewInt(1000000),
		DealStartEpoch:    startEpoch,
		MinBlocksDuration: uint64(build.MinDealDuration),
		FastRetrieval:     fastRet,
	})
	if err != nil {
		t.Fatalf("%+v", err)
	}
	return deal
}

func waitDealSealed(t *testing.T, ctx context.Context, miner TestStorageNode, client api.FullNode, deal *cid.Cid, noseal, noSealStart bool, cb func()) {
loop:
	for {
		di, err := client.ClientGetDealInfo(ctx, *deal)
		if err != nil {
			t.Fatal(err)
		}
		switch di.State {
		case storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing:
			if noseal {
				return
			}
			if !noSealStart {
				startSealingWaiting(t, ctx, miner)
			}
		case storagemarket.StorageDealProposalRejected:
			t.Fatal("deal rejected")
		case storagemarket.StorageDealFailing:
			t.Fatal("deal failed")
		case storagemarket.StorageDealError:
			t.Fatal("deal errored", di.Message)
		case storagemarket.StorageDealActive:
			fmt.Println("COMPLETE", di)
			break loop
		}

		mds, err := miner.MarketListIncompleteDeals(ctx)
		if err != nil {
			t.Fatal(err)
		}

		var minerState storagemarket.StorageDealStatus
		for _, md := range mds {
			if md.DealID == di.DealID {
				minerState = md.State
				break
			}
		}

		fmt.Printf("Deal %d state: client:%s provider:%s\n", di.DealID, storagemarket.DealStates[di.State], storagemarket.DealStates[minerState])
		time.Sleep(time.Second / 2)
		if cb != nil {
			cb()
		}
	}
}

func waitDealPublished(t *testing.T, ctx context.Context, miner TestStorageNode, deal *cid.Cid) {
	subCtx, cancel := context.WithCancel(ctx)
	defer cancel()
	updates, err := miner.MarketGetDealUpdates(subCtx)
	if err != nil {
		t.Fatal(err)
	}
	for {
		select {
		case <-ctx.Done():
			t.Fatal("context timeout")
		case di := <-updates:
			if deal.Equals(di.ProposalCid) {
				switch di.State {
				case storagemarket.StorageDealProposalRejected:
					t.Fatal("deal rejected")
				case storagemarket.StorageDealFailing:
					t.Fatal("deal failed")
				case storagemarket.StorageDealError:
					t.Fatal("deal errored", di.Message)
				case storagemarket.StorageDealFinalizing, storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing, storagemarket.StorageDealActive:
					fmt.Println("COMPLETE", di)
					return
				}
				fmt.Println("Deal state: ", storagemarket.DealStates[di.State])
			}
		}
	}
}

func startSealingWaiting(t *testing.T, ctx context.Context, miner TestStorageNode) {
	snums, err := miner.SectorsList(ctx)
	require.NoError(t, err)

	for _, snum := range snums {
		si, err := miner.SectorsStatus(ctx, snum, false)
		require.NoError(t, err)

		t.Logf("Sector %d state: %s", snum, si.State)
		if si.State == api.SectorState(sealing.WaitDeals) {
			require.NoError(t, miner.SectorStartSealing(ctx, snum))
		}
	}

	flushSealingBatches(t, ctx, miner)
}

func testRetrieval(t *testing.T, ctx context.Context, client api.FullNode, fcid cid.Cid, piece *cid.Cid, carExport bool, data []byte) {
	offers, err := client.ClientFindData(ctx, fcid, piece)
	if err != nil {
		t.Fatal(err)
	}

	if len(offers) < 1 {
		t.Fatal("no offers")
	}

	rpath, err := ioutil.TempDir("", "lotus-retrieve-test-")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(rpath) //nolint:errcheck

	caddr, err := client.WalletDefaultAddress(ctx)
	if err != nil {
		t.Fatal(err)
	}

	ref := &api.FileRef{
		Path:  filepath.Join(rpath, "ret"),
		IsCAR: carExport,
	}
	updates, err := client.ClientRetrieveWithEvents(ctx, offers[0].Order(caddr), ref)
	if err != nil {
		t.Fatal(err)
	}
	for update := range updates {
		if update.Err != "" {
			t.Fatalf("retrieval failed: %s", update.Err)
		}
	}

	rdata, err := ioutil.ReadFile(filepath.Join(rpath, "ret"))
	if err != nil {
		t.Fatal(err)
	}

	if carExport {
		rdata = extractCarData(t, ctx, rdata, rpath)
	}

	if !bytes.Equal(rdata, data) {
		t.Fatal("wrong data retrieved")
	}
}

func extractCarData(t *testing.T, ctx context.Context, rdata []byte, rpath string) []byte {
	bserv := dstest.Bserv()
	ch, err := car.LoadCar(bserv.Blockstore(), bytes.NewReader(rdata))
	if err != nil {
		t.Fatal(err)
	}
	b, err := bserv.GetBlock(ctx, ch.Roots[0])
	if err != nil {
		t.Fatal(err)
	}
	nd, err := ipld.Decode(b)
	if err != nil {
		t.Fatal(err)
	}
	dserv := dag.NewDAGService(bserv)
	fil, err := unixfile.NewUnixfsFile(ctx, dserv, nd)
	if err != nil {
		t.Fatal(err)
	}
	outPath := filepath.Join(rpath, "retLoadedCAR")
	if err := files.WriteTo(fil, outPath); err != nil {
		t.Fatal(err)
	}
	rdata, err = ioutil.ReadFile(outPath)
	if err != nil {
		t.Fatal(err)
	}
	return rdata
}

type dealsScaffold struct {
	ctx        context.Context
	client     *impl.FullNodeAPI
	miner      TestStorageNode
	blockMiner *BlockMiner
}

func setupOneClientOneMiner(t *testing.T, b APIBuilder, blocktime time.Duration) *dealsScaffold {
	n, sn := b(t, OneFull, OneMiner)
	client := n[0].FullNode.(*impl.FullNodeAPI)
	miner := sn[0]
	return connectAndStartMining(t, b, blocktime, client, miner)
}

func connectAndStartMining(t *testing.T, b APIBuilder, blocktime time.Duration, client *impl.FullNodeAPI, miner TestStorageNode) *dealsScaffold {
	ctx := context.Background()
	addrinfo, err := client.NetAddrsListen(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if err := miner.NetConnect(ctx, addrinfo); err != nil {
		t.Fatal(err)
	}
	time.Sleep(time.Second)

	blockMiner := NewBlockMiner(ctx, t, miner, blocktime)
	blockMiner.MineBlocks()

	return &dealsScaffold{
		ctx:        ctx,
		client:     client,
		miner:      miner,
		blockMiner: blockMiner,
	}
}
@@ -1,240 +0,0 @@
package test

import (
	"bytes"
	"context"
	"fmt"
	"math/rand"
	"sync/atomic"
	"testing"
	"time"

	logging "github.com/ipfs/go-log/v2"

	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/miner"
	"github.com/filecoin-project/lotus/node/impl"
)

//nolint:deadcode,varcheck
var log = logging.Logger("apitest")

func (ts *testSuite) testMining(t *testing.T) {
	ctx := context.Background()
	apis, sn := ts.makeNodes(t, OneFull, OneMiner)
	api := apis[0]

	newHeads, err := api.ChainNotify(ctx)
	require.NoError(t, err)
	initHead := (<-newHeads)[0]
	baseHeight := initHead.Val.Height()

	h1, err := api.ChainHead(ctx)
	require.NoError(t, err)
	require.Equal(t, int64(h1.Height()), int64(baseHeight))

	MineUntilBlock(ctx, t, apis[0], sn[0], nil)
	require.NoError(t, err)

	<-newHeads

	h2, err := api.ChainHead(ctx)
	require.NoError(t, err)
	require.Greater(t, int64(h2.Height()), int64(h1.Height()))
}

func (ts *testSuite) testMiningReal(t *testing.T) {
	build.InsecurePoStValidation = false
	defer func() {
		build.InsecurePoStValidation = true
	}()

	ctx := context.Background()
	apis, sn := ts.makeNodes(t, OneFull, OneMiner)
	api := apis[0]

	newHeads, err := api.ChainNotify(ctx)
	require.NoError(t, err)
	at := (<-newHeads)[0].Val.Height()

	h1, err := api.ChainHead(ctx)
	require.NoError(t, err)
	require.Equal(t, int64(at), int64(h1.Height()))

	MineUntilBlock(ctx, t, apis[0], sn[0], nil)
	require.NoError(t, err)

	<-newHeads

	h2, err := api.ChainHead(ctx)
	require.NoError(t, err)
	require.Greater(t, int64(h2.Height()), int64(h1.Height()))

	MineUntilBlock(ctx, t, apis[0], sn[0], nil)
	require.NoError(t, err)

	<-newHeads

	h3, err := api.ChainHead(ctx)
	require.NoError(t, err)
	require.Greater(t, int64(h3.Height()), int64(h2.Height()))
}

func TestDealMining(t *testing.T, b APIBuilder, blocktime time.Duration, carExport bool) {
	// test making a deal with a fresh miner, and see if it starts to mine

	ctx := context.Background()
	n, sn := b(t, OneFull, []StorageMiner{
		{Full: 0, Preseal: PresealGenesis},
		{Full: 0, Preseal: 0}, // TODO: Add support for miners on non-first full node
	})
	client := n[0].FullNode.(*impl.FullNodeAPI)
	provider := sn[1]
	genesisMiner := sn[0]

	addrinfo, err := client.NetAddrsListen(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if err := provider.NetConnect(ctx, addrinfo); err != nil {
		t.Fatal(err)
	}

	if err := genesisMiner.NetConnect(ctx, addrinfo); err != nil {
		t.Fatal(err)
	}

	time.Sleep(time.Second)

	data := make([]byte, 600)
	rand.New(rand.NewSource(5)).Read(data)

	r := bytes.NewReader(data)
	fcid, err := client.ClientImportLocal(ctx, r)
	if err != nil {
		t.Fatal(err)
	}

	fmt.Println("FILE CID: ", fcid)

	var mine int32 = 1
	done := make(chan struct{})
	minedTwo := make(chan struct{})

	m2addr, err := sn[1].ActorAddress(context.TODO())
	if err != nil {
		t.Fatal(err)
	}

	go func() {
		defer close(done)

		complChan := minedTwo
		for atomic.LoadInt32(&mine) != 0 {
			wait := make(chan int)
			mdone := func(mined bool, _ abi.ChainEpoch, err error) {
				n := 0
				if mined {
					n = 1
				}
				wait <- n
			}

			if err := sn[0].MineOne(ctx, miner.MineReq{Done: mdone}); err != nil {
				t.Error(err)
			}

			if err := sn[1].MineOne(ctx, miner.MineReq{Done: mdone}); err != nil {
				t.Error(err)
			}

			expect := <-wait
			expect += <-wait

			time.Sleep(blocktime)
			if expect == 0 {
				// null block
				continue
			}

			var nodeOneMined bool
			for _, node := range sn {
				mb, err := node.MiningBase(ctx)
				if err != nil {
					t.Error(err)
					return
				}

				for _, b := range mb.Blocks() {
					if b.Miner == m2addr {
						nodeOneMined = true
						break
					}
				}
			}

			if nodeOneMined && complChan != nil {
				close(complChan)
				complChan = nil
			}
		}
	}()

	deal := startDeal(t, ctx, provider, client, fcid, false, 0)

	// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
	time.Sleep(time.Second)

	waitDealSealed(t, ctx, provider, client, deal, false, false, nil)

	<-minedTwo

	atomic.StoreInt32(&mine, 0)
	fmt.Println("shutting down mining")
	<-done
}

func (ts *testSuite) testNonGenesisMiner(t *testing.T) {
	ctx := context.Background()
	n, sn := ts.makeNodes(t, []FullNodeOpts{
		FullNodeWithLatestActorsAt(-1),
	}, []StorageMiner{
		{Full: 0, Preseal: PresealGenesis},
	})

	full, ok := n[0].FullNode.(*impl.FullNodeAPI)
	if !ok {
		t.Skip("not testing with a full node")
		return
	}
	genesisMiner := sn[0]

	bm := NewBlockMiner(ctx, t, genesisMiner, 4*time.Millisecond)
	bm.MineBlocks()
	t.Cleanup(bm.Stop)

	gaa, err := genesisMiner.ActorAddress(ctx)
	require.NoError(t, err)

	gmi, err := full.StateMinerInfo(ctx, gaa, types.EmptyTSK)
	require.NoError(t, err)

	testm := n[0].Stb(ctx, t, TestSpt, gmi.Owner)

	ta, err := testm.ActorAddress(ctx)
	require.NoError(t, err)

	tid, err := address.IDFromAddress(ta)
	require.NoError(t, err)

	require.Equal(t, uint64(1001), tid)
}
@@ -1,389 +0,0 @@
package test

import (
	"context"
	"fmt"
	"sort"
	"strings"
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/network"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/stmgr"
	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
	bminer "github.com/filecoin-project/lotus/miner"
	"github.com/filecoin-project/lotus/node"
	"github.com/filecoin-project/lotus/node/impl"
)

func TestSDRUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	n, sn := b(t, []FullNodeOpts{FullNodeWithSDRAt(500, 1000)}, OneMiner)
	client := n[0].FullNode.(*impl.FullNodeAPI)
	miner := sn[0]

	addrinfo, err := client.NetAddrsListen(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if err := miner.NetConnect(ctx, addrinfo); err != nil {
		t.Fatal(err)
	}
	build.Clock.Sleep(time.Second)

	pledge := make(chan struct{})
	mine := int64(1)
	done := make(chan struct{})
	go func() {
		defer close(done)
		round := 0
		for atomic.LoadInt64(&mine) != 0 {
			build.Clock.Sleep(blocktime)
			if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {

			}}); err != nil {
				t.Error(err)
			}

			// 3 sealing rounds: before, during, after.
			if round >= 3 {
				continue
			}

			head, err := client.ChainHead(ctx)
			assert.NoError(t, err)

			// rounds happen every 500 blocks, with a 50 block offset.
			if head.Height() >= abi.ChainEpoch(round*500+50) {
				round++
				pledge <- struct{}{}

				ver, err := client.StateNetworkVersion(ctx, head.Key())
				assert.NoError(t, err)
				switch round {
				case 1:
					assert.Equal(t, network.Version6, ver)
				case 2:
					assert.Equal(t, network.Version7, ver)
				case 3:
					assert.Equal(t, network.Version8, ver)
				}
			}
		}
	}()

	// before.
	pledgeSectors(t, ctx, miner, 9, 0, pledge)

	s, err := miner.SectorsList(ctx)
	require.NoError(t, err)
	sort.Slice(s, func(i, j int) bool {
		return s[i] < s[j]
	})

	for i, id := range s {
		info, err := miner.SectorsStatus(ctx, id, true)
		require.NoError(t, err)
		expectProof := abi.RegisteredSealProof_StackedDrg2KiBV1
		if i >= 3 {
			// after
			expectProof = abi.RegisteredSealProof_StackedDrg2KiBV1_1
		}
		assert.Equal(t, expectProof, info.SealProof, "sector %d, id %d", i, id)
	}

	atomic.StoreInt64(&mine, 0)
	<-done
}

func TestPledgeBatching(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	n, sn := b(t, []FullNodeOpts{FullNodeWithLatestActorsAt(-1)}, OneMiner)
	client := n[0].FullNode.(*impl.FullNodeAPI)
	miner := sn[0]

	addrinfo, err := client.NetAddrsListen(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if err := miner.NetConnect(ctx, addrinfo); err != nil {
		t.Fatal(err)
	}
	build.Clock.Sleep(time.Second)

	mine := int64(1)
	done := make(chan struct{})
	go func() {
		defer close(done)
		for atomic.LoadInt64(&mine) != 0 {
			build.Clock.Sleep(blocktime)
			if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {

			}}); err != nil {
				t.Error(err)
			}
		}
	}()

	for {
		h, err := client.ChainHead(ctx)
		require.NoError(t, err)
		if h.Height() > 10 {
			break
		}
	}

	toCheck := startPledge(t, ctx, miner, nSectors, 0, nil)

	for len(toCheck) > 0 {
		states := map[api.SectorState]int{}

		for n := range toCheck {
			st, err := miner.SectorsStatus(ctx, n, false)
			require.NoError(t, err)
			states[st.State]++
			if st.State == api.SectorState(sealing.Proving) {
				delete(toCheck, n)
			}
			if strings.Contains(string(st.State), "Fail") {
				t.Fatal("sector in a failed state", st.State)
			}
		}
		if states[api.SectorState(sealing.SubmitPreCommitBatch)] == nSectors ||
			(states[api.SectorState(sealing.SubmitPreCommitBatch)] > 0 && states[api.SectorState(sealing.PreCommit1)] == 0 && states[api.SectorState(sealing.PreCommit2)] == 0) {
			pcb, err := miner.SectorPreCommitFlush(ctx)
			require.NoError(t, err)
			if pcb != nil {
				fmt.Printf("PRECOMMIT BATCH: %+v\n", pcb)
			}
		}

		if states[api.SectorState(sealing.SubmitCommitAggregate)] == nSectors ||
			(states[api.SectorState(sealing.SubmitCommitAggregate)] > 0 && states[api.SectorState(sealing.WaitSeed)] == 0 && states[api.SectorState(sealing.Committing)] == 0) {
			cb, err := miner.SectorCommitFlush(ctx)
			require.NoError(t, err)
			if cb != nil {
				fmt.Printf("COMMIT BATCH: %+v\n", cb)
			}
		}

		build.Clock.Sleep(100 * time.Millisecond)
		fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states)
	}

	atomic.StoreInt64(&mine, 0)
	<-done
}

func TestPledgeBeforeNv13(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	n, sn := b(t, []FullNodeOpts{
		{
			Opts: func(nodes []TestNode) node.Option {
				return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
					Network:   network.Version9,
					Height:    1,
					Migration: stmgr.UpgradeActorsV2,
				}, {
					Network:   network.Version10,
					Height:    2,
					Migration: stmgr.UpgradeActorsV3,
				}, {
					Network:   network.Version12,
					Height:    3,
					Migration: stmgr.UpgradeActorsV4,
				}, {
					Network:   network.Version13,
					Height:    1000000000,
					Migration: stmgr.UpgradeActorsV5,
				}})
			},
		},
	}, OneMiner)
	client := n[0].FullNode.(*impl.FullNodeAPI)
	miner := sn[0]

	addrinfo, err := client.NetAddrsListen(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if err := miner.NetConnect(ctx, addrinfo); err != nil {
		t.Fatal(err)
	}
	build.Clock.Sleep(time.Second)

	mine := int64(1)
	done := make(chan struct{})
	go func() {
		defer close(done)
		for atomic.LoadInt64(&mine) != 0 {
			build.Clock.Sleep(blocktime)
			if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {

			}}); err != nil {
				t.Error(err)
			}
		}
	}()

	for {
		h, err := client.ChainHead(ctx)
		require.NoError(t, err)
		if h.Height() > 10 {
			break
		}
	}

	toCheck := startPledge(t, ctx, miner, nSectors, 0, nil)

	for len(toCheck) > 0 {
		states := map[api.SectorState]int{}

		for n := range toCheck {
			st, err := miner.SectorsStatus(ctx, n, false)
			require.NoError(t, err)
			states[st.State]++
			if st.State == api.SectorState(sealing.Proving) {
				delete(toCheck, n)
			}
			if strings.Contains(string(st.State), "Fail") {
				t.Fatal("sector in a failed state", st.State)
			}
		}

		build.Clock.Sleep(100 * time.Millisecond)
		fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states)
	}

	atomic.StoreInt64(&mine, 0)
	<-done
}

func TestPledgeSector(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	n, sn := b(t, OneFull, OneMiner)
	client := n[0].FullNode.(*impl.FullNodeAPI)
	miner := sn[0]

	addrinfo, err := client.NetAddrsListen(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if err := miner.NetConnect(ctx, addrinfo); err != nil {
		t.Fatal(err)
	}
	build.Clock.Sleep(time.Second)

	mine := int64(1)
	done := make(chan struct{})
	go func() {
		defer close(done)
		for atomic.LoadInt64(&mine) != 0 {
			build.Clock.Sleep(blocktime)
			if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {

			}}); err != nil {
				t.Error(err)
			}
		}
	}()

	pledgeSectors(t, ctx, miner, nSectors, 0, nil)

	atomic.StoreInt64(&mine, 0)
	<-done
}

func flushSealingBatches(t *testing.T, ctx context.Context, miner TestStorageNode) {
	pcb, err := miner.SectorPreCommitFlush(ctx)
	require.NoError(t, err)
	if pcb != nil {
		fmt.Printf("PRECOMMIT BATCH: %+v\n", pcb)
	}

	cb, err := miner.SectorCommitFlush(ctx)
	require.NoError(t, err)
	if cb != nil {
		fmt.Printf("COMMIT BATCH: %+v\n", cb)
	}
}

func startPledge(t *testing.T, ctx context.Context, miner TestStorageNode, n, existing int, blockNotif <-chan struct{}) map[abi.SectorNumber]struct{} {
	for i := 0; i < n; i++ {
		if i%3 == 0 && blockNotif != nil {
			<-blockNotif
			log.Errorf("WAIT")
		}
		log.Errorf("PLEDGING %d", i)
		_, err := miner.PledgeSector(ctx)
		require.NoError(t, err)
	}

	for {
		s, err := miner.SectorsList(ctx) // Note - the test builder doesn't import genesis sectors into FSM
		require.NoError(t, err)
		fmt.Printf("Sectors: %d\n", len(s))
		if len(s) >= n+existing {
			break
		}

		build.Clock.Sleep(100 * time.Millisecond)
	}

	fmt.Printf("All sectors in FSM\n")

	s, err := miner.SectorsList(ctx)
	require.NoError(t, err)

	toCheck := map[abi.SectorNumber]struct{}{}
	for _, number := range s {
		toCheck[number] = struct{}{}
	}

	return toCheck
}

func pledgeSectors(t *testing.T, ctx context.Context, miner TestStorageNode, n, existing int, blockNotif <-chan struct{}) {
	toCheck := startPledge(t, ctx, miner, n, existing, blockNotif)

	for len(toCheck) > 0 {
		flushSealingBatches(t, ctx, miner)

		states := map[api.SectorState]int{}
		for n := range toCheck {
			st, err := miner.SectorsStatus(ctx, n, false)
			require.NoError(t, err)
			states[st.State]++
			if st.State == api.SectorState(sealing.Proving) {
				delete(toCheck, n)
			}
			if strings.Contains(string(st.State), "Fail") {
				t.Fatal("sector in a failed state", st.State)
			}
		}

		build.Clock.Sleep(100 * time.Millisecond)
		fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states)
	}
}
313 api/test/test.go
@@ -1,313 +0,0 @@
package test

import (
	"context"
	"fmt"
	"os"
	"strings"
	"testing"
	"time"

	logging "github.com/ipfs/go-log/v2"
	"github.com/multiformats/go-multiaddr"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
	"github.com/filecoin-project/go-state-types/network"

	lapi "github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/api/v1api"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/stmgr"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/miner"
	"github.com/filecoin-project/lotus/node"
)

func init() {
	logging.SetAllLoggers(logging.LevelInfo)
	err := os.Setenv("BELLMAN_NO_GPU", "1")
	if err != nil {
		panic(fmt.Sprintf("failed to set BELLMAN_NO_GPU env variable: %s", err))
	}
	build.InsecurePoStValidation = true
}

type StorageBuilder func(context.Context, *testing.T, abi.RegisteredSealProof, address.Address) TestStorageNode

type TestNode struct {
	v1api.FullNode
	// ListenAddr is the address on which an API server is listening, if an
	// API server is created for this Node
	ListenAddr multiaddr.Multiaddr

	Stb StorageBuilder
}

type TestStorageNode struct {
	lapi.StorageMiner
	// ListenAddr is the address on which an API server is listening, if an
	// API server is created for this Node
	ListenAddr multiaddr.Multiaddr

	MineOne func(context.Context, miner.MineReq) error
	Stop    func(context.Context) error
}

var PresealGenesis = -1

const GenesisPreseals = 2

const TestSpt = abi.RegisteredSealProof_StackedDrg2KiBV1_1

// Options for setting up a mock storage miner
type StorageMiner struct {
	Full    int
	Opts    node.Option
	Preseal int
}

type OptionGenerator func([]TestNode) node.Option

// Options for setting up a mock full node
type FullNodeOpts struct {
	Lite bool            // run node in "lite" mode
	Opts OptionGenerator // generate dependency injection options
}

// APIBuilder is a function which is invoked in the test suite to provide
// test nodes and networks.
//
// The fullOpts array defines options for each full node; the storage array
// defines storage nodes, and the numbers in the array specify the index of
// the full node each storage node 'belongs' to.
type APIBuilder func(t *testing.T, full []FullNodeOpts, storage []StorageMiner) ([]TestNode, []TestStorageNode)
|
||||
type testSuite struct {
|
||||
makeNodes APIBuilder
|
||||
}
|
||||
|
||||
// TestApis is the entry point to API test suite
|
||||
func TestApis(t *testing.T, b APIBuilder) {
|
||||
ts := testSuite{
|
||||
makeNodes: b,
|
||||
}
|
||||
|
||||
t.Run("version", ts.testVersion)
|
||||
t.Run("id", ts.testID)
|
||||
t.Run("testConnectTwo", ts.testConnectTwo)
|
||||
t.Run("testMining", ts.testMining)
|
||||
t.Run("testMiningReal", ts.testMiningReal)
|
||||
t.Run("testSearchMsg", ts.testSearchMsg)
|
||||
t.Run("testNonGenesisMiner", ts.testNonGenesisMiner)
|
||||
}
|
||||
|
||||
func DefaultFullOpts(nFull int) []FullNodeOpts {
|
||||
full := make([]FullNodeOpts, nFull)
|
||||
for i := range full {
|
||||
full[i] = FullNodeOpts{
|
||||
Opts: func(nodes []TestNode) node.Option {
|
||||
return node.Options()
|
||||
},
|
||||
}
|
||||
}
|
||||
return full
|
||||
}
|
||||
|
||||
var OneMiner = []StorageMiner{{Full: 0, Preseal: PresealGenesis}}
|
||||
var OneFull = DefaultFullOpts(1)
|
||||
var TwoFull = DefaultFullOpts(2)
|
||||
|
||||
var FullNodeWithLatestActorsAt = func(upgradeHeight abi.ChainEpoch) FullNodeOpts {
|
||||
// Attention: Update this when introducing new actor versions or your tests will be sad
|
||||
return FullNodeWithNetworkUpgradeAt(network.Version13, upgradeHeight)
|
||||
}
|
||||
|
||||
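// FullNodeWithNetworkUpgradeAt truncates the full upgrade schedule at the
// requested network version; if upgradeHeight is positive it overrides the
// height of the final (target) upgrade.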
var FullNodeWithNetworkUpgradeAt = func(version network.Version, upgradeHeight abi.ChainEpoch) FullNodeOpts {
    fullSchedule := stmgr.UpgradeSchedule{{
        // prepare for upgrade.
        Network:   network.Version9,
        Height:    1,
        Migration: stmgr.UpgradeActorsV2,
    }, {
        Network:   network.Version10,
        Height:    2,
        Migration: stmgr.UpgradeActorsV3,
    }, {
        Network:   network.Version12,
        Height:    3,
        Migration: stmgr.UpgradeActorsV4,
    }, {
        Network:   network.Version13,
        Height:    4,
        Migration: stmgr.UpgradeActorsV5,
    }}

    schedule := stmgr.UpgradeSchedule{}
    for _, upgrade := range fullSchedule {
        if upgrade.Network > version {
            break
        }

        schedule = append(schedule, upgrade)
    }

    if upgradeHeight > 0 {
        schedule[len(schedule)-1].Height = upgradeHeight
    }

    return FullNodeOpts{
        Opts: func(nodes []TestNode) node.Option {
            return node.Override(new(stmgr.UpgradeSchedule), schedule)
        },
    }
}

var FullNodeWithSDRAt = func(calico, persian abi.ChainEpoch) FullNodeOpts {
    return FullNodeOpts{
        Opts: func(nodes []TestNode) node.Option {
            return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
                Network:   network.Version6,
                Height:    1,
                Migration: stmgr.UpgradeActorsV2,
            }, {
                Network:   network.Version7,
                Height:    calico,
                Migration: stmgr.UpgradeCalico,
            }, {
                Network: network.Version8,
                Height:  persian,
            }})
        },
    }
}

var MineNext = miner.MineReq{
    InjectNulls: 0,
    Done:        func(bool, abi.ChainEpoch, error) {},
}

func (ts *testSuite) testVersion(t *testing.T) {
    lapi.RunningNodeType = lapi.NodeFull
    t.Cleanup(func() {
        lapi.RunningNodeType = lapi.NodeUnknown
    })

    ctx := context.Background()
    apis, _ := ts.makeNodes(t, OneFull, OneMiner)
    napi := apis[0]

    v, err := napi.Version(ctx)
    if err != nil {
        t.Fatal(err)
    }
    versions := strings.Split(v.Version, "+")
    if len(versions) <= 0 {
        t.Fatal("empty version")
    }
    require.Equal(t, versions[0], build.BuildVersion)
}

func (ts *testSuite) testSearchMsg(t *testing.T) {
    apis, miners := ts.makeNodes(t, OneFull, OneMiner)

    api := apis[0]
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    senderAddr, err := api.WalletDefaultAddress(ctx)
    if err != nil {
        t.Fatal(err)
    }

    msg := &types.Message{
        From:  senderAddr,
        To:    senderAddr,
        Value: big.Zero(),
    }
    bm := NewBlockMiner(ctx, t, miners[0], 100*time.Millisecond)
    bm.MineBlocks()
    defer bm.Stop()

    sm, err := api.MpoolPushMessage(ctx, msg, nil)
    if err != nil {
        t.Fatal(err)
    }
    res, err := api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
    if err != nil {
        t.Fatal(err)
    }
    if res.Receipt.ExitCode != 0 {
        t.Fatal("did not successfully send message")
    }

    searchRes, err := api.StateSearchMsg(ctx, types.EmptyTSK, sm.Cid(), lapi.LookbackNoLimit, true)
    if err != nil {
        t.Fatal(err)
    }

    if searchRes.TipSet != res.TipSet {
        t.Fatalf("search ts: %s, different from wait ts: %s", searchRes.TipSet, res.TipSet)
    }
}

func (ts *testSuite) testID(t *testing.T) {
    ctx := context.Background()
    apis, _ := ts.makeNodes(t, OneFull, OneMiner)
    api := apis[0]

    id, err := api.ID(ctx)
    if err != nil {
        t.Fatal(err)
    }
    assert.Regexp(t, "^12", id.Pretty())
}

func (ts *testSuite) testConnectTwo(t *testing.T) {
    ctx := context.Background()
    apis, _ := ts.makeNodes(t, TwoFull, OneMiner)

    p, err := apis[0].NetPeers(ctx)
    if err != nil {
        t.Fatal(err)
    }
    if len(p) != 0 {
        t.Error("Node 0 has a peer")
    }

    p, err = apis[1].NetPeers(ctx)
    if err != nil {
        t.Fatal(err)
    }
    if len(p) != 0 {
        t.Error("Node 1 has a peer")
    }

    addrs, err := apis[1].NetAddrsListen(ctx)
    if err != nil {
        t.Fatal(err)
    }

    if err := apis[0].NetConnect(ctx, addrs); err != nil {
        t.Fatal(err)
    }

    p, err = apis[0].NetPeers(ctx)
    if err != nil {
        t.Fatal(err)
    }
    if len(p) != 1 {
        t.Error("Node 0 doesn't have 1 peer")
    }

    p, err = apis[1].NetPeers(ctx)
    if err != nil {
        t.Fatal(err)
    }
    if len(p) != 1 {
        t.Error("Node 1 doesn't have 1 peer")
    }
}

@ -1,87 +0,0 @@
package test

import (
    "context"
    "testing"
    "time"

    "github.com/filecoin-project/go-state-types/abi"

    "github.com/filecoin-project/go-address"
    lapi "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/chain/types"
    "github.com/filecoin-project/lotus/miner"
)

func SendFunds(ctx context.Context, t *testing.T, sender TestNode, addr address.Address, amount abi.TokenAmount) {
    senderAddr, err := sender.WalletDefaultAddress(ctx)
    if err != nil {
        t.Fatal(err)
    }

    msg := &types.Message{
        From:  senderAddr,
        To:    addr,
        Value: amount,
    }

    sm, err := sender.MpoolPushMessage(ctx, msg, nil)
    if err != nil {
        t.Fatal(err)
    }
    res, err := sender.StateWaitMsg(ctx, sm.Cid(), 3, lapi.LookbackNoLimit, true)
    if err != nil {
        t.Fatal(err)
    }
    if res.Receipt.ExitCode != 0 {
        t.Fatal("did not successfully send money")
    }
}

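// MineUntilBlock attempts to mine for up to 1000 rounds, returning once a
// block wins and has propagated to the given full node's chain head.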
func MineUntilBlock(ctx context.Context, t *testing.T, fn TestNode, sn TestStorageNode, cb func(abi.ChainEpoch)) {
    for i := 0; i < 1000; i++ {
        var success bool
        var err error
        var epoch abi.ChainEpoch
        wait := make(chan struct{})
        mineErr := sn.MineOne(ctx, miner.MineReq{
            Done: func(win bool, ep abi.ChainEpoch, e error) {
                success = win
                err = e
                epoch = ep
                wait <- struct{}{}
            },
        })
        if mineErr != nil {
            t.Fatal(mineErr)
        }
        <-wait
        if err != nil {
            t.Fatal(err)
        }
        if success {
            // Wait until it shows up on the given full nodes ChainHead
            nloops := 50
            for i := 0; i < nloops; i++ {
                ts, err := fn.ChainHead(ctx)
                if err != nil {
                    t.Fatal(err)
                }
                if ts.Height() == epoch {
                    break
                }
                if i == nloops-1 {
                    t.Fatal("block never managed to sync to node")
                }
                time.Sleep(time.Millisecond * 10)
            }

            if cb != nil {
                cb(epoch)
            }
            return
        }
        t.Log("did not mine block, trying again", i)
    }
    t.Fatal("failed to mine 1000 times in a row...")
}

File diff suppressed because it is too large
@ -1,2 +1,2 @@
/dns4/bootstrap-0.butterfly.fildev.network/tcp/1347/p2p/12D3KooWRkaF18SR3E6qL6dkGrozT8QJUV5VbhE9E7BZtPmHqdWJ
/dns4/bootstrap-1.butterfly.fildev.network/tcp/1347/p2p/12D3KooWJcJUc23WJjJHGSboGcU3t76z9Lb7CghrH2tiBiDCY4ux
/dns4/bootstrap-0.butterfly.fildev.network/tcp/1347/p2p/12D3KooWBbZd7Su9XfLUQ12RynGQ3ZmGY1nGqFntmqop9pLNJE6g
/dns4/bootstrap-1.butterfly.fildev.network/tcp/1347/p2p/12D3KooWGKRzEY4tJFTmAmrYUpa1CVVohmV9YjJbC9v5XWY2gUji

Binary file not shown.
@ -45,7 +45,8 @@ const UpgradeNorwegianHeight = 114000

const UpgradeTurboHeight = 193789

const UpgradeHyperdriveHeight = 9999999
// 2021-06-11T14:30:00Z
const UpgradeHyperdriveHeight = 321519

func init() {
    policy.SetConsensusMinerMinPower(abi.NewStoragePower(32 << 30))

@ -3,6 +3,8 @@ package policy
import (
    "sort"

    "github.com/filecoin-project/go-state-types/big"

    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/go-state-types/network"
    "github.com/filecoin-project/lotus/chain/actors"
@ -367,3 +369,31 @@ func GetDeclarationsMax(nwVer network.Version) int {
        panic("unsupported network version")
    }
}

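// AggregateNetworkFee is the extra network fee charged when landing an
// aggregated ProveCommit on chain; as the switch below shows, it is zero for
// actors v0-v4 and only takes effect from actors v5 (the Hyperdrive upgrade
// introduced elsewhere in this diff).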
func AggregateNetworkFee(nwVer network.Version, aggregateSize int, baseFee abi.TokenAmount) abi.TokenAmount {
    switch actors.VersionForNetwork(nwVer) {

    case actors.Version0:

        return big.Zero()

    case actors.Version2:

        return big.Zero()

    case actors.Version3:

        return big.Zero()

    case actors.Version4:

        return big.Zero()

    case actors.Version5:

        return miner5.AggregateNetworkFee(aggregateSize, baseFee)

    default:
        panic("unsupported network version")
    }
}

@ -3,6 +3,8 @@ package policy
import (
    "sort"

    "github.com/filecoin-project/go-state-types/big"

    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/go-state-types/network"
    "github.com/filecoin-project/lotus/chain/actors"
@ -246,3 +248,18 @@ func GetDeclarationsMax(nwVer network.Version) int {
        panic("unsupported network version")
    }
}

func AggregateNetworkFee(nwVer network.Version, aggregateSize int, baseFee abi.TokenAmount) abi.TokenAmount {
    switch actors.VersionForNetwork(nwVer) {
    {{range .versions}}
    case actors.Version{{.}}:
        {{if (le . 4)}}
        return big.Zero()
        {{else}}
        return miner{{.}}.AggregateNetworkFee(aggregateSize, baseFee)
        {{end}}
    {{end}}
    default:
        panic("unsupported network version")
    }
}

@ -243,7 +243,7 @@ func (mp *MessagePool) checkMessages(msgs []*types.Message, interned bool, flexi
        },
    }

    if len(bytes) > 32*1024-128 { // 128 bytes to account for signature size
    if len(bytes) > MaxMessageSize-128 { // 128 bytes to account for signature size
        check.OK = false
        check.Err = "message too big"
    } else {

@ -59,6 +59,8 @@ var MaxUntrustedActorPendingMessages = 10

var MaxNonceGap = uint64(4)

const MaxMessageSize = 64 << 10 // 64KiB

var (
    ErrMessageTooBig = errors.New("message too big")

@ -665,7 +667,7 @@ func (mp *MessagePool) Push(ctx context.Context, m *types.SignedMessage) (cid.Ci

func (mp *MessagePool) checkMessage(m *types.SignedMessage) error {
    // big messages are bad, anti DOS
    if m.Size() > 32*1024 {
    if m.Size() > MaxMessageSize {
        return xerrors.Errorf("mpool message too large (%dB): %w", m.Size(), ErrMessageTooBig)
    }

@ -14,12 +14,14 @@ import (

    builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"

    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/chain/messagepool/gasguess"
    "github.com/filecoin-project/lotus/chain/types"
    "github.com/filecoin-project/lotus/chain/types/mock"
    "github.com/filecoin-project/lotus/chain/wallet"
    _ "github.com/filecoin-project/lotus/lib/sigs/bls"
    _ "github.com/filecoin-project/lotus/lib/sigs/secp"
    "github.com/stretchr/testify/assert"
)

func init() {
@ -260,6 +262,72 @@ func TestMessagePool(t *testing.T) {
    assertNonce(t, mp, sender, 2)
}

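// TestCheckMessageBig exercises the mpool message size cap: a message with a
// 41KiB payload fits under MaxMessageSize and is accepted, while one with a
// 64KiB payload pushes the serialized size over the cap and is rejected with
// ErrMessageTooBig.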
func TestCheckMessageBig(t *testing.T) {
    tma := newTestMpoolAPI()

    w, err := wallet.NewWallet(wallet.NewMemKeyStore())
    assert.NoError(t, err)

    from, err := w.WalletNew(context.Background(), types.KTBLS)
    assert.NoError(t, err)

    tma.setBalance(from, 1000e9)

    ds := datastore.NewMapDatastore()

    mp, err := New(tma, ds, "mptest", nil)
    assert.NoError(t, err)

    to := mock.Address(1001)

    {
        msg := &types.Message{
            To:         to,
            From:       from,
            Value:      types.NewInt(1),
            Nonce:      0,
            GasLimit:   50000000,
            GasFeeCap:  types.NewInt(100),
            GasPremium: types.NewInt(1),
            Params:     make([]byte, 41<<10), // 41KiB payload
        }

        sig, err := w.WalletSign(context.TODO(), from, msg.Cid().Bytes(), api.MsgMeta{})
        if err != nil {
            panic(err)
        }
        sm := &types.SignedMessage{
            Message:   *msg,
            Signature: *sig,
        }
        mustAdd(t, mp, sm)
    }

    {
        msg := &types.Message{
            To:         to,
            From:       from,
            Value:      types.NewInt(1),
            Nonce:      0,
            GasLimit:   50000000,
            GasFeeCap:  types.NewInt(100),
            GasPremium: types.NewInt(1),
            Params:     make([]byte, 64<<10), // 64KiB payload
        }

        sig, err := w.WalletSign(context.TODO(), from, msg.Cid().Bytes(), api.MsgMeta{})
        if err != nil {
            panic(err)
        }
        sm := &types.SignedMessage{
            Message:   *msg,
            Signature: *sig,
        }
        err = mp.Add(context.TODO(), sm)
        assert.ErrorIs(t, err, ErrMessageTooBig)
    }
}

func TestMessagePoolMessagesInEachBlock(t *testing.T) {
    tma := newTestMpoolAPI()

@ -557,7 +557,7 @@ func (mv *MessageValidator) validateLocalMessage(ctx context.Context, msg *pubsu
        return pubsub.ValidationIgnore
    }

    if m.Size() > 32*1024 {
    if m.Size() > messagepool.MaxMessageSize {
        log.Warnf("local message is too large! (%dB)", m.Size())
        recordFailure(ctx, metrics.MessageValidationFailure, "oversize")
        return pubsub.ValidationIgnore

@ -1,22 +0,0 @@
package cli

import (
    "context"
    "os"
    "testing"
    "time"

    clitest "github.com/filecoin-project/lotus/cli/test"
)

// TestClient does a basic test to exercise the client CLI
// commands
func TestClient(t *testing.T) {
    _ = os.Setenv("BELLMAN_NO_GPU", "1")
    clitest.QuietMiningLogs()

    blocktime := 5 * time.Millisecond
    ctx := context.Background()
    clientNode, _ := clitest.StartOneNodeOneMiner(ctx, t, blocktime)
    clitest.RunClientTest(t, Commands, clientNode)
}

@ -1,22 +0,0 @@
package cli

import (
    "context"
    "os"
    "testing"
    "time"

    clitest "github.com/filecoin-project/lotus/cli/test"
)

// TestMultisig does a basic test to exercise the multisig CLI
// commands
func TestMultisig(t *testing.T) {
    _ = os.Setenv("BELLMAN_NO_GPU", "1")
    clitest.QuietMiningLogs()

    blocktime := 5 * time.Millisecond
    ctx := context.Background()
    clientNode, _ := clitest.StartOneNodeOneMiner(ctx, t, blocktime)
    clitest.RunMultisigTest(t, Commands, clientNode)
}

17
cli/state.go
17
cli/state.go
@ -281,17 +281,26 @@ var StatePowerCmd = &cli.Command{

    ctx := ReqContext(cctx)

    ts, err := LoadTipSet(ctx, cctx, api)
    if err != nil {
        return err
    }

    var maddr address.Address
    if cctx.Args().Present() {
        maddr, err = address.NewFromString(cctx.Args().First())
        if err != nil {
            return err
        }
    }

    ts, err := LoadTipSet(ctx, cctx, api)
    if err != nil {
        return err
        ma, err := api.StateGetActor(ctx, maddr, ts.Key())
        if err != nil {
            return err
        }

        if !builtin.IsStorageMinerActor(ma.Code) {
            return xerrors.New("provided address does not correspond to a miner actor")
        }
    }

    power, err := api.StateMinerPower(ctx, maddr, ts.Key())

@ -1,14 +0,0 @@
package test

import "github.com/ipfs/go-log/v2"

func QuietMiningLogs() {
    _ = log.SetLogLevel("miner", "ERROR")
    _ = log.SetLogLevel("chainstore", "ERROR")
    _ = log.SetLogLevel("chain", "ERROR")
    _ = log.SetLogLevel("sub", "ERROR")
    _ = log.SetLogLevel("storageminer", "ERROR")
    _ = log.SetLogLevel("pubsub", "ERROR")
    _ = log.SetLogLevel("gen", "ERROR")
    _ = log.SetLogLevel("dht/RtRefreshManager", "ERROR")
}

@ -4,32 +4,29 @@ import (
    "context"
    "fmt"
    "net"
    "net/http"
    "os"

    "contrib.go.opencensus.io/exporter/prometheus"
    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/lotus/gateway"
    "github.com/gorilla/mux"
    logging "github.com/ipfs/go-log/v2"
    promclient "github.com/prometheus/client_golang/prometheus"
    "github.com/urfave/cli/v2"
    "go.opencensus.io/stats/view"
    "go.opencensus.io/tag"
    "golang.org/x/xerrors"

    logging "github.com/ipfs/go-log/v2"

    "github.com/filecoin-project/go-jsonrpc"
    "github.com/filecoin-project/go-state-types/abi"

    manet "github.com/multiformats/go-multiaddr/net"

    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/go-jsonrpc"

    lapi "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/api/client"
    "github.com/filecoin-project/lotus/api/v0api"
    "github.com/filecoin-project/lotus/api/v1api"
    "github.com/filecoin-project/lotus/build"
    "github.com/filecoin-project/lotus/chain/types"
    lcli "github.com/filecoin-project/lotus/cli"
    cliutil "github.com/filecoin-project/lotus/cli/util"
    "github.com/filecoin-project/lotus/gateway"
    "github.com/filecoin-project/lotus/lib/lotuslog"
    "github.com/filecoin-project/lotus/metrics"
    "github.com/filecoin-project/lotus/node"
)

var log = logging.Logger("gateway")
@ -140,10 +137,6 @@ var runCmd = &cli.Command{
    Action: func(cctx *cli.Context) error {
        log.Info("Starting lotus gateway")

        ctx := lcli.ReqContext(cctx)
        ctx, cancel := context.WithCancel(ctx)
        defer cancel()

        // Register all metric views
        if err := view.Register(
            metrics.ChainNodeViews...,
@ -157,70 +150,44 @@ var runCmd = &cli.Command{
        }
        defer closer()

        address := cctx.String("listen")
        mux := mux.NewRouter()
        var (
            lookbackCap  = cctx.Duration("api-max-lookback")
            address      = cctx.String("listen")
            waitLookback = abi.ChainEpoch(cctx.Int64("api-wait-lookback-limit"))
        )

        log.Info("Setting up API endpoint at " + address)

        serveRpc := func(path string, hnd interface{}) {
            serverOptions := make([]jsonrpc.ServerOption, 0)
            if maxRequestSize := cctx.Int("api-max-req-size"); maxRequestSize != 0 {
                serverOptions = append(serverOptions, jsonrpc.WithMaxRequestSize(int64(maxRequestSize)))
            }
            rpcServer := jsonrpc.NewServer(serverOptions...)
            rpcServer.Register("Filecoin", hnd)

            mux.Handle(path, rpcServer)
        serverOptions := make([]jsonrpc.ServerOption, 0)
        if maxRequestSize := cctx.Int("api-max-req-size"); maxRequestSize != 0 {
            serverOptions = append(serverOptions, jsonrpc.WithMaxRequestSize(int64(maxRequestSize)))
        }

        lookbackCap := cctx.Duration("api-max-lookback")
        log.Info("setting up API endpoint at " + address)

        waitLookback := abi.ChainEpoch(cctx.Int64("api-wait-lookback-limit"))
        addr, err := net.ResolveTCPAddr("tcp", address)
        if err != nil {
            return xerrors.Errorf("failed to resolve endpoint address: %w", err)
        }

        ma := metrics.MetricedGatewayAPI(gateway.NewNode(api, lookbackCap, waitLookback))
        maddr, err := manet.FromNetAddr(addr)
        if err != nil {
            return xerrors.Errorf("failed to convert endpoint address to multiaddr: %w", err)
        }

        serveRpc("/rpc/v1", ma)
        serveRpc("/rpc/v0", lapi.Wrap(new(v1api.FullNodeStruct), new(v0api.WrapperV1Full), ma))
        gwapi := gateway.NewNode(api, lookbackCap, waitLookback)
        h, err := gateway.Handler(gwapi, serverOptions...)
        if err != nil {
            return xerrors.Errorf("failed to set up gateway HTTP handler")
        }

        registry := promclient.DefaultRegisterer.(*promclient.Registry)
        exporter, err := prometheus.NewExporter(prometheus.Options{
            Registry:  registry,
            Namespace: "lotus_gw",
        stopFunc, err := node.ServeRPC(h, "lotus-gateway", maddr)
        if err != nil {
            return xerrors.Errorf("failed to serve rpc endpoint: %w", err)
        }

        <-node.MonitorShutdown(nil, node.ShutdownHandler{
            Component: "rpc",
            StopFunc:  stopFunc,
        })
        if err != nil {
            return err
        }
        mux.Handle("/debug/metrics", exporter)

        mux.PathPrefix("/").Handler(http.DefaultServeMux)

        /*ah := &auth.Handler{
            Verify: nodeApi.AuthVerify,
            Next:   mux.ServeHTTP,
        }*/

        srv := &http.Server{
            Handler: mux,
            BaseContext: func(listener net.Listener) context.Context {
                ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, "lotus-gateway"))
                return ctx
            },
        }

        go func() {
            <-ctx.Done()
            log.Warn("Shutting down...")
            if err := srv.Shutdown(context.TODO()); err != nil {
                log.Errorf("shutting down RPC server failed: %s", err)
            }
            log.Warn("Graceful shutdown successful")
        }()

        nl, err := net.Listen("tcp", address)
        if err != nil {
            return err
        }

        return srv.Serve(nl)
        return nil
    },
}

103
cmd/lotus-shed/export-car.go
Normal file
103
cmd/lotus-shed/export-car.go
Normal file
@ -0,0 +1,103 @@
package main

import (
    "fmt"
    "io"
    "os"

    "github.com/ipfs/go-blockservice"
    "github.com/ipfs/go-cid"
    offline "github.com/ipfs/go-ipfs-exchange-offline"
    format "github.com/ipfs/go-ipld-format"
    "github.com/ipfs/go-merkledag"
    "github.com/ipld/go-car"
    "github.com/urfave/cli/v2"
    "golang.org/x/xerrors"

    lcli "github.com/filecoin-project/lotus/cli"
    "github.com/filecoin-project/lotus/node/repo"
)

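// carWalkFunc selects which links to follow when exporting the DAG: sealed and
// unsealed Filecoin commitment links are skipped, everything else is included
// in the CAR.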
func carWalkFunc(nd format.Node) (out []*format.Link, err error) {
    for _, link := range nd.Links() {
        if link.Cid.Prefix().Codec == cid.FilCommitmentSealed || link.Cid.Prefix().Codec == cid.FilCommitmentUnsealed {
            continue
        }
        out = append(out, link)
    }
    return out, nil
}

var exportCarCmd = &cli.Command{
    Name:        "export-car",
    Description: "Export a car from repo (requires node to be offline)",
    Flags: []cli.Flag{
        &cli.StringFlag{
            Name:  "repo",
            Value: "~/.lotus",
        },
    },
    Action: func(cctx *cli.Context) error {
        if cctx.Args().Len() != 2 {
            return lcli.ShowHelp(cctx, fmt.Errorf("must specify file name and object"))
        }

        outfile := cctx.Args().First()
        var roots []cid.Cid
        for _, arg := range cctx.Args().Tail() {
            c, err := cid.Decode(arg)
            if err != nil {
                return err
            }
            roots = append(roots, c)
        }

        ctx := lcli.ReqContext(cctx)

        r, err := repo.NewFS(cctx.String("repo"))
        if err != nil {
            return xerrors.Errorf("opening fs repo: %w", err)
        }

        exists, err := r.Exists()
        if err != nil {
            return err
        }
        if !exists {
            return xerrors.Errorf("lotus repo doesn't exist")
        }

        lr, err := r.Lock(repo.FullNode)
        if err != nil {
            return err
        }
        defer lr.Close() //nolint:errcheck

        fi, err := os.Create(outfile)
        if err != nil {
            return xerrors.Errorf("opening the output file: %w", err)
        }

        defer fi.Close() //nolint:errcheck

        bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore)
        if err != nil {
            return fmt.Errorf("failed to open blockstore: %w", err)
        }

        defer func() {
            if c, ok := bs.(io.Closer); ok {
                if err := c.Close(); err != nil {
                    log.Warnf("failed to close blockstore: %s", err)
                }
            }
        }()

        dag := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs)))
        err = car.WriteCarWithWalker(ctx, dag, roots, fi, carWalkFunc)
        if err != nil {
            return err
        }
        return nil
    },
}

@ -43,6 +43,7 @@ func main() {
        minerCmd,
        mpoolStatsCmd,
        exportChainCmd,
        exportCarCmd,
        consensusCmd,
        storageStatsCmd,
        syncCmd,

@ -7,7 +7,6 @@ import (
    "fmt"
    "regexp"
    "strconv"
    "sync/atomic"
    "testing"
    "time"

@ -18,13 +17,11 @@ import (
    "github.com/filecoin-project/go-state-types/abi"

    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/api/test"
    "github.com/filecoin-project/lotus/build"
    "github.com/filecoin-project/lotus/chain/actors/policy"
    "github.com/filecoin-project/lotus/chain/types"
    "github.com/filecoin-project/lotus/lib/lotuslog"
    "github.com/filecoin-project/lotus/itests/kit"
    "github.com/filecoin-project/lotus/node/repo"
    builder "github.com/filecoin-project/lotus/node/test"
)

func TestWorkerKeyChange(t *testing.T) {
@ -41,20 +38,16 @@ func TestWorkerKeyChange(t *testing.T) {
    policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
    policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))

    lotuslog.SetupLogLevels()
    logging.SetLogLevel("miner", "ERROR")
    logging.SetLogLevel("chainstore", "ERROR")
    logging.SetLogLevel("chain", "ERROR")
    logging.SetLogLevel("pubsub", "ERROR")
    logging.SetLogLevel("sub", "ERROR")
    logging.SetLogLevel("storageminer", "ERROR")
    kit.QuietMiningLogs()

    blocktime := 1 * time.Millisecond

    n, sn := builder.MockSbBuilder(t, []test.FullNodeOpts{test.FullNodeWithLatestActorsAt(-1), test.FullNodeWithLatestActorsAt(-1)}, test.OneMiner)
    clients, miners := kit.MockMinerBuilder(t,
        []kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(-1), kit.FullNodeWithLatestActorsAt(-1)},
        kit.OneMiner)

    client1 := n[0]
    client2 := n[1]
    client1 := clients[0]
    client2 := clients[1]

    // Connect the nodes.
    addrinfo, err := client1.NetAddrsListen(ctx)
@ -67,8 +60,8 @@ func TestWorkerKeyChange(t *testing.T) {
    app := cli.NewApp()
    app.Metadata = map[string]interface{}{
        "repoType":         repo.StorageMiner,
        "testnode-full":    n[0],
        "testnode-storage": sn[0],
        "testnode-full":    clients[0],
        "testnode-storage": miners[0],
    }
    app.Writer = output
    api.RunningNodeType = api.NodeMiner
@ -85,29 +78,14 @@ func TestWorkerKeyChange(t *testing.T) {
        return cmd.Action(cctx)
    }

    // setup miner
    mine := int64(1)
    done := make(chan struct{})
    go func() {
        defer close(done)
        for atomic.LoadInt64(&mine) == 1 {
            time.Sleep(blocktime)
            if err := sn[0].MineOne(ctx, test.MineNext); err != nil {
                t.Error(err)
            }
        }
    }()
    defer func() {
        atomic.AddInt64(&mine, -1)
        fmt.Println("shutting down mining")
        <-done
    }()
    // start mining
    kit.ConnectAndStartMining(t, blocktime, miners[0], client1, client2)

    newKey, err := client1.WalletNew(ctx, types.KTBLS)
    require.NoError(t, err)

    // Initialize wallet.
    test.SendFunds(ctx, t, client1, newKey, abi.NewTokenAmount(0))
    kit.SendFunds(ctx, t, client1, newKey, abi.NewTokenAmount(0))

    require.NoError(t, run(actorProposeChangeWorker, "--really-do-it", newKey.String()))

@ -1,10 +1,13 @@
package main

import (
    "context"
    "flag"
    "testing"
    "time"

    "github.com/filecoin-project/lotus/itests/kit"
    "github.com/filecoin-project/lotus/node/impl"
    logging "github.com/ipfs/go-log/v2"
    "github.com/stretchr/testify/require"
    "github.com/urfave/cli/v2"
@ -12,11 +15,8 @@ import (
    "github.com/filecoin-project/go-state-types/abi"

    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/api/test"
    "github.com/filecoin-project/lotus/chain/actors/policy"
    "github.com/filecoin-project/lotus/lib/lotuslog"
    "github.com/filecoin-project/lotus/node/repo"
    builder "github.com/filecoin-project/lotus/node/test"
)

func TestMinerAllInfo(t *testing.T) {
@ -32,12 +32,7 @@ func TestMinerAllInfo(t *testing.T) {

    _test = true

    lotuslog.SetupLogLevels()
    logging.SetLogLevel("miner", "ERROR")
    logging.SetLogLevel("chainstore", "ERROR")
    logging.SetLogLevel("chain", "ERROR")
    logging.SetLogLevel("sub", "ERROR")
    logging.SetLogLevel("storageminer", "ERROR")
    kit.QuietMiningLogs()

    oldDelay := policy.GetPreCommitChallengeDelay()
    policy.SetPreCommitChallengeDelay(5)
@ -45,8 +40,9 @@ func TestMinerAllInfo(t *testing.T) {
        policy.SetPreCommitChallengeDelay(oldDelay)
    })

    var n []test.TestNode
    var sn []test.TestStorageNode
    n, sn := kit.Builder(t, kit.OneFull, kit.OneMiner)
    client, miner := n[0].FullNode, sn[0]
    kit.ConnectAndStartMining(t, time.Second, miner, client.(*impl.FullNodeAPI))

    run := func(t *testing.T) {
        app := cli.NewApp()
@ -62,15 +58,10 @@ func TestMinerAllInfo(t *testing.T) {
        require.NoError(t, infoAllCmd.Action(cctx))
    }

    bp := func(t *testing.T, fullOpts []test.FullNodeOpts, storage []test.StorageMiner) ([]test.TestNode, []test.TestStorageNode) {
        n, sn = builder.Builder(t, fullOpts, storage)
        t.Run("pre-info-all", run)

    t.Run("pre-info-all", run)

        return n, sn
    }

    test.TestDealFlow(t, bp, time.Second, false, false, 0)
    dh := kit.NewDealHarness(t, client, miner)
    dh.MakeFullDeal(context.Background(), 6, false, false, 0)

    t.Run("post-info-all", run)
}

@ -1,38 +1,27 @@
package main

import (
    "context"
    "net"
    "net/http"
    "fmt"
    _ "net/http/pprof"
    "os"
    "os/signal"
    "syscall"

    "github.com/filecoin-project/lotus/api/v1api"

    "github.com/filecoin-project/lotus/api/v0api"

    mux "github.com/gorilla/mux"
    "github.com/multiformats/go-multiaddr"
    manet "github.com/multiformats/go-multiaddr/net"
    "github.com/urfave/cli/v2"
    "go.opencensus.io/stats"
    "go.opencensus.io/stats/view"
    "go.opencensus.io/tag"
    "golang.org/x/xerrors"

    "github.com/filecoin-project/go-jsonrpc"
    "github.com/filecoin-project/go-jsonrpc/auth"

    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/build"
    lcli "github.com/filecoin-project/lotus/cli"
    "github.com/filecoin-project/lotus/lib/rpcenc"
    "github.com/filecoin-project/lotus/lib/ulimit"
    "github.com/filecoin-project/lotus/metrics"
    "github.com/filecoin-project/lotus/node"
    "github.com/filecoin-project/lotus/node/impl"
    "github.com/filecoin-project/lotus/node/modules/dtypes"
    "github.com/filecoin-project/lotus/node/repo"
)
@ -165,56 +154,25 @@ var runCmd = &cli.Command{

        log.Infof("Remote version %s", v)

        lst, err := manet.Listen(endpoint)
        // Instantiate the miner node handler.
        handler, err := node.MinerHandler(minerapi, true)
        if err != nil {
            return xerrors.Errorf("could not listen: %w", err)
            return xerrors.Errorf("failed to instantiate rpc handler: %w", err)
        }

        mux := mux.NewRouter()

        readerHandler, readerServerOpt := rpcenc.ReaderParamDecoder()
        rpcServer := jsonrpc.NewServer(readerServerOpt)
        rpcServer.Register("Filecoin", api.PermissionedStorMinerAPI(metrics.MetricedStorMinerAPI(minerapi)))

        mux.Handle("/rpc/v0", rpcServer)
        mux.Handle("/rpc/streams/v0/push/{uuid}", readerHandler)
        mux.PathPrefix("/remote").HandlerFunc(minerapi.(*impl.StorageMinerAPI).ServeRemote)
        mux.Handle("/debug/metrics", metrics.Exporter())
        mux.PathPrefix("/").Handler(http.DefaultServeMux) // pprof

        ah := &auth.Handler{
            Verify: minerapi.AuthVerify,
            Next:   mux.ServeHTTP,
        // Serve the RPC.
        rpcStopper, err := node.ServeRPC(handler, "lotus-miner", endpoint)
        if err != nil {
            return fmt.Errorf("failed to start json-rpc endpoint: %s", err)
        }

        srv := &http.Server{
            Handler: ah,
            BaseContext: func(listener net.Listener) context.Context {
                ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, "lotus-miner"))
                return ctx
            },
        }
        // Monitor for shutdown.
        finishCh := node.MonitorShutdown(shutdownChan,
            node.ShutdownHandler{Component: "rpc server", StopFunc: rpcStopper},
            node.ShutdownHandler{Component: "miner", StopFunc: stop},
        )

        sigChan := make(chan os.Signal, 2)
        go func() {
            select {
            case sig := <-sigChan:
                log.Warnw("received shutdown", "signal", sig)
            case <-shutdownChan:
                log.Warn("received shutdown")
            }

            log.Warn("Shutting down...")
            if err := stop(context.TODO()); err != nil {
                log.Errorf("graceful shutting down failed: %s", err)
            }
            if err := srv.Shutdown(context.TODO()); err != nil {
                log.Errorf("shutting down RPC server failed: %s", err)
            }
            log.Warn("Graceful shutdown successful")
        }()
        signal.Notify(sigChan, syscall.SIGTERM, syscall.SIGINT)

        return srv.Serve(manet.NetListener(lst))
        <-finishCh
        return nil
    },
}

@ -15,6 +15,7 @@ import (
    "runtime/pprof"
    "strings"

    "github.com/filecoin-project/go-jsonrpc"
    paramfetch "github.com/filecoin-project/go-paramfetch"
    metricsprom "github.com/ipfs/go-metrics-prometheus"
    "github.com/mitchellh/go-homedir"
@ -351,8 +352,37 @@ var DaemonCmd = &cli.Command{
            return xerrors.Errorf("getting api endpoint: %w", err)
        }

        //
        // Instantiate JSON-RPC endpoint.
        // ----

        // Populate JSON-RPC options.
        serverOptions := make([]jsonrpc.ServerOption, 0)
        if maxRequestSize := cctx.Int("api-max-req-size"); maxRequestSize != 0 {
            serverOptions = append(serverOptions, jsonrpc.WithMaxRequestSize(int64(maxRequestSize)))
        }

        // Instantiate the full node handler.
        h, err := node.FullNodeHandler(api, true, serverOptions...)
        if err != nil {
            return fmt.Errorf("failed to instantiate rpc handler: %s", err)
        }

        // Serve the RPC.
        rpcStopper, err := node.ServeRPC(h, "lotus-daemon", endpoint)
        if err != nil {
            return fmt.Errorf("failed to start json-rpc endpoint: %s", err)
        }

        // Monitor for shutdown.
        finishCh := node.MonitorShutdown(shutdownChan,
            node.ShutdownHandler{Component: "rpc server", StopFunc: rpcStopper},
            node.ShutdownHandler{Component: "node", StopFunc: stop},
        )
        <-finishCh // fires when shutdown is complete.

        // TODO: properly parse api endpoint (or make it a URL)
        return serveRPC(api, stop, endpoint, shutdownChan, int64(cctx.Int("api-max-req-size")))
        return nil
    },
    Subcommands: []*cli.Command{
        daemonStopCmd,

@ -4,6 +4,7 @@ import (
    "context"
    "os"

    logging "github.com/ipfs/go-log/v2"
    "github.com/mattn/go-isatty"
    "github.com/urfave/cli/v2"
    "go.opencensus.io/trace"
@ -16,6 +17,8 @@ import (
    "github.com/filecoin-project/lotus/node/repo"
)

var log = logging.Logger("main")

var AdvanceBlockCmd *cli.Command

func main() {

@ -1,33 +0,0 @@
package main

import (
    "net/http"
    "strconv"
)

func handleFractionOpt(name string, setter func(int)) http.HandlerFunc {
    return func(rw http.ResponseWriter, r *http.Request) {
        if r.Method != http.MethodPost {
            http.Error(rw, "only POST allowed", http.StatusMethodNotAllowed)
            return
        }
        if err := r.ParseForm(); err != nil {
            http.Error(rw, err.Error(), http.StatusBadRequest)
            return
        }

        asfr := r.Form.Get("x")
        if len(asfr) == 0 {
            http.Error(rw, "parameter 'x' must be set", http.StatusBadRequest)
            return
        }

        fr, err := strconv.Atoi(asfr)
        if err != nil {
            http.Error(rw, err.Error(), http.StatusBadRequest)
            return
        }
        log.Infof("setting %s to %d", name, fr)
        setter(fr)
    }
}

138
cmd/lotus/rpc.go
138
cmd/lotus/rpc.go
@ -1,138 +0,0 @@
package main

import (
    "context"
    "encoding/json"
    "net"
    "net/http"
    _ "net/http/pprof"
    "os"
    "os/signal"
    "runtime"
    "syscall"

    "github.com/ipfs/go-cid"
    logging "github.com/ipfs/go-log/v2"
    "github.com/multiformats/go-multiaddr"
    manet "github.com/multiformats/go-multiaddr/net"
    "go.opencensus.io/tag"
    "golang.org/x/xerrors"

    "github.com/filecoin-project/go-jsonrpc"
    "github.com/filecoin-project/go-jsonrpc/auth"

    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/api/v0api"
    "github.com/filecoin-project/lotus/api/v1api"
    "github.com/filecoin-project/lotus/metrics"
    "github.com/filecoin-project/lotus/node"
    "github.com/filecoin-project/lotus/node/impl"
)

var log = logging.Logger("main")

func serveRPC(a v1api.FullNode, stop node.StopFunc, addr multiaddr.Multiaddr, shutdownCh <-chan struct{}, maxRequestSize int64) error {
    serverOptions := make([]jsonrpc.ServerOption, 0)
    if maxRequestSize != 0 { // config set
        serverOptions = append(serverOptions, jsonrpc.WithMaxRequestSize(maxRequestSize))
    }
    serveRpc := func(path string, hnd interface{}) {
        rpcServer := jsonrpc.NewServer(serverOptions...)
        rpcServer.Register("Filecoin", hnd)

        ah := &auth.Handler{
            Verify: a.AuthVerify,
            Next:   rpcServer.ServeHTTP,
        }

        http.Handle(path, ah)
    }

    pma := api.PermissionedFullAPI(metrics.MetricedFullAPI(a))

    serveRpc("/rpc/v1", pma)
    serveRpc("/rpc/v0", &v0api.WrapperV1Full{FullNode: pma})

    importAH := &auth.Handler{
        Verify: a.AuthVerify,
        Next:   handleImport(a.(*impl.FullNodeAPI)),
    }

    http.Handle("/rest/v0/import", importAH)

    http.Handle("/debug/metrics", metrics.Exporter())
    http.Handle("/debug/pprof-set/block", handleFractionOpt("BlockProfileRate", runtime.SetBlockProfileRate))
    http.Handle("/debug/pprof-set/mutex", handleFractionOpt("MutexProfileFraction",
        func(x int) { runtime.SetMutexProfileFraction(x) },
    ))

    lst, err := manet.Listen(addr)
    if err != nil {
        return xerrors.Errorf("could not listen: %w", err)
    }

    srv := &http.Server{
        Handler: http.DefaultServeMux,
        BaseContext: func(listener net.Listener) context.Context {
            ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, "lotus-daemon"))
            return ctx
        },
    }

    sigCh := make(chan os.Signal, 2)
    shutdownDone := make(chan struct{})
    go func() {
        select {
        case sig := <-sigCh:
            log.Warnw("received shutdown", "signal", sig)
        case <-shutdownCh:
            log.Warn("received shutdown")
        }

        log.Warn("Shutting down...")
        if err := srv.Shutdown(context.TODO()); err != nil {
            log.Errorf("shutting down RPC server failed: %s", err)
        }
        if err := stop(context.TODO()); err != nil {
            log.Errorf("graceful shutting down failed: %s", err)
        }
        log.Warn("Graceful shutdown successful")
        _ = log.Sync() //nolint:errcheck
        close(shutdownDone)
    }()
    signal.Notify(sigCh, syscall.SIGTERM, syscall.SIGINT)

    err = srv.Serve(manet.NetListener(lst))
    if err == http.ErrServerClosed {
        <-shutdownDone
        return nil
    }
    return err
}

func handleImport(a *impl.FullNodeAPI) func(w http.ResponseWriter, r *http.Request) {
    return func(w http.ResponseWriter, r *http.Request) {
        if r.Method != "PUT" {
            w.WriteHeader(404)
            return
        }
        if !auth.HasPerm(r.Context(), nil, api.PermWrite) {
            w.WriteHeader(401)
            _ = json.NewEncoder(w).Encode(struct{ Error string }{"unauthorized: missing write permission"})
            return
        }

        c, err := a.ClientImportLocal(r.Context(), r.Body)
        if err != nil {
            w.WriteHeader(500)
            _ = json.NewEncoder(w).Encode(struct{ Error string }{err.Error()})
            return
        }
        w.WriteHeader(200)
        err = json.NewEncoder(w).Encode(struct{ Cid cid.Cid }{c})
        if err != nil {
            log.Errorf("/rest/v0/import: Writing response failed: %+v", err)
            return
        }
    }
}

@ -25,7 +25,6 @@ We're happy to announce Lotus X.Y.Z...
First steps:

- [ ] Fork a new branch (`release/vX.Y.Z`) from `master` and make any further release related changes to this branch. If any "non-trivial" changes get added to the release, uncheck all the checkboxes and return to this stage.
- [ ] Prep the changelog using `scripts/mkreleaselog`, and add it to `CHANGELOG.md`
- [ ] Bump the version in `version.go` in the `master` branch to `vX.(Y+1).0-dev`.

Prepping an RC:
@ -93,7 +92,7 @@ Testing an RC:
- [ ] Final preparation
  - [ ] Verify that version string in [`version.go`](https://github.com/ipfs/go-ipfs/tree/master/version.go) has been updated.
  - [ ] Ensure that [CHANGELOG.md](https://github.com/filecoin-project/lotus/blob/master/CHANGELOG.md) is up to date
  - [ ] Ensure that [README.md](https://github.com/filecoin-project/lotus/blob/master/README.md) is up to date
  - [ ] Prep the changelog using `scripts/mkreleaselog`, and add it to `CHANGELOG.md`
  - [ ] Merge `release-vX.Y.Z` into the `releases` branch.
  - [ ] Tag this merge commit (on the `releases` branch) with `vX.Y.Z`
  - [ ] Cut the release [here](https://github.com/filecoin-project/lotus/releases/new?prerelease=true&target=releases).

2
extern/filecoin-ffi
vendored
2
extern/filecoin-ffi
vendored
@ -1 +1 @@
Subproject commit 8b97bd8230b77bd32f4f27e4766a6d8a03b4e801
Subproject commit 1c7190dcc5bdef8042ca091129d6d3c10898dbdb

116
extern/storage-sealing/commit_batch.go
vendored
116
extern/storage-sealing/commit_batch.go
vendored
@ -7,6 +7,10 @@ import (
    "sync"
    "time"

    "github.com/filecoin-project/go-state-types/network"

    "github.com/filecoin-project/lotus/chain/actors"

    "github.com/ipfs/go-cid"
    "golang.org/x/xerrors"

@ -23,6 +27,7 @@ import (
    "github.com/filecoin-project/lotus/chain/actors/policy"
    "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
    "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
    "github.com/filecoin-project/lotus/node/config"
)

const arp = abi.RegisteredAggregationProof_SnarkPackV1
@ -31,9 +36,11 @@ type CommitBatcherApi interface {
    SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error)
    StateMinerInfo(context.Context, address.Address, TipSetToken) (miner.MinerInfo, error)
    ChainHead(ctx context.Context) (TipSetToken, abi.ChainEpoch, error)
    ChainBaseFee(context.Context, TipSetToken) (abi.TokenAmount, error)

    StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok TipSetToken) (*miner.SectorPreCommitOnChainInfo, error)
    StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, TipSetToken) (big.Int, error)
    StateNetworkVersion(ctx context.Context, tok TipSetToken) (network.Version, error)
}

type AggregateInput struct {
@ -47,20 +54,20 @@ type CommitBatcher struct {
    maddr     address.Address
    mctx      context.Context
    addrSel   AddrSel
    feeCfg    FeeConfig
    feeCfg    config.MinerFeeConfig
    getConfig GetSealingConfigFunc
    prover    ffiwrapper.Prover

    deadlines map[abi.SectorNumber]time.Time
    todo      map[abi.SectorNumber]AggregateInput
    waiting   map[abi.SectorNumber][]chan sealiface.CommitBatchRes
    cutoffs map[abi.SectorNumber]time.Time
    todo    map[abi.SectorNumber]AggregateInput
    waiting map[abi.SectorNumber][]chan sealiface.CommitBatchRes

    notify, stop, stopped chan struct{}
    force                 chan chan []sealiface.CommitBatchRes
    lk                    sync.Mutex
}

func NewCommitBatcher(mctx context.Context, maddr address.Address, api CommitBatcherApi, addrSel AddrSel, feeCfg FeeConfig, getConfig GetSealingConfigFunc, prov ffiwrapper.Prover) *CommitBatcher {
func NewCommitBatcher(mctx context.Context, maddr address.Address, api CommitBatcherApi, addrSel AddrSel, feeCfg config.MinerFeeConfig, getConfig GetSealingConfigFunc, prov ffiwrapper.Prover) *CommitBatcher {
    b := &CommitBatcher{
        api:   api,
        maddr: maddr,
@ -70,9 +77,9 @@ func NewCommitBatcher(mctx context.Context, maddr address.Address, api CommitBat
        getConfig: getConfig,
        prover:    prov,

        deadlines: map[abi.SectorNumber]time.Time{},
        todo:      map[abi.SectorNumber]AggregateInput{},
        waiting:   map[abi.SectorNumber][]chan sealiface.CommitBatchRes{},
        cutoffs: map[abi.SectorNumber]time.Time{},
        todo:    map[abi.SectorNumber]AggregateInput{},
        waiting: map[abi.SectorNumber][]chan sealiface.CommitBatchRes{},

        notify: make(chan struct{}, 1),
        force:  make(chan chan []sealiface.CommitBatchRes),
@ -132,30 +139,30 @@ func (b *CommitBatcher) batchWait(maxWait, slack time.Duration) <-chan time.Time
        return nil
    }

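    // Compute the earliest cutoff across pending (todo) and in-flight (waiting)
    // sectors; the wait timer fires 'slack' before that cutoff and never more
    // than maxWait from now.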
    var deadline time.Time
    var cutoff time.Time
    for sn := range b.todo {
        sectorDeadline := b.deadlines[sn]
        if deadline.IsZero() || (!sectorDeadline.IsZero() && sectorDeadline.Before(deadline)) {
            deadline = sectorDeadline
        sectorCutoff := b.cutoffs[sn]
        if cutoff.IsZero() || (!sectorCutoff.IsZero() && sectorCutoff.Before(cutoff)) {
            cutoff = sectorCutoff
        }
    }
    for sn := range b.waiting {
        sectorDeadline := b.deadlines[sn]
        if deadline.IsZero() || (!sectorDeadline.IsZero() && sectorDeadline.Before(deadline)) {
            deadline = sectorDeadline
        sectorCutoff := b.cutoffs[sn]
        if cutoff.IsZero() || (!sectorCutoff.IsZero() && sectorCutoff.Before(cutoff)) {
            cutoff = sectorCutoff
        }
    }

    if deadline.IsZero() {
    if cutoff.IsZero() {
        return time.After(maxWait)
    }

    deadline = deadline.Add(-slack)
    if deadline.Before(now) {
    cutoff = cutoff.Add(-slack)
    if cutoff.Before(now) {
        return time.After(time.Nanosecond) // can't return 0
    }

    wait := deadline.Sub(now)
    wait := cutoff.Sub(now)
    if wait > maxWait {
        wait = maxWait
    }
@ -208,7 +215,7 @@ func (b *CommitBatcher) maybeStartBatch(notif, after bool) ([]sealiface.CommitBa

            delete(b.waiting, sn)
            delete(b.todo, sn)
            delete(b.deadlines, sn)
            delete(b.cutoffs, sn)
        }
    }

@ -285,14 +292,29 @@ func (b *CommitBatcher) processBatch(cfg sealiface.Config) ([]sealiface.CommitBa
        return []sealiface.CommitBatchRes{res}, xerrors.Errorf("couldn't get miner info: %w", err)
    }

    goodFunds := big.Add(b.feeCfg.MaxCommitGasFee, collateral)
    maxFee := b.feeCfg.MaxCommitBatchGasFee.FeeForSectors(len(infos))

    bf, err := b.api.ChainBaseFee(b.mctx, tok)
    if err != nil {
        return []sealiface.CommitBatchRes{res}, xerrors.Errorf("couldn't get base fee: %w", err)
    }

    nv, err := b.api.StateNetworkVersion(b.mctx, tok)
    if err != nil {
        log.Errorf("getting network version: %s", err)
        return []sealiface.CommitBatchRes{res}, xerrors.Errorf("getting network version: %s", err)
    }

    aggFee := policy.AggregateNetworkFee(nv, len(infos), bf)

    goodFunds := big.Add(maxFee, big.Add(collateral, aggFee))

    from, _, err := b.addrSel(b.mctx, mi, api.CommitAddr, goodFunds, collateral)
    if err != nil {
        return []sealiface.CommitBatchRes{res}, xerrors.Errorf("no good address found: %w", err)
    }

    mcid, err := b.api.SendMsg(b.mctx, from, b.maddr, miner.Methods.ProveCommitAggregate, collateral, b.feeCfg.MaxCommitGasFee, enc.Bytes())
    mcid, err := b.api.SendMsg(b.mctx, from, b.maddr, miner.Methods.ProveCommitAggregate, collateral, maxFee, enc.Bytes())
    if err != nil {
        return []sealiface.CommitBatchRes{res}, xerrors.Errorf("sending message failed: %w", err)
    }
@ -352,14 +374,14 @@ func (b *CommitBatcher) processSingle(mi miner.MinerInfo, sn abi.SectorNumber, i
        return cid.Undef, err
    }

    goodFunds := big.Add(collateral, b.feeCfg.MaxCommitGasFee)
    goodFunds := big.Add(collateral, big.Int(b.feeCfg.MaxCommitGasFee))

    from, _, err := b.addrSel(b.mctx, mi, api.CommitAddr, goodFunds, collateral)
    if err != nil {
        return cid.Undef, xerrors.Errorf("no good address to send commit message from: %w", err)
    }

    mcid, err := b.api.SendMsg(b.mctx, from, b.maddr, miner.Methods.ProveCommitSector, collateral, b.feeCfg.MaxCommitGasFee, enc.Bytes())
    mcid, err := b.api.SendMsg(b.mctx, from, b.maddr, miner.Methods.ProveCommitSector, collateral, big.Int(b.feeCfg.MaxCommitGasFee), enc.Bytes())
    if err != nil {
        return cid.Undef, xerrors.Errorf("pushing message to mpool: %w", err)
    }
@ -369,16 +391,15 @@

// register commit, wait for batch message, return message CID
func (b *CommitBatcher) AddCommit(ctx context.Context, s SectorInfo, in AggregateInput) (res sealiface.CommitBatchRes, err error) {
    _, curEpoch, err := b.api.ChainHead(b.mctx)
    if err != nil {
        log.Errorf("getting chain head: %s", err)
        return sealiface.CommitBatchRes{}, nil
    }

    sn := s.SectorNumber

    cu, err := b.getCommitCutoff(s)
    if err != nil {
        return sealiface.CommitBatchRes{}, err
    }

    b.lk.Lock()
    b.deadlines[sn] = getSectorDeadline(curEpoch, s)
    b.cutoffs[sn] = cu
    b.todo[sn] = in

    sent := make(chan sealiface.CommitBatchRes, 1)
@ -452,24 +473,43 @@ func (b *CommitBatcher) Stop(ctx context.Context) error {
    }
}

func getSectorDeadline(curEpoch abi.ChainEpoch, si SectorInfo) time.Time {
    deadlineEpoch := si.TicketEpoch + policy.MaxPreCommitRandomnessLookback
// TODO: If this returned epochs, it would make testing much easier
func (b *CommitBatcher) getCommitCutoff(si SectorInfo) (time.Time, error) {
|
||||
tok, curEpoch, err := b.api.ChainHead(b.mctx)
|
||||
if err != nil {
|
||||
return time.Now(), xerrors.Errorf("getting chain head: %s", err)
|
||||
}
|
||||
|
||||
nv, err := b.api.StateNetworkVersion(b.mctx, tok)
|
||||
if err != nil {
|
||||
log.Errorf("getting network version: %s", err)
|
||||
return time.Now(), xerrors.Errorf("getting network version: %s", err)
|
||||
}
|
||||
|
||||
pci, err := b.api.StateSectorPreCommitInfo(b.mctx, b.maddr, si.SectorNumber, tok)
|
||||
if err != nil {
|
||||
log.Errorf("getting precommit info: %s", err)
|
||||
return time.Now(), err
|
||||
}
|
||||
|
||||
cutoffEpoch := pci.PreCommitEpoch + policy.GetMaxProveCommitDuration(actors.VersionForNetwork(nv), si.SectorType)
|
||||
|
||||
for _, p := range si.Pieces {
|
||||
if p.DealInfo == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
startEpoch := p.DealInfo.DealSchedule.StartEpoch
|
||||
if startEpoch < deadlineEpoch {
|
||||
deadlineEpoch = startEpoch
|
||||
if startEpoch < cutoffEpoch {
|
||||
cutoffEpoch = startEpoch
|
||||
}
|
||||
}
|
||||
|
||||
if deadlineEpoch <= curEpoch {
|
||||
return time.Now()
|
||||
if cutoffEpoch <= curEpoch {
|
||||
return time.Now(), nil
|
||||
}
|
||||
|
||||
return time.Now().Add(time.Duration(deadlineEpoch-curEpoch) * time.Duration(build.BlockDelaySecs) * time.Second)
|
||||
return time.Now().Add(time.Duration(cutoffEpoch-curEpoch) * time.Duration(build.BlockDelaySecs) * time.Second), nil
|
||||
}
|
||||
|
||||
func (b *CommitBatcher) getSectorCollateral(sn abi.SectorNumber, tok TipSetToken) (abi.TokenAmount, error) {
|
||||
|
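For reference, the funds check introduced in processBatch above now has three components: the batched gas fee cap (FeeForSectors), the sector collateral, and the aggregation network fee from the policy package. A minimal standalone sketch of that arithmetic with go-state-types/big — the concrete numbers are made up, and FeeForSectors is approximated here as a flat per-sector cap times the batch size (the real config may also include a base term):

```go
package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/big"
)

func main() {
	// Hypothetical inputs; the batcher reads these from config and chain state.
	perSectorFee := big.NewInt(1_000_000) // stand-in for MaxCommitBatchGasFee's per-sector cap
	sectors := int64(10)
	collateral := big.NewInt(50_000_000)
	aggFee := big.NewInt(3_000_000) // stand-in for policy.AggregateNetworkFee(nv, n, baseFee)

	// Approximation of FeeForSectors: per-sector cap times the sector count.
	maxFee := big.Mul(perSectorFee, big.NewInt(sectors))

	// Mirrors the diff's: goodFunds := big.Add(maxFee, big.Add(collateral, aggFee))
	goodFunds := big.Add(maxFee, big.Add(collateral, aggFee))
	fmt.Println("selected address must hold at least:", goodFunds)
}
```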
72  extern/storage-sealing/precommit_batch.go (vendored)

@@ -7,6 +7,9 @@ import (
 	"sync"
 	"time"

+	"github.com/filecoin-project/lotus/build"
+	"github.com/filecoin-project/lotus/chain/actors/policy"
+
 	"github.com/ipfs/go-cid"
 	"golang.org/x/xerrors"

@@ -19,6 +22,7 @@ import (
 	"github.com/filecoin-project/lotus/api"
 	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
 	"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
+	"github.com/filecoin-project/lotus/node/config"
 )

 type PreCommitBatcherApi interface {

@@ -37,19 +41,19 @@ type PreCommitBatcher struct {
 	maddr     address.Address
 	mctx      context.Context
 	addrSel   AddrSel
-	feeCfg    FeeConfig
+	feeCfg    config.MinerFeeConfig
 	getConfig GetSealingConfigFunc

-	deadlines map[abi.SectorNumber]time.Time
-	todo      map[abi.SectorNumber]*preCommitEntry
-	waiting   map[abi.SectorNumber][]chan sealiface.PreCommitBatchRes
+	cutoffs map[abi.SectorNumber]time.Time
+	todo    map[abi.SectorNumber]*preCommitEntry
+	waiting map[abi.SectorNumber][]chan sealiface.PreCommitBatchRes

 	notify, stop, stopped chan struct{}
 	force                 chan chan []sealiface.PreCommitBatchRes
 	lk                    sync.Mutex
 }

-func NewPreCommitBatcher(mctx context.Context, maddr address.Address, api PreCommitBatcherApi, addrSel AddrSel, feeCfg FeeConfig, getConfig GetSealingConfigFunc) *PreCommitBatcher {
+func NewPreCommitBatcher(mctx context.Context, maddr address.Address, api PreCommitBatcherApi, addrSel AddrSel, feeCfg config.MinerFeeConfig, getConfig GetSealingConfigFunc) *PreCommitBatcher {
 	b := &PreCommitBatcher{
 		api:   api,
 		maddr: maddr,

@@ -58,9 +62,9 @@ func NewPreCommitBatcher(mctx context.Context, maddr address.Address, api PreCom
 		feeCfg:    feeCfg,
 		getConfig: getConfig,

-		deadlines: map[abi.SectorNumber]time.Time{},
-		todo:      map[abi.SectorNumber]*preCommitEntry{},
-		waiting:   map[abi.SectorNumber][]chan sealiface.PreCommitBatchRes{},
+		cutoffs: map[abi.SectorNumber]time.Time{},
+		todo:    map[abi.SectorNumber]*preCommitEntry{},
+		waiting: map[abi.SectorNumber][]chan sealiface.PreCommitBatchRes{},

 		notify: make(chan struct{}, 1),
 		force:  make(chan chan []sealiface.PreCommitBatchRes),

@@ -120,30 +124,30 @@ func (b *PreCommitBatcher) batchWait(maxWait, slack time.Duration) <-chan time.T
 		return nil
 	}

-	var deadline time.Time
+	var cutoff time.Time
 	for sn := range b.todo {
-		sectorDeadline := b.deadlines[sn]
-		if deadline.IsZero() || (!sectorDeadline.IsZero() && sectorDeadline.Before(deadline)) {
-			deadline = sectorDeadline
+		sectorCutoff := b.cutoffs[sn]
+		if cutoff.IsZero() || (!sectorCutoff.IsZero() && sectorCutoff.Before(cutoff)) {
+			cutoff = sectorCutoff
 		}
 	}
 	for sn := range b.waiting {
-		sectorDeadline := b.deadlines[sn]
-		if deadline.IsZero() || (!sectorDeadline.IsZero() && sectorDeadline.Before(deadline)) {
-			deadline = sectorDeadline
+		sectorCutoff := b.cutoffs[sn]
+		if cutoff.IsZero() || (!sectorCutoff.IsZero() && sectorCutoff.Before(cutoff)) {
+			cutoff = sectorCutoff
 		}
 	}

-	if deadline.IsZero() {
+	if cutoff.IsZero() {
 		return time.After(maxWait)
 	}

-	deadline = deadline.Add(-slack)
-	if deadline.Before(now) {
+	cutoff = cutoff.Add(-slack)
+	if cutoff.Before(now) {
 		return time.After(time.Nanosecond) // can't return 0
 	}

-	wait := deadline.Sub(now)
+	wait := cutoff.Sub(now)
 	if wait > maxWait {
 		wait = maxWait
 	}

@@ -191,7 +195,7 @@ func (b *PreCommitBatcher) maybeStartBatch(notif, after bool) ([]sealiface.PreCo

 			delete(b.waiting, sn)
 			delete(b.todo, sn)
-			delete(b.deadlines, sn)
+			delete(b.cutoffs, sn)
 		}
 	}

@@ -224,14 +228,15 @@ func (b *PreCommitBatcher) processBatch(cfg sealiface.Config) ([]sealiface.PreCo
 		return []sealiface.PreCommitBatchRes{res}, xerrors.Errorf("couldn't get miner info: %w", err)
 	}

-	goodFunds := big.Add(deposit, b.feeCfg.MaxPreCommitGasFee)
+	maxFee := b.feeCfg.MaxPreCommitBatchGasFee.FeeForSectors(len(params.Sectors))
+	goodFunds := big.Add(deposit, maxFee)

 	from, _, err := b.addrSel(b.mctx, mi, api.PreCommitAddr, goodFunds, deposit)
 	if err != nil {
 		return []sealiface.PreCommitBatchRes{res}, xerrors.Errorf("no good address found: %w", err)
 	}

-	mcid, err := b.api.SendMsg(b.mctx, from, b.maddr, miner.Methods.PreCommitSectorBatch, deposit, b.feeCfg.MaxPreCommitGasFee, enc.Bytes())
+	mcid, err := b.api.SendMsg(b.mctx, from, b.maddr, miner.Methods.PreCommitSectorBatch, deposit, maxFee, enc.Bytes())
 	if err != nil {
 		return []sealiface.PreCommitBatchRes{res}, xerrors.Errorf("sending message failed: %w", err)
 	}

@@ -254,7 +259,7 @@ func (b *PreCommitBatcher) AddPreCommit(ctx context.Context, s SectorInfo, depos
 	sn := s.SectorNumber

 	b.lk.Lock()
-	b.deadlines[sn] = getSectorDeadline(curEpoch, s)
+	b.cutoffs[sn] = getPreCommitCutoff(curEpoch, s)
 	b.todo[sn] = &preCommitEntry{
 		deposit: deposit,
 		pci:     in,

@@ -330,3 +335,24 @@ func (b *PreCommitBatcher) Stop(ctx context.Context) error {
 		return ctx.Err()
 	}
 }
+
+// TODO: If this returned epochs, it would make testing much easier
+func getPreCommitCutoff(curEpoch abi.ChainEpoch, si SectorInfo) time.Time {
+	cutoffEpoch := si.TicketEpoch + policy.MaxPreCommitRandomnessLookback
+	for _, p := range si.Pieces {
+		if p.DealInfo == nil {
+			continue
+		}
+
+		startEpoch := p.DealInfo.DealSchedule.StartEpoch
+		if startEpoch < cutoffEpoch {
+			cutoffEpoch = startEpoch
+		}
+	}
+
+	if cutoffEpoch <= curEpoch {
+		return time.Now()
+	}
+
+	return time.Now().Add(time.Duration(cutoffEpoch-curEpoch) * time.Duration(build.BlockDelaySecs) * time.Second)
+}
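getPreCommitCutoff above (and its commit-side counterpart) converts a chain-epoch deadline into a wall-clock flush time by multiplying the remaining epochs by the network block delay. A minimal sketch of just that conversion, assuming mainnet's 30-second block delay for build.BlockDelaySecs:

```go
package main

import (
	"fmt"
	"time"
)

const blockDelaySecs = 30 // stand-in for build.BlockDelaySecs on mainnet

// cutoffTime converts "epochs remaining until the cutoff" into a wall-clock
// deadline, mirroring the tail of getPreCommitCutoff.
func cutoffTime(curEpoch, cutoffEpoch int64) time.Time {
	if cutoffEpoch <= curEpoch {
		return time.Now() // already past the cutoff: flush immediately
	}
	return time.Now().Add(time.Duration(cutoffEpoch-curEpoch) * blockDelaySecs * time.Second)
}

func main() {
	// 20 epochs out at 30s per epoch => roughly 10 minutes from now.
	fmt.Println(cutoffTime(1000, 1020))
}
```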
12  extern/storage-sealing/sealing.go (vendored)

@@ -28,6 +28,7 @@ import (
 	sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
 	"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
 	"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
+	"github.com/filecoin-project/lotus/node/config"
 )

 const SectorStorePrefix = "/sectors"

@@ -66,6 +67,7 @@ type SealingAPI interface {
 	StateMinerPartitions(ctx context.Context, m address.Address, dlIdx uint64, tok TipSetToken) ([]api.Partition, error)
 	SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error)
 	ChainHead(ctx context.Context) (TipSetToken, abi.ChainEpoch, error)
+	ChainBaseFee(context.Context, TipSetToken) (abi.TokenAmount, error)
 	ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error)
 	ChainGetRandomnessFromBeacon(ctx context.Context, tok TipSetToken, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error)
 	ChainGetRandomnessFromTickets(ctx context.Context, tok TipSetToken, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error)

@@ -78,7 +80,7 @@ type AddrSel func(ctx context.Context, mi miner.MinerInfo, use api.AddrUse, good

 type Sealing struct {
 	api    SealingAPI
-	feeCfg FeeConfig
+	feeCfg config.MinerFeeConfig
 	events Events

 	maddr address.Address

@@ -112,12 +114,6 @@ type Sealing struct {
 	dealInfo *CurrentDealInfoManager
 }

-type FeeConfig struct {
-	MaxPreCommitGasFee abi.TokenAmount
-	MaxCommitGasFee    abi.TokenAmount
-	MaxTerminateGasFee abi.TokenAmount
-}
-
 type openSector struct {
 	used abi.UnpaddedPieceSize // change to bitfield/rle when AddPiece gains offset support to better fill sectors

@@ -134,7 +130,7 @@ type pendingPiece struct {
 	accepted func(abi.SectorNumber, abi.UnpaddedPieceSize, error)
 }

-func New(api SealingAPI, fc FeeConfig, events Events, maddr address.Address, ds datastore.Batching, sealer sectorstorage.SectorManager, sc SectorIDCounter, verif ffiwrapper.Verifier, prov ffiwrapper.Prover, pcp PreCommitPolicy, gc GetSealingConfigFunc, notifee SectorStateNotifee, as AddrSel) *Sealing {
+func New(api SealingAPI, fc config.MinerFeeConfig, events Events, maddr address.Address, ds datastore.Batching, sealer sectorstorage.SectorManager, sc SectorIDCounter, verif ffiwrapper.Verifier, prov ffiwrapper.Prover, pcp PreCommitPolicy, gc GetSealingConfigFunc, notifee SectorStateNotifee, as AddrSel) *Sealing {
 	s := &Sealing{
 		api:    api,
 		feeCfg: fc,
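The dropped FeeConfig struct held abi.TokenAmount fields, while config.MinerFeeConfig stores fees as FIL values (a thin wrapper around a big integer), which is why call sites elsewhere in this commit convert explicitly with big.Int(...). A sketch of that wrapper-conversion pattern with stand-in types — FIL and TokenAmount here are illustrative, not the real Lotus definitions:

```go
package main

import (
	"fmt"
	"math/big"
)

// FIL mirrors the idea behind types.FIL in Lotus: a human-friendly wrapper
// around a big integer amount. This is a stand-in for illustration only.
type FIL struct{ *big.Int }

// TokenAmount plays the role of abi.TokenAmount (also big-integer backed).
type TokenAmount struct{ *big.Int }

func main() {
	maxCommitGasFee := FIL{big.NewInt(50_000_000)}

	// The pattern in the diff: explicitly convert the config's FIL value to
	// the token-amount type expected by fee arithmetic and message sending,
	// analogous to big.Int(m.feeCfg.MaxCommitGasFee).
	fee := TokenAmount(maxCommitGasFee)
	fmt.Println("fee cap:", fee)
}
```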
8  extern/storage-sealing/states_sealing.go (vendored)

@@ -334,7 +334,7 @@ func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInf
 		return nil
 	}

-	goodFunds := big.Add(deposit, m.feeCfg.MaxPreCommitGasFee)
+	goodFunds := big.Add(deposit, big.Int(m.feeCfg.MaxPreCommitGasFee))

 	from, _, err := m.addrSel(ctx.Context(), mi, api.PreCommitAddr, goodFunds, deposit)
 	if err != nil {

@@ -342,7 +342,7 @@ func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInf
 	}

 	log.Infof("submitting precommit for sector %d (deposit: %s): ", sector.SectorNumber, deposit)
-	mcid, err := m.api.SendMsg(ctx.Context(), from, m.maddr, miner.Methods.PreCommitSector, deposit, m.feeCfg.MaxPreCommitGasFee, enc.Bytes())
+	mcid, err := m.api.SendMsg(ctx.Context(), from, m.maddr, miner.Methods.PreCommitSector, deposit, big.Int(m.feeCfg.MaxPreCommitGasFee), enc.Bytes())
 	if err != nil {
 		if params.ReplaceCapacity {
 			m.remarkForUpgrade(params.ReplaceSectorNumber)

@@ -566,7 +566,7 @@ func (m *Sealing) handleSubmitCommit(ctx statemachine.Context, sector SectorInfo
 		collateral = big.Zero()
 	}

-	goodFunds := big.Add(collateral, m.feeCfg.MaxCommitGasFee)
+	goodFunds := big.Add(collateral, big.Int(m.feeCfg.MaxCommitGasFee))

 	from, _, err := m.addrSel(ctx.Context(), mi, api.CommitAddr, goodFunds, collateral)
 	if err != nil {

@@ -574,7 +574,7 @@ func (m *Sealing) handleSubmitCommit(ctx statemachine.Context, sector SectorInfo
 	}

 	// TODO: check seed / ticket / deals are up to date
-	mcid, err := m.api.SendMsg(ctx.Context(), from, m.maddr, miner.Methods.ProveCommitSector, collateral, m.feeCfg.MaxCommitGasFee, enc.Bytes())
+	mcid, err := m.api.SendMsg(ctx.Context(), from, m.maddr, miner.Methods.ProveCommitSector, collateral, big.Int(m.feeCfg.MaxCommitGasFee), enc.Bytes())
 	if err != nil {
 		return ctx.Send(SectorCommitFailed{xerrors.Errorf("pushing message to mpool: %w", err)})
 	}
9  extern/storage-sealing/terminate_batch.go (vendored)

@@ -19,6 +19,7 @@ import (

 	"github.com/filecoin-project/lotus/api"
 	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+	"github.com/filecoin-project/lotus/node/config"
 )

 type TerminateBatcherApi interface {

@@ -34,7 +35,7 @@ type TerminateBatcher struct {
 	maddr     address.Address
 	mctx      context.Context
 	addrSel   AddrSel
-	feeCfg    FeeConfig
+	feeCfg    config.MinerFeeConfig
 	getConfig GetSealingConfigFunc

 	todo map[SectorLocation]*bitfield.BitField // MinerSectorLocation -> BitField

@@ -46,7 +47,7 @@ type TerminateBatcher struct {
 	lk sync.Mutex
 }

-func NewTerminationBatcher(mctx context.Context, maddr address.Address, api TerminateBatcherApi, addrSel AddrSel, feeCfg FeeConfig, getConfig GetSealingConfigFunc) *TerminateBatcher {
+func NewTerminationBatcher(mctx context.Context, maddr address.Address, api TerminateBatcherApi, addrSel AddrSel, feeCfg config.MinerFeeConfig, getConfig GetSealingConfigFunc) *TerminateBatcher {
 	b := &TerminateBatcher{
 		api:   api,
 		maddr: maddr,

@@ -214,12 +215,12 @@ func (b *TerminateBatcher) processBatch(notif, after bool) (*cid.Cid, error) {
 		return nil, xerrors.Errorf("couldn't get miner info: %w", err)
 	}

-	from, _, err := b.addrSel(b.mctx, mi, api.TerminateSectorsAddr, b.feeCfg.MaxTerminateGasFee, b.feeCfg.MaxTerminateGasFee)
+	from, _, err := b.addrSel(b.mctx, mi, api.TerminateSectorsAddr, big.Int(b.feeCfg.MaxTerminateGasFee), big.Int(b.feeCfg.MaxTerminateGasFee))
 	if err != nil {
 		return nil, xerrors.Errorf("no good address found: %w", err)
 	}

-	mcid, err := b.api.SendMsg(b.mctx, from, b.maddr, miner.Methods.TerminateSectors, big.Zero(), b.feeCfg.MaxTerminateGasFee, enc.Bytes())
+	mcid, err := b.api.SendMsg(b.mctx, from, b.maddr, miner.Methods.TerminateSectors, big.Zero(), big.Int(b.feeCfg.MaxTerminateGasFee), enc.Bytes())
 	if err != nil {
 		return nil, xerrors.Errorf("sending message failed: %w", err)
 	}
48  gateway/handler.go (new file)

@@ -0,0 +1,48 @@
package gateway

import (
	"net/http"

	"contrib.go.opencensus.io/exporter/prometheus"
	"github.com/filecoin-project/go-jsonrpc"
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/api/v0api"
	"github.com/filecoin-project/lotus/api/v1api"
	"github.com/filecoin-project/lotus/metrics"
	"github.com/gorilla/mux"
	promclient "github.com/prometheus/client_golang/prometheus"
)

// Handler returns a gateway http.Handler, to be mounted as-is on the server.
func Handler(a api.Gateway, opts ...jsonrpc.ServerOption) (http.Handler, error) {
	m := mux.NewRouter()

	serveRpc := func(path string, hnd interface{}) {
		rpcServer := jsonrpc.NewServer(opts...)
		rpcServer.Register("Filecoin", hnd)
		m.Handle(path, rpcServer)
	}

	ma := metrics.MetricedGatewayAPI(a)

	serveRpc("/rpc/v1", ma)
	serveRpc("/rpc/v0", api.Wrap(new(v1api.FullNodeStruct), new(v0api.WrapperV1Full), ma))

	registry := promclient.DefaultRegisterer.(*promclient.Registry)
	exporter, err := prometheus.NewExporter(prometheus.Options{
		Registry:  registry,
		Namespace: "lotus_gw",
	})
	if err != nil {
		return nil, err
	}
	m.Handle("/debug/metrics", exporter)
	m.PathPrefix("/").Handler(http.DefaultServeMux)

	/*ah := &auth.Handler{
		Verify: nodeApi.AuthVerify,
		Next:   mux.ServeHTTP,
	}*/

	return m, nil
}
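A sketch of how the new gateway handler might be mounted on a plain net/http server; only gateway.Handler comes from the diff above, while the surrounding wiring (the port, and how a concrete api.Gateway instance is obtained) is assumed for illustration:

```go
package main

import (
	"log"
	"net/http"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/gateway"
)

func serve(gw api.Gateway) error {
	// Handler wires up /rpc/v0, /rpc/v1 and /debug/metrics on a single mux.
	h, err := gateway.Handler(gw)
	if err != nil {
		return err
	}
	return http.ListenAndServe(":2346", h)
}

func main() {
	var gw api.Gateway // in real usage, obtained from a running node / RPC client
	log.Fatal(serve(gw))
}
```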
2  go.mod

@@ -48,7 +48,7 @@ require (
 	github.com/filecoin-project/specs-actors/v2 v2.3.5
 	github.com/filecoin-project/specs-actors/v3 v3.1.1
 	github.com/filecoin-project/specs-actors/v4 v4.0.1
-	github.com/filecoin-project/specs-actors/v5 v5.0.0-20210602024058-0c296bb386bf
+	github.com/filecoin-project/specs-actors/v5 v5.0.0-20210609212542-73e0409ac77c
 	github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506
 	github.com/filecoin-project/test-vectors/schema v0.0.5
 	github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1

4  go.sum

@@ -331,8 +331,8 @@ github.com/filecoin-project/specs-actors/v4 v4.0.0/go.mod h1:TkHXf/l7Wyw4ZejyXIP
 github.com/filecoin-project/specs-actors/v4 v4.0.1 h1:AiWrtvJZ63MHGe6rn7tPu4nSUY8bA1KDNszqJaD5+Fg=
 github.com/filecoin-project/specs-actors/v4 v4.0.1/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng=
 github.com/filecoin-project/specs-actors/v5 v5.0.0-20210512015452-4fe3889fff57/go.mod h1:283yBMMUSDB2abcjP/hhrwTkhb9h3sfM6KGrep/ZlBI=
-github.com/filecoin-project/specs-actors/v5 v5.0.0-20210602024058-0c296bb386bf h1:xt9A1omyhSDbQvpVk7Na1J15a/n8y0y4GQDLeiWLpFs=
-github.com/filecoin-project/specs-actors/v5 v5.0.0-20210602024058-0c296bb386bf/go.mod h1:b/btpRl84Q9SeDKlyIoORBQwe2OTmq14POrYrVvBWCM=
+github.com/filecoin-project/specs-actors/v5 v5.0.0-20210609212542-73e0409ac77c h1:GnDJ6q3QEm2ytTKjPFQSvczAltgCSb3j9F1FeynwvPA=
+github.com/filecoin-project/specs-actors/v5 v5.0.0-20210609212542-73e0409ac77c/go.mod h1:b/btpRl84Q9SeDKlyIoORBQwe2OTmq14POrYrVvBWCM=
 github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 h1:Ur/l2+6qN+lQiqjozWWc5p9UDaAMDZKTlDS98oRnlIw=
 github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g=
 github.com/filecoin-project/test-vectors/schema v0.0.5 h1:w3zHQhzM4pYxJDl21avXjOKBLF8egrvwUwjpT8TquDg=
267  itests/api_test.go (new file)

@@ -0,0 +1,267 @@
package itests

import (
	"context"
	"strings"
	"testing"
	"time"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/big"
	lapi "github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/itests/kit"
	"github.com/filecoin-project/lotus/node/impl"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestAPI(t *testing.T) {
	t.Run("direct", func(t *testing.T) {
		runAPITest(t, kit.Builder)
	})
	t.Run("rpc", func(t *testing.T) {
		runAPITest(t, kit.RPCBuilder)
	})
}

type apiSuite struct {
	makeNodes kit.APIBuilder
}

// runAPITest is the entry point to API test suite
func runAPITest(t *testing.T, b kit.APIBuilder) {
	ts := apiSuite{
		makeNodes: b,
	}

	t.Run("version", ts.testVersion)
	t.Run("id", ts.testID)
	t.Run("testConnectTwo", ts.testConnectTwo)
	t.Run("testMining", ts.testMining)
	t.Run("testMiningReal", ts.testMiningReal)
	t.Run("testSearchMsg", ts.testSearchMsg)
	t.Run("testNonGenesisMiner", ts.testNonGenesisMiner)
}

func (ts *apiSuite) testVersion(t *testing.T) {
	lapi.RunningNodeType = lapi.NodeFull
	t.Cleanup(func() {
		lapi.RunningNodeType = lapi.NodeUnknown
	})

	ctx := context.Background()
	apis, _ := ts.makeNodes(t, kit.OneFull, kit.OneMiner)
	napi := apis[0]

	v, err := napi.Version(ctx)
	if err != nil {
		t.Fatal(err)
	}
	versions := strings.Split(v.Version, "+")
	if len(versions) <= 0 {
		t.Fatal("empty version")
	}
	require.Equal(t, versions[0], build.BuildVersion)
}

func (ts *apiSuite) testSearchMsg(t *testing.T) {
	apis, miners := ts.makeNodes(t, kit.OneFull, kit.OneMiner)

	api := apis[0]
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	senderAddr, err := api.WalletDefaultAddress(ctx)
	if err != nil {
		t.Fatal(err)
	}

	msg := &types.Message{
		From:  senderAddr,
		To:    senderAddr,
		Value: big.Zero(),
	}
	bm := kit.NewBlockMiner(t, miners[0])
	bm.MineBlocks(ctx, 100*time.Millisecond)
	defer bm.Stop()

	sm, err := api.MpoolPushMessage(ctx, msg, nil)
	if err != nil {
		t.Fatal(err)
	}
	res, err := api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
	if err != nil {
		t.Fatal(err)
	}
	if res.Receipt.ExitCode != 0 {
		t.Fatal("did not successfully send message")
	}

	searchRes, err := api.StateSearchMsg(ctx, types.EmptyTSK, sm.Cid(), lapi.LookbackNoLimit, true)
	if err != nil {
		t.Fatal(err)
	}

	if searchRes.TipSet != res.TipSet {
		t.Fatalf("search ts: %s, different from wait ts: %s", searchRes.TipSet, res.TipSet)
	}

}

func (ts *apiSuite) testID(t *testing.T) {
	ctx := context.Background()
	apis, _ := ts.makeNodes(t, kit.OneFull, kit.OneMiner)
	api := apis[0]

	id, err := api.ID(ctx)
	if err != nil {
		t.Fatal(err)
	}
	assert.Regexp(t, "^12", id.Pretty())
}

func (ts *apiSuite) testConnectTwo(t *testing.T) {
	ctx := context.Background()
	apis, _ := ts.makeNodes(t, kit.TwoFull, kit.OneMiner)

	p, err := apis[0].NetPeers(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if len(p) != 0 {
		t.Error("Node 0 has a peer")
	}

	p, err = apis[1].NetPeers(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if len(p) != 0 {
		t.Error("Node 1 has a peer")
	}

	addrs, err := apis[1].NetAddrsListen(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if err := apis[0].NetConnect(ctx, addrs); err != nil {
		t.Fatal(err)
	}

	p, err = apis[0].NetPeers(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if len(p) != 1 {
		t.Error("Node 0 doesn't have 1 peer")
	}

	p, err = apis[1].NetPeers(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if len(p) != 1 {
		t.Error("Node 0 doesn't have 1 peer")
	}
}

func (ts *apiSuite) testMining(t *testing.T) {
	ctx := context.Background()
	fulls, miners := ts.makeNodes(t, kit.OneFull, kit.OneMiner)
	api := fulls[0]

	newHeads, err := api.ChainNotify(ctx)
	require.NoError(t, err)
	initHead := (<-newHeads)[0]
	baseHeight := initHead.Val.Height()

	h1, err := api.ChainHead(ctx)
	require.NoError(t, err)
	require.Equal(t, int64(h1.Height()), int64(baseHeight))

	bm := kit.NewBlockMiner(t, miners[0])
	bm.MineUntilBlock(ctx, fulls[0], nil)
	require.NoError(t, err)

	<-newHeads

	h2, err := api.ChainHead(ctx)
	require.NoError(t, err)
	require.Greater(t, int64(h2.Height()), int64(h1.Height()))
}

func (ts *apiSuite) testMiningReal(t *testing.T) {
	build.InsecurePoStValidation = false
	defer func() {
		build.InsecurePoStValidation = true
	}()

	ctx := context.Background()
	fulls, miners := ts.makeNodes(t, kit.OneFull, kit.OneMiner)
	api := fulls[0]

	newHeads, err := api.ChainNotify(ctx)
	require.NoError(t, err)
	at := (<-newHeads)[0].Val.Height()

	h1, err := api.ChainHead(ctx)
	require.NoError(t, err)
	require.Equal(t, int64(at), int64(h1.Height()))

	bm := kit.NewBlockMiner(t, miners[0])

	bm.MineUntilBlock(ctx, fulls[0], nil)
	require.NoError(t, err)

	<-newHeads

	h2, err := api.ChainHead(ctx)
	require.NoError(t, err)
	require.Greater(t, int64(h2.Height()), int64(h1.Height()))

	bm.MineUntilBlock(ctx, fulls[0], nil)
	require.NoError(t, err)

	<-newHeads

	h3, err := api.ChainHead(ctx)
	require.NoError(t, err)
	require.Greater(t, int64(h3.Height()), int64(h2.Height()))
}

func (ts *apiSuite) testNonGenesisMiner(t *testing.T) {
	ctx := context.Background()
	n, sn := ts.makeNodes(t,
		[]kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(-1)},
		[]kit.StorageMiner{{Full: 0, Preseal: kit.PresealGenesis}},
	)

	full, ok := n[0].FullNode.(*impl.FullNodeAPI)
	if !ok {
		t.Skip("not testing with a full node")
		return
	}
	genesisMiner := sn[0]

	bm := kit.NewBlockMiner(t, genesisMiner)
	bm.MineBlocks(ctx, 4*time.Millisecond)
	t.Cleanup(bm.Stop)

	gaa, err := genesisMiner.ActorAddress(ctx)
	require.NoError(t, err)

	gmi, err := full.StateMinerInfo(ctx, gaa, types.EmptyTSK)
	require.NoError(t, err)

	testm := n[0].Stb(ctx, t, kit.TestSpt, gmi.Owner)

	ta, err := testm.ActorAddress(ctx)
	require.NoError(t, err)

	tid, err := address.IDFromAddress(ta)
	require.NoError(t, err)

	require.Equal(t, uint64(1001), tid)
}
140  itests/batch_deal_test.go (new file)

@@ -0,0 +1,140 @@
package itests

import (
	"context"
	"fmt"
	"sort"
	"testing"
	"time"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
	"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
	"github.com/filecoin-project/lotus/itests/kit"
	"github.com/filecoin-project/lotus/markets/storageadapter"
	"github.com/filecoin-project/lotus/node"
	"github.com/filecoin-project/lotus/node/impl"
	"github.com/filecoin-project/lotus/node/modules/dtypes"
	"github.com/stretchr/testify/require"
)

func TestBatchDealInput(t *testing.T) {
	kit.QuietMiningLogs()

	var (
		blockTime = 10 * time.Millisecond

		// For these tests where the block time is artificially short, just use
		// a deal start epoch that is guaranteed to be far enough in the future
		// so that the deal starts sealing in time
		dealStartEpoch = abi.ChainEpoch(2 << 12)
	)

	run := func(piece, deals, expectSectors int) func(t *testing.T) {
		return func(t *testing.T) {
			publishPeriod := 10 * time.Second
			maxDealsPerMsg := uint64(deals)

			// Set max deals per publish deals message to maxDealsPerMsg
			minerDef := []kit.StorageMiner{{
				Full: 0,
				Opts: node.Options(
					node.Override(
						new(*storageadapter.DealPublisher),
						storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
							Period:         publishPeriod,
							MaxDealsPerMsg: maxDealsPerMsg,
						})),
					node.Override(new(dtypes.GetSealingConfigFunc), func() (dtypes.GetSealingConfigFunc, error) {
						return func() (sealiface.Config, error) {
							return sealiface.Config{
								MaxWaitDealsSectors:       2,
								MaxSealingSectors:         1,
								MaxSealingSectorsForDeals: 3,
								AlwaysKeepUnsealedCopy:    true,
								WaitDealsDelay:            time.Hour,
							}, nil
						}, nil
					}),
				),
				Preseal: kit.PresealGenesis,
			}}

			// Create a connect client and miner node
			n, sn := kit.MockMinerBuilder(t, kit.OneFull, minerDef)
			client := n[0].FullNode.(*impl.FullNodeAPI)
			miner := sn[0]

			blockMiner := kit.ConnectAndStartMining(t, blockTime, miner, client)
			t.Cleanup(blockMiner.Stop)

			dh := kit.NewDealHarness(t, client, miner)
			ctx := context.Background()

			err := miner.MarketSetAsk(ctx, big.Zero(), big.Zero(), 200, 128, 32<<30)
			require.NoError(t, err)

			checkNoPadding := func() {
				sl, err := sn[0].SectorsList(ctx)
				require.NoError(t, err)

				sort.Slice(sl, func(i, j int) bool {
					return sl[i] < sl[j]
				})

				for _, snum := range sl {
					si, err := sn[0].SectorsStatus(ctx, snum, false)
					require.NoError(t, err)

					// fmt.Printf("S %d: %+v %s\n", snum, si.Deals, si.State)

					for _, deal := range si.Deals {
						if deal == 0 {
							fmt.Printf("sector %d had a padding piece!\n", snum)
						}
					}
				}
			}

			// Starts a deal and waits until it's published
			runDealTillSeal := func(rseed int) {
				res, _, _, err := kit.CreateImportFile(ctx, client, rseed, piece)
				require.NoError(t, err)

				deal := dh.StartDeal(ctx, res.Root, false, dealStartEpoch)
				dh.WaitDealSealed(ctx, deal, false, true, checkNoPadding)
			}

			// Run maxDealsPerMsg deals in parallel
			done := make(chan struct{}, maxDealsPerMsg)
			for rseed := 0; rseed < int(maxDealsPerMsg); rseed++ {
				rseed := rseed
				go func() {
					runDealTillSeal(rseed)
					done <- struct{}{}
				}()
			}

			// Wait for maxDealsPerMsg of the deals to be published
			for i := 0; i < int(maxDealsPerMsg); i++ {
				<-done
			}

			checkNoPadding()

			sl, err := sn[0].SectorsList(ctx)
			require.NoError(t, err)
			require.Equal(t, len(sl), expectSectors)
		}
	}

	t.Run("4-p1600B", run(1600, 4, 4))
	t.Run("4-p513B", run(513, 4, 2))
	if !testing.Short() {
		t.Run("32-p257B", run(257, 32, 8))
		t.Run("32-p10B", run(10, 32, 2))

		// fixme: this appears to break data-transfer / markets in some really creative ways
		// t.Run("128-p10B", run(10, 128, 8))
	}
}
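The expected sector counts in the sub-tests above follow from piece padding: a piece is padded up to the next valid unpadded size (127 * 2^n bytes), and, assuming the 2 KiB sectors used by the mock sealing backend, each sector holds 2032 unpadded bytes. A small sketch reproducing the expected numbers:

```go
package main

import "fmt"

// nextUnpadded returns the smallest valid unpadded piece size (127 * 2^n)
// that can hold n bytes.
func nextUnpadded(n uint64) uint64 {
	size := uint64(127)
	for size < n {
		size *= 2
	}
	return size
}

func main() {
	const sectorCapacity = 2032 // unpadded bytes in a 2 KiB sector (2048 * 127/128)

	for _, tc := range []struct{ pieceBytes, deals uint64 }{
		{1600, 4}, // pads to 2032 -> 1 deal/sector  -> 4 sectors
		{513, 4},  // pads to 1016 -> 2 deals/sector -> 2 sectors
		{257, 32}, // pads to 508  -> 4 deals/sector -> 8 sectors
		{10, 32},  // pads to 127  -> 16 deals/sector -> 2 sectors
	} {
		padded := nextUnpadded(tc.pieceBytes)
		perSector := uint64(sectorCapacity) / padded
		fmt.Printf("%dB x%d -> %d sectors\n", tc.pieceBytes, tc.deals, (tc.deals+perSector-1)/perSector)
	}
}
```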
@@ -1,4 +1,4 @@
-package test
+package itests

 import (
 	"context"

@@ -7,6 +7,7 @@ import (
 	"testing"
 	"time"

+	"github.com/filecoin-project/lotus/itests/kit"
 	"github.com/stretchr/testify/require"

 	"github.com/filecoin-project/go-state-types/abi"

@@ -15,7 +16,9 @@ import (
 	"github.com/filecoin-project/lotus/node/impl"
 )

-func TestCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) {
+func TestCCUpgrade(t *testing.T) {
+	kit.QuietMiningLogs()
+
 	for _, height := range []abi.ChainEpoch{
 		-1,  // before
 		162, // while sealing

@@ -24,14 +27,14 @@ func TestCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) {
 	} {
 		height := height // make linters happy by copying
 		t.Run(fmt.Sprintf("upgrade-%d", height), func(t *testing.T) {
-			testCCUpgrade(t, b, blocktime, height)
+			runTestCCUpgrade(t, kit.MockMinerBuilder, 5*time.Millisecond, height)
 		})
 	}
 }

-func testCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration, upgradeHeight abi.ChainEpoch) {
+func runTestCCUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Duration, upgradeHeight abi.ChainEpoch) {
 	ctx := context.Background()
-	n, sn := b(t, []FullNodeOpts{FullNodeWithLatestActorsAt(upgradeHeight)}, OneMiner)
+	n, sn := b(t, []kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(upgradeHeight)}, kit.OneMiner)
 	client := n[0].FullNode.(*impl.FullNodeAPI)
 	miner := sn[0]

@@ -51,7 +54,7 @@ func testCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration, upgradeH
 		defer close(done)
 		for atomic.LoadInt64(&mine) == 1 {
 			time.Sleep(blocktime)
-			if err := sn[0].MineOne(ctx, MineNext); err != nil {
+			if err := sn[0].MineOne(ctx, kit.MineNext); err != nil {
 				t.Error(err)
 			}
 		}

@@ -62,10 +65,10 @@ func testCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration, upgradeH
 		t.Fatal(err)
 	}

-	CC := abi.SectorNumber(GenesisPreseals + 1)
+	CC := abi.SectorNumber(kit.GenesisPreseals + 1)
 	Upgraded := CC + 1

-	pledgeSectors(t, ctx, miner, 1, 0, nil)
+	kit.PledgeSectors(t, ctx, miner, 1, 0, nil)

 	sl, err := miner.SectorsList(ctx)
 	if err != nil {

@@ -89,7 +92,9 @@ func testCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration, upgradeH
 		t.Fatal(err)
 	}

-	MakeDeal(t, ctx, 6, client, miner, false, false, 0)
+	dh := kit.NewDealHarness(t, client, miner)
+
+	dh.MakeFullDeal(context.Background(), 6, false, false, 0)

 	// Validate upgrade
22  itests/cli_test.go (new file)

@@ -0,0 +1,22 @@
package itests

import (
	"context"
	"os"
	"testing"
	"time"

	"github.com/filecoin-project/lotus/cli"
	"github.com/filecoin-project/lotus/itests/kit"
)

// TestClient does a basic test to exercise the client CLI commands.
func TestClient(t *testing.T) {
	_ = os.Setenv("BELLMAN_NO_GPU", "1")
	kit.QuietMiningLogs()

	blocktime := 5 * time.Millisecond
	ctx := context.Background()
	clientNode, _ := kit.StartOneNodeOneMiner(ctx, t, blocktime)
	kit.RunClientTest(t, cli.Commands, clientNode)
}
@@ -1,26 +1,20 @@
-package test
+package itests

 import (
 	"bytes"
 	"context"
 	"fmt"
 	"os"
 	"testing"
 	"time"

-	"github.com/filecoin-project/lotus/api"
-
-	"github.com/stretchr/testify/require"
-
 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-bitfield"
 	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/go-state-types/big"
 	"github.com/filecoin-project/go-state-types/exitcode"
 	"github.com/filecoin-project/go-state-types/network"
-	miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
-	"github.com/ipfs/go-cid"
-	cbor "github.com/ipfs/go-ipld-cbor"

+	"github.com/filecoin-project/lotus/api"
 	"github.com/filecoin-project/lotus/blockstore"
 	"github.com/filecoin-project/lotus/build"
 	"github.com/filecoin-project/lotus/chain/actors"

@@ -28,7 +22,13 @@ import (
 	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/extern/sector-storage/mock"
+	"github.com/filecoin-project/lotus/itests/kit"
 	"github.com/filecoin-project/lotus/node/impl"
+	miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
+	"github.com/ipfs/go-cid"
+	cbor "github.com/ipfs/go-ipld-cbor"
+	logging "github.com/ipfs/go-log/v2"
+	"github.com/stretchr/testify/require"
 )

 // TestDeadlineToggling:

@@ -54,16 +54,28 @@
 // * goes through another PP
 // * asserts that miner B loses power
 // * asserts that miner D loses power, is inactive
-func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) {
-	var upgradeH abi.ChainEpoch = 4000
-	var provingPeriod abi.ChainEpoch = 2880
+func TestDeadlineToggling(t *testing.T) {
+	if os.Getenv("LOTUS_TEST_DEADLINE_TOGGLING") != "1" {
+		t.Skip("this takes a few minutes, set LOTUS_TEST_DEADLINE_TOGGLING=1 to run")
+	}
+	_ = logging.SetLogLevel("miner", "ERROR")
+	_ = logging.SetLogLevel("chainstore", "ERROR")
+	_ = logging.SetLogLevel("chain", "ERROR")
+	_ = logging.SetLogLevel("sub", "ERROR")
+	_ = logging.SetLogLevel("storageminer", "FATAL")

-	const sectorsC, sectorsD, sectersB = 10, 9, 8
+	const sectorsC, sectorsD, sectorsB = 10, 9, 8
+
+	var (
+		upgradeH      abi.ChainEpoch = 4000
+		provingPeriod abi.ChainEpoch = 2880
+		blocktime                    = 2 * time.Millisecond
+	)

 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()

-	n, sn := b(t, []FullNodeOpts{FullNodeWithNetworkUpgradeAt(network.Version12, upgradeH)}, OneMiner)
+	n, sn := kit.MockMinerBuilder(t, []kit.FullNodeOpts{kit.FullNodeWithNetworkUpgradeAt(network.Version12, upgradeH)}, kit.OneMiner)

 	client := n[0].FullNode.(*impl.FullNodeAPI)
 	minerA := sn[0]

@@ -92,7 +104,7 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) {
 		defer close(done)
 		for ctx.Err() == nil {
 			build.Clock.Sleep(blocktime)
-			if err := minerA.MineOne(ctx, MineNext); err != nil {
+			if err := minerA.MineOne(ctx, kit.MineNext); err != nil {
 				if ctx.Err() != nil {
 					// context was canceled, ignore the error.
 					return

@@ -106,8 +118,8 @@
 		<-done
 	}()

-	minerB := n[0].Stb(ctx, t, TestSpt, defaultFrom)
-	minerC := n[0].Stb(ctx, t, TestSpt, defaultFrom)
+	minerB := n[0].Stb(ctx, t, kit.TestSpt, defaultFrom)
+	minerC := n[0].Stb(ctx, t, kit.TestSpt, defaultFrom)

 	maddrB, err := minerB.ActorAddress(ctx)
 	require.NoError(t, err)

@@ -119,7 +131,7 @@

 	// pledge sectors on C, go through a PP, check for power
 	{
-		pledgeSectors(t, ctx, minerC, sectorsC, 0, nil)
+		kit.PledgeSectors(t, ctx, minerC, sectorsC, 0, nil)

 		di, err := client.StateMinerProvingDeadline(ctx, maddrC, types.EmptyTSK)
 		require.NoError(t, err)

@@ -204,8 +216,8 @@
 	require.NoError(t, err)
 	require.GreaterOrEqual(t, nv, network.Version12)

-	minerD := n[0].Stb(ctx, t, TestSpt, defaultFrom)
-	minerE := n[0].Stb(ctx, t, TestSpt, defaultFrom)
+	minerD := n[0].Stb(ctx, t, kit.TestSpt, defaultFrom)
+	minerE := n[0].Stb(ctx, t, kit.TestSpt, defaultFrom)

 	maddrD, err := minerD.ActorAddress(ctx)
 	require.NoError(t, err)

@@ -213,7 +225,7 @@
 	require.NoError(t, err)

 	// first round of miner checks
-	checkMiner(maddrA, types.NewInt(uint64(ssz)*GenesisPreseals), true, true, types.EmptyTSK)
+	checkMiner(maddrA, types.NewInt(uint64(ssz)*kit.GenesisPreseals), true, true, types.EmptyTSK)
 	checkMiner(maddrC, types.NewInt(uint64(ssz)*sectorsC), true, true, types.EmptyTSK)

 	checkMiner(maddrB, types.NewInt(0), false, false, types.EmptyTSK)

@@ -221,10 +233,10 @@
 	checkMiner(maddrE, types.NewInt(0), false, false, types.EmptyTSK)

 	// pledge sectors on minerB/minerD, stop post on minerC
-	pledgeSectors(t, ctx, minerB, sectersB, 0, nil)
+	kit.PledgeSectors(t, ctx, minerB, sectorsB, 0, nil)
 	checkMiner(maddrB, types.NewInt(0), true, true, types.EmptyTSK)

-	pledgeSectors(t, ctx, minerD, sectorsD, 0, nil)
+	kit.PledgeSectors(t, ctx, minerD, sectorsD, 0, nil)
 	checkMiner(maddrD, types.NewInt(0), true, true, types.EmptyTSK)

 	minerC.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).Fail()

@@ -240,7 +252,7 @@
 	params := &miner.SectorPreCommitInfo{
 		Expiration:   2880 * 300,
 		SectorNumber: 22,
-		SealProof:    TestSpt,
+		SealProof:    kit.TestSpt,

 		SealedCID:     cr,
 		SealRandEpoch: head.Height() - 200,

@@ -290,9 +302,9 @@
 	}

 	// second round of miner checks
-	checkMiner(maddrA, types.NewInt(uint64(ssz)*GenesisPreseals), true, true, types.EmptyTSK)
+	checkMiner(maddrA, types.NewInt(uint64(ssz)*kit.GenesisPreseals), true, true, types.EmptyTSK)
 	checkMiner(maddrC, types.NewInt(0), true, true, types.EmptyTSK)
-	checkMiner(maddrB, types.NewInt(uint64(ssz)*sectersB), true, true, types.EmptyTSK)
+	checkMiner(maddrB, types.NewInt(uint64(ssz)*sectorsB), true, true, types.EmptyTSK)
 	checkMiner(maddrD, types.NewInt(uint64(ssz)*sectorsD), true, true, types.EmptyTSK)
 	checkMiner(maddrE, types.NewInt(0), false, false, types.EmptyTSK)

@@ -361,8 +373,7 @@
 		build.Clock.Sleep(blocktime)
 	}

 	// third round of miner checks
-	checkMiner(maddrA, types.NewInt(uint64(ssz)*GenesisPreseals), true, true, types.EmptyTSK)
+	checkMiner(maddrA, types.NewInt(uint64(ssz)*kit.GenesisPreseals), true, true, types.EmptyTSK)
 	checkMiner(maddrC, types.NewInt(0), true, true, types.EmptyTSK)
 	checkMiner(maddrB, types.NewInt(0), true, true, types.EmptyTSK)
 	checkMiner(maddrD, types.NewInt(0), false, false, types.EmptyTSK)
523
itests/deals_test.go
Normal file
523
itests/deals_test.go
Normal file
@ -0,0 +1,523 @@
|
||||
package itests
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
|
||||
"github.com/filecoin-project/lotus/chain/actors/policy"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/itests/kit"
|
||||
"github.com/filecoin-project/lotus/markets/storageadapter"
|
||||
"github.com/filecoin-project/lotus/miner"
|
||||
"github.com/filecoin-project/lotus/node"
|
||||
"github.com/filecoin-project/lotus/node/impl"
|
||||
market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestDealCycle(t *testing.T) {
|
||||
kit.QuietMiningLogs()
|
||||
|
||||
blockTime := 10 * time.Millisecond
|
||||
|
||||
// For these tests where the block time is artificially short, just use
|
||||
// a deal start epoch that is guaranteed to be far enough in the future
|
||||
// so that the deal starts sealing in time
|
||||
dealStartEpoch := abi.ChainEpoch(2 << 12)
|
||||
|
||||
t.Run("TestFullDealCycle_Single", func(t *testing.T) {
|
||||
runFullDealCycles(t, 1, kit.MockMinerBuilder, blockTime, false, false, dealStartEpoch)
|
||||
})
|
||||
t.Run("TestFullDealCycle_Two", func(t *testing.T) {
|
||||
runFullDealCycles(t, 2, kit.MockMinerBuilder, blockTime, false, false, dealStartEpoch)
|
||||
})
|
||||
t.Run("WithExportedCAR", func(t *testing.T) {
|
||||
runFullDealCycles(t, 1, kit.MockMinerBuilder, blockTime, true, false, dealStartEpoch)
|
||||
})
|
||||
t.Run("TestFastRetrievalDealCycle", func(t *testing.T) {
|
||||
runFastRetrievalDealFlowT(t, kit.MockMinerBuilder, blockTime, dealStartEpoch)
|
||||
})
|
||||
t.Run("TestZeroPricePerByteRetrievalDealFlow", func(t *testing.T) {
|
||||
runZeroPricePerByteRetrievalDealFlow(t, kit.MockMinerBuilder, blockTime, dealStartEpoch)
|
||||
})
|
||||
}
|
||||
|
||||
func TestAPIDealFlowReal(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping test in short mode")
|
||||
}
|
||||
|
||||
kit.QuietMiningLogs()
|
||||
|
||||
// TODO: just set this globally?
|
||||
oldDelay := policy.GetPreCommitChallengeDelay()
|
||||
policy.SetPreCommitChallengeDelay(5)
|
||||
t.Cleanup(func() {
|
||||
policy.SetPreCommitChallengeDelay(oldDelay)
|
||||
})
|
||||
|
||||
t.Run("basic", func(t *testing.T) {
|
||||
runFullDealCycles(t, 1, kit.Builder, time.Second, false, false, 0)
|
||||
})
|
||||
|
||||
t.Run("fast-retrieval", func(t *testing.T) {
|
||||
runFullDealCycles(t, 1, kit.Builder, time.Second, false, true, 0)
|
||||
})
|
||||
|
||||
t.Run("retrieval-second", func(t *testing.T) {
|
||||
runSecondDealRetrievalTest(t, kit.Builder, time.Second)
|
||||
})
|
||||
}
|
||||
|
||||
func TestPublishDealsBatching(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
kit.QuietMiningLogs()
|
||||
|
||||
b := kit.MockMinerBuilder
|
||||
blocktime := 10 * time.Millisecond
|
||||
startEpoch := abi.ChainEpoch(2 << 12)
|
||||
|
||||
publishPeriod := 10 * time.Second
|
||||
maxDealsPerMsg := uint64(2)
|
||||
|
||||
// Set max deals per publish deals message to 2
|
||||
minerDef := []kit.StorageMiner{{
|
||||
Full: 0,
|
||||
Opts: node.Override(
|
||||
new(*storageadapter.DealPublisher),
|
||||
storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
|
||||
Period: publishPeriod,
|
||||
MaxDealsPerMsg: maxDealsPerMsg,
|
||||
})),
|
||||
Preseal: kit.PresealGenesis,
|
||||
}}
|
||||
|
||||
// Create a connect client and miner node
|
||||
n, sn := b(t, kit.OneFull, minerDef)
|
||||
client := n[0].FullNode.(*impl.FullNodeAPI)
|
||||
miner := sn[0]
|
||||
|
||||
kit.ConnectAndStartMining(t, blocktime, miner, client)
|
||||
|
||||
dh := kit.NewDealHarness(t, client, miner)
|
||||
|
||||
// Starts a deal and waits until it's published
|
||||
runDealTillPublish := func(rseed int) {
|
||||
res, _, _, err := kit.CreateImportFile(ctx, client, rseed, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
upds, err := client.ClientGetDealUpdates(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
dh.StartDeal(ctx, res.Root, false, startEpoch)
|
||||
|
||||
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
|
||||
time.Sleep(time.Second)
|
||||
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
for upd := range upds {
|
||||
if upd.DataRef.Root == res.Root && upd.State == storagemarket.StorageDealAwaitingPreCommit {
|
||||
done <- struct{}{}
|
||||
}
|
||||
}
|
||||
}()
|
||||
<-done
|
||||
}
|
||||
|
||||
// Run three deals in parallel
|
||||
done := make(chan struct{}, maxDealsPerMsg+1)
|
||||
for rseed := 1; rseed <= 3; rseed++ {
|
||||
rseed := rseed
|
||||
go func() {
|
||||
runDealTillPublish(rseed)
|
||||
done <- struct{}{}
|
||||
}()
|
||||
}
|
||||
|
||||
// Wait for two of the deals to be published
|
||||
for i := 0; i < int(maxDealsPerMsg); i++ {
|
||||
<-done
|
||||
}
|
||||
|
||||
// Expect a single PublishStorageDeals message that includes the first two deals
|
||||
msgCids, err := client.StateListMessages(ctx, &api.MessageMatch{To: market.Address}, types.EmptyTSK, 1)
|
||||
require.NoError(t, err)
|
||||
count := 0
|
||||
for _, msgCid := range msgCids {
|
||||
msg, err := client.ChainGetMessage(ctx, msgCid)
|
||||
require.NoError(t, err)
|
||||
|
||||
if msg.Method == market.Methods.PublishStorageDeals {
|
||||
count++
|
||||
var pubDealsParams market2.PublishStorageDealsParams
|
||||
err = pubDealsParams.UnmarshalCBOR(bytes.NewReader(msg.Params))
|
||||
require.NoError(t, err)
|
||||
require.Len(t, pubDealsParams.Deals, int(maxDealsPerMsg))
|
||||
}
|
||||
}
|
||||
require.Equal(t, 1, count)
|
||||
|
||||
// The third deal should be published once the publish period expires.
|
||||
// Allow a little padding as it takes a moment for the state change to
|
||||
// be noticed by the client.
|
||||
padding := 10 * time.Second
|
||||
select {
|
||||
case <-time.After(publishPeriod + padding):
|
||||
require.Fail(t, "Expected 3rd deal to be published once publish period elapsed")
|
||||
case <-done: // Success
|
||||
}
|
||||
}
|
||||
|
||||
func TestDealMining(t *testing.T) {
|
||||
// test making a deal with a fresh miner, and see if it starts to mine.
|
||||
if testing.Short() {
|
||||
t.Skip("skipping test in short mode")
|
||||
}
|
||||
|
||||
kit.QuietMiningLogs()
|
||||
|
||||
b := kit.MockMinerBuilder
|
||||
blocktime := 50 * time.Millisecond
|
||||
|
||||
ctx := context.Background()
|
||||
fulls, miners := b(t,
|
||||
kit.OneFull,
|
||||
[]kit.StorageMiner{
|
||||
{Full: 0, Preseal: kit.PresealGenesis},
|
||||
{Full: 0, Preseal: 0}, // TODO: Add support for miners on non-first full node
|
||||
})
|
||||
client := fulls[0].FullNode.(*impl.FullNodeAPI)
|
||||
genesisMiner := miners[0]
|
||||
provider := miners[1]
|
||||
|
||||
addrinfo, err := client.NetAddrsListen(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := provider.NetConnect(ctx, addrinfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := genesisMiner.NetConnect(ctx, addrinfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
time.Sleep(time.Second)
|
||||
|
||||
data := make([]byte, 600)
|
||||
rand.New(rand.NewSource(5)).Read(data)
|
||||
|
||||
r := bytes.NewReader(data)
|
||||
fcid, err := client.ClientImportLocal(ctx, r)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
fmt.Println("FILE CID: ", fcid)
|
||||
|
||||
var mine int32 = 1
|
||||
done := make(chan struct{})
|
||||
minedTwo := make(chan struct{})
|
||||
|
||||
m2addr, err := miners[1].ActorAddress(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
go func() {
|
||||
defer close(done)
|
||||
|
||||
complChan := minedTwo
|
||||
for atomic.LoadInt32(&mine) != 0 {
|
||||
wait := make(chan int)
|
||||
mdone := func(mined bool, _ abi.ChainEpoch, err error) {
|
||||
n := 0
|
||||
if mined {
|
||||
n = 1
|
||||
}
|
||||
wait <- n
|
||||
}
|
||||
|
||||
if err := miners[0].MineOne(ctx, miner.MineReq{Done: mdone}); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if err := miners[1].MineOne(ctx, miner.MineReq{Done: mdone}); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
expect := <-wait
|
||||
expect += <-wait
|
||||
|
||||
time.Sleep(blocktime)
|
||||
if expect == 0 {
|
||||
// null block
|
||||
continue
|
||||
}
|
||||
|
||||
var nodeOneMined bool
|
||||
for _, node := range miners {
|
||||
mb, err := node.MiningBase(ctx)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
for _, b := range mb.Blocks() {
|
||||
if b.Miner == m2addr {
|
||||
nodeOneMined = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if nodeOneMined && complChan != nil {
|
||||
close(complChan)
|
||||
complChan = nil
|
||||
}
|
||||
|
||||
}
|
||||
}()
|
||||
|
||||
dh := kit.NewDealHarness(t, client, provider)
|
||||
|
||||
deal := dh.StartDeal(ctx, fcid, false, 0)
|
||||
|
||||
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
|
||||
time.Sleep(time.Second)
|
||||
|
||||
dh.WaitDealSealed(ctx, deal, false, false, nil)
|
||||
|
||||
<-minedTwo
|
||||
|
||||
atomic.StoreInt32(&mine, 0)
|
||||
fmt.Println("shutting down mining")
|
||||
<-done
|
||||
}
|
||||
|
||||
func TestOfflineDealFlow(t *testing.T) {
|
||||
blocktime := 10 * time.Millisecond
|
||||
|
||||
// For these tests where the block time is artificially short, just use
|
||||
// a deal start epoch that is guaranteed to be far enough in the future
|
||||
// so that the deal starts sealing in time
|
||||
startEpoch := abi.ChainEpoch(2 << 12)
|
||||
|
||||
runTest := func(t *testing.T, fastRet bool) {
|
||||
ctx := context.Background()
|
||||
fulls, miners := kit.MockMinerBuilder(t, kit.OneFull, kit.OneMiner)
|
||||
        client, miner := fulls[0].FullNode.(*impl.FullNodeAPI), miners[0]

        kit.ConnectAndStartMining(t, blocktime, miner, client)

        dh := kit.NewDealHarness(t, client, miner)

        // Create a random file and import on the client.
        res, path, data, err := kit.CreateImportFile(ctx, client, 1, 0)
        require.NoError(t, err)

        // Get the piece size and commP
        fcid := res.Root
        pieceInfo, err := client.ClientDealPieceCID(ctx, fcid)
        require.NoError(t, err)
        fmt.Println("FILE CID: ", fcid)

        // Create a storage deal with the miner
        maddr, err := miner.ActorAddress(ctx)
        require.NoError(t, err)

        addr, err := client.WalletDefaultAddress(ctx)
        require.NoError(t, err)

        // Manual storage deal (offline deal)
        dataRef := &storagemarket.DataRef{
            TransferType: storagemarket.TTManual,
            Root:         fcid,
            PieceCid:     &pieceInfo.PieceCID,
            PieceSize:    pieceInfo.PieceSize.Unpadded(),
        }

        proposalCid, err := client.ClientStartDeal(ctx, &api.StartDealParams{
            Data:              dataRef,
            Wallet:            addr,
            Miner:             maddr,
            EpochPrice:        types.NewInt(1000000),
            DealStartEpoch:    startEpoch,
            MinBlocksDuration: uint64(build.MinDealDuration),
            FastRetrieval:     fastRet,
        })
        require.NoError(t, err)

        // Wait for the deal to reach StorageDealCheckForAcceptance on the client
        cd, err := client.ClientGetDealInfo(ctx, *proposalCid)
        require.NoError(t, err)
        require.Eventually(t, func() bool {
            cd, _ := client.ClientGetDealInfo(ctx, *proposalCid)
            return cd.State == storagemarket.StorageDealCheckForAcceptance
        }, 30*time.Second, 1*time.Second, "actual deal status is %s", storagemarket.DealStates[cd.State])

        // Create a CAR file from the raw file
        carFileDir, err := ioutil.TempDir(os.TempDir(), "test-make-deal-car")
        require.NoError(t, err)
        carFilePath := filepath.Join(carFileDir, "out.car")
        err = client.ClientGenCar(ctx, api.FileRef{Path: path}, carFilePath)
        require.NoError(t, err)

        // Import the CAR file on the miner - this is the equivalent to
        // transferring the file across the wire in a normal (non-offline) deal
        err = miner.DealsImportData(ctx, *proposalCid, carFilePath)
        require.NoError(t, err)

        // Wait for the deal to be published
        dh.WaitDealPublished(ctx, proposalCid)

        t.Logf("deal published, retrieving")

        // Retrieve the deal
        dh.TestRetrieval(ctx, fcid, &pieceInfo.PieceCID, false, data)
    }

    t.Run("NormalRetrieval", func(t *testing.T) {
        runTest(t, false)
    })
    t.Run("FastRetrieval", func(t *testing.T) {
        runTest(t, true)
    })
}

func runFullDealCycles(t *testing.T, n int, b kit.APIBuilder, blocktime time.Duration, carExport, fastRet bool, startEpoch abi.ChainEpoch) {
    fulls, miners := b(t, kit.OneFull, kit.OneMiner)
    client, miner := fulls[0].FullNode.(*impl.FullNodeAPI), miners[0]

    kit.ConnectAndStartMining(t, blocktime, miner, client)

    dh := kit.NewDealHarness(t, client, miner)

    baseseed := 6
    for i := 0; i < n; i++ {
        dh.MakeFullDeal(context.Background(), baseseed+i, carExport, fastRet, startEpoch)
    }
}

func runFastRetrievalDealFlowT(t *testing.T, b kit.APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
    ctx := context.Background()

    fulls, miners := b(t, kit.OneFull, kit.OneMiner)
    client, miner := fulls[0].FullNode.(*impl.FullNodeAPI), miners[0]

    kit.ConnectAndStartMining(t, blocktime, miner, client)

    dh := kit.NewDealHarness(t, client, miner)

    data := make([]byte, 1600)
    rand.New(rand.NewSource(int64(8))).Read(data)

    r := bytes.NewReader(data)
    fcid, err := client.ClientImportLocal(ctx, r)
    if err != nil {
        t.Fatal(err)
    }

    fmt.Println("FILE CID: ", fcid)

    deal := dh.StartDeal(ctx, fcid, true, startEpoch)
    dh.WaitDealPublished(ctx, deal)

    fmt.Println("deal published, retrieving")

    // Retrieval
    info, err := client.ClientGetDealInfo(ctx, *deal)
    require.NoError(t, err)

    dh.TestRetrieval(ctx, fcid, &info.PieceCID, false, data)
}

func runSecondDealRetrievalTest(t *testing.T, b kit.APIBuilder, blocktime time.Duration) {
    ctx := context.Background()

    fulls, miners := b(t, kit.OneFull, kit.OneMiner)
    client, miner := fulls[0].FullNode.(*impl.FullNodeAPI), miners[0]

    kit.ConnectAndStartMining(t, blocktime, miner, client)

    dh := kit.NewDealHarness(t, client, miner)

    {
        data1 := make([]byte, 800)
        rand.New(rand.NewSource(int64(3))).Read(data1)
        r := bytes.NewReader(data1)

        fcid1, err := client.ClientImportLocal(ctx, r)
        if err != nil {
            t.Fatal(err)
        }

        data2 := make([]byte, 800)
        rand.New(rand.NewSource(int64(9))).Read(data2)
        r2 := bytes.NewReader(data2)

        fcid2, err := client.ClientImportLocal(ctx, r2)
        if err != nil {
            t.Fatal(err)
        }

        deal1 := dh.StartDeal(ctx, fcid1, true, 0)

        // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
        time.Sleep(time.Second)
        dh.WaitDealSealed(ctx, deal1, true, false, nil)

        deal2 := dh.StartDeal(ctx, fcid2, true, 0)

        time.Sleep(time.Second)
        dh.WaitDealSealed(ctx, deal2, false, false, nil)

        // Retrieval
        info, err := client.ClientGetDealInfo(ctx, *deal2)
        require.NoError(t, err)

        rf, _ := miner.SectorsRefs(ctx)
        fmt.Printf("refs: %+v\n", rf)

        dh.TestRetrieval(ctx, fcid2, &info.PieceCID, false, data2)
    }
}

func runZeroPricePerByteRetrievalDealFlow(t *testing.T, b kit.APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
    ctx := context.Background()

    fulls, miners := b(t, kit.OneFull, kit.OneMiner)
    client, miner := fulls[0].FullNode.(*impl.FullNodeAPI), miners[0]

    kit.ConnectAndStartMining(t, blocktime, miner, client)

    dh := kit.NewDealHarness(t, client, miner)

    // Set price-per-byte to zero
    ask, err := miner.MarketGetRetrievalAsk(ctx)
    require.NoError(t, err)

    ask.PricePerByte = abi.NewTokenAmount(0)
    err = miner.MarketSetRetrievalAsk(ctx, ask)
    require.NoError(t, err)

    dh.MakeFullDeal(ctx, 6, false, false, startEpoch)
}
2 itests/doc.go Normal file
@ -0,0 +1,2 @@
// Package itests contains integration tests for Lotus.
package itests
@ -1,4 +1,4 @@
-package main
+package itests

import (
    "bytes"
@ -10,30 +10,25 @@ import (
    "time"

-    "github.com/filecoin-project/lotus/chain/stmgr"
-    "github.com/filecoin-project/lotus/cli"
-    clitest "github.com/filecoin-project/lotus/cli/test"
-    "github.com/filecoin-project/lotus/gateway"

-    init2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/init"
-    multisig2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig"

    "github.com/stretchr/testify/require"
    "golang.org/x/xerrors"

-    "github.com/ipfs/go-cid"

    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/go-jsonrpc"
    "github.com/filecoin-project/go-state-types/abi"
+    "github.com/ipfs/go-cid"

    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/api/client"
-    "github.com/filecoin-project/lotus/api/test"
    "github.com/filecoin-project/lotus/api/v0api"
    "github.com/filecoin-project/lotus/api/v1api"
    "github.com/filecoin-project/lotus/chain/actors/policy"
    "github.com/filecoin-project/lotus/chain/types"
+    "github.com/filecoin-project/lotus/cli"
+    "github.com/filecoin-project/lotus/gateway"
+    "github.com/filecoin-project/lotus/itests/kit"
    "github.com/filecoin-project/lotus/node"
-    builder "github.com/filecoin-project/lotus/node/test"

+    init2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/init"
+    multisig2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig"
)

const (
@ -47,11 +42,11 @@ func init() {
    policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
}

-// TestWalletMsig tests that API calls to wallet and msig can be made on a lite
+// TestGatewayWalletMsig tests that API calls to wallet and msig can be made on a lite
// node that is connected through a gateway to a full API node
-func TestWalletMsig(t *testing.T) {
+func TestGatewayWalletMsig(t *testing.T) {
    _ = os.Setenv("BELLMAN_NO_GPU", "1")
-    clitest.QuietMiningLogs()
+    kit.QuietMiningLogs()

    blocktime := 5 * time.Millisecond
    ctx := context.Background()
@ -181,11 +176,11 @@ func TestWalletMsig(t *testing.T) {
    require.True(t, approveReturn.Applied)
}

-// TestMsigCLI tests that msig CLI calls can be made
+// TestGatewayMsigCLI tests that msig CLI calls can be made
// on a lite node that is connected through a gateway to a full API node
-func TestMsigCLI(t *testing.T) {
+func TestGatewayMsigCLI(t *testing.T) {
    _ = os.Setenv("BELLMAN_NO_GPU", "1")
-    clitest.QuietMiningLogs()
+    kit.QuietMiningLogs()

    blocktime := 5 * time.Millisecond
    ctx := context.Background()
@ -193,12 +188,12 @@ func TestMsigCLI(t *testing.T) {
    defer nodes.closer()

    lite := nodes.lite
-    clitest.RunMultisigTest(t, cli.Commands, lite)
+    runMultisigTests(t, lite)
}

-func TestDealFlow(t *testing.T) {
+func TestGatewayDealFlow(t *testing.T) {
    _ = os.Setenv("BELLMAN_NO_GPU", "1")
-    clitest.QuietMiningLogs()
+    kit.QuietMiningLogs()

    blocktime := 5 * time.Millisecond
    ctx := context.Background()
@ -209,25 +204,27 @@ func TestDealFlow(t *testing.T) {
    // a deal start epoch that is guaranteed to be far enough in the future
    // so that the deal starts sealing in time
    dealStartEpoch := abi.ChainEpoch(2 << 12)
-    test.MakeDeal(t, ctx, 6, nodes.lite, nodes.miner, false, false, dealStartEpoch)
+
+    dh := kit.NewDealHarness(t, nodes.lite, nodes.miner)
+    dh.MakeFullDeal(ctx, 6, false, false, dealStartEpoch)
}

-func TestCLIDealFlow(t *testing.T) {
+func TestGatewayCLIDealFlow(t *testing.T) {
    _ = os.Setenv("BELLMAN_NO_GPU", "1")
-    clitest.QuietMiningLogs()
+    kit.QuietMiningLogs()

    blocktime := 5 * time.Millisecond
    ctx := context.Background()
    nodes := startNodesWithFunds(ctx, t, blocktime, maxLookbackCap, maxStateWaitLookbackLimit)
    defer nodes.closer()

-    clitest.RunClientTest(t, cli.Commands, nodes.lite)
+    kit.RunClientTest(t, cli.Commands, nodes.lite)
}

type testNodes struct {
-    lite   test.TestNode
-    full   test.TestNode
-    miner  test.TestStorageNode
+    lite   kit.TestFullNode
+    full   kit.TestFullNode
+    miner  kit.TestMiner
    closer jsonrpc.ClientCloser
}

@ -270,24 +267,23 @@ func startNodes(
    // - Connect lite node -> gateway server -> full node
    opts := append(
        // Full node
-        test.OneFull,
+        kit.OneFull,
        // Lite node
-        test.FullNodeOpts{
+        kit.FullNodeOpts{
            Lite: true,
-            Opts: func(nodes []test.TestNode) node.Option {
+            Opts: func(nodes []kit.TestFullNode) node.Option {
                fullNode := nodes[0]

                // Create a gateway server in front of the full node
-                gapiImpl := gateway.NewNode(fullNode, lookbackCap, stateWaitLookbackLimit)
-                _, addr, err := builder.CreateRPCServer(t, map[string]interface{}{
-                    "/rpc/v1": gapiImpl,
-                    "/rpc/v0": api.Wrap(new(v1api.FullNodeStruct), new(v0api.WrapperV1Full), gapiImpl),
-                })
+                gwapi := gateway.NewNode(fullNode, lookbackCap, stateWaitLookbackLimit)
+                handler, err := gateway.Handler(gwapi)
                require.NoError(t, err)

+                srv, _ := kit.CreateRPCServer(t, handler)
+
                // Create a gateway client API that connects to the gateway server
                var gapi api.Gateway
-                gapi, closer, err = client.NewGatewayRPCV1(ctx, addr+"/rpc/v1", nil)
+                gapi, closer, err = client.NewGatewayRPCV1(ctx, "ws://"+srv.Listener.Addr().String()+"/rpc/v1", nil)
                require.NoError(t, err)

                // Provide the gateway API to dependency injection
@ -295,7 +291,7 @@ func startNodes(
            },
        },
    )
-    n, sn := builder.RPCMockSbBuilder(t, opts, test.OneMiner)
+    n, sn := kit.RPCMockMinerBuilder(t, opts, kit.OneMiner)

    full := n[0]
    lite := n[1]
@ -317,14 +313,14 @@ func startNodes(
    require.NoError(t, err)

    // Start mining blocks
-    bm := test.NewBlockMiner(ctx, t, miner, blocktime)
-    bm.MineBlocks()
+    bm := kit.NewBlockMiner(t, miner)
+    bm.MineBlocks(ctx, blocktime)
    t.Cleanup(bm.Stop)

    return &testNodes{lite: lite, full: full, miner: miner, closer: closer}
}

-func sendFunds(ctx context.Context, fromNode test.TestNode, fromAddr address.Address, toAddr address.Address, amt types.BigInt) error {
+func sendFunds(ctx context.Context, fromNode kit.TestFullNode, fromAddr address.Address, toAddr address.Address, amt types.BigInt) error {
    msg := &types.Message{
        From: fromAddr,
        To:   toAddr,
124 itests/kit/blockminer.go Normal file
@ -0,0 +1,124 @@
package kit

import (
    "context"
    "sync"
    "sync/atomic"
    "testing"
    "time"

    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/lotus/miner"
    "github.com/stretchr/testify/require"
)

// BlockMiner is a utility that makes a test miner mine blocks on a timer.
type BlockMiner struct {
    t     *testing.T
    miner TestMiner

    nextNulls int64
    wg        sync.WaitGroup
    cancel    context.CancelFunc
}

func NewBlockMiner(t *testing.T, miner TestMiner) *BlockMiner {
    return &BlockMiner{
        t:      t,
        miner:  miner,
        cancel: func() {},
    }
}

func (bm *BlockMiner) MineBlocks(ctx context.Context, blocktime time.Duration) {
    time.Sleep(time.Second)

    // wrap context in a cancellable context.
    ctx, bm.cancel = context.WithCancel(ctx)

    bm.wg.Add(1)
    go func() {
        defer bm.wg.Done()

        for {
            select {
            case <-time.After(blocktime):
            case <-ctx.Done():
                return
            }

            nulls := atomic.SwapInt64(&bm.nextNulls, 0)
            err := bm.miner.MineOne(ctx, miner.MineReq{
                InjectNulls: abi.ChainEpoch(nulls),
                Done:        func(bool, abi.ChainEpoch, error) {},
            })
            switch {
            case err == nil: // wrap around
            case ctx.Err() != nil: // context fired.
                return
            default: // log error
                bm.t.Error(err)
            }
        }
    }()
}

// InjectNulls injects the specified amount of null rounds in the next
// mining rounds.
func (bm *BlockMiner) InjectNulls(rounds abi.ChainEpoch) {
    atomic.AddInt64(&bm.nextNulls, int64(rounds))
}

func (bm *BlockMiner) MineUntilBlock(ctx context.Context, fn TestFullNode, cb func(abi.ChainEpoch)) {
    for i := 0; i < 1000; i++ {
        var (
            success bool
            err     error
            epoch   abi.ChainEpoch
            wait    = make(chan struct{})
        )

        doneFn := func(win bool, ep abi.ChainEpoch, e error) {
            success = win
            err = e
            epoch = ep
            wait <- struct{}{}
        }

        mineErr := bm.miner.MineOne(ctx, miner.MineReq{Done: doneFn})
        require.NoError(bm.t, mineErr)
        <-wait

        require.NoError(bm.t, err)

        if success {
            // Wait until it shows up on the given full node's ChainHead
            nloops := 50
            for i := 0; i < nloops; i++ {
                ts, err := fn.ChainHead(ctx)
                require.NoError(bm.t, err)

                if ts.Height() == epoch {
                    break
                }

                require.NotEqual(bm.t, i, nloops-1, "block never managed to sync to node")
                time.Sleep(time.Millisecond * 10)
            }

            if cb != nil {
                cb(epoch)
            }
            return
        }
        bm.t.Log("did not mine a block, trying again", i)
    }
    bm.t.Fatal("failed to mine a block 1000 times in a row...")
}

// Stop stops the block miner.
func (bm *BlockMiner) Stop() {
    bm.t.Log("shutting down mining")
    bm.cancel()
    bm.wg.Wait()
}
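Editor's note: BlockMiner is the kit's clock for integration tests — it drives MineOne on a timer and lets a test schedule null rounds. A minimal usage sketch under stated assumptions (the test name, blocktime, and builder choice below are illustrative, not part of the commit):

    func TestExampleMining(t *testing.T) {
        ctx := context.Background()
        _, miners := kit.MockMinerBuilder(t, kit.OneFull, kit.OneMiner)

        bm := kit.NewBlockMiner(t, miners[0])
        bm.MineBlocks(ctx, 50*time.Millisecond) // mine one block per tick
        t.Cleanup(bm.Stop)                      // stop the mining goroutine when the test ends

        bm.InjectNulls(2) // the next mining round skips two epochs
    }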
@ -1,9 +1,10 @@
-package test
+package kit

import (
    "context"
    "fmt"
    "io/ioutil"
+    "math/rand"
    "os"
    "path/filepath"
    "regexp"
@ -11,9 +12,7 @@ import (
    "testing"
    "time"

-    "golang.org/x/xerrors"

-    "github.com/filecoin-project/lotus/api/test"
+    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/build"
    "github.com/filecoin-project/lotus/chain/types"
    "github.com/filecoin-project/specs-actors/v2/actors/builtin"
@ -21,8 +20,8 @@ import (
    lcli "github.com/urfave/cli/v2"
)

-// RunClientTest exercises some of the client CLI commands
-func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode) {
+// RunClientTest exercises some of the Client CLI commands
+func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode TestFullNode) {
    ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
    defer cancel()

@ -30,7 +29,7 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode)
    mockCLI := NewMockCLI(ctx, t, cmds)
    clientCLI := mockCLI.Client(clientNode.ListenAddr)

-    // Get the miner address
+    // Get the Miner address
    addrs, err := clientNode.StateListMiners(ctx, types.EmptyTSK)
    require.NoError(t, err)
    require.Len(t, addrs, 1)
@ -38,13 +37,14 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode)
    minerAddr := addrs[0]
    fmt.Println("Miner:", minerAddr)

-    // client query-ask <miner addr>
+    // client query-ask <Miner addr>
    out := clientCLI.RunCmd("client", "query-ask", minerAddr.String())
    require.Regexp(t, regexp.MustCompile("Ask:"), out)

    // Create a deal (non-interactive)
-    // client deal --start-epoch=<start epoch> <cid> <miner addr> 1000000attofil <duration>
-    res, _, err := test.CreateClientFile(ctx, clientNode, 1, 0)
+    // client deal --start-epoch=<start epoch> <cid> <Miner addr> 1000000attofil <duration>
+    res, _, _, err := CreateImportFile(ctx, clientNode, 1, 0)
+
    require.NoError(t, err)
    startEpoch := fmt.Sprintf("--start-epoch=%d", 2<<12)
    dataCid := res.Root
@ -58,9 +58,9 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode)
    // <cid>
    // <duration> (in days)
    // <miner addr>
-    // "no" (verified client)
+    // "no" (verified Client)
    // "yes" (confirm deal)
-    res, _, err = test.CreateClientFile(ctx, clientNode, 2, 0)
+    res, _, _, err = CreateImportFile(ctx, clientNode, 2, 0)
    require.NoError(t, err)
    dataCid2 := res.Root
    duration = fmt.Sprintf("%d", build.MinDealDuration/builtin.EpochsInDay)
@ -91,16 +91,19 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode)
        }
        dealStatus = parts[3]
        fmt.Println("  Deal status:", dealStatus)
-        if dealComplete(t, dealStatus) {
+
+        st := CategorizeDealState(dealStatus)
+        require.NotEqual(t, TestDealStateFailed, st)
+        if st == TestDealStateComplete {
            break
        }

        time.Sleep(time.Second)
    }

-    // Retrieve the first file from the miner
+    // Retrieve the first file from the Miner
    // client retrieve <cid> <file path>
-    tmpdir, err := ioutil.TempDir(os.TempDir(), "test-cli-client")
+    tmpdir, err := ioutil.TempDir(os.TempDir(), "test-cli-Client")
    require.NoError(t, err)
    path := filepath.Join(tmpdir, "outfile.dat")
    out = clientCLI.RunCmd("client", "retrieve", dataCid.String(), path)
@ -108,13 +111,36 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode)
    require.Regexp(t, regexp.MustCompile("Success"), out)
}

-func dealComplete(t *testing.T, dealStatus string) bool {
-    switch dealStatus {
-    case "StorageDealFailing", "StorageDealError":
-        t.Fatal(xerrors.Errorf("Storage deal failed with status: " + dealStatus))
-    case "StorageDealStaged", "StorageDealAwaitingPreCommit", "StorageDealSealing", "StorageDealActive", "StorageDealExpired", "StorageDealSlashed":
-        return true
+func CreateImportFile(ctx context.Context, client api.FullNode, rseed int, size int) (res *api.ImportRes, path string, data []byte, err error) {
+    data, path, err = createRandomFile(rseed, size)
+    if err != nil {
+        return nil, "", nil, err
    }

-    return false
+    res, err = client.ClientImport(ctx, api.FileRef{Path: path})
+    if err != nil {
+        return nil, "", nil, err
+    }
+    return res, path, data, nil
}

+func createRandomFile(rseed, size int) ([]byte, string, error) {
+    if size == 0 {
+        size = 1600
+    }
+    data := make([]byte, size)
+    rand.New(rand.NewSource(int64(rseed))).Read(data)
+
+    dir, err := ioutil.TempDir(os.TempDir(), "test-make-deal-")
+    if err != nil {
+        return nil, "", err
+    }
+
+    path := filepath.Join(dir, "sourcefile.dat")
+    err = ioutil.WriteFile(path, data, 0644)
+    if err != nil {
+        return nil, "", err
+    }
+
+    return data, path, nil
+}
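Editor's note: CreateImportFile is the common entry point for seeding deal tests — it writes a deterministic pseudo-random file (seeded by rseed; size 0 falls back to the 1600-byte default) and imports it into the given full node. A short sketch of typical use (variable names illustrative):

    res, path, data, err := kit.CreateImportFile(ctx, client, 5, 0)
    require.NoError(t, err)
    t.Logf("imported %s (%d bytes) with root CID %s", path, len(data), res.Root)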
312 itests/kit/deals.go Normal file
@ -0,0 +1,312 @@
package kit

import (
    "bytes"
    "context"
    "fmt"
    "io/ioutil"
    "os"
    "path/filepath"
    "testing"
    "time"

    "github.com/ipfs/go-cid"
    files "github.com/ipfs/go-ipfs-files"
    "github.com/ipld/go-car"
    "github.com/stretchr/testify/require"

    "github.com/filecoin-project/go-fil-markets/storagemarket"
    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/build"
    "github.com/filecoin-project/lotus/chain/types"
    sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
    "github.com/filecoin-project/lotus/node/impl"
    ipld "github.com/ipfs/go-ipld-format"
    dag "github.com/ipfs/go-merkledag"
    dstest "github.com/ipfs/go-merkledag/test"
    unixfile "github.com/ipfs/go-unixfs/file"
)

type DealHarness struct {
    t      *testing.T
    client api.FullNode
    miner  TestMiner
}

// NewDealHarness creates a test harness that contains testing utilities for deals.
func NewDealHarness(t *testing.T, client api.FullNode, miner TestMiner) *DealHarness {
    return &DealHarness{
        t:      t,
        client: client,
        miner:  miner,
    }
}

func (dh *DealHarness) MakeFullDeal(ctx context.Context, rseed int, carExport, fastRet bool, startEpoch abi.ChainEpoch) {
    res, _, data, err := CreateImportFile(ctx, dh.client, rseed, 0)
    if err != nil {
        dh.t.Fatal(err)
    }

    fcid := res.Root
    fmt.Println("FILE CID: ", fcid)

    deal := dh.StartDeal(ctx, fcid, fastRet, startEpoch)

    // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
    time.Sleep(time.Second)
    dh.WaitDealSealed(ctx, deal, false, false, nil)

    // Retrieval
    info, err := dh.client.ClientGetDealInfo(ctx, *deal)
    require.NoError(dh.t, err)

    dh.TestRetrieval(ctx, fcid, &info.PieceCID, carExport, data)
}

func (dh *DealHarness) StartDeal(ctx context.Context, fcid cid.Cid, fastRet bool, startEpoch abi.ChainEpoch) *cid.Cid {
    maddr, err := dh.miner.ActorAddress(ctx)
    if err != nil {
        dh.t.Fatal(err)
    }

    addr, err := dh.client.WalletDefaultAddress(ctx)
    if err != nil {
        dh.t.Fatal(err)
    }
    deal, err := dh.client.ClientStartDeal(ctx, &api.StartDealParams{
        Data: &storagemarket.DataRef{
            TransferType: storagemarket.TTGraphsync,
            Root:         fcid,
        },
        Wallet:            addr,
        Miner:             maddr,
        EpochPrice:        types.NewInt(1000000),
        DealStartEpoch:    startEpoch,
        MinBlocksDuration: uint64(build.MinDealDuration),
        FastRetrieval:     fastRet,
    })
    if err != nil {
        dh.t.Fatalf("%+v", err)
    }
    return deal
}

func (dh *DealHarness) WaitDealSealed(ctx context.Context, deal *cid.Cid, noseal, noSealStart bool, cb func()) {
loop:
    for {
        di, err := dh.client.ClientGetDealInfo(ctx, *deal)
        require.NoError(dh.t, err)

        switch di.State {
        case storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing:
            if noseal {
                return
            }
            if !noSealStart {
                dh.StartSealingWaiting(ctx)
            }
        case storagemarket.StorageDealProposalRejected:
            dh.t.Fatal("deal rejected")
        case storagemarket.StorageDealFailing:
            dh.t.Fatal("deal failed")
        case storagemarket.StorageDealError:
            dh.t.Fatal("deal errored", di.Message)
        case storagemarket.StorageDealActive:
            fmt.Println("COMPLETE", di)
            break loop
        }

        mds, err := dh.miner.MarketListIncompleteDeals(ctx)
        require.NoError(dh.t, err)

        var minerState storagemarket.StorageDealStatus
        for _, md := range mds {
            if md.DealID == di.DealID {
                minerState = md.State
                break
            }
        }

        fmt.Printf("Deal %d state: client:%s provider:%s\n", di.DealID, storagemarket.DealStates[di.State], storagemarket.DealStates[minerState])
        time.Sleep(time.Second / 2)
        if cb != nil {
            cb()
        }
    }
}

func (dh *DealHarness) WaitDealPublished(ctx context.Context, deal *cid.Cid) {
    subCtx, cancel := context.WithCancel(ctx)
    defer cancel()
    updates, err := dh.miner.MarketGetDealUpdates(subCtx)
    if err != nil {
        dh.t.Fatal(err)
    }
    for {
        select {
        case <-ctx.Done():
            dh.t.Fatal("context timeout")
        case di := <-updates:
            if deal.Equals(di.ProposalCid) {
                switch di.State {
                case storagemarket.StorageDealProposalRejected:
                    dh.t.Fatal("deal rejected")
                case storagemarket.StorageDealFailing:
                    dh.t.Fatal("deal failed")
                case storagemarket.StorageDealError:
                    dh.t.Fatal("deal errored", di.Message)
                case storagemarket.StorageDealFinalizing, storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing, storagemarket.StorageDealActive:
                    fmt.Println("COMPLETE", di)
                    return
                }
                fmt.Println("Deal state: ", storagemarket.DealStates[di.State])
            }
        }
    }
}

func (dh *DealHarness) StartSealingWaiting(ctx context.Context) {
    snums, err := dh.miner.SectorsList(ctx)
    require.NoError(dh.t, err)

    for _, snum := range snums {
        si, err := dh.miner.SectorsStatus(ctx, snum, false)
        require.NoError(dh.t, err)

        dh.t.Logf("Sector state: %s", si.State)
        if si.State == api.SectorState(sealing.WaitDeals) {
            require.NoError(dh.t, dh.miner.SectorStartSealing(ctx, snum))
        }

        flushSealingBatches(dh.t, ctx, dh.miner)
    }
}

func (dh *DealHarness) TestRetrieval(ctx context.Context, fcid cid.Cid, piece *cid.Cid, carExport bool, expect []byte) {
    offers, err := dh.client.ClientFindData(ctx, fcid, piece)
    if err != nil {
        dh.t.Fatal(err)
    }

    if len(offers) < 1 {
        dh.t.Fatal("no offers")
    }

    rpath, err := ioutil.TempDir("", "lotus-retrieve-test-")
    if err != nil {
        dh.t.Fatal(err)
    }
    defer os.RemoveAll(rpath) //nolint:errcheck

    caddr, err := dh.client.WalletDefaultAddress(ctx)
    if err != nil {
        dh.t.Fatal(err)
    }

    ref := &api.FileRef{
        Path:  filepath.Join(rpath, "ret"),
        IsCAR: carExport,
    }
    updates, err := dh.client.ClientRetrieveWithEvents(ctx, offers[0].Order(caddr), ref)
    if err != nil {
        dh.t.Fatal(err)
    }
    for update := range updates {
        if update.Err != "" {
            dh.t.Fatalf("retrieval failed: %s", update.Err)
        }
    }

    rdata, err := ioutil.ReadFile(filepath.Join(rpath, "ret"))
    if err != nil {
        dh.t.Fatal(err)
    }

    if carExport {
        rdata = dh.ExtractCarData(ctx, rdata, rpath)
    }

    if !bytes.Equal(rdata, expect) {
        dh.t.Fatal("retrieved data does not match expected data")
    }
}

func (dh *DealHarness) ExtractCarData(ctx context.Context, rdata []byte, rpath string) []byte {
    bserv := dstest.Bserv()
    ch, err := car.LoadCar(bserv.Blockstore(), bytes.NewReader(rdata))
    if err != nil {
        dh.t.Fatal(err)
    }
    b, err := bserv.GetBlock(ctx, ch.Roots[0])
    if err != nil {
        dh.t.Fatal(err)
    }
    nd, err := ipld.Decode(b)
    if err != nil {
        dh.t.Fatal(err)
    }
    dserv := dag.NewDAGService(bserv)
    fil, err := unixfile.NewUnixfsFile(ctx, dserv, nd)
    if err != nil {
        dh.t.Fatal(err)
    }
    outPath := filepath.Join(rpath, "retLoadedCAR")
    if err := files.WriteTo(fil, outPath); err != nil {
        dh.t.Fatal(err)
    }
    rdata, err = ioutil.ReadFile(outPath)
    if err != nil {
        dh.t.Fatal(err)
    }
    return rdata
}

type DealsScaffold struct {
    Ctx        context.Context
    Client     *impl.FullNodeAPI
    Miner      TestMiner
    BlockMiner *BlockMiner
}

func ConnectAndStartMining(t *testing.T, blocktime time.Duration, miner TestMiner, clients ...api.FullNode) *BlockMiner {
    ctx := context.Background()

    for _, c := range clients {
        addrinfo, err := c.NetAddrsListen(ctx)
        if err != nil {
            t.Fatal(err)
        }
        if err := miner.NetConnect(ctx, addrinfo); err != nil {
            t.Fatal(err)
        }
    }

    time.Sleep(time.Second)

    blockMiner := NewBlockMiner(t, miner)
    blockMiner.MineBlocks(ctx, blocktime)
    t.Cleanup(blockMiner.Stop)
    return blockMiner
}

type TestDealState int

const (
    TestDealStateFailed     = TestDealState(-1)
    TestDealStateInProgress = TestDealState(0)
    TestDealStateComplete   = TestDealState(1)
)

// CategorizeDealState categorizes deal states into one of three states:
// Complete, InProgress, Failed.
func CategorizeDealState(dealStatus string) TestDealState {
    switch dealStatus {
    case "StorageDealFailing", "StorageDealError":
        return TestDealStateFailed
    case "StorageDealStaged", "StorageDealAwaitingPreCommit", "StorageDealSealing", "StorageDealActive", "StorageDealExpired", "StorageDealSlashed":
        return TestDealStateComplete
    }
    return TestDealStateInProgress
}
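Editor's note: together with ConnectAndStartMining, DealHarness covers the full deal lifecycle — import, ClientStartDeal, sealing, publication, and retrieval with a byte-for-byte comparison. A minimal end-to-end sketch built from the helpers above (test name and blocktime are illustrative):

    func TestExampleDealCycle(t *testing.T) {
        fulls, miners := kit.MockMinerBuilder(t, kit.OneFull, kit.OneMiner)
        client, miner := fulls[0].FullNode.(*impl.FullNodeAPI), miners[0]

        kit.ConnectAndStartMining(t, 50*time.Millisecond, miner, client)

        dh := kit.NewDealHarness(t, client, miner)
        // seed 6, no CAR export, no fast retrieval, default start epoch
        dh.MakeFullDeal(context.Background(), 6, false, false, 0)
    }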
39 itests/kit/funds.go Normal file
@ -0,0 +1,39 @@
package kit

import (
    "context"
    "testing"

    "github.com/filecoin-project/go-state-types/abi"

    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/chain/types"
)

// SendFunds sends funds from the default wallet of the specified sender node
// to the recipient address.
func SendFunds(ctx context.Context, t *testing.T, sender TestFullNode, recipient address.Address, amount abi.TokenAmount) {
    senderAddr, err := sender.WalletDefaultAddress(ctx)
    if err != nil {
        t.Fatal(err)
    }

    msg := &types.Message{
        From:  senderAddr,
        To:    recipient,
        Value: amount,
    }

    sm, err := sender.MpoolPushMessage(ctx, msg, nil)
    if err != nil {
        t.Fatal(err)
    }
    res, err := sender.StateWaitMsg(ctx, sm.Cid(), 3, api.LookbackNoLimit, true)
    if err != nil {
        t.Fatal(err)
    }
    if res.Receipt.ExitCode != 0 {
        t.Fatal("did not successfully send money")
    }
}
32 itests/kit/init.go Normal file
@ -0,0 +1,32 @@
package kit

import (
    "fmt"
    "os"
    "strings"

    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/lotus/build"
    "github.com/filecoin-project/lotus/chain/actors/policy"
    logging "github.com/ipfs/go-log/v2"
)

func init() {
    bin := os.Args[0]
    if !strings.HasSuffix(bin, ".test") {
        panic("package itests/kit must only be imported from tests")
    }

    _ = logging.SetLogLevel("*", "INFO")

    policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
    policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
    policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))

    err := os.Setenv("BELLMAN_NO_GPU", "1")
    if err != nil {
        panic(fmt.Sprintf("failed to set BELLMAN_NO_GPU env variable: %s", err))
    }
    build.InsecurePoStValidation = true
}
19 itests/kit/log.go Normal file
@ -0,0 +1,19 @@
package kit

import (
    "github.com/filecoin-project/lotus/lib/lotuslog"
    logging "github.com/ipfs/go-log/v2"
)

func QuietMiningLogs() {
    lotuslog.SetupLogLevels()

    _ = logging.SetLogLevel("miner", "ERROR")
    _ = logging.SetLogLevel("chainstore", "ERROR")
    _ = logging.SetLogLevel("chain", "ERROR")
    _ = logging.SetLogLevel("sub", "ERROR")
    _ = logging.SetLogLevel("storageminer", "ERROR")
    _ = logging.SetLogLevel("pubsub", "ERROR")
    _ = logging.SetLogLevel("gen", "ERROR")
    _ = logging.SetLogLevel("dht/RtRefreshManager", "ERROR")
}
@ -1,4 +1,4 @@
-package test
+package kit

import (
    "bytes"

@ -1,4 +1,4 @@
-package test
+package kit

import (
    "context"
@ -9,12 +9,10 @@ import (
    "github.com/filecoin-project/lotus/chain/types"

    "github.com/filecoin-project/go-address"
-    "github.com/filecoin-project/lotus/api/test"
-    test2 "github.com/filecoin-project/lotus/node/test"
)

-func StartOneNodeOneMiner(ctx context.Context, t *testing.T, blocktime time.Duration) (test.TestNode, address.Address) {
-    n, sn := test2.RPCMockSbBuilder(t, test.OneFull, test.OneMiner)
+func StartOneNodeOneMiner(ctx context.Context, t *testing.T, blocktime time.Duration) (TestFullNode, address.Address) {
+    n, sn := RPCMockMinerBuilder(t, OneFull, OneMiner)

    full := n[0]
    miner := sn[0]
@ -30,8 +28,8 @@ func StartOneNodeOneMiner(ctx context.Context, t *testing.T, blocktime time.Dura
    }

    // Start mining blocks
-    bm := test.NewBlockMiner(ctx, t, miner, blocktime)
-    bm.MineBlocks()
+    bm := NewBlockMiner(t, miner)
+    bm.MineBlocks(ctx, blocktime)
    t.Cleanup(bm.Stop)

    // Get the full node's wallet address
@ -44,8 +42,8 @@ func StartOneNodeOneMiner(ctx context.Context, t *testing.T, blocktime time.Dura
    return full, fullAddr
}

-func StartTwoNodesOneMiner(ctx context.Context, t *testing.T, blocktime time.Duration) ([]test.TestNode, []address.Address) {
-    n, sn := test2.RPCMockSbBuilder(t, test.TwoFull, test.OneMiner)
+func StartTwoNodesOneMiner(ctx context.Context, t *testing.T, blocktime time.Duration) ([]TestFullNode, []address.Address) {
+    n, sn := RPCMockMinerBuilder(t, TwoFull, OneMiner)

    fullNode1 := n[0]
    fullNode2 := n[1]
@ -66,8 +64,8 @@ func StartTwoNodesOneMiner(ctx context.Context, t *testing.T, blocktime time.Dur
    }

    // Start mining blocks
-    bm := test.NewBlockMiner(ctx, t, miner, blocktime)
-    bm.MineBlocks()
+    bm := NewBlockMiner(t, miner)
+    bm.MineBlocks(ctx, blocktime)
    t.Cleanup(bm.Stop)

    // Send some funds to register the second node
@ -76,7 +74,7 @@ func StartTwoNodesOneMiner(ctx context.Context, t *testing.T, blocktime time.Dur
        t.Fatal(err)
    }

-    test.SendFunds(ctx, t, fullNode1, fullNodeAddr2, abi.NewTokenAmount(1e18))
+    SendFunds(ctx, t, fullNode1, fullNodeAddr2, abi.NewTokenAmount(1e18))

    // Get the first node's address
    fullNodeAddr1, err := fullNode1.WalletDefaultAddress(ctx)
@ -1,32 +1,25 @@
-package test
+package kit

import (
    "bytes"
    "context"
    "crypto/rand"
    "io/ioutil"
-    "net"
+    "net/http"
    "net/http/httptest"
-    "strings"
    "sync"
    "testing"
    "time"

    "github.com/filecoin-project/go-state-types/network"

-    "github.com/gorilla/mux"
-    "golang.org/x/xerrors"

    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/go-jsonrpc"
    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/go-state-types/big"
    "github.com/filecoin-project/go-state-types/exitcode"
    "github.com/filecoin-project/go-storedcounter"
    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/api/client"
-    "github.com/filecoin-project/lotus/api/test"
    "github.com/filecoin-project/lotus/api/v0api"
    "github.com/filecoin-project/lotus/api/v1api"
    "github.com/filecoin-project/lotus/build"
    "github.com/filecoin-project/lotus/chain"
@ -57,6 +50,7 @@ import (
    "github.com/libp2p/go-libp2p-core/peer"
    mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
    "github.com/multiformats/go-multiaddr"
+    manet "github.com/multiformats/go-multiaddr/net"
    "github.com/stretchr/testify/require"
)

@ -67,7 +61,7 @@ func init() {
    messagepool.HeadChangeCoalesceMergeInterval = 100 * time.Nanosecond
}

-func CreateTestStorageNode(ctx context.Context, t *testing.T, waddr address.Address, act address.Address, pk crypto.PrivKey, tnd test.TestNode, mn mocknet.Mocknet, opts node.Option) test.TestStorageNode {
+func CreateTestStorageNode(ctx context.Context, t *testing.T, waddr address.Address, act address.Address, pk crypto.PrivKey, tnd TestFullNode, mn mocknet.Mocknet, opts node.Option) TestMiner {
    r := repo.NewMemory(nil)

    lr, err := r.Lock(repo.StorageMiner)
@ -91,7 +85,7 @@ func CreateTestStorageNode(ctx context.Context, t *testing.T, waddr address.Addr
    require.NoError(t, err)

    nic := storedcounter.New(ds, datastore.NewKey(modules.StorageCounterDSPrefix))
-    for i := 0; i < test.GenesisPreseals; i++ {
+    for i := 0; i < GenesisPreseals; i++ {
        _, err := nic.Next()
        require.NoError(t, err)
    }
@ -142,10 +136,10 @@ func CreateTestStorageNode(ctx context.Context, t *testing.T, waddr address.Addr
    t.Cleanup(func() { _ = stop(context.Background()) })

    /*// Bootstrap with full node
-    remoteAddrs, err := tnd.NetAddrsListen(ctx)
+    remoteAddrs, err := tnd.NetAddrsListen(Ctx)
    require.NoError(t, err)

-    err = minerapi.NetConnect(ctx, remoteAddrs)
+    err = minerapi.NetConnect(Ctx, remoteAddrs)
    require.NoError(t, err)*/
    mineOne := func(ctx context.Context, req lotusminer.MineReq) error {
        select {
@ -156,11 +150,11 @@ func CreateTestStorageNode(ctx context.Context, t *testing.T, waddr address.Addr
        }
    }

-    return test.TestStorageNode{StorageMiner: minerapi, MineOne: mineOne, Stop: stop}
+    return TestMiner{StorageMiner: minerapi, MineOne: mineOne, Stop: stop}
}

-func storageBuilder(parentNode test.TestNode, mn mocknet.Mocknet, opts node.Option) test.StorageBuilder {
-    return func(ctx context.Context, t *testing.T, spt abi.RegisteredSealProof, owner address.Address) test.TestStorageNode {
+func storageBuilder(parentNode TestFullNode, mn mocknet.Mocknet, opts node.Option) MinerBuilder {
+    return func(ctx context.Context, t *testing.T, spt abi.RegisteredSealProof, owner address.Address) TestMiner {
        pk, _, err := crypto.GenerateEd25519Key(rand.Reader)
        require.NoError(t, err)

@ -202,31 +196,32 @@ func storageBuilder(parentNode test.TestNode, mn mocknet.Mocknet, opts node.Opti
    }
}

-func Builder(t *testing.T, fullOpts []test.FullNodeOpts, storage []test.StorageMiner) ([]test.TestNode, []test.TestStorageNode) {
+func Builder(t *testing.T, fullOpts []FullNodeOpts, storage []StorageMiner) ([]TestFullNode, []TestMiner) {
    return mockBuilderOpts(t, fullOpts, storage, false)
}

-func MockSbBuilder(t *testing.T, fullOpts []test.FullNodeOpts, storage []test.StorageMiner) ([]test.TestNode, []test.TestStorageNode) {
-    return mockSbBuilderOpts(t, fullOpts, storage, false)
-}
-
-func RPCBuilder(t *testing.T, fullOpts []test.FullNodeOpts, storage []test.StorageMiner) ([]test.TestNode, []test.TestStorageNode) {
+func RPCBuilder(t *testing.T, fullOpts []FullNodeOpts, storage []StorageMiner) ([]TestFullNode, []TestMiner) {
    return mockBuilderOpts(t, fullOpts, storage, true)
}

-func RPCMockSbBuilder(t *testing.T, fullOpts []test.FullNodeOpts, storage []test.StorageMiner) ([]test.TestNode, []test.TestStorageNode) {
-    return mockSbBuilderOpts(t, fullOpts, storage, true)
+func MockMinerBuilder(t *testing.T, fullOpts []FullNodeOpts, storage []StorageMiner) ([]TestFullNode, []TestMiner) {
+    return mockMinerBuilderOpts(t, fullOpts, storage, false)
}

-func mockBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []test.StorageMiner, rpc bool) ([]test.TestNode, []test.TestStorageNode) {
+func RPCMockMinerBuilder(t *testing.T, fullOpts []FullNodeOpts, storage []StorageMiner) ([]TestFullNode, []TestMiner) {
+    return mockMinerBuilderOpts(t, fullOpts, storage, true)
+}
+
+func mockBuilderOpts(t *testing.T, fullOpts []FullNodeOpts, storage []StorageMiner, rpc bool) ([]TestFullNode, []TestMiner) {
    ctx, cancel := context.WithCancel(context.Background())
    t.Cleanup(cancel)

    mn := mocknet.New(ctx)

-    fulls := make([]test.TestNode, len(fullOpts))
-    storers := make([]test.TestStorageNode, len(storage))
+    fulls := make([]TestFullNode, len(fullOpts))
+    miners := make([]TestMiner, len(storage))

    // *****
    pk, _, err := crypto.GenerateEd25519Key(rand.Reader)
    require.NoError(t, err)

@ -238,13 +233,17 @@ func mockBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []test.
    if len(storage) > 1 {
        panic("need more peer IDs")
    }
    // *****

    // PRESEAL SECTION, TRY TO REPLACE WITH BETTER IN THE FUTURE
    // TODO: would be great if there was a better way to fake the preseals

-    var genms []genesis.Miner
-    var maddrs []address.Address
-    var genaccs []genesis.Actor
-    var keys []*wallet.Key
+    var (
+        genms   []genesis.Miner
+        maddrs  []address.Address
+        genaccs []genesis.Actor
+        keys    []*wallet.Key
+    )

    var presealDirs []string
    for i := 0; i < len(storage); i++ {
@ -256,7 +255,7 @@ func mockBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []test.
        if err != nil {
            t.Fatal(err)
        }
-        genm, k, err := seed.PreSeal(maddr, abi.RegisteredSealProof_StackedDrg2KiBV1, 0, test.GenesisPreseals, tdir, []byte("make genesis mem random"), nil, true)
+        genm, k, err := seed.PreSeal(maddr, abi.RegisteredSealProof_StackedDrg2KiBV1, 0, GenesisPreseals, tdir, []byte("make genesis mem random"), nil, true)
        if err != nil {
            t.Fatal(err)
        }
@ -364,17 +363,17 @@ func mockBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []test.
        if opts == nil {
            opts = node.Options()
        }
-        storers[i] = CreateTestStorageNode(ctx, t, wa, genMiner, pk, f, mn, opts)
-        if err := storers[i].StorageAddLocal(ctx, presealDirs[i]); err != nil {
+        miners[i] = CreateTestStorageNode(ctx, t, wa, genMiner, pk, f, mn, opts)
+        if err := miners[i].StorageAddLocal(ctx, presealDirs[i]); err != nil {
            t.Fatalf("%+v", err)
        }
        /*
-            sma := storers[i].StorageMiner.(*impl.StorageMinerAPI)
+            sma := miners[i].StorageMiner.(*impl.StorageMinerAPI)

            psd := presealDirs[i]
        */
        if rpc {
-            storers[i] = storerRpc(t, storers[i])
+            miners[i] = storerRpc(t, miners[i])
        }
    }

@ -382,44 +381,49 @@ func mockBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []test.
        t.Fatal(err)
    }

-    if len(storers) > 0 {
+    if len(miners) > 0 {
        // Mine 2 blocks to setup some CE stuff in some actors
        var wait sync.Mutex
        wait.Lock()

-        test.MineUntilBlock(ctx, t, fulls[0], storers[0], func(epoch abi.ChainEpoch) {
+        bm := NewBlockMiner(t, miners[0])
+        t.Cleanup(bm.Stop)
+
+        bm.MineUntilBlock(ctx, fulls[0], func(epoch abi.ChainEpoch) {
            wait.Unlock()
        })

        wait.Lock()
-        test.MineUntilBlock(ctx, t, fulls[0], storers[0], func(epoch abi.ChainEpoch) {
+        bm.MineUntilBlock(ctx, fulls[0], func(epoch abi.ChainEpoch) {
            wait.Unlock()
        })
        wait.Lock()
    }

-    return fulls, storers
+    return fulls, miners
}

-func mockSbBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []test.StorageMiner, rpc bool) ([]test.TestNode, []test.TestStorageNode) {
+func mockMinerBuilderOpts(t *testing.T, fullOpts []FullNodeOpts, storage []StorageMiner, rpc bool) ([]TestFullNode, []TestMiner) {
    ctx, cancel := context.WithCancel(context.Background())
    t.Cleanup(cancel)

    mn := mocknet.New(ctx)

-    fulls := make([]test.TestNode, len(fullOpts))
-    storers := make([]test.TestStorageNode, len(storage))
+    fulls := make([]TestFullNode, len(fullOpts))
+    miners := make([]TestMiner, len(storage))

    var genbuf bytes.Buffer

    // PRESEAL SECTION, TRY TO REPLACE WITH BETTER IN THE FUTURE
    // TODO: would be great if there was a better way to fake the preseals

-    var genms []genesis.Miner
-    var genaccs []genesis.Actor
-    var maddrs []address.Address
-    var keys []*wallet.Key
-    var pidKeys []crypto.PrivKey
+    var (
+        genms   []genesis.Miner
+        genaccs []genesis.Actor
+        maddrs  []address.Address
+        keys    []*wallet.Key
+        pidKeys []crypto.PrivKey
+    )
    for i := 0; i < len(storage); i++ {
        maddr, err := address.NewIDAddress(genesis2.MinerStart + uint64(i))
        if err != nil {
@ -427,8 +431,8 @@ func mockSbBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []tes
        }

        preseals := storage[i].Preseal
-        if preseals == test.PresealGenesis {
-            preseals = test.GenesisPreseals
+        if preseals == PresealGenesis {
+            preseals = GenesisPreseals
        }

        genm, k, err := mockstorage.PreSeal(abi.RegisteredSealProof_StackedDrg2KiBV1, maddr, preseals)
@ -570,7 +574,7 @@ func mockSbBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []tes
        if opts == nil {
            opts = node.Options()
        }
-        storers[i] = CreateTestStorageNode(ctx, t, genms[i].Worker, maddrs[i], pidKeys[i], f, mn, node.Options(
+        miners[i] = CreateTestStorageNode(ctx, t, genms[i].Worker, maddrs[i], pidKeys[i], f, mn, node.Options(
            node.Override(new(*mock.SectorMgr), func() (*mock.SectorMgr, error) {
                return mock.NewMockSectorMgr(sectors), nil
            }),
@ -586,7 +590,7 @@ func mockSbBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []tes
        ))

        if rpc {
-            storers[i] = storerRpc(t, storers[i])
+            miners[i] = storerRpc(t, miners[i])
        }
    }

@ -594,99 +598,63 @@ func mockSbBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []tes
        t.Fatal(err)
    }

-    if len(storers) > 0 {
+    bm := NewBlockMiner(t, miners[0])
+
+    if len(miners) > 0 {
        // Mine 2 blocks to setup some CE stuff in some actors
        var wait sync.Mutex
        wait.Lock()

-        test.MineUntilBlock(ctx, t, fulls[0], storers[0], func(abi.ChainEpoch) {
+        bm.MineUntilBlock(ctx, fulls[0], func(abi.ChainEpoch) {
            wait.Unlock()
        })
        wait.Lock()
-        test.MineUntilBlock(ctx, t, fulls[0], storers[0], func(abi.ChainEpoch) {
+        bm.MineUntilBlock(ctx, fulls[0], func(abi.ChainEpoch) {
            wait.Unlock()
        })
        wait.Lock()
    }

-    return fulls, storers
+    return fulls, miners
}

-func fullRpc(t *testing.T, nd test.TestNode) test.TestNode {
-    ma, listenAddr, err := CreateRPCServer(t, map[string]interface{}{
-        "/rpc/v1": nd,
-        "/rpc/v0": &v0api.WrapperV1Full{FullNode: nd},
-    })
-    require.NoError(t, err)
-
-    var stop func()
-    var full test.TestNode
-    full.FullNode, stop, err = client.NewFullNodeRPCV1(context.Background(), listenAddr+"/rpc/v1", nil)
-    require.NoError(t, err)
-    t.Cleanup(stop)
-
-    full.ListenAddr = ma
-    return full
-}
-
-func storerRpc(t *testing.T, nd test.TestStorageNode) test.TestStorageNode {
-    ma, listenAddr, err := CreateRPCServer(t, map[string]interface{}{
-        "/rpc/v0": nd,
-    })
-    require.NoError(t, err)
-
-    var stop func()
-    var storer test.TestStorageNode
-    storer.StorageMiner, stop, err = client.NewStorageMinerRPCV0(context.Background(), listenAddr+"/rpc/v0", nil)
-    require.NoError(t, err)
-    t.Cleanup(stop)
-
-    storer.ListenAddr = ma
-    storer.MineOne = nd.MineOne
-    return storer
-}
-
-func CreateRPCServer(t *testing.T, handlers map[string]interface{}) (multiaddr.Multiaddr, string, error) {
-    m := mux.NewRouter()
-    for path, handler := range handlers {
-        rpcServer := jsonrpc.NewServer()
-        rpcServer.Register("Filecoin", handler)
-        m.Handle(path, rpcServer)
-    }
-    testServ := httptest.NewServer(m) // todo: close
+func CreateRPCServer(t *testing.T, handler http.Handler) (*httptest.Server, multiaddr.Multiaddr) {
+    testServ := httptest.NewServer(handler)
    t.Cleanup(testServ.Close)
+    t.Cleanup(testServ.CloseClientConnections)

    addr := testServ.Listener.Addr()
-    listenAddr := "ws://" + addr.String()
-    ma, err := parseWSMultiAddr(addr)
-    if err != nil {
-        return nil, "", err
-    }
-    return ma, listenAddr, err
+    maddr, err := manet.FromNetAddr(addr)
+    require.NoError(t, err)
+    return testServ, maddr
}

-func parseWSMultiAddr(addr net.Addr) (multiaddr.Multiaddr, error) {
-    host, port, err := net.SplitHostPort(addr.String())
-    if err != nil {
-        return nil, err
-    }
-    ma, err := multiaddr.NewMultiaddr("/ip4/" + host + "/" + addr.Network() + "/" + port + "/ws")
-    if err != nil {
-        return nil, err
-    }
-    return ma, nil
+func fullRpc(t *testing.T, nd TestFullNode) TestFullNode {
+    handler, err := node.FullNodeHandler(nd.FullNode, false)
+    require.NoError(t, err)
+
+    srv, maddr := CreateRPCServer(t, handler)
+
+    var ret TestFullNode
+    cl, stop, err := client.NewFullNodeRPCV1(context.Background(), "ws://"+srv.Listener.Addr().String()+"/rpc/v1", nil)
+    require.NoError(t, err)
+    t.Cleanup(stop)
+    ret.ListenAddr, ret.FullNode = maddr, cl
+
+    return ret
}

-func WSMultiAddrToString(addr multiaddr.Multiaddr) (string, error) {
-    parts := strings.Split(addr.String(), "/")
-    if len(parts) != 6 || parts[0] != "" {
-        return "", xerrors.Errorf("Malformed ws multiaddr %s", addr)
-    }
+func storerRpc(t *testing.T, nd TestMiner) TestMiner {
+    handler, err := node.MinerHandler(nd.StorageMiner, false)
+    require.NoError(t, err)

-    host := parts[2]
-    port := parts[4]
-    proto := parts[5]
+    srv, maddr := CreateRPCServer(t, handler)

-    return proto + "://" + host + ":" + port + "/rpc/v0", nil
+    var ret TestMiner
+    cl, stop, err := client.NewStorageMinerRPCV0(context.Background(), "ws://"+srv.Listener.Addr().String()+"/rpc/v0", nil)
+    require.NoError(t, err)
+    t.Cleanup(stop)
+
+    ret.ListenAddr, ret.StorageMiner, ret.MineOne = maddr, cl, nd.MineOne
+    return ret
}
153 itests/kit/nodes.go Normal file
@ -0,0 +1,153 @@
package kit

import (
    "context"
    "testing"

    "github.com/multiformats/go-multiaddr"

    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/go-state-types/network"

    lapi "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/api/v1api"
    "github.com/filecoin-project/lotus/chain/stmgr"
    "github.com/filecoin-project/lotus/miner"
    "github.com/filecoin-project/lotus/node"
)

type MinerBuilder func(context.Context, *testing.T, abi.RegisteredSealProof, address.Address) TestMiner

type TestFullNode struct {
    v1api.FullNode
    // ListenAddr is the address on which an API server is listening, if an
    // API server is created for this Node
    ListenAddr multiaddr.Multiaddr

    Stb MinerBuilder
}

type TestMiner struct {
    lapi.StorageMiner
    // ListenAddr is the address on which an API server is listening, if an
    // API server is created for this Node
    ListenAddr multiaddr.Multiaddr

    MineOne func(context.Context, miner.MineReq) error
    Stop    func(context.Context) error
}

var PresealGenesis = -1

const GenesisPreseals = 2

const TestSpt = abi.RegisteredSealProof_StackedDrg2KiBV1_1

// Options for setting up a mock storage Miner
type StorageMiner struct {
    Full    int
    Opts    node.Option
    Preseal int
}

type OptionGenerator func([]TestFullNode) node.Option

// Options for setting up a mock full node
type FullNodeOpts struct {
    Lite bool            // run node in "lite" mode
    Opts OptionGenerator // generate dependency injection options
}

// APIBuilder is a function which is invoked in test suite to provide
// test nodes and networks
//
// fullOpts array defines options for each full node
// storage array defines storage nodes, numbers in the array specify full node
// index the storage node 'belongs' to
type APIBuilder func(t *testing.T, full []FullNodeOpts, storage []StorageMiner) ([]TestFullNode, []TestMiner)

func DefaultFullOpts(nFull int) []FullNodeOpts {
    full := make([]FullNodeOpts, nFull)
    for i := range full {
        full[i] = FullNodeOpts{
            Opts: func(nodes []TestFullNode) node.Option {
                return node.Options()
            },
        }
    }
    return full
}

var OneMiner = []StorageMiner{{Full: 0, Preseal: PresealGenesis}}
var OneFull = DefaultFullOpts(1)
var TwoFull = DefaultFullOpts(2)

var FullNodeWithLatestActorsAt = func(upgradeHeight abi.ChainEpoch) FullNodeOpts {
    // Attention: Update this when introducing new actor versions or your tests will be sad
    return FullNodeWithNetworkUpgradeAt(network.Version13, upgradeHeight)
}

var FullNodeWithNetworkUpgradeAt = func(version network.Version, upgradeHeight abi.ChainEpoch) FullNodeOpts {
    fullSchedule := stmgr.UpgradeSchedule{{
        // prepare for upgrade.
        Network:   network.Version9,
        Height:    1,
        Migration: stmgr.UpgradeActorsV2,
    }, {
        Network:   network.Version10,
        Height:    2,
        Migration: stmgr.UpgradeActorsV3,
    }, {
        Network:   network.Version12,
        Height:    3,
        Migration: stmgr.UpgradeActorsV4,
    }, {
        Network:   network.Version13,
        Height:    4,
        Migration: stmgr.UpgradeActorsV5,
    }}

    schedule := stmgr.UpgradeSchedule{}
    for _, upgrade := range fullSchedule {
        if upgrade.Network > version {
            break
        }

        schedule = append(schedule, upgrade)
    }

    if upgradeHeight > 0 {
        schedule[len(schedule)-1].Height = upgradeHeight
    }

    return FullNodeOpts{
        Opts: func(nodes []TestFullNode) node.Option {
            return node.Override(new(stmgr.UpgradeSchedule), schedule)
        },
    }
}

var FullNodeWithSDRAt = func(calico, persian abi.ChainEpoch) FullNodeOpts {
    return FullNodeOpts{
        Opts: func(nodes []TestFullNode) node.Option {
            return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
                Network:   network.Version6,
                Height:    1,
                Migration: stmgr.UpgradeActorsV2,
            }, {
                Network:   network.Version7,
                Height:    calico,
                Migration: stmgr.UpgradeCalico,
            }, {
                Network:   network.Version8,
                Height:    persian,
            }})
        },
    }
}

var MineNext = miner.MineReq{
    InjectNulls: 0,
    Done:        func(bool, abi.ChainEpoch, error) {},
}
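Editor's note: FullNodeOpts slot straight into the builders, so a test can opt into a specific upgrade schedule per node. A hedged sketch requesting a node that reaches the latest actors version at epoch 100 (the height is illustrative):

    fulls, miners := kit.MockMinerBuilder(t,
        []kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(100)},
        kit.OneMiner)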
88 itests/kit/pledge.go Normal file
@ -0,0 +1,88 @@
package kit

import (
    "context"
    "fmt"
    "strings"
    "testing"
    "time"

    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/build"
    sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
    "github.com/stretchr/testify/require"
)

func PledgeSectors(t *testing.T, ctx context.Context, miner TestMiner, n, existing int, blockNotif <-chan struct{}) { //nolint:golint
    toCheck := StartPledge(t, ctx, miner, n, existing, blockNotif)

    for len(toCheck) > 0 {
        flushSealingBatches(t, ctx, miner)

        states := map[api.SectorState]int{}
        for n := range toCheck {
            st, err := miner.SectorsStatus(ctx, n, false)
            require.NoError(t, err)
            states[st.State]++
            if st.State == api.SectorState(sealing.Proving) {
                delete(toCheck, n)
            }
            if strings.Contains(string(st.State), "Fail") {
                t.Fatal("sector in a failed state", st.State)
            }
        }

        build.Clock.Sleep(100 * time.Millisecond)
        fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states)
    }
}

func flushSealingBatches(t *testing.T, ctx context.Context, miner TestMiner) { //nolint:golint
    pcb, err := miner.SectorPreCommitFlush(ctx)
    require.NoError(t, err)
    if pcb != nil {
        fmt.Printf("PRECOMMIT BATCH: %+v\n", pcb)
    }

    cb, err := miner.SectorCommitFlush(ctx)
    require.NoError(t, err)
    if cb != nil {
        fmt.Printf("COMMIT BATCH: %+v\n", cb)
    }
}

func StartPledge(t *testing.T, ctx context.Context, miner TestMiner, n, existing int, blockNotif <-chan struct{}) map[abi.SectorNumber]struct{} { //nolint:golint
    for i := 0; i < n; i++ {
        if i%3 == 0 && blockNotif != nil {
            <-blockNotif
            t.Log("WAIT")
        }
        t.Logf("PLEDGING %d", i)
        _, err := miner.PledgeSector(ctx)
        require.NoError(t, err)
    }

    for {
        s, err := miner.SectorsList(ctx) // Note - the test builder doesn't import genesis sectors into FSM
        require.NoError(t, err)
        fmt.Printf("Sectors: %d\n", len(s))
        if len(s) >= n+existing {
            break
        }

        build.Clock.Sleep(100 * time.Millisecond)
    }

    fmt.Printf("All sectors in FSM\n")

    s, err := miner.SectorsList(ctx)
    require.NoError(t, err)

    toCheck := map[abi.SectorNumber]struct{}{}
    for _, number := range s {
        toCheck[number] = struct{}{}
    }

    return toCheck
}
|
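// Usage sketch: PledgeSectors blocks until all n new sectors reach Proving;
// passing a channel gates every third pledge on an external event, as the SDR
// upgrade test later in this diff does with a per-round notification channel.
//
//	pledge := make(chan struct{})
//	kit.PledgeSectors(t, ctx, miner, 9, 0, pledge)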
@@ -1,24 +1,37 @@
-package test
+package itests

 import (
 	"context"
 	"fmt"
 	"os"
 	"regexp"
 	"strings"
 	"testing"
 	"time"

 	"github.com/filecoin-project/go-address"
-	"github.com/filecoin-project/lotus/api/test"
 	"github.com/filecoin-project/lotus/chain/types"
+	"github.com/filecoin-project/lotus/cli"
+	"github.com/filecoin-project/lotus/itests/kit"
 	"github.com/stretchr/testify/require"
 	lcli "github.com/urfave/cli/v2"
 )

-func RunMultisigTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode) {
-	ctx := context.Background()
+// TestMultisig does a basic test to exercise the multisig CLI commands
+func TestMultisig(t *testing.T) {
+	_ = os.Setenv("BELLMAN_NO_GPU", "1")
+	kit.QuietMiningLogs()
+
+	blocktime := 5 * time.Millisecond
+	ctx := context.Background()
+	clientNode, _ := kit.StartOneNodeOneMiner(ctx, t, blocktime)
+
+	runMultisigTests(t, clientNode)
+}
+
+func runMultisigTests(t *testing.T, clientNode kit.TestFullNode) {
 	// Create mock CLI
-	mockCLI := NewMockCLI(ctx, t, cmds)
+	ctx := context.Background()
+	mockCLI := kit.NewMockCLI(ctx, t, cli.Commands)
 	clientCLI := mockCLI.Client(clientNode.ListenAddr)

 	// Create some wallets on the node to use for testing multisig
@@ -29,7 +42,7 @@ func RunMultisigTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNod

 		walletAddrs = append(walletAddrs, addr)

-		test.SendFunds(ctx, t, clientNode, addr, types.NewInt(1e15))
+		kit.SendFunds(ctx, t, clientNode, addr, types.NewInt(1e15))
 	}

 	// Create an msig with three of the addresses and threshold of two sigs
@@ -1,14 +1,14 @@
-package test
+package itests

 import (
 	"context"
 	"fmt"
-	"sync/atomic"
 	"testing"
 	"time"

 	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/go-state-types/big"
+	"github.com/filecoin-project/lotus/itests/kit"
 	"github.com/ipfs/go-cid"

 	"github.com/filecoin-project/go-address"
@@ -26,9 +26,11 @@ import (
 	"github.com/filecoin-project/lotus/chain/types"
 )

-func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) {
+func TestPaymentChannelsAPI(t *testing.T) {
+	kit.QuietMiningLogs()
+
 	ctx := context.Background()
-	n, sn := b(t, TwoFull, OneMiner)
+	n, sn := kit.MockMinerBuilder(t, kit.TwoFull, kit.OneMiner)

 	paymentCreator := n[0]
 	paymentReceiver := n[1]
@@ -49,8 +51,9 @@ func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) {
 	}

 	// start mining blocks
-	bm := NewBlockMiner(ctx, t, miner, blocktime)
-	bm.MineBlocks()
+	bm := kit.NewBlockMiner(t, miner)
+	bm.MineBlocks(ctx, 5*time.Millisecond)
+	t.Cleanup(bm.Stop)

 	// send some funds to register the receiver
 	receiverAddr, err := paymentReceiver.WalletNew(ctx, types.KTSecp256k1)
@@ -58,7 +61,7 @@ func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) {
 		t.Fatal(err)
 	}

-	SendFunds(ctx, t, paymentCreator, receiverAddr, abi.NewTokenAmount(1e18))
+	kit.SendFunds(ctx, t, paymentCreator, receiverAddr, abi.NewTokenAmount(1e18))

 	// setup the payment channel
 	createrAddr, err := paymentCreator.WalletDefaultAddress(ctx)
@@ -173,7 +176,7 @@ func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) {

 	select {
 	case <-finished:
-	case <-time.After(time.Second):
+	case <-time.After(10 * time.Second):
 		t.Fatal("Timed out waiting for receiver to submit vouchers")
 	}

@@ -265,7 +268,7 @@ func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) {
 	bm.Stop()
 }

-func waitForBlocks(ctx context.Context, t *testing.T, bm *BlockMiner, paymentReceiver TestNode, receiverAddr address.Address, count int) {
+func waitForBlocks(ctx context.Context, t *testing.T, bm *kit.BlockMiner, paymentReceiver kit.TestFullNode, receiverAddr address.Address, count int) {
 	// We need to add null blocks in batches, if we add too many the chain can't sync
 	batchSize := 60
 	for i := 0; i < count; i += batchSize {
@@ -274,8 +277,8 @@ func waitForBlocks(ctx context.Context, t *testing.T, bm *BlockMiner, paymentRec
 			size = count - i
 		}

-		// Add a batch of null blocks
-		atomic.StoreInt64(&bm.nulls, int64(size-1))
+		// Add a batch of null blocks to advance the chain quicker through finalities.
+		bm.InjectNulls(abi.ChainEpoch(size - 1))

 		// Add a real block
 		m, err := paymentReceiver.MpoolPushMessage(ctx, &types.Message{
@@ -294,7 +297,7 @@ func waitForBlocks(ctx context.Context, t *testing.T, bm *BlockMiner, paymentRec
 	}
 }

-func waitForMessage(ctx context.Context, t *testing.T, paymentCreator TestNode, msgCid cid.Cid, duration time.Duration, desc string) *api.MsgLookup {
+func waitForMessage(ctx context.Context, t *testing.T, paymentCreator kit.TestFullNode, msgCid cid.Cid, duration time.Duration, desc string) *api.MsgLookup {
 	ctx, cancel := context.WithTimeout(ctx, duration)
 	defer cancel()

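// Usage sketch: the refactored tests build a BlockMiner without a context or
// interval and supply both when mining starts; Stop is registered as cleanup.
//
//	bm := kit.NewBlockMiner(t, miner)
//	bm.MineBlocks(ctx, 5*time.Millisecond)
//	t.Cleanup(bm.Stop)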
@@ -1,4 +1,4 @@
-package cli
+package itests

 import (
 	"context"
@@ -10,7 +10,8 @@ import (
 	"testing"
 	"time"

-	clitest "github.com/filecoin-project/lotus/cli/test"
+	"github.com/filecoin-project/lotus/cli"
+	"github.com/filecoin-project/lotus/itests/kit"

 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-state-types/abi"
@@ -20,7 +21,6 @@ import (
 	cbor "github.com/ipfs/go-ipld-cbor"
 	"github.com/stretchr/testify/require"

-	"github.com/filecoin-project/lotus/api/test"
 	"github.com/filecoin-project/lotus/blockstore"
 	"github.com/filecoin-project/lotus/build"
 	"github.com/filecoin-project/lotus/chain/events"
@@ -35,20 +35,20 @@ func init() {

 // TestPaymentChannels does a basic test to exercise the payment channel CLI
 // commands
-func TestPaymentChannels(t *testing.T) {
+func TestPaymentChannelsBasic(t *testing.T) {
 	_ = os.Setenv("BELLMAN_NO_GPU", "1")
-	clitest.QuietMiningLogs()
+	kit.QuietMiningLogs()

 	blocktime := 5 * time.Millisecond
 	ctx := context.Background()
-	nodes, addrs := clitest.StartTwoNodesOneMiner(ctx, t, blocktime)
+	nodes, addrs := kit.StartTwoNodesOneMiner(ctx, t, blocktime)
 	paymentCreator := nodes[0]
 	paymentReceiver := nodes[1]
 	creatorAddr := addrs[0]
 	receiverAddr := addrs[1]

 	// Create mock CLI
-	mockCLI := clitest.NewMockCLI(ctx, t, Commands)
+	mockCLI := kit.NewMockCLI(ctx, t, cli.Commands)
 	creatorCLI := mockCLI.Client(paymentCreator.ListenAddr)
 	receiverCLI := mockCLI.Client(paymentReceiver.ListenAddr)

@@ -89,17 +89,17 @@ type voucherSpec struct {
 // TestPaymentChannelStatus tests the payment channel status CLI command
 func TestPaymentChannelStatus(t *testing.T) {
 	_ = os.Setenv("BELLMAN_NO_GPU", "1")
-	clitest.QuietMiningLogs()
+	kit.QuietMiningLogs()

 	blocktime := 5 * time.Millisecond
 	ctx := context.Background()
-	nodes, addrs := clitest.StartTwoNodesOneMiner(ctx, t, blocktime)
+	nodes, addrs := kit.StartTwoNodesOneMiner(ctx, t, blocktime)
 	paymentCreator := nodes[0]
 	creatorAddr := addrs[0]
 	receiverAddr := addrs[1]

 	// Create mock CLI
-	mockCLI := clitest.NewMockCLI(ctx, t, Commands)
+	mockCLI := kit.NewMockCLI(ctx, t, cli.Commands)
 	creatorCLI := mockCLI.Client(paymentCreator.ListenAddr)

 	// creator: paych status-by-from-to <creator> <receiver>
@@ -168,18 +168,18 @@ func TestPaymentChannelStatus(t *testing.T) {
 // channel voucher commands
 func TestPaymentChannelVouchers(t *testing.T) {
 	_ = os.Setenv("BELLMAN_NO_GPU", "1")
-	clitest.QuietMiningLogs()
+	kit.QuietMiningLogs()

 	blocktime := 5 * time.Millisecond
 	ctx := context.Background()
-	nodes, addrs := clitest.StartTwoNodesOneMiner(ctx, t, blocktime)
+	nodes, addrs := kit.StartTwoNodesOneMiner(ctx, t, blocktime)
 	paymentCreator := nodes[0]
 	paymentReceiver := nodes[1]
 	creatorAddr := addrs[0]
 	receiverAddr := addrs[1]

 	// Create mock CLI
-	mockCLI := clitest.NewMockCLI(ctx, t, Commands)
+	mockCLI := kit.NewMockCLI(ctx, t, cli.Commands)
 	creatorCLI := mockCLI.Client(paymentCreator.ListenAddr)
 	receiverCLI := mockCLI.Client(paymentReceiver.ListenAddr)

@@ -300,17 +300,17 @@ func TestPaymentChannelVouchers(t *testing.T) {
 // is greater than what's left in the channel, voucher create fails
 func TestPaymentChannelVoucherCreateShortfall(t *testing.T) {
 	_ = os.Setenv("BELLMAN_NO_GPU", "1")
-	clitest.QuietMiningLogs()
+	kit.QuietMiningLogs()

 	blocktime := 5 * time.Millisecond
 	ctx := context.Background()
-	nodes, addrs := clitest.StartTwoNodesOneMiner(ctx, t, blocktime)
+	nodes, addrs := kit.StartTwoNodesOneMiner(ctx, t, blocktime)
 	paymentCreator := nodes[0]
 	creatorAddr := addrs[0]
 	receiverAddr := addrs[1]

 	// Create mock CLI
-	mockCLI := clitest.NewMockCLI(ctx, t, Commands)
+	mockCLI := kit.NewMockCLI(ctx, t, cli.Commands)
 	creatorCLI := mockCLI.Client(paymentCreator.ListenAddr)

 	// creator: paych add-funds <creator> <receiver> <amount>
@@ -378,7 +378,7 @@ func checkVoucherOutput(t *testing.T, list string, vouchers []voucherSpec) {
 }

 // waitForHeight waits for the node to reach the given chain epoch
-func waitForHeight(ctx context.Context, t *testing.T, node test.TestNode, height abi.ChainEpoch) {
+func waitForHeight(ctx context.Context, t *testing.T, node kit.TestFullNode, height abi.ChainEpoch) {
 	atHeight := make(chan struct{})
 	chainEvents := events.NewEvents(ctx, node)
 	err := chainEvents.ChainAt(func(ctx context.Context, ts *types.TipSet, curH abi.ChainEpoch) error {
@@ -396,7 +396,7 @@ func waitForHeight(ctx context.Context, t *testing.T, node test.TestNode, height
 }

 // getPaychState gets the state of the payment channel with the given address
-func getPaychState(ctx context.Context, t *testing.T, node test.TestNode, chAddr address.Address) paych.State {
+func getPaychState(ctx context.Context, t *testing.T, node kit.TestFullNode, chAddr address.Address) paych.State {
 	act, err := node.StateGetActor(ctx, chAddr, types.EmptyTSK)
 	require.NoError(t, err)

itests/sdr_upgrade_test.go (new file, +112)
@@ -0,0 +1,112 @@
package itests

import (
	"context"
	"sort"
	"sync/atomic"
	"testing"
	"time"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/network"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/itests/kit"
	bminer "github.com/filecoin-project/lotus/miner"
	"github.com/filecoin-project/lotus/node/impl"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestSDRUpgrade(t *testing.T) {
	kit.QuietMiningLogs()

	// oldDelay := policy.GetPreCommitChallengeDelay()
	// policy.SetPreCommitChallengeDelay(5)
	// t.Cleanup(func() {
	// 	policy.SetPreCommitChallengeDelay(oldDelay)
	// })

	blocktime := 50 * time.Millisecond

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	n, sn := kit.MockMinerBuilder(t, []kit.FullNodeOpts{kit.FullNodeWithSDRAt(500, 1000)}, kit.OneMiner)
	client := n[0].FullNode.(*impl.FullNodeAPI)
	miner := sn[0]

	addrinfo, err := client.NetAddrsListen(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if err := miner.NetConnect(ctx, addrinfo); err != nil {
		t.Fatal(err)
	}
	build.Clock.Sleep(time.Second)

	pledge := make(chan struct{})
	mine := int64(1)
	done := make(chan struct{})
	go func() {
		defer close(done)
		round := 0
		for atomic.LoadInt64(&mine) != 0 {
			build.Clock.Sleep(blocktime)
			if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {}}); err != nil {
				t.Error(err)
			}

			// 3 sealing rounds: before, during, after.
			if round >= 3 {
				continue
			}

			head, err := client.ChainHead(ctx)
			assert.NoError(t, err)

			// rounds happen every 500 blocks, with a 50 block offset.
			if head.Height() >= abi.ChainEpoch(round*500+50) {
				round++
				pledge <- struct{}{}

				ver, err := client.StateNetworkVersion(ctx, head.Key())
				assert.NoError(t, err)
				switch round {
				case 1:
					assert.Equal(t, network.Version6, ver)
				case 2:
					assert.Equal(t, network.Version7, ver)
				case 3:
					assert.Equal(t, network.Version8, ver)
				}
			}
		}
	}()

	// before.
	kit.PledgeSectors(t, ctx, miner, 9, 0, pledge)

	s, err := miner.SectorsList(ctx)
	require.NoError(t, err)
	sort.Slice(s, func(i, j int) bool {
		return s[i] < s[j]
	})

	for i, id := range s {
		info, err := miner.SectorsStatus(ctx, id, true)
		require.NoError(t, err)
		expectProof := abi.RegisteredSealProof_StackedDrg2KiBV1
		if i >= 3 {
			// after
			expectProof = abi.RegisteredSealProof_StackedDrg2KiBV1_1
		}
		assert.Equal(t, expectProof, info.SealProof, "sector %d, id %d", i, id)
	}

	atomic.StoreInt64(&mine, 0)
	<-done
}
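// Note on the assertions above: sectors pledged before the Calico upgrade seal
// with RegisteredSealProof_StackedDrg2KiBV1, while sectors pledged afterwards
// use the SDR-updated RegisteredSealProof_StackedDrg2KiBV1_1.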
itests/sector_pledge_test.go (new file, +261)
@@ -0,0 +1,261 @@
package itests

import (
	"context"
	"fmt"
	"strings"
	"sync/atomic"
	"testing"
	"time"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/network"
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/stmgr"
	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
	"github.com/filecoin-project/lotus/itests/kit"
	bminer "github.com/filecoin-project/lotus/miner"
	"github.com/filecoin-project/lotus/node"
	"github.com/filecoin-project/lotus/node/impl"
	"github.com/stretchr/testify/require"
)

func TestPledgeSectors(t *testing.T) {
	kit.QuietMiningLogs()

	runTest := func(t *testing.T, b kit.APIBuilder, blocktime time.Duration, nSectors int) {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		n, sn := b(t, kit.OneFull, kit.OneMiner)
		client := n[0].FullNode.(*impl.FullNodeAPI)
		miner := sn[0]

		addrinfo, err := client.NetAddrsListen(ctx)
		if err != nil {
			t.Fatal(err)
		}

		if err := miner.NetConnect(ctx, addrinfo); err != nil {
			t.Fatal(err)
		}
		build.Clock.Sleep(time.Second)

		mine := int64(1)
		done := make(chan struct{})
		go func() {
			defer close(done)
			for atomic.LoadInt64(&mine) != 0 {
				build.Clock.Sleep(blocktime)
				if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {}}); err != nil {
					t.Error(err)
				}
			}
		}()

		kit.PledgeSectors(t, ctx, miner, nSectors, 0, nil)

		atomic.StoreInt64(&mine, 0)
		<-done
	}

	t.Run("1", func(t *testing.T) {
		runTest(t, kit.MockMinerBuilder, 50*time.Millisecond, 1)
	})

	t.Run("100", func(t *testing.T) {
		runTest(t, kit.MockMinerBuilder, 50*time.Millisecond, 100)
	})

	t.Run("1000", func(t *testing.T) {
		if testing.Short() { // takes ~16s
			t.Skip("skipping test in short mode")
		}

		runTest(t, kit.MockMinerBuilder, 50*time.Millisecond, 1000)
	})
}

func TestPledgeBatching(t *testing.T) {
	runTest := func(t *testing.T, b kit.APIBuilder, blocktime time.Duration, nSectors int) {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		n, sn := b(t, []kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(-1)}, kit.OneMiner)
		client := n[0].FullNode.(*impl.FullNodeAPI)
		miner := sn[0]

		addrinfo, err := client.NetAddrsListen(ctx)
		if err != nil {
			t.Fatal(err)
		}

		if err := miner.NetConnect(ctx, addrinfo); err != nil {
			t.Fatal(err)
		}
		build.Clock.Sleep(time.Second)

		mine := int64(1)
		done := make(chan struct{})
		go func() {
			defer close(done)
			for atomic.LoadInt64(&mine) != 0 {
				build.Clock.Sleep(blocktime)
				if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {}}); err != nil {
					t.Error(err)
				}
			}
		}()

		// Wait for the chain to get going before pledging.
		for {
			h, err := client.ChainHead(ctx)
			require.NoError(t, err)
			if h.Height() > 10 {
				break
			}
		}

		toCheck := kit.StartPledge(t, ctx, miner, nSectors, 0, nil)

		for len(toCheck) > 0 {
			states := map[api.SectorState]int{}

			for n := range toCheck {
				st, err := miner.SectorsStatus(ctx, n, false)
				require.NoError(t, err)
				states[st.State]++
				if st.State == api.SectorState(sealing.Proving) {
					delete(toCheck, n)
				}
				if strings.Contains(string(st.State), "Fail") {
					t.Fatal("sector in a failed state", st.State)
				}
			}
			// Flush the precommit batch once every sector is queued for it, or
			// once nothing is left in the earlier precommit states.
			if states[api.SectorState(sealing.SubmitPreCommitBatch)] == nSectors ||
				(states[api.SectorState(sealing.SubmitPreCommitBatch)] > 0 && states[api.SectorState(sealing.PreCommit1)] == 0 && states[api.SectorState(sealing.PreCommit2)] == 0) {
				pcb, err := miner.SectorPreCommitFlush(ctx)
				require.NoError(t, err)
				if pcb != nil {
					fmt.Printf("PRECOMMIT BATCH: %+v\n", pcb)
				}
			}

			if states[api.SectorState(sealing.SubmitCommitAggregate)] == nSectors ||
				(states[api.SectorState(sealing.SubmitCommitAggregate)] > 0 && states[api.SectorState(sealing.WaitSeed)] == 0 && states[api.SectorState(sealing.Committing)] == 0) {
				cb, err := miner.SectorCommitFlush(ctx)
				require.NoError(t, err)
				if cb != nil {
					fmt.Printf("COMMIT BATCH: %+v\n", cb)
				}
			}

			build.Clock.Sleep(100 * time.Millisecond)
			fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states)
		}

		atomic.StoreInt64(&mine, 0)
		<-done
	}

	t.Run("100", func(t *testing.T) {
		runTest(t, kit.MockMinerBuilder, 50*time.Millisecond, 100)
	})
}

func TestPledgeBeforeNv13(t *testing.T) {
	runTest := func(t *testing.T, b kit.APIBuilder, blocktime time.Duration, nSectors int) {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		n, sn := b(t, []kit.FullNodeOpts{
			{
				Opts: func(nodes []kit.TestFullNode) node.Option {
					return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
						Network:   network.Version9,
						Height:    1,
						Migration: stmgr.UpgradeActorsV2,
					}, {
						Network:   network.Version10,
						Height:    2,
						Migration: stmgr.UpgradeActorsV3,
					}, {
						Network:   network.Version12,
						Height:    3,
						Migration: stmgr.UpgradeActorsV4,
					}, {
						Network:   network.Version13,
						Height:    1000000000,
						Migration: stmgr.UpgradeActorsV5,
					}})
				},
			},
		}, kit.OneMiner)
		client := n[0].FullNode.(*impl.FullNodeAPI)
		miner := sn[0]

		addrinfo, err := client.NetAddrsListen(ctx)
		if err != nil {
			t.Fatal(err)
		}

		if err := miner.NetConnect(ctx, addrinfo); err != nil {
			t.Fatal(err)
		}
		build.Clock.Sleep(time.Second)

		mine := int64(1)
		done := make(chan struct{})
		go func() {
			defer close(done)
			for atomic.LoadInt64(&mine) != 0 {
				build.Clock.Sleep(blocktime)
				if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {}}); err != nil {
					t.Error(err)
				}
			}
		}()

		// Wait for the chain to get going before pledging.
		for {
			h, err := client.ChainHead(ctx)
			require.NoError(t, err)
			if h.Height() > 10 {
				break
			}
		}

		toCheck := kit.StartPledge(t, ctx, miner, nSectors, 0, nil)

		for len(toCheck) > 0 {
			states := map[api.SectorState]int{}

			for n := range toCheck {
				st, err := miner.SectorsStatus(ctx, n, false)
				require.NoError(t, err)
				states[st.State]++
				if st.State == api.SectorState(sealing.Proving) {
					delete(toCheck, n)
				}
				if strings.Contains(string(st.State), "Fail") {
					t.Fatal("sector in a failed state", st.State)
				}
			}

			build.Clock.Sleep(100 * time.Millisecond)
			fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states)
		}

		atomic.StoreInt64(&mine, 0)
		<-done
	}

	t.Run("100-before-nv13", func(t *testing.T) {
		runTest(t, kit.MockMinerBuilder, 50*time.Millisecond, 100)
	})
}
itests/sector_terminate_test.go (new file, +201)
@@ -0,0 +1,201 @@
package itests

import (
	"context"
	"fmt"
	"os"
	"testing"
	"time"

	"github.com/filecoin-project/go-bitfield"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/types"
	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
	"github.com/filecoin-project/lotus/itests/kit"
	"github.com/filecoin-project/lotus/node/impl"
	"github.com/stretchr/testify/require"
)

func TestTerminate(t *testing.T) {
	if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" {
		t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
	}

	kit.QuietMiningLogs()

	const blocktime = 2 * time.Millisecond

	nSectors := uint64(2)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	n, sn := kit.MockMinerBuilder(t,
		[]kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(-1)},
		[]kit.StorageMiner{{Full: 0, Preseal: int(nSectors)}},
	)

	client := n[0].FullNode.(*impl.FullNodeAPI)
	miner := sn[0]

	addrinfo, err := client.NetAddrsListen(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if err := miner.NetConnect(ctx, addrinfo); err != nil {
		t.Fatal(err)
	}
	build.Clock.Sleep(time.Second)

	done := make(chan struct{})
	go func() {
		defer close(done)
		for ctx.Err() == nil {
			build.Clock.Sleep(blocktime)
			if err := sn[0].MineOne(ctx, kit.MineNext); err != nil {
				if ctx.Err() != nil {
					// context was canceled, ignore the error.
					return
				}
				t.Error(err)
			}
		}
	}()
	defer func() {
		cancel()
		<-done
	}()

	maddr, err := miner.ActorAddress(ctx)
	require.NoError(t, err)

	ssz, err := miner.ActorSectorSize(ctx, maddr)
	require.NoError(t, err)

	p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)
	require.Equal(t, p.MinerPower, p.TotalPower)
	require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*nSectors))

	fmt.Printf("Seal a sector\n")

	kit.PledgeSectors(t, ctx, miner, 1, 0, nil)

	fmt.Printf("wait for power\n")

	{
		// Wait until proven.
		di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
		require.NoError(t, err)

		waitUntil := di.PeriodStart + di.WPoStProvingPeriod + 2
		fmt.Printf("End for head.Height > %d\n", waitUntil)

		for {
			head, err := client.ChainHead(ctx)
			require.NoError(t, err)

			if head.Height() > waitUntil {
				fmt.Printf("Now head.Height = %d\n", head.Height())
				break
			}
		}
	}

	nSectors++

	p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)
	require.Equal(t, p.MinerPower, p.TotalPower)
	require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*nSectors))

	fmt.Println("Terminate a sector")

	toTerminate := abi.SectorNumber(3)

	err = miner.SectorTerminate(ctx, toTerminate)
	require.NoError(t, err)

	msgTriggered := false
loop:
	for {
		si, err := miner.SectorsStatus(ctx, toTerminate, false)
		require.NoError(t, err)

		fmt.Println("state: ", si.State, msgTriggered)

		switch sealing.SectorState(si.State) {
		case sealing.Terminating:
			if !msgTriggered {
				{
					p, err := miner.SectorTerminatePending(ctx)
					require.NoError(t, err)
					require.Len(t, p, 1)
					require.Equal(t, abi.SectorNumber(3), p[0].Number)
				}

				c, err := miner.SectorTerminateFlush(ctx)
				require.NoError(t, err)
				if c != nil {
					msgTriggered = true
					fmt.Println("terminate message:", c)

					{
						p, err := miner.SectorTerminatePending(ctx)
						require.NoError(t, err)
						require.Len(t, p, 0)
					}
				}
			}
		case sealing.TerminateWait, sealing.TerminateFinality, sealing.Removed:
			break loop
		}

		time.Sleep(100 * time.Millisecond)
	}

	// check power decreased
	p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)
	require.Equal(t, p.MinerPower, p.TotalPower)
	require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*(nSectors-1)))

	// check in terminated set
	{
		parts, err := client.StateMinerPartitions(ctx, maddr, 1, types.EmptyTSK)
		require.NoError(t, err)
		require.Greater(t, len(parts), 0)

		bflen := func(b bitfield.BitField) uint64 {
			l, err := b.Count()
			require.NoError(t, err)
			return l
		}

		require.Equal(t, uint64(1), bflen(parts[0].AllSectors))
		require.Equal(t, uint64(0), bflen(parts[0].LiveSectors))
	}

	di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)
	for {
		head, err := client.ChainHead(ctx)
		require.NoError(t, err)

		if head.Height() > di.PeriodStart+di.WPoStProvingPeriod+2 {
			fmt.Printf("Now head.Height = %d\n", head.Height())
			break
		}
		build.Clock.Sleep(blocktime)
	}
	require.NoError(t, err)
	fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2)

	p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)

	require.Equal(t, p.MinerPower, p.TotalPower)
	require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*(nSectors-1)))
}
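// Sketch of the terminate flow exercised above: queue the termination, flush
// the batch to get an on-chain message, then confirm the pending queue drained.
//
//	require.NoError(t, miner.SectorTerminate(ctx, toTerminate))
//	c, err := miner.SectorTerminateFlush(ctx) // nil until a batch is pending
//	require.NoError(t, err)
//	pending, err := miner.SectorTerminatePending(ctx)
//	require.NoError(t, err)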
@@ -1,4 +1,4 @@
-package test
+package itests

 import (
 	"context"
@@ -11,18 +11,24 @@ import (
 	"github.com/filecoin-project/lotus/build"
 	"github.com/filecoin-project/lotus/chain/stmgr"
 	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
+	"github.com/filecoin-project/lotus/itests/kit"
 	"github.com/filecoin-project/lotus/node"
 	"github.com/filecoin-project/lotus/node/impl"
 	"github.com/stretchr/testify/require"
 )

-func TestTapeFix(t *testing.T, b APIBuilder, blocktime time.Duration) {
+func TestTapeFix(t *testing.T) {
+	kit.QuietMiningLogs()
+
+	var blocktime = 2 * time.Millisecond
+
 	// The "before" case is disabled, because we need the builder to mock 32 GiB sectors to accurately repro this case
 	// TODO: Make the mock sector size configurable and reenable this
-	//t.Run("before", func(t *testing.T) { testTapeFix(t, b, blocktime, false) })
-	t.Run("after", func(t *testing.T) { testTapeFix(t, b, blocktime, true) })
+	// t.Run("before", func(t *testing.T) { testTapeFix(t, b, blocktime, false) })
+	t.Run("after", func(t *testing.T) { testTapeFix(t, kit.MockMinerBuilder, blocktime, true) })
 }
-func testTapeFix(t *testing.T, b APIBuilder, blocktime time.Duration, after bool) {
+
+func testTapeFix(t *testing.T, b kit.APIBuilder, blocktime time.Duration, after bool) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()

@@ -38,9 +44,9 @@ func testTapeFix(t *testing.T, b APIBuilder, blocktime time.Duration, after bool
 		})
 	}

-	n, sn := b(t, []FullNodeOpts{{Opts: func(_ []TestNode) node.Option {
+	n, sn := b(t, []kit.FullNodeOpts{{Opts: func(_ []kit.TestFullNode) node.Option {
 		return node.Override(new(stmgr.UpgradeSchedule), upgradeSchedule)
-	}}}, OneMiner)
+	}}}, kit.OneMiner)

 	client := n[0].FullNode.(*impl.FullNodeAPI)
 	miner := sn[0]
@@ -60,7 +66,7 @@ func testTapeFix(t *testing.T, b APIBuilder, blocktime time.Duration, after bool
 		defer close(done)
 		for ctx.Err() == nil {
 			build.Clock.Sleep(blocktime)
-			if err := sn[0].MineOne(ctx, MineNext); err != nil {
+			if err := sn[0].MineOne(ctx, kit.MineNext); err != nil {
 				if ctx.Err() != nil {
 					// context was canceled, ignore the error.
 					return
@@ -97,5 +103,4 @@ func testTapeFix(t *testing.T, b APIBuilder, blocktime time.Duration, after bool
 		build.Clock.Sleep(100 * time.Millisecond)
 		fmt.Println("WaitSeal")
 	}
-
 }
@@ -1,10 +1,13 @@
-package test
+package itests

 import (
 	"context"
 	"strings"
+	"testing"
+	"time"

 	"github.com/filecoin-project/go-state-types/network"
+	"github.com/filecoin-project/lotus/itests/kit"

 	lapi "github.com/filecoin-project/lotus/api"

@@ -13,18 +16,14 @@ import (
 	"github.com/filecoin-project/lotus/node/impl"
 	verifreg4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/verifreg"

-	"testing"
-	"time"
-
 	"github.com/filecoin-project/go-state-types/big"
 	"github.com/filecoin-project/lotus/chain/types"
 )

-func AddVerifiedClient(t *testing.T, b APIBuilder) {
+func TestVerifiedClientTopUp(t *testing.T) {
 	test := func(nv network.Version, shouldWork bool) func(*testing.T) {
 		return func(t *testing.T) {
-
-			nodes, miners := b(t, []FullNodeOpts{FullNodeWithNetworkUpgradeAt(nv, -1)}, OneMiner)
+			nodes, miners := kit.MockMinerBuilder(t, []kit.FullNodeOpts{kit.FullNodeWithNetworkUpgradeAt(nv, -1)}, kit.OneMiner)
 			api := nodes[0].FullNode.(*impl.FullNodeAPI)
 			ctx, cancel := context.WithCancel(context.Background())
 			defer cancel()
@@ -53,9 +52,9 @@ func AddVerifiedClient(t *testing.T, b APIBuilder) {
 			Value: big.Zero(),
 		}

-		bm := NewBlockMiner(ctx, t, miners[0], 100*time.Millisecond)
-		bm.MineBlocks()
-		defer bm.Stop()
+		bm := kit.NewBlockMiner(t, miners[0])
+		bm.MineBlocks(ctx, 100*time.Millisecond)
+		t.Cleanup(bm.Stop)

 		sm, err := api.MpoolPushMessage(ctx, msg, nil)
 		if err != nil {
itests/wdpost_dispute_test.go (new file, +458)
@@ -0,0 +1,458 @@
package itests

import (
	"context"
	"fmt"
	"os"
	"testing"
	"time"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-bitfield"
	"github.com/filecoin-project/go-state-types/crypto"
	"github.com/filecoin-project/go-state-types/dline"
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/actors"
	minerActor "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/itests/kit"
	"github.com/filecoin-project/lotus/node/impl"
	proof3 "github.com/filecoin-project/specs-actors/v3/actors/runtime/proof"
	"github.com/stretchr/testify/require"
)

func TestWindowPostDispute(t *testing.T) {
	if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" {
		t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
	}

	kit.QuietMiningLogs()

	b := kit.MockMinerBuilder
	blocktime := 2 * time.Millisecond

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// First, we configure two miners. After sealing, we're going to turn off
	// the second (evil) miner so it doesn't submit proofs.
	//
	// Then we're going to manually submit bad proofs.
	n, sn := b(t,
		[]kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(-1)},
		[]kit.StorageMiner{
			{Full: 0, Preseal: kit.PresealGenesis},
			{Full: 0},
		})

	client := n[0].FullNode.(*impl.FullNodeAPI)
	chainMiner := sn[0]
	evilMiner := sn[1]

	{
		addrinfo, err := client.NetAddrsListen(ctx)
		if err != nil {
			t.Fatal(err)
		}

		if err := chainMiner.NetConnect(ctx, addrinfo); err != nil {
			t.Fatal(err)
		}

		if err := evilMiner.NetConnect(ctx, addrinfo); err != nil {
			t.Fatal(err)
		}
	}

	defaultFrom, err := client.WalletDefaultAddress(ctx)
	require.NoError(t, err)

	build.Clock.Sleep(time.Second)

	// Mine with the chain miner (the good one).
	done := make(chan struct{})
	go func() {
		defer close(done)
		for ctx.Err() == nil {
			build.Clock.Sleep(blocktime)
			if err := chainMiner.MineOne(ctx, kit.MineNext); err != nil {
				if ctx.Err() != nil {
					// context was canceled, ignore the error.
					return
				}
				t.Error(err)
			}
		}
	}()
	defer func() {
		cancel()
		<-done
	}()

	// Give the chain miner enough sectors to win every block.
	kit.PledgeSectors(t, ctx, chainMiner, 10, 0, nil)
	// And the evil one 1 sector. No cookie for you.
	kit.PledgeSectors(t, ctx, evilMiner, 1, 0, nil)

	// Let the evil miner's sectors gain power.
	evilMinerAddr, err := evilMiner.ActorAddress(ctx)
	require.NoError(t, err)

	di, err := client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK)
	require.NoError(t, err)

	fmt.Printf("Running one proving period\n")
	fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod*2)

	for {
		head, err := client.ChainHead(ctx)
		require.NoError(t, err)

		if head.Height() > di.PeriodStart+di.WPoStProvingPeriod*2 {
			fmt.Printf("Now head.Height = %d\n", head.Height())
			break
		}
		build.Clock.Sleep(blocktime)
	}

	p, err := client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK)
	require.NoError(t, err)

	ssz, err := evilMiner.ActorSectorSize(ctx, evilMinerAddr)
	require.NoError(t, err)

	// make sure it has gained power.
	require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)))

	evilSectors, err := evilMiner.SectorsList(ctx)
	require.NoError(t, err)
	evilSectorNo := evilSectors[0] // only one.
	evilSectorLoc, err := client.StateSectorPartition(ctx, evilMinerAddr, evilSectorNo, types.EmptyTSK)
	require.NoError(t, err)

	fmt.Println("evil miner stopping")

	// Now stop the evil miner, and start manually submitting bad proofs.
	require.NoError(t, evilMiner.Stop(ctx))

	fmt.Println("evil miner stopped")

	// Wait until we need to prove our sector.
	for {
		di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK)
		require.NoError(t, err)
		if di.Index == evilSectorLoc.Deadline && di.CurrentEpoch-di.PeriodStart > 1 {
			break
		}
		build.Clock.Sleep(blocktime)
	}

	err = submitBadProof(ctx, client, evilMinerAddr, di, evilSectorLoc.Deadline, evilSectorLoc.Partition)
	require.NoError(t, err, "evil proof not accepted")

	// Wait until after the proving period.
	for {
		di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK)
		require.NoError(t, err)
		if di.Index != evilSectorLoc.Deadline {
			break
		}
		build.Clock.Sleep(blocktime)
	}

	fmt.Println("accepted evil proof")

	// Make sure the evil node didn't lose any power.
	p, err = client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK)
	require.NoError(t, err)
	require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)))

	// OBJECTION! The good miner files a DISPUTE!!!!
	{
		params := &minerActor.DisputeWindowedPoStParams{
			Deadline:  evilSectorLoc.Deadline,
			PoStIndex: 0,
		}

		enc, aerr := actors.SerializeParams(params)
		require.NoError(t, aerr)

		msg := &types.Message{
			To:     evilMinerAddr,
			Method: minerActor.Methods.DisputeWindowedPoSt,
			Params: enc,
			Value:  types.NewInt(0),
			From:   defaultFrom,
		}
		sm, err := client.MpoolPushMessage(ctx, msg, nil)
		require.NoError(t, err)

		fmt.Println("waiting dispute")
		rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
		require.NoError(t, err)
		require.Zero(t, rec.Receipt.ExitCode, "dispute not accepted: %s", rec.Receipt.ExitCode.Error())
	}

	// Objection SUSTAINED!
	// Make sure the evil node lost power.
	p, err = client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK)
	require.NoError(t, err)
	require.True(t, p.MinerPower.RawBytePower.IsZero())

	// Now we begin the redemption arc.
	require.True(t, p.MinerPower.RawBytePower.IsZero())

	// First, recover the sector.

	{
		minerInfo, err := client.StateMinerInfo(ctx, evilMinerAddr, types.EmptyTSK)
		require.NoError(t, err)

		params := &minerActor.DeclareFaultsRecoveredParams{
			Recoveries: []minerActor.RecoveryDeclaration{{
				Deadline:  evilSectorLoc.Deadline,
				Partition: evilSectorLoc.Partition,
				Sectors:   bitfield.NewFromSet([]uint64{uint64(evilSectorNo)}),
			}},
		}

		enc, aerr := actors.SerializeParams(params)
		require.NoError(t, aerr)

		msg := &types.Message{
			To:     evilMinerAddr,
			Method: minerActor.Methods.DeclareFaultsRecovered,
			Params: enc,
			Value:  types.FromFil(30), // repay debt.
			From:   minerInfo.Owner,
		}
		sm, err := client.MpoolPushMessage(ctx, msg, nil)
		require.NoError(t, err)

		rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
		require.NoError(t, err)
		require.Zero(t, rec.Receipt.ExitCode, "recovery not accepted: %s", rec.Receipt.ExitCode.Error())
	}

	// Then wait for the deadline.
	for {
		di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK)
		require.NoError(t, err)
		if di.Index == evilSectorLoc.Deadline {
			break
		}
		build.Clock.Sleep(blocktime)
	}

	// Now try to be evil again
	err = submitBadProof(ctx, client, evilMinerAddr, di, evilSectorLoc.Deadline, evilSectorLoc.Partition)
	require.Error(t, err)
	require.Contains(t, err.Error(), "message execution failed: exit 16, reason: window post failed: invalid PoSt")

	// It didn't work because we're recovering.
}

func TestWindowPostDisputeFails(t *testing.T) {
	if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" {
		t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
	}

	kit.QuietMiningLogs()

	b := kit.MockMinerBuilder
	blocktime := 2 * time.Millisecond

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	n, sn := b(t, []kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(-1)}, kit.OneMiner)

	client := n[0].FullNode.(*impl.FullNodeAPI)
	miner := sn[0]

	{
		addrinfo, err := client.NetAddrsListen(ctx)
		if err != nil {
			t.Fatal(err)
		}

		if err := miner.NetConnect(ctx, addrinfo); err != nil {
			t.Fatal(err)
		}
	}

	defaultFrom, err := client.WalletDefaultAddress(ctx)
	require.NoError(t, err)

	maddr, err := miner.ActorAddress(ctx)
	require.NoError(t, err)

	build.Clock.Sleep(time.Second)

	// Mine with the only miner we have (an honest one this time).
	done := make(chan struct{})
	go func() {
		defer close(done)
		for ctx.Err() == nil {
			build.Clock.Sleep(blocktime)
			if err := miner.MineOne(ctx, kit.MineNext); err != nil {
				if ctx.Err() != nil {
					// context was canceled, ignore the error.
					return
				}
				t.Error(err)
			}
		}
	}()
	defer func() {
		cancel()
		<-done
	}()

	kit.PledgeSectors(t, ctx, miner, 10, 0, nil)

	di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)

	fmt.Printf("Running one proving period\n")
	fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod*2)

	for {
		head, err := client.ChainHead(ctx)
		require.NoError(t, err)

		if head.Height() > di.PeriodStart+di.WPoStProvingPeriod*2 {
			fmt.Printf("Now head.Height = %d\n", head.Height())
			break
		}
		build.Clock.Sleep(blocktime)
	}

	ssz, err := miner.ActorSectorSize(ctx, maddr)
	require.NoError(t, err)
	expectedPower := types.NewInt(uint64(ssz) * (kit.GenesisPreseals + 10))

	p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)

	// make sure it has gained power.
	require.Equal(t, p.MinerPower.RawBytePower, expectedPower)

	// Wait until a proof has been submitted.
	var targetDeadline uint64
waitForProof:
	for {
		deadlines, err := client.StateMinerDeadlines(ctx, maddr, types.EmptyTSK)
		require.NoError(t, err)
		for dlIdx, dl := range deadlines {
			// A proof exists once the PostSubmissions bitfield is non-empty.
			empty, err := dl.PostSubmissions.IsEmpty()
			require.NoError(t, err)
			if !empty {
				targetDeadline = uint64(dlIdx)
				break waitForProof
			}
		}

		build.Clock.Sleep(blocktime)
	}

	for {
		di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
		require.NoError(t, err)
		// wait until the deadline finishes.
		if di.Index == ((targetDeadline + 1) % di.WPoStPeriodDeadlines) {
			break
		}

		build.Clock.Sleep(blocktime)
	}

	// Try to object to the proof. This should fail.
	{
		params := &minerActor.DisputeWindowedPoStParams{
			Deadline:  targetDeadline,
			PoStIndex: 0,
		}

		enc, aerr := actors.SerializeParams(params)
		require.NoError(t, aerr)

		msg := &types.Message{
			To:     maddr,
			Method: minerActor.Methods.DisputeWindowedPoSt,
			Params: enc,
			Value:  types.NewInt(0),
			From:   defaultFrom,
		}
		_, err := client.MpoolPushMessage(ctx, msg, nil)
		require.Error(t, err)
		require.Contains(t, err.Error(), "failed to dispute valid post (RetCode=16)")
	}
}

func submitBadProof(
	ctx context.Context,
	client api.FullNode, maddr address.Address,
	di *dline.Info, dlIdx, partIdx uint64,
) error {
	head, err := client.ChainHead(ctx)
	if err != nil {
		return err
	}

	from, err := client.WalletDefaultAddress(ctx)
	if err != nil {
		return err
	}

	minerInfo, err := client.StateMinerInfo(ctx, maddr, head.Key())
	if err != nil {
		return err
	}

	commEpoch := di.Open
	commRand, err := client.ChainGetRandomnessFromTickets(
		ctx, head.Key(), crypto.DomainSeparationTag_PoStChainCommit,
		commEpoch, nil,
	)
	if err != nil {
		return err
	}
	params := &minerActor.SubmitWindowedPoStParams{
		ChainCommitEpoch: commEpoch,
		ChainCommitRand:  commRand,
		Deadline:         dlIdx,
		Partitions:       []minerActor.PoStPartition{{Index: partIdx}},
		Proofs: []proof3.PoStProof{{
			PoStProof:  minerInfo.WindowPoStProofType,
			ProofBytes: []byte("I'm soooo very evil."),
		}},
	}

	enc, aerr := actors.SerializeParams(params)
	if aerr != nil {
		return aerr
	}

	msg := &types.Message{
		To:     maddr,
		Method: minerActor.Methods.SubmitWindowedPoSt,
		Params: enc,
		Value:  types.NewInt(0),
		From:   from,
	}
	sm, err := client.MpoolPushMessage(ctx, msg, nil)
	if err != nil {
		return err
	}

	rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
	if err != nil {
		return err
	}
	if rec.Receipt.ExitCode.IsError() {
		return rec.Receipt.ExitCode
	}
	return nil
}
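// Sketch of the dispute path exercised above: any account can submit
// DisputeWindowedPoSt naming the deadline and PoSt index; when the targeted
// proof was bad, the message lands with exit code 0 and the miner loses power.
//
//	msg := &types.Message{
//		To:     evilMinerAddr,
//		Method: minerActor.Methods.DisputeWindowedPoSt,
//		Params: enc, // serialized DisputeWindowedPoStParams
//		Value:  types.NewInt(0),
//		From:   defaultFrom,
//	}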
itests/wdpost_test.go (new file, +374)
@@ -0,0 +1,374 @@
package itests

import (
	"context"
	"fmt"
	"os"
	"testing"
	"time"

	"github.com/filecoin-project/go-state-types/big"
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/itests/kit"
	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/lotus/extern/sector-storage/mock"
	"github.com/filecoin-project/specs-storage/storage"

	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/node/impl"
)

func TestWindowedPost(t *testing.T) {
	if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" {
		t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
	}

	kit.QuietMiningLogs()

	var (
		blocktime = 2 * time.Millisecond
		nSectors  = 10
	)

	for _, height := range []abi.ChainEpoch{
		-1,   // before
		162,  // while sealing
		5000, // while proving
	} {
		height := height // copy to satisfy lints
		t.Run(fmt.Sprintf("upgrade-%d", height), func(t *testing.T) {
			testWindowPostUpgrade(t, kit.MockMinerBuilder, blocktime, nSectors, height)
		})
	}
}

func testWindowPostUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Duration, nSectors int, upgradeHeight abi.ChainEpoch) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	n, sn := b(t, []kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(upgradeHeight)}, kit.OneMiner)

	client := n[0].FullNode.(*impl.FullNodeAPI)
	miner := sn[0]

	addrinfo, err := client.NetAddrsListen(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if err := miner.NetConnect(ctx, addrinfo); err != nil {
		t.Fatal(err)
	}
	build.Clock.Sleep(time.Second)

	done := make(chan struct{})
	go func() {
		defer close(done)
		for ctx.Err() == nil {
			build.Clock.Sleep(blocktime)
			if err := sn[0].MineOne(ctx, kit.MineNext); err != nil {
				if ctx.Err() != nil {
					// context was canceled, ignore the error.
					return
				}
				t.Error(err)
			}
		}
	}()
	defer func() {
		cancel()
		<-done
	}()

	kit.PledgeSectors(t, ctx, miner, nSectors, 0, nil)

	maddr, err := miner.ActorAddress(ctx)
	require.NoError(t, err)

	di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)

	mid, err := address.IDFromAddress(maddr)
	require.NoError(t, err)

	fmt.Printf("Running one proving period\n")
	fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2)

	for {
		head, err := client.ChainHead(ctx)
		require.NoError(t, err)

		if head.Height() > di.PeriodStart+di.WPoStProvingPeriod+2 {
			fmt.Printf("Now head.Height = %d\n", head.Height())
			break
		}
		build.Clock.Sleep(blocktime)
	}

	p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)

	ssz, err := miner.ActorSectorSize(ctx, maddr)
	require.NoError(t, err)

	require.Equal(t, p.MinerPower, p.TotalPower)
	require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*uint64(nSectors+kit.GenesisPreseals)))

	fmt.Printf("Drop some sectors\n")

	// Drop 2 sectors from deadline 2 partition 0 (full partition / deadline)
	{
		parts, err := client.StateMinerPartitions(ctx, maddr, 2, types.EmptyTSK)
		require.NoError(t, err)
		require.Greater(t, len(parts), 0)

		secs := parts[0].AllSectors
		n, err := secs.Count()
		require.NoError(t, err)
		require.Equal(t, uint64(2), n)

		// Drop the partition
		err = secs.ForEach(func(sid uint64) error {
			return miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkCorrupted(storage.SectorRef{
				ID: abi.SectorID{
					Miner:  abi.ActorID(mid),
					Number: abi.SectorNumber(sid),
				},
			}, true)
		})
		require.NoError(t, err)
	}

	var s storage.SectorRef

	// Drop 1 sector from deadline 3 partition 0
	{
		parts, err := client.StateMinerPartitions(ctx, maddr, 3, types.EmptyTSK)
		require.NoError(t, err)
		require.Greater(t, len(parts), 0)

		secs := parts[0].AllSectors
		n, err := secs.Count()
		require.NoError(t, err)
		require.Equal(t, uint64(2), n)

		// Drop the sector
		sn, err := secs.First()
		require.NoError(t, err)

		all, err := secs.All(2)
		require.NoError(t, err)
		fmt.Println("the sectors", all)

		s = storage.SectorRef{
			ID: abi.SectorID{
				Miner:  abi.ActorID(mid),
				Number: abi.SectorNumber(sn),
			},
		}

		err = miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkFailed(s, true)
		require.NoError(t, err)
	}

	di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)

	fmt.Printf("Go through another PP, wait for sectors to become faulty\n")
	fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2)

	for {
		head, err := client.ChainHead(ctx)
		require.NoError(t, err)

		if head.Height() > di.PeriodStart+di.WPoStProvingPeriod+2 {
			fmt.Printf("Now head.Height = %d\n", head.Height())
			break
		}

		build.Clock.Sleep(blocktime)
	}

	p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)

	require.Equal(t, p.MinerPower, p.TotalPower)

	sectors := p.MinerPower.RawBytePower.Uint64() / uint64(ssz)
	require.Equal(t, nSectors+kit.GenesisPreseals-3, int(sectors)) // minus the 3 sectors we just dropped

	fmt.Printf("Recover one sector\n")

	err = miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkFailed(s, false)
	require.NoError(t, err)

	di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)

	fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2)

	for {
		head, err := client.ChainHead(ctx)
		require.NoError(t, err)

		if head.Height() > di.PeriodStart+di.WPoStProvingPeriod+2 {
			fmt.Printf("Now head.Height = %d\n", head.Height())
			break
		}

		build.Clock.Sleep(blocktime)
	}

	p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)

	require.Equal(t, p.MinerPower, p.TotalPower)

	sectors = p.MinerPower.RawBytePower.Uint64() / uint64(ssz)
	require.Equal(t, nSectors+kit.GenesisPreseals-2, int(sectors)) // minus the 2 sectors that remain faulty

	// pledge a sector after recovery

	kit.PledgeSectors(t, ctx, miner, 1, nSectors, nil)

	{
		// Wait until proven.
		di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
		require.NoError(t, err)

		waitUntil := di.PeriodStart + di.WPoStProvingPeriod + 2
		fmt.Printf("End for head.Height > %d\n", waitUntil)

		for {
			head, err := client.ChainHead(ctx)
			require.NoError(t, err)

			if head.Height() > waitUntil {
				fmt.Printf("Now head.Height = %d\n", head.Height())
				break
			}
		}
	}

	p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)

	require.Equal(t, p.MinerPower, p.TotalPower)

	sectors = p.MinerPower.RawBytePower.Uint64() / uint64(ssz)
	require.Equal(t, nSectors+kit.GenesisPreseals-2+1, int(sectors)) // minus the 2 still-faulty sectors, plus the one just pledged
}

func TestWindowPostBaseFeeNoBurn(t *testing.T) {
	if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" {
		t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
	}

	kit.QuietMiningLogs()

	var (
		blocktime = 2 * time.Millisecond
		nSectors  = 10
	)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	och := build.UpgradeClausHeight
	build.UpgradeClausHeight = 10

	n, sn := kit.MockMinerBuilder(t, kit.DefaultFullOpts(1), kit.OneMiner)
	client := n[0].FullNode.(*impl.FullNodeAPI)
	miner := sn[0]
	bm := kit.ConnectAndStartMining(t, blocktime, miner, client)
	t.Cleanup(bm.Stop)

	maddr, err := miner.ActorAddress(ctx)
	require.NoError(t, err)

	mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)

	kit.PledgeSectors(t, ctx, miner, nSectors, 0, nil)
	wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
	require.NoError(t, err)
	en := wact.Nonce

	// Wait for a new message to be sent from the worker address; it will be a PoSt.

waitForProof:
	for {
		wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
		require.NoError(t, err)
		if wact.Nonce > en {
			break waitForProof
		}

		build.Clock.Sleep(blocktime)
	}

	slm, err := client.StateListMessages(ctx, &api.MessageMatch{To: maddr}, types.EmptyTSK, 0)
	require.NoError(t, err)

	pmr, err := client.StateReplay(ctx, types.EmptyTSK, slm[0])
	require.NoError(t, err)

	require.Equal(t, pmr.GasCost.BaseFeeBurn, big.Zero())

	build.UpgradeClausHeight = och
}

func TestWindowPostBaseFeeBurn(t *testing.T) {
	if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" {
|
||||
t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
|
||||
}
|
||||
|
||||
kit.QuietMiningLogs()
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
blocktime := 2 * time.Millisecond
|
||||
|
||||
n, sn := kit.MockMinerBuilder(t, []kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(-1)}, kit.OneMiner)
|
||||
client := n[0].FullNode.(*impl.FullNodeAPI)
|
||||
miner := sn[0]
|
||||
bm := kit.ConnectAndStartMining(t, blocktime, miner, client)
|
||||
t.Cleanup(bm.Stop)
|
||||
|
||||
maddr, err := miner.ActorAddress(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
|
||||
kit.PledgeSectors(t, ctx, miner, 10, 0, nil)
|
||||
wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
en := wact.Nonce
|
||||
|
||||
// wait for a new message to be sent from worker address, it will be a PoSt
|
||||
|
||||
waitForProof:
|
||||
for {
|
||||
wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
if wact.Nonce > en {
|
||||
break waitForProof
|
||||
}
|
||||
|
||||
build.Clock.Sleep(blocktime)
|
||||
}
|
||||
|
||||
slm, err := client.StateListMessages(ctx, &api.MessageMatch{To: maddr}, types.EmptyTSK, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
pmr, err := client.StateReplay(ctx, types.EmptyTSK, slm[0])
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NotEqual(t, pmr.GasCost.BaseFeeBurn, big.Zero())
|
||||
}
|
@ -6,6 +6,8 @@ import (

	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
	miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"

	"github.com/filecoin-project/lotus/chain/types"
@ -102,7 +104,7 @@ type SealingConfig struct {
	MinPreCommitBatch int
	// how long to wait before submitting a batch after crossing the minimum batch size
	PreCommitBatchWait Duration
	// time buffer for forceful batch submission before sectors in batch would start expiring
	// time buffer for forceful batch submission before sectors/deals in batch would start expiring
	PreCommitBatchSlack Duration

	// enable / disable commit aggregation (takes effect after nv13)
@ -112,7 +114,7 @@ type SealingConfig struct {
	MaxCommitBatch int
	// how long to wait before submitting a batch after crossing the minimum batch size
	CommitBatchWait Duration
	// time buffer for forceful batch submission before sectors in batch would start expiring
	// time buffer for forceful batch submission before sectors/deals in batch would start expiring
	CommitBatchSlack Duration

	TerminateBatchMax uint64
@ -125,9 +127,23 @@ type SealingConfig struct {
	// todo TargetSectors - stop auto-pledging new sectors after this many sectors are sealed; default to CC upgrade for deal sectors if above
}

type BatchFeeConfig struct {
	Base      types.FIL
	PerSector types.FIL
}

func (b *BatchFeeConfig) FeeForSectors(nSectors int) abi.TokenAmount {
	return big.Add(big.Int(b.Base), big.Mul(big.NewInt(int64(nSectors)), big.Int(b.PerSector)))
}
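
The batch-fee cap computed by FeeForSectors above follows maxBatchFee = maxBase + maxPerSector * nSectors. A minimal standalone sketch of the same arithmetic, using the 0.05 FIL defaults set later in this diff; the helper name and the attoFIL constants are assumptions for illustration, not code from this change:

package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/big"
)

// feeForSectors mirrors BatchFeeConfig.FeeForSectors: base + perSector * n.
func feeForSectors(base, perSector big.Int, nSectors int) big.Int {
	return big.Add(base, big.Mul(big.NewInt(int64(nSectors)), perSector))
}

func main() {
	// Assumption: 0.05 FIL expressed in attoFIL (1 FIL = 10^18 attoFIL).
	base := big.NewInt(50_000_000_000_000_000)
	perSector := big.NewInt(50_000_000_000_000_000)

	// A batch of 10 commits is capped at 0.05 + 10*0.05 = 0.55 FIL.
	fmt.Println(feeForSectors(base, perSector, 10)) // 550000000000000000 attoFIL
}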

type MinerFeeConfig struct {
	MaxPreCommitGasFee types.FIL
	MaxCommitGasFee    types.FIL

	// maxBatchFee = maxBase + maxPerSector * nSectors
	MaxPreCommitBatchGasFee BatchFeeConfig
	MaxCommitBatchGasFee    BatchFeeConfig

	MaxTerminateGasFee  types.FIL
	MaxWindowPoStGasFee types.FIL
	MaxPublishDealsFee  types.FIL
@ -276,16 +292,16 @@ func DefaultStorageMiner() *StorageMiner {
		AlwaysKeepUnsealedCopy: true,

		BatchPreCommits: true,
		MinPreCommitBatch:   1,                                  // we must have at least one proof to aggregate
		MaxPreCommitBatch:   miner5.PreCommitSectorBatchMaxSize, //
		PreCommitBatchWait:  Duration(24 * time.Hour),           // this can be up to 6 days
		PreCommitBatchSlack: Duration(3 * time.Hour),
		MinPreCommitBatch:   1,                                  // we must have at least one precommit to batch
		MaxPreCommitBatch:   miner5.PreCommitSectorBatchMaxSize, // up to 256 sectors
		PreCommitBatchWait:  Duration(24 * time.Hour),           // this should be less than 31.5 hours, which is the expiration of a precommit ticket
		PreCommitBatchSlack: Duration(3 * time.Hour),            // time buffer for forceful batch submission before sectors/deals in batch would start expiring; a higher value lowers the chance of message failure due to expiration

		AggregateCommits: true,
		MinCommitBatch:   miner5.MinAggregatedSectors, // we must have at least four proofs to aggregate
		MaxCommitBatch:   miner5.MaxAggregatedSectors, // this is the maximum aggregation per FIP13
		CommitBatchWait:  Duration(24 * time.Hour),    // this can be up to 6 days
		CommitBatchSlack: Duration(1 * time.Hour),
		MinCommitBatch:   miner5.MinAggregatedSectors, // per FIP13, we must have at least four proofs to aggregate; 4 is the crossover point where aggregation wins out on single provecommit gas costs
		MaxCommitBatch:   miner5.MaxAggregatedSectors, // maximum 819 sectors, the maximum aggregation per FIP13
		CommitBatchWait:  Duration(24 * time.Hour),    // this can be up to 30 days
		CommitBatchSlack: Duration(1 * time.Hour),     // time buffer for forceful batch submission before sectors/deals in batch would start expiring; a higher value lowers the chance of message failure due to expiration

		TerminateBatchMin: 1,
		TerminateBatchMax: 100,
@ -327,8 +343,18 @@ func DefaultStorageMiner() *StorageMiner {
		},

		Fees: MinerFeeConfig{
			MaxPreCommitGasFee: types.MustParseFIL("0.025"),
			MaxCommitGasFee:    types.MustParseFIL("0.05"),

			MaxPreCommitBatchGasFee: BatchFeeConfig{
				Base:      types.MustParseFIL("0.025"), // TODO: update before v1.10.0
				PerSector: types.MustParseFIL("0.025"), // TODO: update before v1.10.0
			},
			MaxCommitBatchGasFee: BatchFeeConfig{
				Base:      types.MustParseFIL("0.05"), // TODO: update before v1.10.0
				PerSector: types.MustParseFIL("0.05"), // TODO: update before v1.10.0
			},

			MaxTerminateGasFee:  types.MustParseFIL("0.5"),
			MaxWindowPoStGasFee: types.MustParseFIL("5"),
			MaxPublishDealsFee:  types.MustParseFIL("0.05"),
@ -5,7 +5,7 @@ import (
	"time"

	"github.com/filecoin-project/go-state-types/abi"
	xerrors "golang.org/x/xerrors"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-state-types/big"
	"github.com/ipfs/go-cid"
@ -13,7 +13,7 @@ import (
	"github.com/libp2p/go-libp2p-core/host"
	inet "github.com/libp2p/go-libp2p-core/network"
	"github.com/libp2p/go-libp2p-core/peer"
	protocol "github.com/libp2p/go-libp2p-core/protocol"
	"github.com/libp2p/go-libp2p-core/protocol"

	cborutil "github.com/filecoin-project/go-cbor-util"
	"github.com/filecoin-project/lotus/build"
@ -23,6 +23,8 @@ import (
	"github.com/filecoin-project/lotus/lib/peermgr"
)

// TODO(TEST): missing test coverage.

const ProtocolID = "/fil/hello/1.0.0"

var log = logging.Logger("hello")
@ -33,12 +35,14 @@ type HelloMessage struct {
	HeaviestTipSetWeight big.Int
	GenesisHash          cid.Cid
}

type LatencyMessage struct {
	TArrival int64
	TSent    int64
}

type NewStreamFunc func(context.Context, peer.ID, ...protocol.ID) (inet.Stream, error)

type Service struct {
	h host.Host

@ -62,7 +66,6 @@ func NewHelloService(h host.Host, cs *store.ChainStore, syncer *chain.Syncer, pm
}

func (hs *Service) HandleStream(s inet.Stream) {

	var hmsg HelloMessage
	if err := cborutil.ReadCborRPC(s, &hmsg); err != nil {
		log.Infow("failed to read hello message, disconnecting", "error", err)
@ -121,7 +124,6 @@ func (hs *Service) HandleStream(s inet.Stream) {
	log.Debugf("Got new tipset through Hello: %s from %s", ts.Cids(), s.Conn().RemotePeer())
	hs.syncer.InformNewHead(s.Conn().RemotePeer(), ts)
}

}

func (hs *Service) SayHello(ctx context.Context, pid peer.ID) error {
@ -1,315 +0,0 @@
package node_test

import (
	"os"
	"testing"
	"time"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/lotus/api/test"
	"github.com/filecoin-project/lotus/chain/actors/policy"
	"github.com/filecoin-project/lotus/lib/lotuslog"
	builder "github.com/filecoin-project/lotus/node/test"
	logging "github.com/ipfs/go-log/v2"
)

func init() {
	_ = logging.SetLogLevel("*", "INFO")

	policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
	policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
	policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
}

func TestAPI(t *testing.T) {
	test.TestApis(t, builder.Builder)
}

func TestAPIRPC(t *testing.T) {
	test.TestApis(t, builder.RPCBuilder)
}

func TestAPIDealFlow(t *testing.T) {
	logging.SetLogLevel("miner", "ERROR")
	logging.SetLogLevel("chainstore", "ERROR")
	logging.SetLogLevel("chain", "ERROR")
	logging.SetLogLevel("sub", "ERROR")
	logging.SetLogLevel("storageminer", "ERROR")

	blockTime := 10 * time.Millisecond

	// For these tests where the block time is artificially short, just use
	// a deal start epoch that is guaranteed to be far enough in the future
	// so that the deal starts sealing in time
	dealStartEpoch := abi.ChainEpoch(2 << 12)

	t.Run("TestDealFlow", func(t *testing.T) {
		test.TestDealFlow(t, builder.MockSbBuilder, blockTime, false, false, dealStartEpoch)
	})
	t.Run("WithExportedCAR", func(t *testing.T) {
		test.TestDealFlow(t, builder.MockSbBuilder, blockTime, true, false, dealStartEpoch)
	})
	t.Run("TestDoubleDealFlow", func(t *testing.T) {
		test.TestDoubleDealFlow(t, builder.MockSbBuilder, blockTime, dealStartEpoch)
	})
	t.Run("TestFastRetrievalDealFlow", func(t *testing.T) {
		test.TestFastRetrievalDealFlow(t, builder.MockSbBuilder, blockTime, dealStartEpoch)
	})
	t.Run("TestPublishDealsBatching", func(t *testing.T) {
		test.TestPublishDealsBatching(t, builder.MockSbBuilder, blockTime, dealStartEpoch)
	})
	t.Run("TestOfflineDealFlow", func(t *testing.T) {
		test.TestOfflineDealFlow(t, builder.MockSbBuilder, blockTime, dealStartEpoch, false)
	})
	t.Run("TestOfflineDealFlowFastRetrieval", func(t *testing.T) {
		test.TestOfflineDealFlow(t, builder.MockSbBuilder, blockTime, dealStartEpoch, true)
	})
}

func TestBatchDealInput(t *testing.T) {
	logging.SetLogLevel("miner", "ERROR")
	logging.SetLogLevel("chainstore", "ERROR")
	logging.SetLogLevel("chain", "ERROR")
	logging.SetLogLevel("sub", "ERROR")
	logging.SetLogLevel("storageminer", "ERROR")
	logging.SetLogLevel("sectors", "DEBUG")

	blockTime := 10 * time.Millisecond

	// For these tests where the block time is artificially short, just use
	// a deal start epoch that is guaranteed to be far enough in the future
	// so that the deal starts sealing in time
	dealStartEpoch := abi.ChainEpoch(2 << 12)

	test.TestBatchDealInput(t, builder.MockSbBuilder, blockTime, dealStartEpoch)
}

func TestAPIDealFlowReal(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode")
	}
	lotuslog.SetupLogLevels()
	logging.SetLogLevel("miner", "ERROR")
	logging.SetLogLevel("chainstore", "ERROR")
	logging.SetLogLevel("chain", "ERROR")
	logging.SetLogLevel("sub", "ERROR")
	logging.SetLogLevel("storageminer", "ERROR")

	// TODO: just set this globally?
	oldDelay := policy.GetPreCommitChallengeDelay()
	policy.SetPreCommitChallengeDelay(5)
	t.Cleanup(func() {
		policy.SetPreCommitChallengeDelay(oldDelay)
	})

	t.Run("basic", func(t *testing.T) {
		test.TestDealFlow(t, builder.Builder, time.Second, false, false, 0)
	})

	t.Run("fast-retrieval", func(t *testing.T) {
		test.TestDealFlow(t, builder.Builder, time.Second, false, true, 0)
	})

	t.Run("retrieval-second", func(t *testing.T) {
		test.TestSecondDealRetrieval(t, builder.Builder, time.Second)
	})
}

func TestDealMining(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode")
	}

	logging.SetLogLevel("miner", "ERROR")
	logging.SetLogLevel("chainstore", "ERROR")
	logging.SetLogLevel("chain", "ERROR")
	logging.SetLogLevel("sub", "ERROR")
	logging.SetLogLevel("storageminer", "ERROR")

	test.TestDealMining(t, builder.MockSbBuilder, 50*time.Millisecond, false)
}

func TestSDRUpgrade(t *testing.T) {
	logging.SetLogLevel("miner", "ERROR")
	logging.SetLogLevel("chainstore", "ERROR")
	logging.SetLogLevel("chain", "ERROR")
	logging.SetLogLevel("sub", "ERROR")
	logging.SetLogLevel("storageminer", "ERROR")

	oldDelay := policy.GetPreCommitChallengeDelay()
	policy.SetPreCommitChallengeDelay(5)
	t.Cleanup(func() {
		policy.SetPreCommitChallengeDelay(oldDelay)
	})

	test.TestSDRUpgrade(t, builder.MockSbBuilder, 50*time.Millisecond)
}

func TestPledgeSectors(t *testing.T) {
	logging.SetLogLevel("miner", "ERROR")
	logging.SetLogLevel("chainstore", "ERROR")
	logging.SetLogLevel("chain", "ERROR")
	logging.SetLogLevel("sub", "ERROR")
	logging.SetLogLevel("storageminer", "ERROR")

	t.Run("1", func(t *testing.T) {
		test.TestPledgeSector(t, builder.MockSbBuilder, 50*time.Millisecond, 1)
	})

	t.Run("100", func(t *testing.T) {
		test.TestPledgeSector(t, builder.MockSbBuilder, 50*time.Millisecond, 100)
	})

	t.Run("1000", func(t *testing.T) {
		if testing.Short() { // takes ~16s
			t.Skip("skipping test in short mode")
		}

		test.TestPledgeSector(t, builder.MockSbBuilder, 50*time.Millisecond, 1000)
	})
}

func TestPledgeBatching(t *testing.T) {
	t.Run("100", func(t *testing.T) {
		test.TestPledgeBatching(t, builder.MockSbBuilder, 50*time.Millisecond, 100)
	})
	t.Run("100-before-nv13", func(t *testing.T) {
		test.TestPledgeBeforeNv13(t, builder.MockSbBuilder, 50*time.Millisecond, 100)
	})
}

func TestTapeFix(t *testing.T) {
	logging.SetLogLevel("miner", "ERROR")
	logging.SetLogLevel("chainstore", "ERROR")
	logging.SetLogLevel("chain", "ERROR")
	logging.SetLogLevel("sub", "ERROR")
	logging.SetLogLevel("storageminer", "ERROR")

	test.TestTapeFix(t, builder.MockSbBuilder, 2*time.Millisecond)
}

func TestWindowedPost(t *testing.T) {
	if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" {
		t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
	}

	logging.SetLogLevel("miner", "ERROR")
	logging.SetLogLevel("gen", "ERROR")
	logging.SetLogLevel("chainstore", "ERROR")
	logging.SetLogLevel("chain", "ERROR")
	logging.SetLogLevel("sub", "ERROR")
	logging.SetLogLevel("storageminer", "ERROR")

	test.TestWindowPost(t, builder.MockSbBuilder, 2*time.Millisecond, 10)
}

func TestTerminate(t *testing.T) {
	if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" {
		t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
	}

	logging.SetLogLevel("miner", "ERROR")
	logging.SetLogLevel("chainstore", "ERROR")
	logging.SetLogLevel("chain", "ERROR")
	logging.SetLogLevel("sub", "ERROR")
	logging.SetLogLevel("storageminer", "ERROR")

	test.TestTerminate(t, builder.MockSbBuilder, 2*time.Millisecond)
}

func TestCCUpgrade(t *testing.T) {
	logging.SetLogLevel("miner", "ERROR")
	logging.SetLogLevel("chainstore", "ERROR")
	logging.SetLogLevel("chain", "ERROR")
	logging.SetLogLevel("sub", "ERROR")
	logging.SetLogLevel("storageminer", "ERROR")

	test.TestCCUpgrade(t, builder.MockSbBuilder, 5*time.Millisecond)
}

func TestPaymentChannels(t *testing.T) {
	logging.SetLogLevel("miner", "ERROR")
	logging.SetLogLevel("chainstore", "ERROR")
	logging.SetLogLevel("chain", "ERROR")
	logging.SetLogLevel("sub", "ERROR")
	logging.SetLogLevel("pubsub", "ERROR")
	logging.SetLogLevel("storageminer", "ERROR")

	test.TestPaymentChannels(t, builder.MockSbBuilder, 5*time.Millisecond)
}

func TestWindowPostDispute(t *testing.T) {
	if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" {
		t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
	}
	logging.SetLogLevel("miner", "ERROR")
	logging.SetLogLevel("gen", "ERROR")
	logging.SetLogLevel("chainstore", "ERROR")
	logging.SetLogLevel("chain", "ERROR")
	logging.SetLogLevel("sub", "ERROR")
	logging.SetLogLevel("storageminer", "ERROR")

	test.TestWindowPostDispute(t, builder.MockSbBuilder, 2*time.Millisecond)
}

func TestWindowPostDisputeFails(t *testing.T) {
	if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" {
		t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
	}
	logging.SetLogLevel("miner", "ERROR")
	logging.SetLogLevel("gen", "ERROR")
	logging.SetLogLevel("chainstore", "ERROR")
	logging.SetLogLevel("chain", "ERROR")
	logging.SetLogLevel("sub", "ERROR")
	logging.SetLogLevel("storageminer", "ERROR")

	test.TestWindowPostDisputeFails(t, builder.MockSbBuilder, 2*time.Millisecond)
}

func TestWindowPostBaseFeeNoBurn(t *testing.T) {
	if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" {
		t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
	}
	logging.SetLogLevel("miner", "ERROR")
	logging.SetLogLevel("gen", "ERROR")
	logging.SetLogLevel("chainstore", "ERROR")
	logging.SetLogLevel("chain", "ERROR")
	logging.SetLogLevel("sub", "ERROR")
	logging.SetLogLevel("storageminer", "ERROR")

	test.TestWindowPostBaseFeeNoBurn(t, builder.MockSbBuilder, 2*time.Millisecond)
}

func TestWindowPostBaseFeeBurn(t *testing.T) {
	if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" {
		t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
	}
	logging.SetLogLevel("miner", "ERROR")
	logging.SetLogLevel("gen", "ERROR")
	logging.SetLogLevel("chainstore", "ERROR")
	logging.SetLogLevel("chain", "ERROR")
	logging.SetLogLevel("sub", "ERROR")
	logging.SetLogLevel("storageminer", "ERROR")

	test.TestWindowPostBaseFeeBurn(t, builder.MockSbBuilder, 2*time.Millisecond)
}

func TestDeadlineToggling(t *testing.T) {
	if os.Getenv("LOTUS_TEST_DEADLINE_TOGGLING") != "1" {
		t.Skip("this takes a few minutes, set LOTUS_TEST_DEADLINE_TOGGLING=1 to run")
	}
	logging.SetLogLevel("miner", "ERROR")
	logging.SetLogLevel("gen", "ERROR")
	logging.SetLogLevel("chainstore", "ERROR")
	logging.SetLogLevel("chain", "ERROR")
	logging.SetLogLevel("sub", "ERROR")
	logging.SetLogLevel("storageminer", "FATAL")

	test.TestDeadlineToggling(t, builder.MockSbBuilder, 2*time.Millisecond)
}

func TestVerifiedClientTopUp(t *testing.T) {
	logging.SetLogLevel("storageminer", "FATAL")
	logging.SetLogLevel("chain", "ERROR")
	test.AddVerifiedClient(t, builder.MockSbBuilder)
}

node/rpc.go (new file, 196 lines)
@ -0,0 +1,196 @@
package node

import (
	"context"
	"encoding/json"
	"net"
	"net/http"
	_ "net/http/pprof"
	"runtime"
	"strconv"

	"github.com/gorilla/mux"
	"github.com/ipfs/go-cid"
	logging "github.com/ipfs/go-log/v2"
	"github.com/multiformats/go-multiaddr"
	manet "github.com/multiformats/go-multiaddr/net"
	"go.opencensus.io/tag"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-jsonrpc"
	"github.com/filecoin-project/go-jsonrpc/auth"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/api/v0api"
	"github.com/filecoin-project/lotus/api/v1api"
	"github.com/filecoin-project/lotus/lib/rpcenc"
	"github.com/filecoin-project/lotus/metrics"
	"github.com/filecoin-project/lotus/node/impl"
)

var rpclog = logging.Logger("rpc")

// ServeRPC serves an HTTP handler over the supplied listen multiaddr.
//
// This function spawns a goroutine to run the server, and returns immediately.
// It returns the stop function to be called to terminate the endpoint.
//
// The supplied ID is used in tracing, by inserting a tag in the context.
func ServeRPC(h http.Handler, id string, addr multiaddr.Multiaddr) (StopFunc, error) {
	// Start listening to the addr; if invalid or occupied, we will fail early.
	lst, err := manet.Listen(addr)
	if err != nil {
		return nil, xerrors.Errorf("could not listen: %w", err)
	}

	// Instantiate the server and start listening.
	srv := &http.Server{
		Handler: h,
		BaseContext: func(listener net.Listener) context.Context {
			ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, id))
			return ctx
		},
	}

	go func() {
		err = srv.Serve(manet.NetListener(lst))
		if err != http.ErrServerClosed {
			rpclog.Warnf("rpc server failed: %s", err)
		}
	}()

	return srv.Shutdown, err
}

// FullNodeHandler returns a full node handler, to be mounted as-is on the server.
func FullNodeHandler(a v1api.FullNode, permissioned bool, opts ...jsonrpc.ServerOption) (http.Handler, error) {
	m := mux.NewRouter()

	serveRpc := func(path string, hnd interface{}) {
		rpcServer := jsonrpc.NewServer(opts...)
		rpcServer.Register("Filecoin", hnd)

		var handler http.Handler = rpcServer
		if permissioned {
			handler = &auth.Handler{Verify: a.AuthVerify, Next: rpcServer.ServeHTTP}
		}

		m.Handle(path, handler)
	}

	fnapi := metrics.MetricedFullAPI(a)
	if permissioned {
		fnapi = api.PermissionedFullAPI(fnapi)
	}

	serveRpc("/rpc/v1", fnapi)
	serveRpc("/rpc/v0", &v0api.WrapperV1Full{FullNode: fnapi})

	// Import handler
	handleImportFunc := handleImport(a.(*impl.FullNodeAPI))
	if permissioned {
		importAH := &auth.Handler{
			Verify: a.AuthVerify,
			Next:   handleImportFunc,
		}
		m.Handle("/rest/v0/import", importAH)
	} else {
		m.HandleFunc("/rest/v0/import", handleImportFunc)
	}

	// debugging
	m.Handle("/debug/metrics", metrics.Exporter())
	m.Handle("/debug/pprof-set/block", handleFractionOpt("BlockProfileRate", runtime.SetBlockProfileRate))
	m.Handle("/debug/pprof-set/mutex", handleFractionOpt("MutexProfileFraction", func(x int) {
		runtime.SetMutexProfileFraction(x)
	}))
	m.PathPrefix("/").Handler(http.DefaultServeMux) // pprof

	return m, nil
}
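
A minimal sketch of how ServeRPC and FullNodeHandler above are meant to compose; this is not code from this change, and the function name, listen address, and the origin of the v1api.FullNode instance are all assumptions:

package example

import (
	"github.com/multiformats/go-multiaddr"

	"github.com/filecoin-project/lotus/api/v1api"
	"github.com/filecoin-project/lotus/node"
)

// serveFullNode mounts the full-node handler and serves it on a multiaddr;
// the returned StopFunc takes a context and shuts the server down.
func serveFullNode(fn v1api.FullNode) (node.StopFunc, error) {
	handler, err := node.FullNodeHandler(fn, true) // permissioned: requests are auth-verified
	if err != nil {
		return nil, err
	}

	addr, err := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/1234/http")
	if err != nil {
		return nil, err
	}

	return node.ServeRPC(handler, "lotus-daemon", addr)
}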

// MinerHandler returns a miner handler, to be mounted as-is on the server.
func MinerHandler(a api.StorageMiner, permissioned bool) (http.Handler, error) {
	m := mux.NewRouter()

	mapi := metrics.MetricedStorMinerAPI(a)
	if permissioned {
		mapi = api.PermissionedStorMinerAPI(mapi)
	}

	readerHandler, readerServerOpt := rpcenc.ReaderParamDecoder()
	rpcServer := jsonrpc.NewServer(readerServerOpt)
	rpcServer.Register("Filecoin", mapi)

	m.Handle("/rpc/v0", rpcServer)
	m.Handle("/rpc/streams/v0/push/{uuid}", readerHandler)
	m.PathPrefix("/remote").HandlerFunc(a.(*impl.StorageMinerAPI).ServeRemote)

	// debugging
	m.Handle("/debug/metrics", metrics.Exporter())
	m.PathPrefix("/").Handler(http.DefaultServeMux) // pprof

	if !permissioned {
		return rpcServer, nil
	}

	ah := &auth.Handler{
		Verify: a.AuthVerify,
		Next:   m.ServeHTTP,
	}
	return ah, nil
}
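
The miner API mounts the same way; a companion sketch under the same assumptions as the previous one (sm would come from the miner's DI container, and the imports of the earlier sketch plus "github.com/filecoin-project/lotus/api" are reused):

// Sketch only: serve the storage-miner API on its own multiaddr.
func serveMiner(sm api.StorageMiner, addr multiaddr.Multiaddr) (node.StopFunc, error) {
	handler, err := node.MinerHandler(sm, true)
	if err != nil {
		return nil, err
	}
	return node.ServeRPC(handler, "lotus-miner", addr)
}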

func handleImport(a *impl.FullNodeAPI) func(w http.ResponseWriter, r *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		if r.Method != "PUT" {
			w.WriteHeader(404)
			return
		}
		if !auth.HasPerm(r.Context(), nil, api.PermWrite) {
			w.WriteHeader(401)
			_ = json.NewEncoder(w).Encode(struct{ Error string }{"unauthorized: missing write permission"})
			return
		}

		c, err := a.ClientImportLocal(r.Context(), r.Body)
		if err != nil {
			w.WriteHeader(500)
			_ = json.NewEncoder(w).Encode(struct{ Error string }{err.Error()})
			return
		}
		w.WriteHeader(200)
		err = json.NewEncoder(w).Encode(struct{ Cid cid.Cid }{c})
		if err != nil {
			rpclog.Errorf("/rest/v0/import: Writing response failed: %+v", err)
			return
		}
	}
}
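
For reference, a hedged sketch of calling the import endpoint from a client; the listen address and the Bearer-token Authorization header are assumptions inferred from the auth.Handler wiring above:

package example

import (
	"net/http"
	"os"
)

// importFile PUTs a local file to /rest/v0/import; a token carrying write
// permission is required when the handler is mounted as permissioned.
func importFile(path, token string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	req, err := http.NewRequest(http.MethodPut, "http://127.0.0.1:1234/rest/v0/import", f)
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", "Bearer "+token) // assumption: JWT auth token

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	return resp.Body.Close()
}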

func handleFractionOpt(name string, setter func(int)) http.HandlerFunc {
	return func(rw http.ResponseWriter, r *http.Request) {
		if r.Method != http.MethodPost {
			http.Error(rw, "only POST allowed", http.StatusMethodNotAllowed)
			return
		}
		if err := r.ParseForm(); err != nil {
			http.Error(rw, err.Error(), http.StatusBadRequest)
			return
		}

		asfr := r.Form.Get("x")
		if len(asfr) == 0 {
			http.Error(rw, "parameter 'x' must be set", http.StatusBadRequest)
			return
		}

		fr, err := strconv.Atoi(asfr)
		if err != nil {
			http.Error(rw, err.Error(), http.StatusBadRequest)
			return
		}
		rpclog.Infof("setting %s to %d", name, fr)
		setter(fr)
	}
}
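
A small sketch of driving the pprof toggles registered above from a client; the listen address is again an assumption:

package example

import (
	"net/http"
	"net/url"
)

// setMutexProfileFraction POSTs x=5 to the mutex toggle mounted at
// /debug/pprof-set/mutex by FullNodeHandler above.
func setMutexProfileFraction() error {
	resp, err := http.PostForm(
		"http://127.0.0.1:1234/debug/pprof-set/mutex",
		url.Values{"x": {"5"}},
	)
	if err != nil {
		return err
	}
	return resp.Body.Close()
}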

node/shutdown.go (new file, 56 lines)
@ -0,0 +1,56 @@
package node

import (
	"context"
	"os"
	"os/signal"
	"syscall"
)

type ShutdownHandler struct {
	Component string
	StopFunc  StopFunc
}

// MonitorShutdown manages shutdown requests, by watching signals and invoking
// the supplied handlers in order.
//
// It watches SIGTERM and SIGINT OS signals, as well as the trigger channel.
// When any of them fire, it calls the supplied handlers in order. If any of
// them errors, it merely logs the error.
//
// Once the shutdown has completed, it closes the returned channel. The caller
// can watch this channel to know when the shutdown has finished.
func MonitorShutdown(triggerCh <-chan struct{}, handlers ...ShutdownHandler) <-chan struct{} {
	sigCh := make(chan os.Signal, 2)
	out := make(chan struct{})

	go func() {
		select {
		case sig := <-sigCh:
			log.Warnw("received shutdown", "signal", sig)
		case <-triggerCh:
			log.Warn("received shutdown")
		}

		log.Warn("Shutting down...")

		// Call all the handlers, logging on failure and success.
		for _, h := range handlers {
			if err := h.StopFunc(context.TODO()); err != nil {
				log.Errorf("shutting down %s failed: %s", h.Component, err)
				continue
			}
			log.Infof("%s shut down successfully", h.Component)
		}

		log.Warn("Graceful shutdown successful")

		// Sync all loggers.
		_ = log.Sync() //nolint:errcheck
		close(out)
	}()

	signal.Notify(sigCh, syscall.SIGTERM, syscall.SIGINT)
	return out
}
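
A sketch of how MonitorShutdown is meant to compose with the StopFunc values returned by ServeRPC; the handler names and the ordering are assumptions, not code from this diff:

// Sketch only (assumed names): stop the RPC server before the node itself,
// then block until every handler has run. rpcStop and stopNode are StopFuncs
// obtained elsewhere (e.g. from ServeRPC and the node builder).
func shutdownExample(rpcStop, stopNode StopFunc) {
	shutdownCh := make(chan struct{})
	finished := MonitorShutdown(shutdownCh,
		ShutdownHandler{Component: "rpc server", StopFunc: rpcStop},
		ShutdownHandler{Component: "node", StopFunc: stopNode},
	)
	close(shutdownCh) // or let SIGTERM/SIGINT trigger the shutdown instead
	<-finished        // closed once all handlers have completed
}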

node/shutdown_test.go (new file, 36 lines)
@ -0,0 +1,36 @@
package node

import (
	"context"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

func TestMonitorShutdown(t *testing.T) {
	signalCh := make(chan struct{})

	// Three shutdown handlers.
	var wg sync.WaitGroup
	wg.Add(3)
	h := ShutdownHandler{
		Component: "handler",
		StopFunc: func(_ context.Context) error {
			wg.Done()
			return nil
		},
	}

	finishCh := MonitorShutdown(signalCh, h, h, h)

	// Nothing here after 10ms.
	time.Sleep(10 * time.Millisecond)
	require.Len(t, finishCh, 0)

	// Now trigger the shutdown.
	close(signalCh)
	wg.Wait()
	<-finishCh
}
@ -96,6 +96,7 @@ func (pcs *paymentChannelSettler) messageHandler(msg *types.Message, rec *types.
	msgLookup, err := pcs.api.StateWaitMsg(pcs.ctx, submitMessageCID, build.MessageConfidence, api.LookbackNoLimit, true)
	if err != nil {
		log.Errorf("submitting voucher: %s", err.Error())
		return
	}
	if msgLookup.Receipt.ExitCode != 0 {
		log.Errorf("failed submitting voucher: %+v", voucher)
@ -360,6 +360,20 @@ func (s SealingAPIAdapter) ChainHead(ctx context.Context) (sealing.TipSetToken,
	return head.Key().Bytes(), head.Height(), nil
}

func (s SealingAPIAdapter) ChainBaseFee(ctx context.Context, tok sealing.TipSetToken) (abi.TokenAmount, error) {
	tsk, err := types.TipSetKeyFromBytes(tok)
	if err != nil {
		return big.Zero(), err
	}

	ts, err := s.delegate.ChainGetTipSet(ctx, tsk)
	if err != nil {
		return big.Zero(), err
	}

	return ts.Blocks()[0].ParentBaseFee, nil
}

func (s SealingAPIAdapter) ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) {
	return s.delegate.ChainGetMessage(ctx, mc)
}
@ -171,12 +171,6 @@ func (m *Miner) Run(ctx context.Context) error {
		return xerrors.Errorf("getting miner info: %w", err)
	}

	fc := sealing.FeeConfig{
		MaxPreCommitGasFee: abi.TokenAmount(m.feeCfg.MaxPreCommitGasFee),
		MaxCommitGasFee:    abi.TokenAmount(m.feeCfg.MaxCommitGasFee),
		MaxTerminateGasFee: abi.TokenAmount(m.feeCfg.MaxTerminateGasFee),
	}

	var (
		// consumer of chain head changes.
		evts = events.NewEvents(ctx, m.api)
@ -205,7 +199,7 @@ func (m *Miner) Run(ctx context.Context) error {
	)

	// Instantiate the sealing FSM.
	m.sealing = sealing.New(adaptedAPI, fc, evtsAdapter, m.maddr, m.ds, m.sealer, m.sc, m.verif, m.prover, &pcp, cfg, m.handleSealingNotifications, as)
	m.sealing = sealing.New(adaptedAPI, m.feeCfg, evtsAdapter, m.maddr, m.ds, m.sealer, m.sc, m.verif, m.prover, &pcp, cfg, m.handleSealingNotifications, as)

	// Run the sealing FSM.
	go m.sealing.Run(ctx) //nolint:errcheck // logged inside the function