merge master

aarshkshah1992 2021-06-11 09:35:20 +05:30
commit fed5afa704
91 changed files with 4720 additions and 4334 deletions

View File

@ -21,7 +21,7 @@ jobs:
steps:
- uses: actions/checkout@v2
- name: testground run
uses: coryschwartz/testground-github-action@v1.0
uses: coryschwartz/testground-github-action@v1.1
with:
backend_addr: ${{ matrix.backend_addr }}
backend_proto: ${{ matrix.backend_proto }}

View File

@ -70,143 +70,6 @@ This is an optional Lotus release that introduces various improvements to the se
- fix health report (https://github.com/filecoin-project/lotus/pull/6011)
- fix(ci): Use recent ubuntu LTS release; Update release params (https://github.com/filecoin-project/lotus/pull/6011)
# 1.9.0-rc4 / 2021-05-13
This is an optional Lotus release that introduces various improvements to the sealing, mining, and deal-making processes.
## Highlights
- OpenRPC Support (https://github.com/filecoin-project/lotus/pull/5843)
- Take latency into account when making interactive deals (https://github.com/filecoin-project/lotus/pull/5876)
- Update go-commp-utils for >10x faster client commp calculation (https://github.com/filecoin-project/lotus/pull/5892)
- add `lotus client cancel-retrieval` cmd to lotus CLI (https://github.com/filecoin-project/lotus/pull/5871)
- add `inspect-deal` command to `lotus client` (https://github.com/filecoin-project/lotus/pull/5833)
- Local retrieval support (https://github.com/filecoin-project/lotus/pull/5917)
- go-fil-markets v1.1.9 -> v1.2.5
- For a detailed changelog see https://github.com/filecoin-project/go-fil-markets/blob/master/CHANGELOG.md
- rust-fil-proofs v5.4.1 -> v7.0.1
- For a detailed changelog see https://github.com/filecoin-project/rust-fil-proofs/blob/master/CHANGELOG.md
## Changes
- storagefsm: Apply global events even in broken states (https://github.com/filecoin-project/lotus/pull/5962)
- Default the AlwaysKeepUnsealedCopy flag to true (https://github.com/filecoin-project/lotus/pull/5743)
- splitstore: compact hotstore prior to garbage collection (https://github.com/filecoin-project/lotus/pull/5778)
- ipfs-force bootstrapper update (https://github.com/filecoin-project/lotus/pull/5799)
- better logging when unsealing fails (https://github.com/filecoin-project/lotus/pull/5851)
- perf: add cache for gas premium estimation (https://github.com/filecoin-project/lotus/pull/5709)
- backupds: Compact log on restart (https://github.com/filecoin-project/lotus/pull/5875)
- backupds: Improve truncated log handling (https://github.com/filecoin-project/lotus/pull/5891)
- State CLI improvements
- API proxy struct codegen (https://github.com/filecoin-project/lotus/pull/5854)
- move DI stuff for paychmgr into modules (https://github.com/filecoin-project/lotus/pull/5791)
- Implement Event observer and Settings for 3rd party dep injection (https://github.com/filecoin-project/lotus/pull/5693)
- Export developer and network commands for consumption by derivatives of Lotus (https://github.com/filecoin-project/lotus/pull/5864)
- mock sealer: Simulate randomness side effects (https://github.com/filecoin-project/lotus/pull/5805)
- localstorage: Demote reservation stat error to debug (https://github.com/filecoin-project/lotus/pull/5976)
- shed command to unpack miner info dumps (https://github.com/filecoin-project/lotus/pull/5800)
- Add two utils to Lotus-shed (https://github.com/filecoin-project/lotus/pull/5867)
- add shed election estimate command (https://github.com/filecoin-project/lotus/pull/5092)
- Add --actor flag in lotus-shed sectors terminate (https://github.com/filecoin-project/lotus/pull/5819)
- Move lotus mpool clear to lotus-shed (https://github.com/filecoin-project/lotus/pull/5900)
- Centralize everything on ipfs/go-log/v2 (https://github.com/filecoin-project/lotus/pull/5974)
- expose NextID from nice market actor interface (https://github.com/filecoin-project/lotus/pull/5850)
- add available options for perm on error (https://github.com/filecoin-project/lotus/pull/5814)
- API docs clarification: Document StateSearchMsg replaced message behavior (https://github.com/filecoin-project/lotus/pull/5838)
- api: Document StateReplay replaced message behavior (https://github.com/filecoin-project/lotus/pull/5840)
- add godocs to miner objects (https://github.com/filecoin-project/lotus/pull/2184)
- Add description to the client deal CLI command (https://github.com/filecoin-project/lotus/pull/5999)
- lint: don't skip builtin (https://github.com/filecoin-project/lotus/pull/5881)
- use deal duration from actors (https://github.com/filecoin-project/lotus/pull/5270)
- remote calc winningpost proof (https://github.com/filecoin-project/lotus/pull/5884)
- packer: other network images (https://github.com/filecoin-project/lotus/pull/5930)
- Convert the chainstore lock to RW (https://github.com/filecoin-project/lotus/pull/5971)
- Remove CachedBlockstore (https://github.com/filecoin-project/lotus/pull/5972)
- remove messagepool CapGasFee duplicate code (https://github.com/filecoin-project/lotus/pull/5992)
- Add a mining-heartbeat INFO line at every epoch (https://github.com/filecoin-project/lotus/pull/6183)
- chore(ci): Enable build on RC tags (https://github.com/filecoin-project/lotus/pull/6245)
- Upgrade nerpa to actor v4 and bump the version to rc4 (https://github.com/filecoin-project/lotus/pull/6249)
## Fixes
- return buffers after canceling badger operation (https://github.com/filecoin-project/lotus/pull/5796)
- avoid holding a lock while calling the View callback (https://github.com/filecoin-project/lotus/pull/5792)
- storagefsm: Trigger input processing when below limits (https://github.com/filecoin-project/lotus/pull/5801)
- After importing a previously deleted key, be able to delete it again (https://github.com/filecoin-project/lotus/pull/4653)
- fix StateManager.Replay on reward actor (https://github.com/filecoin-project/lotus/pull/5804)
- make sure atomic 64-bit fields are 64-bit aligned (https://github.com/filecoin-project/lotus/pull/5794)
- Import secp sigs in paych tests (https://github.com/filecoin-project/lotus/pull/5879)
- fix ci build-macos (https://github.com/filecoin-project/lotus/pull/5934)
- Fix creation of remainder account when it's not a multisig (https://github.com/filecoin-project/lotus/pull/5807)
- Fix fallback chainstore (https://github.com/filecoin-project/lotus/pull/6003)
- fix 4857: show help for set-addrs (https://github.com/filecoin-project/lotus/pull/5943)
- fix health report (https://github.com/filecoin-project/lotus/pull/6011)
# 1.9.0-rc2 / 2021-04-30
This is an optional Lotus release that introduces various improvements to the sealing, mining, and deal-making processes.
## Highlights
- OpenRPC Support (https://github.com/filecoin-project/lotus/pull/5843)
- Take latency into account when making interactive deals (https://github.com/filecoin-project/lotus/pull/5876)
- Update go-commp-utils for >10x faster client commp calculation (https://github.com/filecoin-project/lotus/pull/5892)
- add `lotus client cancel-retrieval` cmd to lotus CLI (https://github.com/filecoin-project/lotus/pull/5871)
- add `inspect-deal` command to `lotus client` (https://github.com/filecoin-project/lotus/pull/5833)
- Local retrieval support (https://github.com/filecoin-project/lotus/pull/5917)
- go-fil-markets v1.1.9 -> v1.2.5
- For a detailed changelog see https://github.com/filecoin-project/go-fil-markets/blob/master/CHANGELOG.md
- rust-fil-proofs v5.4.1 -> v7
- For a detailed changelog see https://github.com/filecoin-project/rust-fil-proofs/blob/master/CHANGELOG.md
## Changes
- storagefsm: Apply global events even in broken states (https://github.com/filecoin-project/lotus/pull/5962)
- Default the AlwaysKeepUnsealedCopy flag to true (https://github.com/filecoin-project/lotus/pull/5743)
- splitstore: compact hotstore prior to garbage collection (https://github.com/filecoin-project/lotus/pull/5778)
- ipfs-force bootstrapper update (https://github.com/filecoin-project/lotus/pull/5799)
- better logging when unsealing fails (https://github.com/filecoin-project/lotus/pull/5851)
- perf: add cache for gas premium estimation (https://github.com/filecoin-project/lotus/pull/5709)
- backupds: Compact log on restart (https://github.com/filecoin-project/lotus/pull/5875)
- backupds: Improve truncated log handling (https://github.com/filecoin-project/lotus/pull/5891)
- State CLI improvements
- API proxy struct codegen (https://github.com/filecoin-project/lotus/pull/5854)
- move DI stuff for paychmgr into modules (https://github.com/filecoin-project/lotus/pull/5791)
- Implement Event observer and Settings for 3rd party dep injection (https://github.com/filecoin-project/lotus/pull/5693)
- Export developer and network commands for consumption by derivatives of Lotus (https://github.com/filecoin-project/lotus/pull/5864)
- mock sealer: Simulate randomness side effects (https://github.com/filecoin-project/lotus/pull/5805)
- localstorage: Demote reservation stat error to debug (https://github.com/filecoin-project/lotus/pull/5976)
- shed command to unpack miner info dumps (https://github.com/filecoin-project/lotus/pull/5800)
- Add two utils to Lotus-shed (https://github.com/filecoin-project/lotus/pull/5867)
- add shed election estimate command (https://github.com/filecoin-project/lotus/pull/5092)
- Add --actor flag in lotus-shed sectors terminate (https://github.com/filecoin-project/lotus/pull/5819)
- Move lotus mpool clear to lotus-shed (https://github.com/filecoin-project/lotus/pull/5900)
- Centralize everything on ipfs/go-log/v2 (https://github.com/filecoin-project/lotus/pull/5974)
- expose NextID from nice market actor interface (https://github.com/filecoin-project/lotus/pull/5850)
- add available options for perm on error (https://github.com/filecoin-project/lotus/pull/5814)
- API docs clarification: Document StateSearchMsg replaced message behavior (https://github.com/filecoin-project/lotus/pull/5838)
- api: Document StateReplay replaced message behavior (https://github.com/filecoin-project/lotus/pull/5840)
- add godocs to miner objects (https://github.com/filecoin-project/lotus/pull/2184)
- Add description to the client deal CLI command (https://github.com/filecoin-project/lotus/pull/5999)
- lint: don't skip builtin (https://github.com/filecoin-project/lotus/pull/5881)
- use deal duration from actors (https://github.com/filecoin-project/lotus/pull/5270)
- remote calc winningpost proof (https://github.com/filecoin-project/lotus/pull/5884)
- packer: other network images (https://github.com/filecoin-project/lotus/pull/5930)
- Convert the chainstore lock to RW (https://github.com/filecoin-project/lotus/pull/5971)
- Remove CachedBlockstore (https://github.com/filecoin-project/lotus/pull/5972)
- remove messagepool CapGasFee duplicate code (https://github.com/filecoin-project/lotus/pull/5992)
## Fixes
- return buffers after canceling badger operation (https://github.com/filecoin-project/lotus/pull/5796)
- avoid holding a lock while calling the View callback (https://github.com/filecoin-project/lotus/pull/5792)
- storagefsm: Trigger input processing when below limits (https://github.com/filecoin-project/lotus/pull/5801)
- After importing a previously deleted key, be able to delete it again (https://github.com/filecoin-project/lotus/pull/4653)
- fix StateManager.Replay on reward actor (https://github.com/filecoin-project/lotus/pull/5804)
- make sure atomic 64-bit fields are 64-bit aligned (https://github.com/filecoin-project/lotus/pull/5794)
- Import secp sigs in paych tests (https://github.com/filecoin-project/lotus/pull/5879)
- fix ci build-macos (https://github.com/filecoin-project/lotus/pull/5934)
- Fix creation of remainder account when it's not a multisig (https://github.com/filecoin-project/lotus/pull/5807)
- Fix fallback chainstore (https://github.com/filecoin-project/lotus/pull/6003)
- fix 4857: show help for set-addrs (https://github.com/filecoin-project/lotus/pull/5943)
- fix health report (https://github.com/filecoin-project/lotus/pull/6011)
# 1.8.0 / 2021-04-05
This is a mandatory release of Lotus that upgrades the network to version 12, which introduces various performance improvements to the cron processing of the power actor. The network will upgrade at height 712320, which is 2021-04-29T06:00:00Z.

View File

@ -1,61 +0,0 @@
package test
import (
"context"
"fmt"
"sync/atomic"
"testing"
"time"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/miner"
)
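// BlockMiner mines blocks at a fixed interval on a background goroutine until
// stopped, injecting any null rounds accumulated in its nulls counter.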
type BlockMiner struct {
ctx context.Context
t *testing.T
miner TestStorageNode
blocktime time.Duration
mine int64
nulls int64
done chan struct{}
}
func NewBlockMiner(ctx context.Context, t *testing.T, miner TestStorageNode, blocktime time.Duration) *BlockMiner {
return &BlockMiner{
ctx: ctx,
t: t,
miner: miner,
blocktime: blocktime,
mine: int64(1),
done: make(chan struct{}),
}
}
func (bm *BlockMiner) MineBlocks() {
time.Sleep(time.Second)
go func() {
defer close(bm.done)
for atomic.LoadInt64(&bm.mine) == 1 {
select {
case <-bm.ctx.Done():
return
case <-time.After(bm.blocktime):
}
nulls := atomic.SwapInt64(&bm.nulls, 0)
if err := bm.miner.MineOne(bm.ctx, miner.MineReq{
InjectNulls: abi.ChainEpoch(nulls),
Done: func(bool, abi.ChainEpoch, error) {},
}); err != nil {
bm.t.Error(err)
}
}
}()
}
func (bm *BlockMiner) Stop() {
atomic.AddInt64(&bm.mine, -1)
fmt.Println("shutting down mining")
<-bm.done
}
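For reference, a minimal sketch (not part of the original file) of how a test can drive BlockMiner, assuming a TestStorageNode obtained from an APIBuilder as elsewhere in this package:

func exampleBlockMining(t *testing.T, miner TestStorageNode) {
	ctx := context.Background()
	// Mine a block roughly every 50ms in the background.
	bm := NewBlockMiner(ctx, t, miner, 50*time.Millisecond)
	bm.MineBlocks()
	// Stop signals shutdown and blocks until the mining goroutine exits.
	defer bm.Stop()
	// ... test logic that needs chain progress goes here ...
}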

View File

@ -1,784 +0,0 @@
package test
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"sort"
"testing"
"time"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
"github.com/ipfs/go-cid"
files "github.com/ipfs/go-ipfs-files"
"github.com/ipld/go-car"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
"github.com/filecoin-project/lotus/chain/types"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
"github.com/filecoin-project/lotus/markets/storageadapter"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/impl"
"github.com/filecoin-project/lotus/node/modules/dtypes"
market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
ipld "github.com/ipfs/go-ipld-format"
dag "github.com/ipfs/go-merkledag"
dstest "github.com/ipfs/go-merkledag/test"
unixfile "github.com/ipfs/go-unixfs/file"
)
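// TestDealFlow runs an end-to-end storage deal followed by a retrieval of the
// same data, optionally exporting the retrieved payload as a CAR file.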
func TestDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, carExport, fastRet bool, startEpoch abi.ChainEpoch) {
s := setupOneClientOneMiner(t, b, blocktime)
defer s.blockMiner.Stop()
MakeDeal(t, s.ctx, 6, s.client, s.miner, carExport, fastRet, startEpoch)
}
func TestDoubleDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
s := setupOneClientOneMiner(t, b, blocktime)
defer s.blockMiner.Stop()
MakeDeal(t, s.ctx, 6, s.client, s.miner, false, false, startEpoch)
MakeDeal(t, s.ctx, 7, s.client, s.miner, false, false, startEpoch)
}
func MakeDeal(t *testing.T, ctx context.Context, rseed int, client api.FullNode, miner TestStorageNode, carExport, fastRet bool, startEpoch abi.ChainEpoch) {
data, info, fcid := mkStorageDeal(t, ctx, rseed, client, miner, carExport, fastRet, startEpoch)
testRetrieval(t, ctx, client, fcid, &info.PieceCID, carExport, data)
}
func mkStorageDeal(t *testing.T, ctx context.Context, rseed int, client api.FullNode, miner TestStorageNode, carExport, fastRet bool, startEpoch abi.ChainEpoch) ([]byte,
*api.DealInfo, cid.Cid) {
res, data, err := CreateClientFile(ctx, client, rseed, 0)
if err != nil {
t.Fatal(err)
}
fcid := res.Root
fmt.Println("FILE CID: ", fcid)
deal := startDeal(t, ctx, miner, client, fcid, fastRet, startEpoch)
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
time.Sleep(time.Second)
waitDealSealed(t, ctx, miner, client, deal, false, false, nil)
// Retrieval
info, err := client.ClientGetDealInfo(ctx, *deal)
require.NoError(t, err)
return data, info, fcid
}
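// CreateClientFile imports a pseudo-random file of the given size (1600 bytes
// when size is 0) into the client node, returning the import result and the
// raw data.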
func CreateClientFile(ctx context.Context, client api.FullNode, rseed, size int) (*api.ImportRes, []byte, error) {
data, path, err := createRandomFile(rseed, size)
if err != nil {
return nil, nil, err
}
res, err := client.ClientImport(ctx, api.FileRef{Path: path})
if err != nil {
return nil, nil, err
}
return res, data, nil
}
func createRandomFile(rseed, size int) ([]byte, string, error) {
if size == 0 {
size = 1600
}
data := make([]byte, size)
rand.New(rand.NewSource(int64(rseed))).Read(data)
dir, err := ioutil.TempDir(os.TempDir(), "test-make-deal-")
if err != nil {
return nil, "", err
}
path := filepath.Join(dir, "sourcefile.dat")
err = ioutil.WriteFile(path, data, 0644)
if err != nil {
return nil, "", err
}
return data, path, nil
}
func TestPublishDealsBatching(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
publishPeriod := 10 * time.Second
maxDealsPerMsg := uint64(2)
// Set max deals per publish deals message to 2
minerDef := []StorageMiner{{
Full: 0,
Opts: node.Override(
new(*storageadapter.DealPublisher),
storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
Period: publishPeriod,
MaxDealsPerMsg: maxDealsPerMsg,
})),
Preseal: PresealGenesis,
}}
// Create and connect a client node and a miner node
n, sn := b(t, OneFull, minerDef)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
s := connectAndStartMining(t, b, blocktime, client, miner)
defer s.blockMiner.Stop()
// Starts a deal and waits until it's published
runDealTillPublish := func(rseed int) {
res, _, err := CreateClientFile(s.ctx, s.client, rseed, 0)
require.NoError(t, err)
upds, err := client.ClientGetDealUpdates(s.ctx)
require.NoError(t, err)
startDeal(t, s.ctx, s.miner, s.client, res.Root, false, startEpoch)
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
time.Sleep(time.Second)
done := make(chan struct{})
go func() {
for upd := range upds {
if upd.DataRef.Root == res.Root && upd.State == storagemarket.StorageDealAwaitingPreCommit {
done <- struct{}{}
}
}
}()
<-done
}
// Run three deals in parallel
done := make(chan struct{}, maxDealsPerMsg+1)
for rseed := 1; rseed <= 3; rseed++ {
rseed := rseed
go func() {
runDealTillPublish(rseed)
done <- struct{}{}
}()
}
// Wait for two of the deals to be published
for i := 0; i < int(maxDealsPerMsg); i++ {
<-done
}
// Expect a single PublishStorageDeals message that includes the first two deals
msgCids, err := s.client.StateListMessages(s.ctx, &api.MessageMatch{To: market.Address}, types.EmptyTSK, 1)
require.NoError(t, err)
count := 0
for _, msgCid := range msgCids {
msg, err := s.client.ChainGetMessage(s.ctx, msgCid)
require.NoError(t, err)
if msg.Method == market.Methods.PublishStorageDeals {
count++
var pubDealsParams market2.PublishStorageDealsParams
err = pubDealsParams.UnmarshalCBOR(bytes.NewReader(msg.Params))
require.NoError(t, err)
require.Len(t, pubDealsParams.Deals, int(maxDealsPerMsg))
}
}
require.Equal(t, 1, count)
// The third deal should be published once the publish period expires.
// Allow a little padding as it takes a moment for the state change to
// be noticed by the client.
padding := 10 * time.Second
select {
case <-time.After(publishPeriod + padding):
require.Fail(t, "Expected 3rd deal to be published once publish period elapsed")
case <-done: // Success
}
}
func TestBatchDealInput(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
run := func(piece, deals, expectSectors int) func(t *testing.T) {
return func(t *testing.T) {
publishPeriod := 10 * time.Second
maxDealsPerMsg := uint64(deals)
// Set max deals per publish deals message to maxDealsPerMsg
minerDef := []StorageMiner{{
Full: 0,
Opts: node.Options(
node.Override(
new(*storageadapter.DealPublisher),
storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
Period: publishPeriod,
MaxDealsPerMsg: maxDealsPerMsg,
})),
node.Override(new(dtypes.GetSealingConfigFunc), func() (dtypes.GetSealingConfigFunc, error) {
return func() (sealiface.Config, error) {
return sealiface.Config{
MaxWaitDealsSectors: 2,
MaxSealingSectors: 1,
MaxSealingSectorsForDeals: 3,
AlwaysKeepUnsealedCopy: true,
WaitDealsDelay: time.Hour,
}, nil
}, nil
}),
),
Preseal: PresealGenesis,
}}
// Create and connect a client node and a miner node
n, sn := b(t, OneFull, minerDef)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
s := connectAndStartMining(t, b, blocktime, client, miner)
defer s.blockMiner.Stop()
err := miner.MarketSetAsk(s.ctx, big.Zero(), big.Zero(), 200, 128, 32<<30)
require.NoError(t, err)
checkNoPadding := func() {
sl, err := sn[0].SectorsList(s.ctx)
require.NoError(t, err)
sort.Slice(sl, func(i, j int) bool {
return sl[i] < sl[j]
})
for _, snum := range sl {
si, err := sn[0].SectorsStatus(s.ctx, snum, false)
require.NoError(t, err)
// fmt.Printf("S %d: %+v %s\n", snum, si.Deals, si.State)
for _, deal := range si.Deals {
if deal == 0 {
fmt.Printf("sector %d had a padding piece!\n", snum)
}
}
}
}
// Starts a deal and waits until it's sealed
runDealTillSeal := func(rseed int) {
res, _, err := CreateClientFile(s.ctx, s.client, rseed, piece)
require.NoError(t, err)
dc := startDeal(t, s.ctx, s.miner, s.client, res.Root, false, startEpoch)
waitDealSealed(t, s.ctx, s.miner, s.client, dc, false, true, checkNoPadding)
}
// Run maxDealsPerMsg deals in parallel
done := make(chan struct{}, maxDealsPerMsg)
for rseed := 0; rseed < int(maxDealsPerMsg); rseed++ {
rseed := rseed
go func() {
runDealTillSeal(rseed)
done <- struct{}{}
}()
}
// Wait for maxDealsPerMsg of the deals to be published
for i := 0; i < int(maxDealsPerMsg); i++ {
<-done
}
checkNoPadding()
sl, err := sn[0].SectorsList(s.ctx)
require.NoError(t, err)
require.Equal(t, len(sl), expectSectors)
}
}
t.Run("4-p1600B", run(1600, 4, 4))
t.Run("4-p513B", run(513, 4, 2))
if !testing.Short() {
t.Run("32-p257B", run(257, 32, 8))
t.Run("32-p10B", run(10, 32, 2))
// fixme: this appears to break data-transfer / markets in some really creative ways
//t.Run("128-p10B", run(10, 128, 8))
}
}
func TestFastRetrievalDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
s := setupOneClientOneMiner(t, b, blocktime)
defer s.blockMiner.Stop()
data := make([]byte, 1600)
rand.New(rand.NewSource(int64(8))).Read(data)
r := bytes.NewReader(data)
fcid, err := s.client.ClientImportLocal(s.ctx, r)
if err != nil {
t.Fatal(err)
}
fmt.Println("FILE CID: ", fcid)
deal := startDeal(t, s.ctx, s.miner, s.client, fcid, true, startEpoch)
waitDealPublished(t, s.ctx, s.miner, deal)
fmt.Println("deal published, retrieving")
// Retrieval
info, err := s.client.ClientGetDealInfo(s.ctx, *deal)
require.NoError(t, err)
testRetrieval(t, s.ctx, s.client, fcid, &info.PieceCID, false, data)
}
func TestSecondDealRetrieval(t *testing.T, b APIBuilder, blocktime time.Duration) {
s := setupOneClientOneMiner(t, b, blocktime)
defer s.blockMiner.Stop()
{
data1 := make([]byte, 800)
rand.New(rand.NewSource(int64(3))).Read(data1)
r := bytes.NewReader(data1)
fcid1, err := s.client.ClientImportLocal(s.ctx, r)
if err != nil {
t.Fatal(err)
}
data2 := make([]byte, 800)
rand.New(rand.NewSource(int64(9))).Read(data2)
r2 := bytes.NewReader(data2)
fcid2, err := s.client.ClientImportLocal(s.ctx, r2)
if err != nil {
t.Fatal(err)
}
deal1 := startDeal(t, s.ctx, s.miner, s.client, fcid1, true, 0)
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
time.Sleep(time.Second)
waitDealSealed(t, s.ctx, s.miner, s.client, deal1, true, false, nil)
deal2 := startDeal(t, s.ctx, s.miner, s.client, fcid2, true, 0)
time.Sleep(time.Second)
waitDealSealed(t, s.ctx, s.miner, s.client, deal2, false, false, nil)
// Retrieval
info, err := s.client.ClientGetDealInfo(s.ctx, *deal2)
require.NoError(t, err)
rf, _ := s.miner.SectorsRefs(s.ctx)
fmt.Printf("refs: %+v\n", rf)
testRetrieval(t, s.ctx, s.client, fcid2, &info.PieceCID, false, data2)
}
}
func TestNonUnsealedRetrievalQuoteForDefaultPricing(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
ppb := int64(1)
unsealPrice := int64(77)
s := setupOneClientOneMiner(t, b, blocktime)
defer s.blockMiner.Stop()
// Set unsealed price to non-zero
ask, err := s.miner.MarketGetRetrievalAsk(s.ctx)
require.NoError(t, err)
ask.PricePerByte = abi.NewTokenAmount(ppb)
ask.UnsealPrice = abi.NewTokenAmount(unsealPrice)
err = s.miner.MarketSetRetrievalAsk(s.ctx, ask)
require.NoError(t, err)
_, info, fcid := mkStorageDeal(t, s.ctx, 6, s.client, s.miner, false, false, startEpoch)
// one more storage deal for the same data
_, _, fcid2 := mkStorageDeal(t, s.ctx, 6, s.client, s.miner, false, false, startEpoch)
require.Equal(t, fcid, fcid2)
// fetch quote -> zero for unsealed price since the unsealed file already exists.
offers, err := s.client.ClientFindData(s.ctx, fcid, &info.PieceCID)
require.NoError(t, err)
require.Len(t, offers, 2)
require.Equal(t, offers[0], offers[1])
require.Equal(t, uint64(0), offers[0].UnsealPrice.Uint64())
require.Equal(t, info.Size*uint64(ppb), offers[0].MinPrice.Uint64())
// remove ONLY one unsealed file
ss, err := s.miner.StorageList(context.Background())
require.NoError(t, err)
_, err = s.miner.SectorsList(s.ctx)
require.NoError(t, err)
iLoop:
for storeID, sd := range ss {
for _, sector := range sd {
require.NoError(t, s.miner.StorageDropSector(s.ctx, storeID, sector.SectorID, storiface.FTUnsealed))
// remove ONLY one
break iLoop
}
}
// get retrieval quote -> zero for unsealed price as an unsealed file still exists.
offers, err = s.client.ClientFindData(s.ctx, fcid, &info.PieceCID)
require.NoError(t, err)
require.Len(t, offers, 2)
require.Equal(t, offers[0], offers[1])
require.Equal(t, uint64(0), offers[0].UnsealPrice.Uint64())
require.Equal(t, info.Size*uint64(ppb), offers[0].MinPrice.Uint64())
// remove the other unsealed file as well
ss, err = s.miner.StorageList(context.Background())
require.NoError(t, err)
_, err = s.miner.SectorsList(s.ctx)
require.NoError(t, err)
for storeID, sd := range ss {
for _, sector := range sd {
require.NoError(t, s.miner.StorageDropSector(s.ctx, storeID, sector.SectorID, storiface.FTUnsealed))
}
}
// fetch quote -> non-zero for unseal price as there are no more unsealed files.
offers, err = s.client.ClientFindData(s.ctx, fcid, &info.PieceCID)
require.NoError(t, err)
require.Len(t, offers, 2)
require.Equal(t, offers[0], offers[1])
require.Equal(t, uint64(unsealPrice), offers[0].UnsealPrice.Uint64())
total := (info.Size * uint64(ppb)) + uint64(unsealPrice)
require.Equal(t, total, offers[0].MinPrice.Uint64())
}
func TestZeroPricePerByteRetrievalDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
s := setupOneClientOneMiner(t, b, blocktime)
defer s.blockMiner.Stop()
// Set price-per-byte to zero
ask, err := s.miner.MarketGetRetrievalAsk(s.ctx)
require.NoError(t, err)
ask.PricePerByte = abi.NewTokenAmount(0)
err = s.miner.MarketSetRetrievalAsk(s.ctx, ask)
require.NoError(t, err)
MakeDeal(t, s.ctx, 6, s.client, s.miner, false, false, startEpoch)
}
func TestOfflineDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch, fastRet bool) {
s := setupOneClientOneMiner(t, b, blocktime)
defer s.blockMiner.Stop()
// Create a random file
data, path, err := createRandomFile(1, 0)
require.NoError(t, err)
// Import the file on the client
importRes, err := s.client.ClientImport(s.ctx, api.FileRef{Path: path})
require.NoError(t, err)
// Get the piece size and commP
fcid := importRes.Root
pieceInfo, err := s.client.ClientDealPieceCID(s.ctx, fcid)
require.NoError(t, err)
fmt.Println("FILE CID: ", fcid)
// Create a storage deal with the miner
maddr, err := s.miner.ActorAddress(s.ctx)
require.NoError(t, err)
addr, err := s.client.WalletDefaultAddress(s.ctx)
require.NoError(t, err)
// Manual storage deal (offline deal)
dataRef := &storagemarket.DataRef{
TransferType: storagemarket.TTManual,
Root: fcid,
PieceCid: &pieceInfo.PieceCID,
PieceSize: pieceInfo.PieceSize.Unpadded(),
}
proposalCid, err := s.client.ClientStartDeal(s.ctx, &api.StartDealParams{
Data: dataRef,
Wallet: addr,
Miner: maddr,
EpochPrice: types.NewInt(1000000),
DealStartEpoch: startEpoch,
MinBlocksDuration: uint64(build.MinDealDuration),
FastRetrieval: fastRet,
})
require.NoError(t, err)
// Wait for the deal to reach StorageDealCheckForAcceptance on the client
cd, err := s.client.ClientGetDealInfo(s.ctx, *proposalCid)
require.NoError(t, err)
require.Eventually(t, func() bool {
cd, _ := s.client.ClientGetDealInfo(s.ctx, *proposalCid)
return cd.State == storagemarket.StorageDealCheckForAcceptance
}, 30*time.Second, 1*time.Second, "actual deal status is %s", storagemarket.DealStates[cd.State])
// Create a CAR file from the raw file
carFileDir, err := ioutil.TempDir(os.TempDir(), "test-make-deal-car")
require.NoError(t, err)
carFilePath := filepath.Join(carFileDir, "out.car")
err = s.client.ClientGenCar(s.ctx, api.FileRef{Path: path}, carFilePath)
require.NoError(t, err)
// Import the CAR file on the miner - this is the equivalent to
// transferring the file across the wire in a normal (non-offline) deal
err = s.miner.DealsImportData(s.ctx, *proposalCid, carFilePath)
require.NoError(t, err)
// Wait for the deal to be published
waitDealPublished(t, s.ctx, s.miner, proposalCid)
t.Logf("deal published, retrieving")
// Retrieve the deal
testRetrieval(t, s.ctx, s.client, fcid, &pieceInfo.PieceCID, false, data)
}
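// startDeal proposes a graphsync storage deal from the client to the given
// miner and returns the proposal CID.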
func startDeal(t *testing.T, ctx context.Context, miner TestStorageNode, client api.FullNode, fcid cid.Cid, fastRet bool, startEpoch abi.ChainEpoch) *cid.Cid {
maddr, err := miner.ActorAddress(ctx)
if err != nil {
t.Fatal(err)
}
addr, err := client.WalletDefaultAddress(ctx)
if err != nil {
t.Fatal(err)
}
deal, err := client.ClientStartDeal(ctx, &api.StartDealParams{
Data: &storagemarket.DataRef{
TransferType: storagemarket.TTGraphsync,
Root: fcid,
},
Wallet: addr,
Miner: maddr,
EpochPrice: types.NewInt(1000000),
DealStartEpoch: startEpoch,
MinBlocksDuration: uint64(build.MinDealDuration),
FastRetrieval: fastRet,
})
if err != nil {
t.Fatalf("%+v", err)
}
return deal
}
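// waitDealSealed polls the deal on both client and provider until it becomes
// active, optionally returning early once sealing starts (noseal) and
// optionally kicking waiting sectors into sealing; cb, if non-nil, is invoked
// after each poll.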
func waitDealSealed(t *testing.T, ctx context.Context, miner TestStorageNode, client api.FullNode, deal *cid.Cid, noseal, noSealStart bool, cb func()) {
loop:
for {
di, err := client.ClientGetDealInfo(ctx, *deal)
if err != nil {
t.Fatal(err)
}
switch di.State {
case storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing:
if noseal {
return
}
if !noSealStart {
startSealingWaiting(t, ctx, miner)
}
case storagemarket.StorageDealProposalRejected:
t.Fatal("deal rejected")
case storagemarket.StorageDealFailing:
t.Fatal("deal failed")
case storagemarket.StorageDealError:
t.Fatal("deal errored", di.Message)
case storagemarket.StorageDealActive:
fmt.Println("COMPLETE", di)
break loop
}
mds, err := miner.MarketListIncompleteDeals(ctx)
if err != nil {
t.Fatal(err)
}
var minerState storagemarket.StorageDealStatus
for _, md := range mds {
if md.DealID == di.DealID {
minerState = md.State
break
}
}
fmt.Printf("Deal %d state: client:%s provider:%s\n", di.DealID, storagemarket.DealStates[di.State], storagemarket.DealStates[minerState])
time.Sleep(time.Second / 2)
if cb != nil {
cb()
}
}
}
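// waitDealPublished watches the miner's deal updates until the given deal
// reaches a post-publish state, failing the test on rejection or error.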
func waitDealPublished(t *testing.T, ctx context.Context, miner TestStorageNode, deal *cid.Cid) {
subCtx, cancel := context.WithCancel(ctx)
defer cancel()
updates, err := miner.MarketGetDealUpdates(subCtx)
if err != nil {
t.Fatal(err)
}
for {
select {
case <-ctx.Done():
t.Fatal("context timeout")
case di := <-updates:
if deal.Equals(di.ProposalCid) {
switch di.State {
case storagemarket.StorageDealProposalRejected:
t.Fatal("deal rejected")
case storagemarket.StorageDealFailing:
t.Fatal("deal failed")
case storagemarket.StorageDealError:
t.Fatal("deal errored", di.Message)
case storagemarket.StorageDealFinalizing, storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing, storagemarket.StorageDealActive:
fmt.Println("COMPLETE", di)
return
}
fmt.Println("Deal state: ", storagemarket.DealStates[di.State])
}
}
}
}
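// startSealingWaiting tells the miner to start sealing any sectors still in
// the WaitDeals state, then flushes the sealing batches.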
func startSealingWaiting(t *testing.T, ctx context.Context, miner TestStorageNode) {
snums, err := miner.SectorsList(ctx)
require.NoError(t, err)
for _, snum := range snums {
si, err := miner.SectorsStatus(ctx, snum, false)
require.NoError(t, err)
t.Logf("Sector %d state: %s", snum, si.State)
if si.State == api.SectorState(sealing.WaitDeals) {
require.NoError(t, miner.SectorStartSealing(ctx, snum))
}
}
flushSealingBatches(t, ctx, miner)
}
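// testRetrieval finds offers for the given payload, retrieves it via the
// first offer, and verifies the returned bytes match the original data
// (unpacking the CAR when carExport is set).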
func testRetrieval(t *testing.T, ctx context.Context, client api.FullNode, fcid cid.Cid, piece *cid.Cid, carExport bool, data []byte) {
offers, err := client.ClientFindData(ctx, fcid, piece)
if err != nil {
t.Fatal(err)
}
if len(offers) < 1 {
t.Fatal("no offers")
}
rpath, err := ioutil.TempDir("", "lotus-retrieve-test-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(rpath) //nolint:errcheck
caddr, err := client.WalletDefaultAddress(ctx)
if err != nil {
t.Fatal(err)
}
ref := &api.FileRef{
Path: filepath.Join(rpath, "ret"),
IsCAR: carExport,
}
updates, err := client.ClientRetrieveWithEvents(ctx, offers[0].Order(caddr), ref)
if err != nil {
t.Fatal(err)
}
for update := range updates {
if update.Err != "" {
t.Fatalf("retrieval failed: %s", update.Err)
}
}
rdata, err := ioutil.ReadFile(filepath.Join(rpath, "ret"))
if err != nil {
t.Fatal(err)
}
if carExport {
rdata = extractCarData(t, ctx, rdata, rpath)
}
if !bytes.Equal(rdata, data) {
t.Fatal("wrong data retrieved")
}
}
func extractCarData(t *testing.T, ctx context.Context, rdata []byte, rpath string) []byte {
bserv := dstest.Bserv()
ch, err := car.LoadCar(bserv.Blockstore(), bytes.NewReader(rdata))
if err != nil {
t.Fatal(err)
}
b, err := bserv.GetBlock(ctx, ch.Roots[0])
if err != nil {
t.Fatal(err)
}
nd, err := ipld.Decode(b)
if err != nil {
t.Fatal(err)
}
dserv := dag.NewDAGService(bserv)
fil, err := unixfile.NewUnixfsFile(ctx, dserv, nd)
if err != nil {
t.Fatal(err)
}
outPath := filepath.Join(rpath, "retLoadedCAR")
if err := files.WriteTo(fil, outPath); err != nil {
t.Fatal(err)
}
rdata, err = ioutil.ReadFile(outPath)
if err != nil {
t.Fatal(err)
}
return rdata
}
type dealsScaffold struct {
ctx context.Context
client *impl.FullNodeAPI
miner TestStorageNode
blockMiner *BlockMiner
}
func setupOneClientOneMiner(t *testing.T, b APIBuilder, blocktime time.Duration) *dealsScaffold {
n, sn := b(t, OneFull, OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
return connectAndStartMining(t, b, blocktime, client, miner)
}
func connectAndStartMining(t *testing.T, b APIBuilder, blocktime time.Duration, client *impl.FullNodeAPI, miner TestStorageNode) *dealsScaffold {
ctx := context.Background()
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
time.Sleep(time.Second)
blockMiner := NewBlockMiner(ctx, t, miner, blocktime)
blockMiner.MineBlocks()
return &dealsScaffold{
ctx: ctx,
client: client,
miner: miner,
blockMiner: blockMiner,
}
}

View File

@ -1,240 +0,0 @@
package test
import (
"bytes"
"context"
"fmt"
"math/rand"
"sync/atomic"
"testing"
"time"
logging "github.com/ipfs/go-log/v2"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/miner"
"github.com/filecoin-project/lotus/node/impl"
)
//nolint:deadcode,varcheck
var log = logging.Logger("apitest")
func (ts *testSuite) testMining(t *testing.T) {
ctx := context.Background()
apis, sn := ts.makeNodes(t, OneFull, OneMiner)
api := apis[0]
newHeads, err := api.ChainNotify(ctx)
require.NoError(t, err)
initHead := (<-newHeads)[0]
baseHeight := initHead.Val.Height()
h1, err := api.ChainHead(ctx)
require.NoError(t, err)
require.Equal(t, int64(h1.Height()), int64(baseHeight))
MineUntilBlock(ctx, t, apis[0], sn[0], nil)
require.NoError(t, err)
<-newHeads
h2, err := api.ChainHead(ctx)
require.NoError(t, err)
require.Greater(t, int64(h2.Height()), int64(h1.Height()))
}
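// testMiningReal runs the mining test with insecure PoSt validation disabled,
// i.e. with real proof validation.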
func (ts *testSuite) testMiningReal(t *testing.T) {
build.InsecurePoStValidation = false
defer func() {
build.InsecurePoStValidation = true
}()
ctx := context.Background()
apis, sn := ts.makeNodes(t, OneFull, OneMiner)
api := apis[0]
newHeads, err := api.ChainNotify(ctx)
require.NoError(t, err)
at := (<-newHeads)[0].Val.Height()
h1, err := api.ChainHead(ctx)
require.NoError(t, err)
require.Equal(t, int64(at), int64(h1.Height()))
MineUntilBlock(ctx, t, apis[0], sn[0], nil)
require.NoError(t, err)
<-newHeads
h2, err := api.ChainHead(ctx)
require.NoError(t, err)
require.Greater(t, int64(h2.Height()), int64(h1.Height()))
MineUntilBlock(ctx, t, apis[0], sn[0], nil)
require.NoError(t, err)
<-newHeads
h3, err := api.ChainHead(ctx)
require.NoError(t, err)
require.Greater(t, int64(h3.Height()), int64(h2.Height()))
}
func TestDealMining(t *testing.T, b APIBuilder, blocktime time.Duration, carExport bool) {
// Test making a deal with a fresh miner, and check whether it starts to mine
ctx := context.Background()
n, sn := b(t, OneFull, []StorageMiner{
{Full: 0, Preseal: PresealGenesis},
{Full: 0, Preseal: 0}, // TODO: Add support for miners on non-first full node
})
client := n[0].FullNode.(*impl.FullNodeAPI)
provider := sn[1]
genesisMiner := sn[0]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := provider.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
if err := genesisMiner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
time.Sleep(time.Second)
data := make([]byte, 600)
rand.New(rand.NewSource(5)).Read(data)
r := bytes.NewReader(data)
fcid, err := client.ClientImportLocal(ctx, r)
if err != nil {
t.Fatal(err)
}
fmt.Println("FILE CID: ", fcid)
var mine int32 = 1
done := make(chan struct{})
minedTwo := make(chan struct{})
m2addr, err := sn[1].ActorAddress(context.TODO())
if err != nil {
t.Fatal(err)
}
go func() {
defer close(done)
complChan := minedTwo
for atomic.LoadInt32(&mine) != 0 {
wait := make(chan int)
mdone := func(mined bool, _ abi.ChainEpoch, err error) {
n := 0
if mined {
n = 1
}
wait <- n
}
if err := sn[0].MineOne(ctx, miner.MineReq{Done: mdone}); err != nil {
t.Error(err)
}
if err := sn[1].MineOne(ctx, miner.MineReq{Done: mdone}); err != nil {
t.Error(err)
}
expect := <-wait
expect += <-wait
time.Sleep(blocktime)
if expect == 0 {
// null block
continue
}
var secondMinerMined bool
for _, node := range sn {
mb, err := node.MiningBase(ctx)
if err != nil {
t.Error(err)
return
}
for _, b := range mb.Blocks() {
if b.Miner == m2addr {
secondMinerMined = true
break
}
}
}
if secondMinerMined && complChan != nil {
close(complChan)
complChan = nil
}
}
}()
deal := startDeal(t, ctx, provider, client, fcid, false, 0)
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
time.Sleep(time.Second)
waitDealSealed(t, ctx, provider, client, deal, false, false, nil)
<-minedTwo
atomic.StoreInt32(&mine, 0)
fmt.Println("shutting down mining")
<-done
}
func (ts *testSuite) testNonGenesisMiner(t *testing.T) {
ctx := context.Background()
n, sn := ts.makeNodes(t, []FullNodeOpts{
FullNodeWithLatestActorsAt(-1),
}, []StorageMiner{
{Full: 0, Preseal: PresealGenesis},
})
full, ok := n[0].FullNode.(*impl.FullNodeAPI)
if !ok {
t.Skip("not testing with a full node")
return
}
genesisMiner := sn[0]
bm := NewBlockMiner(ctx, t, genesisMiner, 4*time.Millisecond)
bm.MineBlocks()
t.Cleanup(bm.Stop)
gaa, err := genesisMiner.ActorAddress(ctx)
require.NoError(t, err)
gmi, err := full.StateMinerInfo(ctx, gaa, types.EmptyTSK)
require.NoError(t, err)
testm := n[0].Stb(ctx, t, TestSpt, gmi.Owner)
ta, err := testm.ActorAddress(ctx)
require.NoError(t, err)
tid, err := address.IDFromAddress(ta)
require.NoError(t, err)
require.Equal(t, uint64(1001), tid)
}

View File

@ -1,389 +0,0 @@
package test
import (
"context"
"fmt"
"sort"
"strings"
"sync/atomic"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/stmgr"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
bminer "github.com/filecoin-project/lotus/miner"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/impl"
)
func TestSDRUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n, sn := b(t, []FullNodeOpts{FullNodeWithSDRAt(500, 1000)}, OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
build.Clock.Sleep(time.Second)
pledge := make(chan struct{})
mine := int64(1)
done := make(chan struct{})
go func() {
defer close(done)
round := 0
for atomic.LoadInt64(&mine) != 0 {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
}}); err != nil {
t.Error(err)
}
// 3 sealing rounds: before, during, and after the upgrades.
if round >= 3 {
continue
}
head, err := client.ChainHead(ctx)
assert.NoError(t, err)
// rounds happen every 500 blocks, with a 50 block offset.
if head.Height() >= abi.ChainEpoch(round*500+50) {
round++
pledge <- struct{}{}
ver, err := client.StateNetworkVersion(ctx, head.Key())
assert.NoError(t, err)
switch round {
case 1:
assert.Equal(t, network.Version6, ver)
case 2:
assert.Equal(t, network.Version7, ver)
case 3:
assert.Equal(t, network.Version8, ver)
}
}
}
}()
// before.
pledgeSectors(t, ctx, miner, 9, 0, pledge)
s, err := miner.SectorsList(ctx)
require.NoError(t, err)
sort.Slice(s, func(i, j int) bool {
return s[i] < s[j]
})
for i, id := range s {
info, err := miner.SectorsStatus(ctx, id, true)
require.NoError(t, err)
expectProof := abi.RegisteredSealProof_StackedDrg2KiBV1
if i >= 3 {
// after
expectProof = abi.RegisteredSealProof_StackedDrg2KiBV1_1
}
assert.Equal(t, expectProof, info.SealProof, "sector %d, id %d", i, id)
}
atomic.StoreInt64(&mine, 0)
<-done
}
func TestPledgeBatching(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n, sn := b(t, []FullNodeOpts{FullNodeWithLatestActorsAt(-1)}, OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
build.Clock.Sleep(time.Second)
mine := int64(1)
done := make(chan struct{})
go func() {
defer close(done)
for atomic.LoadInt64(&mine) != 0 {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
}}); err != nil {
t.Error(err)
}
}
}()
for {
h, err := client.ChainHead(ctx)
require.NoError(t, err)
if h.Height() > 10 {
break
}
}
toCheck := startPledge(t, ctx, miner, nSectors, 0, nil)
for len(toCheck) > 0 {
states := map[api.SectorState]int{}
for n := range toCheck {
st, err := miner.SectorsStatus(ctx, n, false)
require.NoError(t, err)
states[st.State]++
if st.State == api.SectorState(sealing.Proving) {
delete(toCheck, n)
}
if strings.Contains(string(st.State), "Fail") {
t.Fatal("sector in a failed state", st.State)
}
}
if states[api.SectorState(sealing.SubmitPreCommitBatch)] == nSectors ||
(states[api.SectorState(sealing.SubmitPreCommitBatch)] > 0 && states[api.SectorState(sealing.PreCommit1)] == 0 && states[api.SectorState(sealing.PreCommit2)] == 0) {
pcb, err := miner.SectorPreCommitFlush(ctx)
require.NoError(t, err)
if pcb != nil {
fmt.Printf("PRECOMMIT BATCH: %+v\n", pcb)
}
}
if states[api.SectorState(sealing.SubmitCommitAggregate)] == nSectors ||
(states[api.SectorState(sealing.SubmitCommitAggregate)] > 0 && states[api.SectorState(sealing.WaitSeed)] == 0 && states[api.SectorState(sealing.Committing)] == 0) {
cb, err := miner.SectorCommitFlush(ctx)
require.NoError(t, err)
if cb != nil {
fmt.Printf("COMMIT BATCH: %+v\n", cb)
}
}
build.Clock.Sleep(100 * time.Millisecond)
fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states)
}
atomic.StoreInt64(&mine, 0)
<-done
}
func TestPledgeBeforeNv13(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n, sn := b(t, []FullNodeOpts{
{
Opts: func(nodes []TestNode) node.Option {
return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
Network: network.Version9,
Height: 1,
Migration: stmgr.UpgradeActorsV2,
}, {
Network: network.Version10,
Height: 2,
Migration: stmgr.UpgradeActorsV3,
}, {
Network: network.Version12,
Height: 3,
Migration: stmgr.UpgradeActorsV4,
}, {
Network: network.Version13,
Height: 1000000000,
Migration: stmgr.UpgradeActorsV5,
}})
},
},
}, OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
build.Clock.Sleep(time.Second)
mine := int64(1)
done := make(chan struct{})
go func() {
defer close(done)
for atomic.LoadInt64(&mine) != 0 {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
}}); err != nil {
t.Error(err)
}
}
}()
for {
h, err := client.ChainHead(ctx)
require.NoError(t, err)
if h.Height() > 10 {
break
}
}
toCheck := startPledge(t, ctx, miner, nSectors, 0, nil)
for len(toCheck) > 0 {
states := map[api.SectorState]int{}
for n := range toCheck {
st, err := miner.SectorsStatus(ctx, n, false)
require.NoError(t, err)
states[st.State]++
if st.State == api.SectorState(sealing.Proving) {
delete(toCheck, n)
}
if strings.Contains(string(st.State), "Fail") {
t.Fatal("sector in a failed state", st.State)
}
}
build.Clock.Sleep(100 * time.Millisecond)
fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states)
}
atomic.StoreInt64(&mine, 0)
<-done
}
func TestPledgeSector(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n, sn := b(t, OneFull, OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
build.Clock.Sleep(time.Second)
mine := int64(1)
done := make(chan struct{})
go func() {
defer close(done)
for atomic.LoadInt64(&mine) != 0 {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
}}); err != nil {
t.Error(err)
}
}
}()
pledgeSectors(t, ctx, miner, nSectors, 0, nil)
atomic.StoreInt64(&mine, 0)
<-done
}
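// flushSealingBatches forces any pending precommit and commit batches to be
// submitted immediately, logging the flushed batches.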
func flushSealingBatches(t *testing.T, ctx context.Context, miner TestStorageNode) {
pcb, err := miner.SectorPreCommitFlush(ctx)
require.NoError(t, err)
if pcb != nil {
fmt.Printf("PRECOMMIT BATCH: %+v\n", pcb)
}
cb, err := miner.SectorCommitFlush(ctx)
require.NoError(t, err)
if cb != nil {
fmt.Printf("COMMIT BATCH: %+v\n", cb)
}
}
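// startPledge pledges n sectors (optionally gated on blockNotif), waits for
// them all to appear in the sealing FSM, and returns the set of sector
// numbers for callers to track.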
func startPledge(t *testing.T, ctx context.Context, miner TestStorageNode, n, existing int, blockNotif <-chan struct{}) map[abi.SectorNumber]struct{} {
for i := 0; i < n; i++ {
if i%3 == 0 && blockNotif != nil {
<-blockNotif
log.Errorf("WAIT")
}
log.Errorf("PLEDGING %d", i)
_, err := miner.PledgeSector(ctx)
require.NoError(t, err)
}
for {
s, err := miner.SectorsList(ctx) // Note - the test builder doesn't import genesis sectors into FSM
require.NoError(t, err)
fmt.Printf("Sectors: %d\n", len(s))
if len(s) >= n+existing {
break
}
build.Clock.Sleep(100 * time.Millisecond)
}
fmt.Printf("All sectors is fsm\n")
s, err := miner.SectorsList(ctx)
require.NoError(t, err)
toCheck := map[abi.SectorNumber]struct{}{}
for _, number := range s {
toCheck[number] = struct{}{}
}
return toCheck
}
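// pledgeSectors pledges n sectors and blocks until all of them reach the
// Proving state, failing the test if any sector enters a failed state.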
func pledgeSectors(t *testing.T, ctx context.Context, miner TestStorageNode, n, existing int, blockNotif <-chan struct{}) {
toCheck := startPledge(t, ctx, miner, n, existing, blockNotif)
for len(toCheck) > 0 {
flushSealingBatches(t, ctx, miner)
states := map[api.SectorState]int{}
for n := range toCheck {
st, err := miner.SectorsStatus(ctx, n, false)
require.NoError(t, err)
states[st.State]++
if st.State == api.SectorState(sealing.Proving) {
delete(toCheck, n)
}
if strings.Contains(string(st.State), "Fail") {
t.Fatal("sector in a failed state", st.State)
}
}
build.Clock.Sleep(100 * time.Millisecond)
fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states)
}
}

View File

@ -1,313 +0,0 @@
package test
import (
"context"
"fmt"
"os"
"strings"
"testing"
"time"
logging "github.com/ipfs/go-log/v2"
"github.com/multiformats/go-multiaddr"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/network"
lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/v1api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/miner"
"github.com/filecoin-project/lotus/node"
)
func init() {
logging.SetAllLoggers(logging.LevelInfo)
err := os.Setenv("BELLMAN_NO_GPU", "1")
if err != nil {
panic(fmt.Sprintf("failed to set BELLMAN_NO_GPU env variable: %s", err))
}
build.InsecurePoStValidation = true
}
type StorageBuilder func(context.Context, *testing.T, abi.RegisteredSealProof, address.Address) TestStorageNode
type TestNode struct {
v1api.FullNode
// ListenAddr is the address on which an API server is listening, if an
// API server is created for this Node
ListenAddr multiaddr.Multiaddr
Stb StorageBuilder
}
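// TestStorageNode is a storage miner instance handed to tests, with hooks for
// mining a single round and for shutting the node down.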
type TestStorageNode struct {
lapi.StorageMiner
// ListenAddr is the address on which an API server is listening, if an
// API server is created for this Node
ListenAddr multiaddr.Multiaddr
MineOne func(context.Context, miner.MineReq) error
Stop func(context.Context) error
}
var PresealGenesis = -1
const GenesisPreseals = 2
const TestSpt = abi.RegisteredSealProof_StackedDrg2KiBV1_1
// Options for setting up a mock storage miner
type StorageMiner struct {
Full int
Opts node.Option
Preseal int
}
type OptionGenerator func([]TestNode) node.Option
// Options for setting up a mock full node
type FullNodeOpts struct {
Lite bool // run node in "lite" mode
Opts OptionGenerator // generate dependency injection options
}
// APIBuilder is a function invoked by the test suite to provide
// test nodes and networks.
//
// The fullOpts array defines options for each full node; the storage array
// defines storage nodes, where each number specifies the index of the full
// node that storage node 'belongs' to.
type APIBuilder func(t *testing.T, full []FullNodeOpts, storage []StorageMiner) ([]TestNode, []TestStorageNode)
type testSuite struct {
makeNodes APIBuilder
}
// TestApis is the entry point to API test suite
func TestApis(t *testing.T, b APIBuilder) {
ts := testSuite{
makeNodes: b,
}
t.Run("version", ts.testVersion)
t.Run("id", ts.testID)
t.Run("testConnectTwo", ts.testConnectTwo)
t.Run("testMining", ts.testMining)
t.Run("testMiningReal", ts.testMiningReal)
t.Run("testSearchMsg", ts.testSearchMsg)
t.Run("testNonGenesisMiner", ts.testNonGenesisMiner)
}
func DefaultFullOpts(nFull int) []FullNodeOpts {
full := make([]FullNodeOpts, nFull)
for i := range full {
full[i] = FullNodeOpts{
Opts: func(nodes []TestNode) node.Option {
return node.Options()
},
}
}
return full
}
var OneMiner = []StorageMiner{{Full: 0, Preseal: PresealGenesis}}
var OneFull = DefaultFullOpts(1)
var TwoFull = DefaultFullOpts(2)
var FullNodeWithLatestActorsAt = func(upgradeHeight abi.ChainEpoch) FullNodeOpts {
// Attention: Update this when introducing new actor versions or your tests will be sad
return FullNodeWithNetworkUpgradeAt(network.Version13, upgradeHeight)
}
var FullNodeWithNetworkUpgradeAt = func(version network.Version, upgradeHeight abi.ChainEpoch) FullNodeOpts {
fullSchedule := stmgr.UpgradeSchedule{{
// prepare for upgrade.
Network: network.Version9,
Height: 1,
Migration: stmgr.UpgradeActorsV2,
}, {
Network: network.Version10,
Height: 2,
Migration: stmgr.UpgradeActorsV3,
}, {
Network: network.Version12,
Height: 3,
Migration: stmgr.UpgradeActorsV4,
}, {
Network: network.Version13,
Height: 4,
Migration: stmgr.UpgradeActorsV5,
}}
schedule := stmgr.UpgradeSchedule{}
for _, upgrade := range fullSchedule {
if upgrade.Network > version {
break
}
schedule = append(schedule, upgrade)
}
if upgradeHeight > 0 {
schedule[len(schedule)-1].Height = upgradeHeight
}
return FullNodeOpts{
Opts: func(nodes []TestNode) node.Option {
return node.Override(new(stmgr.UpgradeSchedule), schedule)
},
}
}
var FullNodeWithSDRAt = func(calico, persian abi.ChainEpoch) FullNodeOpts {
return FullNodeOpts{
Opts: func(nodes []TestNode) node.Option {
return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
Network: network.Version6,
Height: 1,
Migration: stmgr.UpgradeActorsV2,
}, {
Network: network.Version7,
Height: calico,
Migration: stmgr.UpgradeCalico,
}, {
Network: network.Version8,
Height: persian,
}})
},
}
}
var MineNext = miner.MineReq{
InjectNulls: 0,
Done: func(bool, abi.ChainEpoch, error) {},
}
func (ts *testSuite) testVersion(t *testing.T) {
lapi.RunningNodeType = lapi.NodeFull
t.Cleanup(func() {
lapi.RunningNodeType = lapi.NodeUnknown
})
ctx := context.Background()
apis, _ := ts.makeNodes(t, OneFull, OneMiner)
napi := apis[0]
v, err := napi.Version(ctx)
if err != nil {
t.Fatal(err)
}
versions := strings.Split(v.Version, "+")
if len(versions) <= 0 {
t.Fatal("empty version")
}
require.Equal(t, versions[0], build.BuildVersion)
}
func (ts *testSuite) testSearchMsg(t *testing.T) {
apis, miners := ts.makeNodes(t, OneFull, OneMiner)
api := apis[0]
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
senderAddr, err := api.WalletDefaultAddress(ctx)
if err != nil {
t.Fatal(err)
}
msg := &types.Message{
From: senderAddr,
To: senderAddr,
Value: big.Zero(),
}
bm := NewBlockMiner(ctx, t, miners[0], 100*time.Millisecond)
bm.MineBlocks()
defer bm.Stop()
sm, err := api.MpoolPushMessage(ctx, msg, nil)
if err != nil {
t.Fatal(err)
}
res, err := api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
if err != nil {
t.Fatal(err)
}
if res.Receipt.ExitCode != 0 {
t.Fatal("did not successfully send message")
}
searchRes, err := api.StateSearchMsg(ctx, types.EmptyTSK, sm.Cid(), lapi.LookbackNoLimit, true)
if err != nil {
t.Fatal(err)
}
if searchRes.TipSet != res.TipSet {
t.Fatalf("search ts: %s, different from wait ts: %s", searchRes.TipSet, res.TipSet)
}
}
func (ts *testSuite) testID(t *testing.T) {
ctx := context.Background()
apis, _ := ts.makeNodes(t, OneFull, OneMiner)
api := apis[0]
id, err := api.ID(ctx)
if err != nil {
t.Fatal(err)
}
assert.Regexp(t, "^12", id.Pretty())
}
func (ts *testSuite) testConnectTwo(t *testing.T) {
ctx := context.Background()
apis, _ := ts.makeNodes(t, TwoFull, OneMiner)
p, err := apis[0].NetPeers(ctx)
if err != nil {
t.Fatal(err)
}
if len(p) != 0 {
t.Error("Node 0 has a peer")
}
p, err = apis[1].NetPeers(ctx)
if err != nil {
t.Fatal(err)
}
if len(p) != 0 {
t.Error("Node 1 has a peer")
}
addrs, err := apis[1].NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := apis[0].NetConnect(ctx, addrs); err != nil {
t.Fatal(err)
}
p, err = apis[0].NetPeers(ctx)
if err != nil {
t.Fatal(err)
}
if len(p) != 1 {
t.Error("Node 0 doesn't have 1 peer")
}
p, err = apis[1].NetPeers(ctx)
if err != nil {
t.Fatal(err)
}
if len(p) != 1 {
t.Error("Node 0 doesn't have 1 peer")
}
}

View File

@ -1,87 +0,0 @@
package test
import (
"context"
"testing"
"time"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-address"
lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/miner"
)
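// SendFunds pushes a value-transfer message from the sender's default wallet
// address to addr and fails the test if it does not land with a zero exit code.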
func SendFunds(ctx context.Context, t *testing.T, sender TestNode, addr address.Address, amount abi.TokenAmount) {
senderAddr, err := sender.WalletDefaultAddress(ctx)
if err != nil {
t.Fatal(err)
}
msg := &types.Message{
From: senderAddr,
To: addr,
Value: amount,
}
sm, err := sender.MpoolPushMessage(ctx, msg, nil)
if err != nil {
t.Fatal(err)
}
res, err := sender.StateWaitMsg(ctx, sm.Cid(), 3, lapi.LookbackNoLimit, true)
if err != nil {
t.Fatal(err)
}
if res.Receipt.ExitCode != 0 {
t.Fatal("did not successfully send money")
}
}
func MineUntilBlock(ctx context.Context, t *testing.T, fn TestNode, sn TestStorageNode, cb func(abi.ChainEpoch)) {
for i := 0; i < 1000; i++ {
var success bool
var err error
var epoch abi.ChainEpoch
wait := make(chan struct{})
mineErr := sn.MineOne(ctx, miner.MineReq{
Done: func(win bool, ep abi.ChainEpoch, e error) {
success = win
err = e
epoch = ep
wait <- struct{}{}
},
})
if mineErr != nil {
t.Fatal(mineErr)
}
<-wait
if err != nil {
t.Fatal(err)
}
if success {
// Wait until it shows up on the given full node's ChainHead
nloops := 50
for i := 0; i < nloops; i++ {
ts, err := fn.ChainHead(ctx)
if err != nil {
t.Fatal(err)
}
if ts.Height() == epoch {
break
}
if i == nloops-1 {
t.Fatal("block never managed to sync to node")
}
time.Sleep(time.Millisecond * 10)
}
if cb != nil {
cb(epoch)
}
return
}
t.Log("did not mine block, trying again", i)
}
t.Fatal("failed to mine 1000 times in a row...")
}

File diff suppressed because it is too large

View File

@ -1,2 +1,2 @@
/dns4/bootstrap-0.butterfly.fildev.network/tcp/1347/p2p/12D3KooWRkaF18SR3E6qL6dkGrozT8QJUV5VbhE9E7BZtPmHqdWJ
/dns4/bootstrap-1.butterfly.fildev.network/tcp/1347/p2p/12D3KooWJcJUc23WJjJHGSboGcU3t76z9Lb7CghrH2tiBiDCY4ux
/dns4/bootstrap-0.butterfly.fildev.network/tcp/1347/p2p/12D3KooWBbZd7Su9XfLUQ12RynGQ3ZmGY1nGqFntmqop9pLNJE6g
/dns4/bootstrap-1.butterfly.fildev.network/tcp/1347/p2p/12D3KooWGKRzEY4tJFTmAmrYUpa1CVVohmV9YjJbC9v5XWY2gUji

Binary file not shown.

View File

@ -45,7 +45,8 @@ const UpgradeNorwegianHeight = 114000
const UpgradeTurboHeight = 193789
const UpgradeHyperdriveHeight = 9999999
// 2021-06-11T14:30:00Z
const UpgradeHyperdriveHeight = 321519
func init() {
policy.SetConsensusMinerMinPower(abi.NewStoragePower(32 << 30))

View File

@ -3,6 +3,8 @@ package policy
import (
"sort"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/chain/actors"
@ -367,3 +369,31 @@ func GetDeclarationsMax(nwVer network.Version) int {
panic("unsupported network version")
}
}
func AggregateNetworkFee(nwVer network.Version, aggregateSize int, baseFee abi.TokenAmount) abi.TokenAmount {
switch actors.VersionForNetwork(nwVer) {
case actors.Version0:
return big.Zero()
case actors.Version2:
return big.Zero()
case actors.Version3:
return big.Zero()
case actors.Version4:
return big.Zero()
case actors.Version5:
return miner5.AggregateNetworkFee(aggregateSize, baseFee)
default:
panic("unsupported network version")
}
}
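The commit batcher hunk later in this diff folds this fee into its funds check before sending a ProveCommitAggregate message; a condensed, hypothetical helper showing the same arithmetic (names taken from that hunk, not part of this commit):

```go
// aggregateGoodFunds mirrors the batcher logic: the funds an address needs
// are the batch gas fee cap, plus collateral, plus the aggregate network fee.
func aggregateGoodFunds(nv network.Version, numProofs int, baseFee, maxFee, collateral abi.TokenAmount) abi.TokenAmount {
	aggFee := AggregateNetworkFee(nv, numProofs, baseFee)
	return big.Add(maxFee, big.Add(collateral, aggFee))
}
```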

View File

@ -3,6 +3,8 @@ package policy
import (
"sort"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/chain/actors"
@ -246,3 +248,18 @@ func GetDeclarationsMax(nwVer network.Version) int {
panic("unsupported network version")
}
}
func AggregateNetworkFee(nwVer network.Version, aggregateSize int, baseFee abi.TokenAmount) abi.TokenAmount {
switch actors.VersionForNetwork(nwVer) {
{{range .versions}}
case actors.Version{{.}}:
{{if (le . 4)}}
return big.Zero()
{{else}}
return miner{{.}}.AggregateNetworkFee(aggregateSize, baseFee)
{{end}}
{{end}}
default:
panic("unsupported network version")
}
}

View File

@ -243,7 +243,7 @@ func (mp *MessagePool) checkMessages(msgs []*types.Message, interned bool, flexi
},
}
if len(bytes) > 32*1024-128 { // 128 bytes to account for signature size
if len(bytes) > MaxMessageSize-128 { // 128 bytes to account for signature size
check.OK = false
check.Err = "message too big"
} else {

View File

@ -59,6 +59,8 @@ var MaxUntrustedActorPendingMessages = 10
var MaxNonceGap = uint64(4)
const MaxMessageSize = 64 << 10 // 64KiB
var (
ErrMessageTooBig = errors.New("message too big")
@ -665,7 +667,7 @@ func (mp *MessagePool) Push(ctx context.Context, m *types.SignedMessage) (cid.Ci
func (mp *MessagePool) checkMessage(m *types.SignedMessage) error {
// big messages are bad, anti-DoS
if m.Size() > 32*1024 {
if m.Size() > MaxMessageSize {
return xerrors.Errorf("mpool message too large (%dB): %w", m.Size(), ErrMessageTooBig)
}

View File

@ -14,12 +14,14 @@ import (
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/messagepool/gasguess"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/mock"
"github.com/filecoin-project/lotus/chain/wallet"
_ "github.com/filecoin-project/lotus/lib/sigs/bls"
_ "github.com/filecoin-project/lotus/lib/sigs/secp"
"github.com/stretchr/testify/assert"
)
func init() {
@ -260,6 +262,72 @@ func TestMessagePool(t *testing.T) {
assertNonce(t, mp, sender, 2)
}
func TestCheckMessageBig(t *testing.T) {
tma := newTestMpoolAPI()
w, err := wallet.NewWallet(wallet.NewMemKeyStore())
assert.NoError(t, err)
from, err := w.WalletNew(context.Background(), types.KTBLS)
assert.NoError(t, err)
tma.setBalance(from, 1000e9)
ds := datastore.NewMapDatastore()
mp, err := New(tma, ds, "mptest", nil)
assert.NoError(t, err)
to := mock.Address(1001)
{
msg := &types.Message{
To: to,
From: from,
Value: types.NewInt(1),
Nonce: 0,
GasLimit: 50000000,
GasFeeCap: types.NewInt(100),
GasPremium: types.NewInt(1),
Params: make([]byte, 41<<10), // 41KiB payload
}
sig, err := w.WalletSign(context.TODO(), from, msg.Cid().Bytes(), api.MsgMeta{})
if err != nil {
panic(err)
}
sm := &types.SignedMessage{
Message: *msg,
Signature: *sig,
}
mustAdd(t, mp, sm)
}
{
msg := &types.Message{
To: to,
From: from,
Value: types.NewInt(1),
Nonce: 0,
GasLimit: 50000000,
GasFeeCap: types.NewInt(100),
GasPremium: types.NewInt(1),
Params: make([]byte, 64<<10), // 64KiB payload
}
sig, err := w.WalletSign(context.TODO(), from, msg.Cid().Bytes(), api.MsgMeta{})
if err != nil {
panic(err)
}
sm := &types.SignedMessage{
Message: *msg,
Signature: *sig,
}
err = mp.Add(context.TODO(), sm)
assert.ErrorIs(t, err, ErrMessageTooBig)
}
}
func TestMessagePoolMessagesInEachBlock(t *testing.T) {
tma := newTestMpoolAPI()

View File

@ -248,24 +248,18 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
var errHaltExecution = fmt.Errorf("halt")
func (sm *StateManager) Replay(ctx context.Context, ts *types.TipSet, mcid cid.Cid) (*types.Message, *vm.ApplyRet, error) {
var outm *types.Message
var outr *vm.ApplyRet
var finder messageFinder
// message to find
finder.mcid = mcid
_, _, err := sm.computeTipSetState(ctx, ts, func(c cid.Cid, m *types.Message, ret *vm.ApplyRet) error {
if c == mcid {
outm = m
outr = ret
return errHaltExecution
}
return nil
})
_, _, err := sm.computeTipSetState(ctx, ts, &finder)
if err != nil && !xerrors.Is(err, errHaltExecution) {
return nil, nil, xerrors.Errorf("unexpected error during execution: %w", err)
}
if outr == nil {
if finder.outr == nil {
return nil, nil, xerrors.Errorf("given message not found in tipset")
}
return outm, outr, nil
return finder.outm, finder.outr, nil
}

View File

@ -61,7 +61,7 @@ type MigrationCache interface {
type MigrationFunc func(
ctx context.Context,
sm *StateManager, cache MigrationCache,
cb ExecCallback, oldState cid.Cid,
cb ExecMonitor, oldState cid.Cid,
height abi.ChainEpoch, ts *types.TipSet,
) (newState cid.Cid, err error)
@ -292,7 +292,7 @@ func (us UpgradeSchedule) Validate() error {
return nil
}
func (sm *StateManager) handleStateForks(ctx context.Context, root cid.Cid, height abi.ChainEpoch, cb ExecCallback, ts *types.TipSet) (cid.Cid, error) {
func (sm *StateManager) handleStateForks(ctx context.Context, root cid.Cid, height abi.ChainEpoch, cb ExecMonitor, ts *types.TipSet) (cid.Cid, error) {
retCid := root
var err error
u := sm.stateMigrations[height]
@ -472,7 +472,7 @@ func doTransfer(tree types.StateTree, from, to address.Address, amt abi.TokenAmo
return nil
}
func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ MigrationCache, em ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
// Some initial parameters
FundsForMiners := types.FromFil(1_000_000)
LookbackEpoch := abi.ChainEpoch(32000)
@ -722,12 +722,12 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ Migratio
return cid.Undef, xerrors.Errorf("resultant state tree account balance was not correct: %s", total)
}
if cb != nil {
if em != nil {
// record the transfer in execution traces
fakeMsg := makeFakeMsg(builtin.SystemActorAddr, builtin.SystemActorAddr, big.Zero(), uint64(epoch))
if err := cb(fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{
if err := em.MessageApplied(ctx, ts, fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{
MessageReceipt: *makeFakeRct(),
ActorErr: nil,
ExecutionTrace: types.ExecutionTrace{
@ -740,7 +740,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ Migratio
},
Duration: 0,
GasCosts: nil,
}); err != nil {
}, false); err != nil {
return cid.Undef, xerrors.Errorf("recording transfers: %w", err)
}
}
@ -748,7 +748,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ Migratio
return tree.Flush(ctx)
}
func UpgradeIgnition(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
func UpgradeIgnition(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
store := sm.cs.ActorStore(ctx)
if build.UpgradeLiftoffHeight <= epoch {
@ -785,12 +785,12 @@ func UpgradeIgnition(ctx context.Context, sm *StateManager, _ MigrationCache, cb
return cid.Undef, xerrors.Errorf("resetting genesis msig start epochs: %w", err)
}
err = splitGenesisMultisig0(ctx, cb, split1, store, tree, 50, epoch)
err = splitGenesisMultisig0(ctx, cb, split1, store, tree, 50, epoch, ts)
if err != nil {
return cid.Undef, xerrors.Errorf("splitting first msig: %w", err)
}
err = splitGenesisMultisig0(ctx, cb, split2, store, tree, 50, epoch)
err = splitGenesisMultisig0(ctx, cb, split2, store, tree, 50, epoch, ts)
if err != nil {
return cid.Undef, xerrors.Errorf("splitting second msig: %w", err)
}
@ -803,7 +803,7 @@ func UpgradeIgnition(ctx context.Context, sm *StateManager, _ MigrationCache, cb
return tree.Flush(ctx)
}
func UpgradeRefuel(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
func UpgradeRefuel(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
store := sm.cs.ActorStore(ctx)
tree, err := sm.StateTree(root)
@ -829,7 +829,7 @@ func UpgradeRefuel(ctx context.Context, sm *StateManager, _ MigrationCache, cb E
return tree.Flush(ctx)
}
func UpgradeActorsV2(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
func UpgradeActorsV2(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync())
store := store.ActorStore(ctx, buf)
@ -875,7 +875,7 @@ func UpgradeActorsV2(ctx context.Context, sm *StateManager, _ MigrationCache, cb
return newRoot, nil
}
func UpgradeLiftoff(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
func UpgradeLiftoff(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
tree, err := sm.StateTree(root)
if err != nil {
return cid.Undef, xerrors.Errorf("getting state tree: %w", err)
@ -889,7 +889,7 @@ func UpgradeLiftoff(ctx context.Context, sm *StateManager, _ MigrationCache, cb
return tree.Flush(ctx)
}
func UpgradeCalico(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
func UpgradeCalico(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
if build.BuildType != build.BuildMainnet {
return root, nil
}
@ -935,7 +935,7 @@ func UpgradeCalico(ctx context.Context, sm *StateManager, _ MigrationCache, cb E
return newRoot, nil
}
func terminateActor(ctx context.Context, tree *state.StateTree, addr address.Address, cb ExecCallback, epoch abi.ChainEpoch) error {
func terminateActor(ctx context.Context, tree *state.StateTree, addr address.Address, em ExecMonitor, epoch abi.ChainEpoch, ts *types.TipSet) error {
a, err := tree.GetActor(addr)
if xerrors.Is(err, types.ErrActorNotFound) {
return types.ErrActorNotFound
@ -950,18 +950,18 @@ func terminateActor(ctx context.Context, tree *state.StateTree, addr address.Add
return xerrors.Errorf("transferring terminated actor's balance: %w", err)
}
if cb != nil {
if em != nil {
// record the transfer in execution traces
fakeMsg := makeFakeMsg(builtin.SystemActorAddr, addr, big.Zero(), uint64(epoch))
if err := cb(fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{
if err := em.MessageApplied(ctx, ts, fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{
MessageReceipt: *makeFakeRct(),
ActorErr: nil,
ExecutionTrace: trace,
Duration: 0,
GasCosts: nil,
}); err != nil {
}, false); err != nil {
return xerrors.Errorf("recording transfers: %w", err)
}
}
@ -995,7 +995,7 @@ func terminateActor(ctx context.Context, tree *state.StateTree, addr address.Add
return tree.SetActor(init_.Address, ia)
}
func UpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
func UpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
// Use all the CPUs except 3.
workerCount := runtime.NumCPU() - 3
if workerCount <= 0 {
@ -1019,7 +1019,7 @@ func UpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache
}
if build.BuildType == build.BuildMainnet {
err := terminateActor(ctx, tree, build.ZeroAddress, cb, epoch)
err := terminateActor(ctx, tree, build.ZeroAddress, cb, epoch, ts)
if err != nil && !xerrors.Is(err, types.ErrActorNotFound) {
return cid.Undef, xerrors.Errorf("deleting zero bls actor: %w", err)
}
@ -1097,7 +1097,7 @@ func upgradeActorsV3Common(
return newRoot, nil
}
func UpgradeActorsV4(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
func UpgradeActorsV4(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
// Use all the CPUs except 3.
workerCount := runtime.NumCPU() - 3
if workerCount <= 0 {
@ -1183,7 +1183,7 @@ func upgradeActorsV4Common(
return newRoot, nil
}
func UpgradeActorsV5(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
func UpgradeActorsV5(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
// Use all the CPUs except 3.
workerCount := runtime.NumCPU() - 3
if workerCount <= 0 {
@ -1296,7 +1296,7 @@ func setNetworkName(ctx context.Context, store adt.Store, tree *state.StateTree,
return nil
}
func splitGenesisMultisig0(ctx context.Context, cb ExecCallback, addr address.Address, store adt0.Store, tree *state.StateTree, portions uint64, epoch abi.ChainEpoch) error {
func splitGenesisMultisig0(ctx context.Context, em ExecMonitor, addr address.Address, store adt0.Store, tree *state.StateTree, portions uint64, epoch abi.ChainEpoch, ts *types.TipSet) error {
if portions < 1 {
return xerrors.Errorf("cannot split into 0 portions")
}
@ -1393,12 +1393,12 @@ func splitGenesisMultisig0(ctx context.Context, cb ExecCallback, addr address.Ad
i++
}
if cb != nil {
if em != nil {
// record the transfer in execution traces
fakeMsg := makeFakeMsg(builtin.SystemActorAddr, addr, big.Zero(), uint64(epoch))
if err := cb(fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{
if err := em.MessageApplied(ctx, ts, fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{
MessageReceipt: *makeFakeRct(),
ActorErr: nil,
ExecutionTrace: types.ExecutionTrace{
@ -1411,7 +1411,7 @@ func splitGenesisMultisig0(ctx context.Context, cb ExecCallback, addr address.Ad
},
Duration: 0,
GasCosts: nil,
}); err != nil {
}, false); err != nil {
return xerrors.Errorf("recording transfers: %w", err)
}
}

View File

@ -123,7 +123,7 @@ func TestForkHeightTriggers(t *testing.T) {
cg.ChainStore(), UpgradeSchedule{{
Network: 1,
Height: testForkHeight,
Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecCallback,
Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor,
root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
cst := ipldcbor.NewCborStore(sm.ChainStore().StateBlockstore())
@ -253,7 +253,7 @@ func TestForkRefuseCall(t *testing.T) {
Network: 1,
Expensive: true,
Height: testForkHeight,
Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecCallback,
Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor,
root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
return root, nil
}}})
@ -363,7 +363,7 @@ func TestForkPreMigration(t *testing.T) {
cg.ChainStore(), UpgradeSchedule{{
Network: 1,
Height: testForkHeight,
Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecCallback,
Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor,
root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
// Make sure the test that should be canceled is canceled.

View File

@ -103,6 +103,8 @@ type StateManager struct {
genesisPledge abi.TokenAmount
genesisMarketFunds abi.TokenAmount
tsExecMonitor ExecMonitor
}
// Caches a single state tree
@ -171,6 +173,15 @@ func NewStateManagerWithUpgradeSchedule(cs *store.ChainStore, us UpgradeSchedule
}, nil
}
func NewStateManagerWithUpgradeScheduleAndMonitor(cs *store.ChainStore, us UpgradeSchedule, em ExecMonitor) (*StateManager, error) {
sm, err := NewStateManagerWithUpgradeSchedule(cs, us)
if err != nil {
return nil, err
}
sm.tsExecMonitor = em
return sm, nil
}
func cidsToKey(cids []cid.Cid) string {
var out string
for _, c := range cids {
@ -255,7 +266,7 @@ func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st c
return ts.Blocks()[0].ParentStateRoot, ts.Blocks()[0].ParentMessageReceipts, nil
}
st, rec, err = sm.computeTipSetState(ctx, ts, nil)
st, rec, err = sm.computeTipSetState(ctx, ts, sm.tsExecMonitor)
if err != nil {
return cid.Undef, cid.Undef, err
}
@ -263,39 +274,21 @@ func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st c
return st, rec, nil
}
func traceFunc(trace *[]*api.InvocResult) func(mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet) error {
return func(mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet) error {
ir := &api.InvocResult{
MsgCid: mcid,
Msg: msg,
MsgRct: &ret.MessageReceipt,
ExecutionTrace: ret.ExecutionTrace,
Duration: ret.Duration,
}
if ret.ActorErr != nil {
ir.Error = ret.ActorErr.Error()
}
if ret.GasCosts != nil {
ir.GasCost = MakeMsgGasCost(msg, ret)
}
*trace = append(*trace, ir)
return nil
}
func (sm *StateManager) ExecutionTraceWithMonitor(ctx context.Context, ts *types.TipSet, em ExecMonitor) (cid.Cid, error) {
st, _, err := sm.computeTipSetState(ctx, ts, em)
return st, err
}
func (sm *StateManager) ExecutionTrace(ctx context.Context, ts *types.TipSet) (cid.Cid, []*api.InvocResult, error) {
var trace []*api.InvocResult
st, _, err := sm.computeTipSetState(ctx, ts, traceFunc(&trace))
var invocTrace []*api.InvocResult
st, err := sm.ExecutionTraceWithMonitor(ctx, ts, &InvocationTracer{trace: &invocTrace})
if err != nil {
return cid.Undef, nil, err
}
return st, trace, nil
return st, invocTrace, nil
}
type ExecCallback func(cid.Cid, *types.Message, *vm.ApplyRet) error
func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEpoch, pstate cid.Cid, bms []store.BlockMessages, epoch abi.ChainEpoch, r vm.Rand, cb ExecCallback, baseFee abi.TokenAmount, ts *types.TipSet) (cid.Cid, cid.Cid, error) {
func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEpoch, pstate cid.Cid, bms []store.BlockMessages, epoch abi.ChainEpoch, r vm.Rand, em ExecMonitor, baseFee abi.TokenAmount, ts *types.TipSet) (cid.Cid, cid.Cid, error) {
done := metrics.Timer(ctx, metrics.VMApplyBlocksTotal)
defer done()
@ -341,8 +334,8 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp
if err != nil {
return err
}
if cb != nil {
if err := cb(cronMsg.Cid(), cronMsg, ret); err != nil {
if em != nil {
if err := em.MessageApplied(ctx, ts, cronMsg.Cid(), cronMsg, ret, true); err != nil {
return xerrors.Errorf("callback failed on cron message: %w", err)
}
}
@ -368,7 +361,7 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp
// handle state forks
// XXX: The state tree
newState, err := sm.handleStateForks(ctx, pstate, i, cb, ts)
newState, err := sm.handleStateForks(ctx, pstate, i, em, ts)
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("error handling state forks: %w", err)
}
@ -407,8 +400,8 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp
gasReward = big.Add(gasReward, r.GasCosts.MinerTip)
penalty = big.Add(penalty, r.GasCosts.MinerPenalty)
if cb != nil {
if err := cb(cm.Cid(), m, r); err != nil {
if em != nil {
if err := em.MessageApplied(ctx, ts, cm.Cid(), m, r, false); err != nil {
return cid.Undef, cid.Undef, err
}
}
@ -440,8 +433,8 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp
if actErr != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("failed to apply reward message for miner %s: %w", b.Miner, actErr)
}
if cb != nil {
if err := cb(rwMsg.Cid(), rwMsg, ret); err != nil {
if em != nil {
if err := em.MessageApplied(ctx, ts, rwMsg.Cid(), rwMsg, ret, true); err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("callback failed on reward message: %w", err)
}
}
@ -483,7 +476,7 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp
return st, rectroot, nil
}
func (sm *StateManager) computeTipSetState(ctx context.Context, ts *types.TipSet, cb ExecCallback) (cid.Cid, cid.Cid, error) {
func (sm *StateManager) computeTipSetState(ctx context.Context, ts *types.TipSet, em ExecMonitor) (cid.Cid, cid.Cid, error) {
ctx, span := trace.StartSpan(ctx, "computeTipSetState")
defer span.End()
@ -519,7 +512,7 @@ func (sm *StateManager) computeTipSetState(ctx context.Context, ts *types.TipSet
baseFee := blks[0].ParentBaseFee
return sm.ApplyBlocks(ctx, parentEpoch, pstate, blkmsgs, blks[0].Height, r, cb, baseFee, ts)
return sm.ApplyBlocks(ctx, parentEpoch, pstate, blkmsgs, blks[0].Height, r, em, baseFee, ts)
}
func (sm *StateManager) parentState(ts *types.TipSet) cid.Cid {

chain/stmgr/tracers.go Normal file
View File

@ -0,0 +1,56 @@
package stmgr
import (
"context"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
"github.com/ipfs/go-cid"
)
type ExecMonitor interface {
// MessageApplied is called after a message has been applied. Returning an error will halt execution of any further messages.
MessageApplied(ctx context.Context, ts *types.TipSet, mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet, implicit bool) error
}
var _ ExecMonitor = (*InvocationTracer)(nil)
type InvocationTracer struct {
trace *[]*api.InvocResult
}
func (i *InvocationTracer) MessageApplied(ctx context.Context, ts *types.TipSet, mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet, implicit bool) error {
ir := &api.InvocResult{
MsgCid: mcid,
Msg: msg,
MsgRct: &ret.MessageReceipt,
ExecutionTrace: ret.ExecutionTrace,
Duration: ret.Duration,
}
if ret.ActorErr != nil {
ir.Error = ret.ActorErr.Error()
}
if ret.GasCosts != nil {
ir.GasCost = MakeMsgGasCost(msg, ret)
}
*i.trace = append(*i.trace, ir)
return nil
}
var _ ExecMonitor = (*messageFinder)(nil)
type messageFinder struct {
mcid cid.Cid // the message cid to find
outm *types.Message
outr *vm.ApplyRet
}
func (m *messageFinder) MessageApplied(ctx context.Context, ts *types.TipSet, mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet, implicit bool) error {
if m.mcid == mcid {
m.outm = msg
m.outr = ret
return errHaltExecution // message was found, no need to continue
}
return nil
}
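Any component can supply its own monitor; a minimal, hypothetical third implementation (not part of this diff) that tallies gas across applied messages, wired in through the constructor added in this commit:

```go
// gasTally sums GasUsed over every applied message, including implicit ones
// (cron and reward messages pass implicit=true). Hypothetical example only.
type gasTally struct {
	total int64
}

func (g *gasTally) MessageApplied(ctx context.Context, ts *types.TipSet, mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet, implicit bool) error {
	g.total += ret.MessageReceipt.GasUsed
	return nil
}

// Wiring sketch, assuming cs (*store.ChainStore) and us (UpgradeSchedule) are in scope:
//   sm, err := NewStateManagerWithUpgradeScheduleAndMonitor(cs, us, &gasTally{})
```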

View File

@ -342,7 +342,7 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch,
for i := ts.Height(); i < height; i++ {
// handle state forks
base, err = sm.handleStateForks(ctx, base, i, traceFunc(&trace), ts)
base, err = sm.handleStateForks(ctx, base, i, &InvocationTracer{trace: &trace}, ts)
if err != nil {
return cid.Undef, nil, xerrors.Errorf("error handling state forks: %w", err)
}

View File

@ -557,7 +557,7 @@ func (mv *MessageValidator) validateLocalMessage(ctx context.Context, msg *pubsu
return pubsub.ValidationIgnore
}
if m.Size() > 32*1024 {
if m.Size() > messagepool.MaxMessageSize {
log.Warnf("local message is too large! (%dB)", m.Size())
recordFailure(ctx, metrics.MessageValidationFailure, "oversize")
return pubsub.ValidationIgnore

View File

@ -267,7 +267,7 @@ func (ss *syscallShim) VerifySeal(info proof5.SealVerifyInfo) error {
proof := info.Proof
seed := []byte(info.InteractiveRandomness)
log.Debugf("Verif r:%x; d:%x; m:%s; t:%x; s:%x; N:%d; p:%x", info.SealedCID, info.UnsealedCID, miner, ticket, seed, info.SectorID.Number, proof)
log.Debugf("Verif r:%s; d:%s; m:%s; t:%x; s:%x; N:%d; p:%x", info.SealedCID, info.UnsealedCID, miner, ticket, seed, info.SectorID.Number, proof)
//func(ctx context.Context, maddr address.Address, ssize abi.SectorSize, commD, commR, ticket, proof, seed []byte, sectorID abi.SectorNumber)
ok, err := ss.verifier.VerifySeal(info)

View File

@ -1,22 +0,0 @@
package cli
import (
"context"
"os"
"testing"
"time"
clitest "github.com/filecoin-project/lotus/cli/test"
)
// TestClient does a basic test to exercise the client CLI
// commands
func TestClient(t *testing.T) {
_ = os.Setenv("BELLMAN_NO_GPU", "1")
clitest.QuietMiningLogs()
blocktime := 5 * time.Millisecond
ctx := context.Background()
clientNode, _ := clitest.StartOneNodeOneMiner(ctx, t, blocktime)
clitest.RunClientTest(t, Commands, clientNode)
}

View File

@ -1,22 +0,0 @@
package cli
import (
"context"
"os"
"testing"
"time"
clitest "github.com/filecoin-project/lotus/cli/test"
)
// TestMultisig does a basic test to exercise the multisig CLI
// commands
func TestMultisig(t *testing.T) {
_ = os.Setenv("BELLMAN_NO_GPU", "1")
clitest.QuietMiningLogs()
blocktime := 5 * time.Millisecond
ctx := context.Background()
clientNode, _ := clitest.StartOneNodeOneMiner(ctx, t, blocktime)
clitest.RunMultisigTest(t, Commands, clientNode)
}

View File

@ -281,17 +281,26 @@ var StatePowerCmd = &cli.Command{
ctx := ReqContext(cctx)
ts, err := LoadTipSet(ctx, cctx, api)
if err != nil {
return err
}
var maddr address.Address
if cctx.Args().Present() {
maddr, err = address.NewFromString(cctx.Args().First())
if err != nil {
return err
}
}
ts, err := LoadTipSet(ctx, cctx, api)
if err != nil {
return err
ma, err := api.StateGetActor(ctx, maddr, ts.Key())
if err != nil {
return err
}
if !builtin.IsStorageMinerActor(ma.Code) {
return xerrors.New("provided address does not correspond to a miner actor")
}
}
power, err := api.StateMinerPower(ctx, maddr, ts.Key())
@ -345,7 +354,7 @@ var StateSectorsCmd = &cli.Command{
}
for _, s := range sectors {
fmt.Printf("%d: %x\n", s.SectorNumber, s.SealedCID)
fmt.Printf("%d: %s\n", s.SectorNumber, s.SealedCID)
}
return nil
@ -385,7 +394,7 @@ var StateActiveSectorsCmd = &cli.Command{
}
for _, s := range sectors {
fmt.Printf("%d: %x\n", s.SectorNumber, s.SealedCID)
fmt.Printf("%d: %s\n", s.SectorNumber, s.SealedCID)
}
return nil

View File

@ -1,14 +0,0 @@
package test
import "github.com/ipfs/go-log/v2"
func QuietMiningLogs() {
_ = log.SetLogLevel("miner", "ERROR")
_ = log.SetLogLevel("chainstore", "ERROR")
_ = log.SetLogLevel("chain", "ERROR")
_ = log.SetLogLevel("sub", "ERROR")
_ = log.SetLogLevel("storageminer", "ERROR")
_ = log.SetLogLevel("pubsub", "ERROR")
_ = log.SetLogLevel("gen", "ERROR")
_ = log.SetLogLevel("dht/RtRefreshManager", "ERROR")
}

View File

@ -4,32 +4,29 @@ import (
"context"
"fmt"
"net"
"net/http"
"os"
"contrib.go.opencensus.io/exporter/prometheus"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/gateway"
"github.com/gorilla/mux"
logging "github.com/ipfs/go-log/v2"
promclient "github.com/prometheus/client_golang/prometheus"
"github.com/urfave/cli/v2"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
"golang.org/x/xerrors"
logging "github.com/ipfs/go-log/v2"
"github.com/filecoin-project/go-jsonrpc"
"github.com/filecoin-project/go-state-types/abi"
manet "github.com/multiformats/go-multiaddr/net"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-jsonrpc"
lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/client"
"github.com/filecoin-project/lotus/api/v0api"
"github.com/filecoin-project/lotus/api/v1api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
cliutil "github.com/filecoin-project/lotus/cli/util"
"github.com/filecoin-project/lotus/gateway"
"github.com/filecoin-project/lotus/lib/lotuslog"
"github.com/filecoin-project/lotus/metrics"
"github.com/filecoin-project/lotus/node"
)
var log = logging.Logger("gateway")
@ -140,10 +137,6 @@ var runCmd = &cli.Command{
Action: func(cctx *cli.Context) error {
log.Info("Starting lotus gateway")
ctx := lcli.ReqContext(cctx)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
// Register all metric views
if err := view.Register(
metrics.ChainNodeViews...,
@ -157,70 +150,44 @@ var runCmd = &cli.Command{
}
defer closer()
address := cctx.String("listen")
mux := mux.NewRouter()
var (
lookbackCap = cctx.Duration("api-max-lookback")
address = cctx.String("listen")
waitLookback = abi.ChainEpoch(cctx.Int64("api-wait-lookback-limit"))
)
log.Info("Setting up API endpoint at " + address)
serveRpc := func(path string, hnd interface{}) {
serverOptions := make([]jsonrpc.ServerOption, 0)
if maxRequestSize := cctx.Int("api-max-req-size"); maxRequestSize != 0 {
serverOptions = append(serverOptions, jsonrpc.WithMaxRequestSize(int64(maxRequestSize)))
}
rpcServer := jsonrpc.NewServer(serverOptions...)
rpcServer.Register("Filecoin", hnd)
mux.Handle(path, rpcServer)
serverOptions := make([]jsonrpc.ServerOption, 0)
if maxRequestSize := cctx.Int("api-max-req-size"); maxRequestSize != 0 {
serverOptions = append(serverOptions, jsonrpc.WithMaxRequestSize(int64(maxRequestSize)))
}
lookbackCap := cctx.Duration("api-max-lookback")
log.Info("setting up API endpoint at " + address)
waitLookback := abi.ChainEpoch(cctx.Int64("api-wait-lookback-limit"))
addr, err := net.ResolveTCPAddr("tcp", address)
if err != nil {
return xerrors.Errorf("failed to resolve endpoint address: %w", err)
}
ma := metrics.MetricedGatewayAPI(gateway.NewNode(api, lookbackCap, waitLookback))
maddr, err := manet.FromNetAddr(addr)
if err != nil {
return xerrors.Errorf("failed to convert endpoint address to multiaddr: %w", err)
}
serveRpc("/rpc/v1", ma)
serveRpc("/rpc/v0", lapi.Wrap(new(v1api.FullNodeStruct), new(v0api.WrapperV1Full), ma))
gwapi := gateway.NewNode(api, lookbackCap, waitLookback)
h, err := gateway.Handler(gwapi, serverOptions...)
if err != nil {
return xerrors.Errorf("failed to set up gateway HTTP handler")
}
registry := promclient.DefaultRegisterer.(*promclient.Registry)
exporter, err := prometheus.NewExporter(prometheus.Options{
Registry: registry,
Namespace: "lotus_gw",
stopFunc, err := node.ServeRPC(h, "lotus-gateway", maddr)
if err != nil {
return xerrors.Errorf("failed to serve rpc endpoint: %w", err)
}
<-node.MonitorShutdown(nil, node.ShutdownHandler{
Component: "rpc",
StopFunc: stopFunc,
})
if err != nil {
return err
}
mux.Handle("/debug/metrics", exporter)
mux.PathPrefix("/").Handler(http.DefaultServeMux)
/*ah := &auth.Handler{
Verify: nodeApi.AuthVerify,
Next: mux.ServeHTTP,
}*/
srv := &http.Server{
Handler: mux,
BaseContext: func(listener net.Listener) context.Context {
ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, "lotus-gateway"))
return ctx
},
}
go func() {
<-ctx.Done()
log.Warn("Shutting down...")
if err := srv.Shutdown(context.TODO()); err != nil {
log.Errorf("shutting down RPC server failed: %s", err)
}
log.Warn("Graceful shutdown successful")
}()
nl, err := net.Listen("tcp", address)
if err != nil {
return err
}
return srv.Serve(nl)
return nil
},
}

View File

@ -0,0 +1,103 @@
package main
import (
"fmt"
"io"
"os"
"github.com/ipfs/go-blockservice"
"github.com/ipfs/go-cid"
offline "github.com/ipfs/go-ipfs-exchange-offline"
format "github.com/ipfs/go-ipld-format"
"github.com/ipfs/go-merkledag"
"github.com/ipld/go-car"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/node/repo"
)
func carWalkFunc(nd format.Node) (out []*format.Link, err error) {
for _, link := range nd.Links() {
if link.Cid.Prefix().Codec == cid.FilCommitmentSealed || link.Cid.Prefix().Codec == cid.FilCommitmentUnsealed {
continue
}
out = append(out, link)
}
return out, nil
}
var exportCarCmd = &cli.Command{
Name: "export-car",
Description: "Export a CAR file from the repo (requires the node to be offline)",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "repo",
Value: "~/.lotus",
},
},
Action: func(cctx *cli.Context) error {
if cctx.Args().Len() != 2 {
return lcli.ShowHelp(cctx, fmt.Errorf("must specify file name and object"))
}
outfile := cctx.Args().First()
var roots []cid.Cid
for _, arg := range cctx.Args().Tail() {
c, err := cid.Decode(arg)
if err != nil {
return err
}
roots = append(roots, c)
}
ctx := lcli.ReqContext(cctx)
r, err := repo.NewFS(cctx.String("repo"))
if err != nil {
return xerrors.Errorf("opening fs repo: %w", err)
}
exists, err := r.Exists()
if err != nil {
return err
}
if !exists {
return xerrors.Errorf("lotus repo doesn't exist")
}
lr, err := r.Lock(repo.FullNode)
if err != nil {
return err
}
defer lr.Close() //nolint:errcheck
fi, err := os.Create(outfile)
if err != nil {
return xerrors.Errorf("opening the output file: %w", err)
}
defer fi.Close() //nolint:errcheck
bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore)
if err != nil {
return fmt.Errorf("failed to open blockstore: %w", err)
}
defer func() {
if c, ok := bs.(io.Closer); ok {
if err := c.Close(); err != nil {
log.Warnf("failed to close blockstore: %s", err)
}
}
}()
dag := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs)))
err = car.WriteCarWithWalker(ctx, dag, roots, fi, carWalkFunc)
if err != nil {
return err
}
return nil
},
}
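Given the argument check above (exactly two args: the output file, then one root), a hypothetical invocation would look like `lotus-shed export-car out.car <root-cid>`, optionally with `--repo` pointing at a non-default repo; the node must be offline because the command takes the repo lock.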

View File

@ -43,6 +43,7 @@ func main() {
minerCmd,
mpoolStatsCmd,
exportChainCmd,
exportCarCmd,
consensusCmd,
storageStatsCmd,
syncCmd,

View File

@ -7,7 +7,6 @@ import (
"fmt"
"regexp"
"strconv"
"sync/atomic"
"testing"
"time"
@ -18,13 +17,11 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/test"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/lib/lotuslog"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/node/repo"
builder "github.com/filecoin-project/lotus/node/test"
)
func TestWorkerKeyChange(t *testing.T) {
@ -41,20 +38,16 @@ func TestWorkerKeyChange(t *testing.T) {
policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
lotuslog.SetupLogLevels()
logging.SetLogLevel("miner", "ERROR")
logging.SetLogLevel("chainstore", "ERROR")
logging.SetLogLevel("chain", "ERROR")
logging.SetLogLevel("pubsub", "ERROR")
logging.SetLogLevel("sub", "ERROR")
logging.SetLogLevel("storageminer", "ERROR")
kit.QuietMiningLogs()
blocktime := 1 * time.Millisecond
n, sn := builder.MockSbBuilder(t, []test.FullNodeOpts{test.FullNodeWithLatestActorsAt(-1), test.FullNodeWithLatestActorsAt(-1)}, test.OneMiner)
clients, miners := kit.MockMinerBuilder(t,
[]kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(-1), kit.FullNodeWithLatestActorsAt(-1)},
kit.OneMiner)
client1 := n[0]
client2 := n[1]
client1 := clients[0]
client2 := clients[1]
// Connect the nodes.
addrinfo, err := client1.NetAddrsListen(ctx)
@ -67,8 +60,8 @@ func TestWorkerKeyChange(t *testing.T) {
app := cli.NewApp()
app.Metadata = map[string]interface{}{
"repoType": repo.StorageMiner,
"testnode-full": n[0],
"testnode-storage": sn[0],
"testnode-full": clients[0],
"testnode-storage": miners[0],
}
app.Writer = output
api.RunningNodeType = api.NodeMiner
@ -85,29 +78,14 @@ func TestWorkerKeyChange(t *testing.T) {
return cmd.Action(cctx)
}
// setup miner
mine := int64(1)
done := make(chan struct{})
go func() {
defer close(done)
for atomic.LoadInt64(&mine) == 1 {
time.Sleep(blocktime)
if err := sn[0].MineOne(ctx, test.MineNext); err != nil {
t.Error(err)
}
}
}()
defer func() {
atomic.AddInt64(&mine, -1)
fmt.Println("shutting down mining")
<-done
}()
// start mining
kit.ConnectAndStartMining(t, blocktime, miners[0], client1, client2)
newKey, err := client1.WalletNew(ctx, types.KTBLS)
require.NoError(t, err)
// Initialize wallet.
test.SendFunds(ctx, t, client1, newKey, abi.NewTokenAmount(0))
kit.SendFunds(ctx, t, client1, newKey, abi.NewTokenAmount(0))
require.NoError(t, run(actorProposeChangeWorker, "--really-do-it", newKey.String()))

View File

@ -1,10 +1,13 @@
package main
import (
"context"
"flag"
"testing"
"time"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/node/impl"
logging "github.com/ipfs/go-log/v2"
"github.com/stretchr/testify/require"
"github.com/urfave/cli/v2"
@ -12,11 +15,8 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/test"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/lib/lotuslog"
"github.com/filecoin-project/lotus/node/repo"
builder "github.com/filecoin-project/lotus/node/test"
)
func TestMinerAllInfo(t *testing.T) {
@ -32,12 +32,7 @@ func TestMinerAllInfo(t *testing.T) {
_test = true
lotuslog.SetupLogLevels()
logging.SetLogLevel("miner", "ERROR")
logging.SetLogLevel("chainstore", "ERROR")
logging.SetLogLevel("chain", "ERROR")
logging.SetLogLevel("sub", "ERROR")
logging.SetLogLevel("storageminer", "ERROR")
kit.QuietMiningLogs()
oldDelay := policy.GetPreCommitChallengeDelay()
policy.SetPreCommitChallengeDelay(5)
@ -45,8 +40,9 @@ func TestMinerAllInfo(t *testing.T) {
policy.SetPreCommitChallengeDelay(oldDelay)
})
var n []test.TestNode
var sn []test.TestStorageNode
n, sn := kit.Builder(t, kit.OneFull, kit.OneMiner)
client, miner := n[0].FullNode, sn[0]
kit.ConnectAndStartMining(t, time.Second, miner, client.(*impl.FullNodeAPI))
run := func(t *testing.T) {
app := cli.NewApp()
@ -62,15 +58,10 @@ func TestMinerAllInfo(t *testing.T) {
require.NoError(t, infoAllCmd.Action(cctx))
}
bp := func(t *testing.T, fullOpts []test.FullNodeOpts, storage []test.StorageMiner) ([]test.TestNode, []test.TestStorageNode) {
n, sn = builder.Builder(t, fullOpts, storage)
t.Run("pre-info-all", run)
return n, sn
}
test.TestDealFlow(t, bp, time.Second, false, false, 0)
dh := kit.NewDealHarness(t, client, miner)
dh.MakeFullDeal(context.Background(), 6, false, false, 0)
t.Run("post-info-all", run)
}

View File

@ -1,37 +1,27 @@
package main
import (
"context"
"net"
"net/http"
"fmt"
_ "net/http/pprof"
"os"
"os/signal"
"syscall"
"github.com/filecoin-project/lotus/api/v1api"
"github.com/filecoin-project/lotus/api/v0api"
mux "github.com/gorilla/mux"
"github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
"github.com/urfave/cli/v2"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-jsonrpc"
"github.com/filecoin-project/go-jsonrpc/auth"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/lib/ulimit"
"github.com/filecoin-project/lotus/metrics"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/impl"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/node/repo"
)
@ -164,54 +154,25 @@ var runCmd = &cli.Command{
log.Infof("Remote version %s", v)
lst, err := manet.Listen(endpoint)
// Instantiate the miner node handler.
handler, err := node.MinerHandler(minerapi, true)
if err != nil {
return xerrors.Errorf("could not listen: %w", err)
return xerrors.Errorf("failed to instantiate rpc handler: %w", err)
}
mux := mux.NewRouter()
rpcServer := jsonrpc.NewServer()
rpcServer.Register("Filecoin", api.PermissionedStorMinerAPI(metrics.MetricedStorMinerAPI(minerapi)))
mux.Handle("/rpc/v0", rpcServer)
mux.PathPrefix("/remote").HandlerFunc(minerapi.(*impl.StorageMinerAPI).ServeRemote)
mux.Handle("/debug/metrics", metrics.Exporter())
mux.PathPrefix("/").Handler(http.DefaultServeMux) // pprof
ah := &auth.Handler{
Verify: minerapi.AuthVerify,
Next: mux.ServeHTTP,
// Serve the RPC.
rpcStopper, err := node.ServeRPC(handler, "lotus-miner", endpoint)
if err != nil {
return fmt.Errorf("failed to start json-rpc endpoint: %s", err)
}
srv := &http.Server{
Handler: ah,
BaseContext: func(listener net.Listener) context.Context {
ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, "lotus-miner"))
return ctx
},
}
// Monitor for shutdown.
finishCh := node.MonitorShutdown(shutdownChan,
node.ShutdownHandler{Component: "rpc server", StopFunc: rpcStopper},
node.ShutdownHandler{Component: "miner", StopFunc: stop},
)
sigChan := make(chan os.Signal, 2)
go func() {
select {
case sig := <-sigChan:
log.Warnw("received shutdown", "signal", sig)
case <-shutdownChan:
log.Warn("received shutdown")
}
log.Warn("Shutting down...")
if err := stop(context.TODO()); err != nil {
log.Errorf("graceful shutting down failed: %s", err)
}
if err := srv.Shutdown(context.TODO()); err != nil {
log.Errorf("shutting down RPC server failed: %s", err)
}
log.Warn("Graceful shutdown successful")
}()
signal.Notify(sigChan, syscall.SIGTERM, syscall.SIGINT)
return srv.Serve(manet.NetListener(lst))
<-finishCh
return nil
},
}

View File

@ -15,6 +15,7 @@ import (
"runtime/pprof"
"strings"
"github.com/filecoin-project/go-jsonrpc"
paramfetch "github.com/filecoin-project/go-paramfetch"
metricsprom "github.com/ipfs/go-metrics-prometheus"
"github.com/mitchellh/go-homedir"
@ -351,8 +352,37 @@ var DaemonCmd = &cli.Command{
return xerrors.Errorf("getting api endpoint: %w", err)
}
//
// Instantiate JSON-RPC endpoint.
// ----
// Populate JSON-RPC options.
serverOptions := make([]jsonrpc.ServerOption, 0)
if maxRequestSize := cctx.Int("api-max-req-size"); maxRequestSize != 0 {
serverOptions = append(serverOptions, jsonrpc.WithMaxRequestSize(int64(maxRequestSize)))
}
// Instantiate the full node handler.
h, err := node.FullNodeHandler(api, true, serverOptions...)
if err != nil {
return fmt.Errorf("failed to instantiate rpc handler: %s", err)
}
// Serve the RPC.
rpcStopper, err := node.ServeRPC(h, "lotus-daemon", endpoint)
if err != nil {
return fmt.Errorf("failed to start json-rpc endpoint: %s", err)
}
// Monitor for shutdown.
finishCh := node.MonitorShutdown(shutdownChan,
node.ShutdownHandler{Component: "rpc server", StopFunc: rpcStopper},
node.ShutdownHandler{Component: "node", StopFunc: stop},
)
<-finishCh // fires when shutdown is complete.
// TODO: properly parse api endpoint (or make it a URL)
return serveRPC(api, stop, endpoint, shutdownChan, int64(cctx.Int("api-max-req-size")))
return nil
},
Subcommands: []*cli.Command{
daemonStopCmd,

View File

@ -4,6 +4,7 @@ import (
"context"
"os"
logging "github.com/ipfs/go-log/v2"
"github.com/mattn/go-isatty"
"github.com/urfave/cli/v2"
"go.opencensus.io/trace"
@ -16,6 +17,8 @@ import (
"github.com/filecoin-project/lotus/node/repo"
)
var log = logging.Logger("main")
var AdvanceBlockCmd *cli.Command
func main() {

View File

@ -1,33 +0,0 @@
package main
import (
"net/http"
"strconv"
)
func handleFractionOpt(name string, setter func(int)) http.HandlerFunc {
return func(rw http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
http.Error(rw, "only POST allowed", http.StatusMethodNotAllowed)
return
}
if err := r.ParseForm(); err != nil {
http.Error(rw, err.Error(), http.StatusBadRequest)
return
}
asfr := r.Form.Get("x")
if len(asfr) == 0 {
http.Error(rw, "parameter 'x' must be set", http.StatusBadRequest)
return
}
fr, err := strconv.Atoi(asfr)
if err != nil {
http.Error(rw, err.Error(), http.StatusBadRequest)
return
}
log.Infof("setting %s to %d", name, fr)
setter(fr)
}
}

View File

@ -1,138 +0,0 @@
package main
import (
"context"
"encoding/json"
"net"
"net/http"
_ "net/http/pprof"
"os"
"os/signal"
"runtime"
"syscall"
"github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log/v2"
"github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
"go.opencensus.io/tag"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-jsonrpc"
"github.com/filecoin-project/go-jsonrpc/auth"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/v0api"
"github.com/filecoin-project/lotus/api/v1api"
"github.com/filecoin-project/lotus/metrics"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/impl"
)
var log = logging.Logger("main")
func serveRPC(a v1api.FullNode, stop node.StopFunc, addr multiaddr.Multiaddr, shutdownCh <-chan struct{}, maxRequestSize int64) error {
serverOptions := make([]jsonrpc.ServerOption, 0)
if maxRequestSize != 0 { // config set
serverOptions = append(serverOptions, jsonrpc.WithMaxRequestSize(maxRequestSize))
}
serveRpc := func(path string, hnd interface{}) {
rpcServer := jsonrpc.NewServer(serverOptions...)
rpcServer.Register("Filecoin", hnd)
ah := &auth.Handler{
Verify: a.AuthVerify,
Next: rpcServer.ServeHTTP,
}
http.Handle(path, ah)
}
pma := api.PermissionedFullAPI(metrics.MetricedFullAPI(a))
serveRpc("/rpc/v1", pma)
serveRpc("/rpc/v0", &v0api.WrapperV1Full{FullNode: pma})
importAH := &auth.Handler{
Verify: a.AuthVerify,
Next: handleImport(a.(*impl.FullNodeAPI)),
}
http.Handle("/rest/v0/import", importAH)
http.Handle("/debug/metrics", metrics.Exporter())
http.Handle("/debug/pprof-set/block", handleFractionOpt("BlockProfileRate", runtime.SetBlockProfileRate))
http.Handle("/debug/pprof-set/mutex", handleFractionOpt("MutexProfileFraction",
func(x int) { runtime.SetMutexProfileFraction(x) },
))
lst, err := manet.Listen(addr)
if err != nil {
return xerrors.Errorf("could not listen: %w", err)
}
srv := &http.Server{
Handler: http.DefaultServeMux,
BaseContext: func(listener net.Listener) context.Context {
ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, "lotus-daemon"))
return ctx
},
}
sigCh := make(chan os.Signal, 2)
shutdownDone := make(chan struct{})
go func() {
select {
case sig := <-sigCh:
log.Warnw("received shutdown", "signal", sig)
case <-shutdownCh:
log.Warn("received shutdown")
}
log.Warn("Shutting down...")
if err := srv.Shutdown(context.TODO()); err != nil {
log.Errorf("shutting down RPC server failed: %s", err)
}
if err := stop(context.TODO()); err != nil {
log.Errorf("graceful shutting down failed: %s", err)
}
log.Warn("Graceful shutdown successful")
_ = log.Sync() //nolint:errcheck
close(shutdownDone)
}()
signal.Notify(sigCh, syscall.SIGTERM, syscall.SIGINT)
err = srv.Serve(manet.NetListener(lst))
if err == http.ErrServerClosed {
<-shutdownDone
return nil
}
return err
}
func handleImport(a *impl.FullNodeAPI) func(w http.ResponseWriter, r *http.Request) {
return func(w http.ResponseWriter, r *http.Request) {
if r.Method != "PUT" {
w.WriteHeader(404)
return
}
if !auth.HasPerm(r.Context(), nil, api.PermWrite) {
w.WriteHeader(401)
_ = json.NewEncoder(w).Encode(struct{ Error string }{"unauthorized: missing write permission"})
return
}
c, err := a.ClientImportLocal(r.Context(), r.Body)
if err != nil {
w.WriteHeader(500)
_ = json.NewEncoder(w).Encode(struct{ Error string }{err.Error()})
return
}
w.WriteHeader(200)
err = json.NewEncoder(w).Encode(struct{ Cid cid.Cid }{c})
if err != nil {
log.Errorf("/rest/v0/import: Writing response failed: %+v", err)
return
}
}
}

View File

@ -141,16 +141,11 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, params
blocks = append(blocks, sb)
}
var (
messages []*types.Message
results []*vm.ApplyRet
)
recordOutputs := func(_ cid.Cid, msg *types.Message, ret *vm.ApplyRet) error {
messages = append(messages, msg)
results = append(results, ret)
return nil
recordOutputs := &outputRecorder{
messages: []*types.Message{},
results: []*vm.ApplyRet{},
}
postcid, receiptsroot, err := sm.ApplyBlocks(context.Background(),
params.ParentEpoch,
params.Preroot,
@ -169,8 +164,8 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, params
ret := &ExecuteTipsetResult{
ReceiptsRoot: receiptsroot,
PostStateRoot: postcid,
AppliedMessages: messages,
AppliedResults: results,
AppliedMessages: recordOutputs.messages,
AppliedResults: recordOutputs.results,
}
return ret, nil
}
@ -284,3 +279,14 @@ func CircSupplyOrDefault(circSupply *gobig.Int) abi.TokenAmount {
}
return big.NewFromGo(circSupply)
}
type outputRecorder struct {
messages []*types.Message
results []*vm.ApplyRet
}
func (o *outputRecorder) MessageApplied(ctx context.Context, ts *types.TipSet, mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet, implicit bool) error {
o.messages = append(o.messages, msg)
o.results = append(o.results, ret)
return nil
}

View File

@ -889,8 +889,8 @@ Inputs: `null`
Response:
```json
{
"Addrs": null,
"ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"
"ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
"Addrs": []
}
```
@ -1039,8 +1039,8 @@ Inputs:
```json
[
{
"Addrs": null,
"ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"
"ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
"Addrs": []
}
]
```
@ -1090,8 +1090,8 @@ Inputs:
Response:
```json
{
"Addrs": null,
"ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"
"ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
"Addrs": []
}
```

View File

@ -2841,8 +2841,8 @@ Inputs: `null`
Response:
```json
{
"Addrs": null,
"ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"
"ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
"Addrs": []
}
```
@ -2991,8 +2991,8 @@ Inputs:
```json
[
{
"Addrs": null,
"ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"
"ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
"Addrs": []
}
]
```
@ -3042,8 +3042,8 @@ Inputs:
Response:
```json
{
"Addrs": null,
"ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"
"ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
"Addrs": []
}
```

View File

@ -3068,8 +3068,8 @@ Inputs: `null`
Response:
```json
{
"Addrs": null,
"ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"
"ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
"Addrs": []
}
```
@ -3218,8 +3218,8 @@ Inputs:
```json
[
{
"Addrs": null,
"ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"
"ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
"Addrs": []
}
]
```
@ -3269,8 +3269,8 @@ Inputs:
Response:
```json
{
"Addrs": null,
"ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"
"ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
"Addrs": []
}
```

View File

@ -25,7 +25,6 @@ We're happy to announce Lotus X.Y.Z...
First steps:
- [ ] Fork a new branch (`release/vX.Y.Z`) from `master` and make any further release related changes to this branch. If any "non-trivial" changes get added to the release, uncheck all the checkboxes and return to this stage.
- [ ] Prep the changelog using `scripts/mkreleaselog`, and add it to `CHANGELOG.md`
- [ ] Bump the version in `version.go` in the `master` branch to `vX.(Y+1).0-dev`.
Prepping an RC:
@ -93,7 +92,7 @@ Testing an RC:
- [ ] Final preparation
- [ ] Verify that the version string in [`version.go`](https://github.com/filecoin-project/lotus/blob/master/build/version.go) has been updated.
- [ ] Ensure that [CHANGELOG.md](https://github.com/filecoin-project/lotus/blob/master/CHANGELOG.md) is up to date
- [ ] Ensure that [README.md](https://github.com/filecoin-project/lotus/blob/master/README.md) is up to date
- [ ] Prep the changelog using `scripts/mkreleaselog`, and add it to `CHANGELOG.md`
- [ ] Merge `release-vX.Y.Z` into the `releases` branch.
- [ ] Tag this merge commit (on the `releases` branch) with `vX.Y.Z`
- [ ] Cut the release [here](https://github.com/filecoin-project/lotus/releases/new?prerelease=true&target=releases).

View File

@ -7,6 +7,10 @@ import (
"sync"
"time"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
@ -23,6 +27,7 @@ import (
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
"github.com/filecoin-project/lotus/node/config"
)
const arp = abi.RegisteredAggregationProof_SnarkPackV1
@ -31,9 +36,11 @@ type CommitBatcherApi interface {
SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error)
StateMinerInfo(context.Context, address.Address, TipSetToken) (miner.MinerInfo, error)
ChainHead(ctx context.Context) (TipSetToken, abi.ChainEpoch, error)
ChainBaseFee(context.Context, TipSetToken) (abi.TokenAmount, error)
StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok TipSetToken) (*miner.SectorPreCommitOnChainInfo, error)
StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, TipSetToken) (big.Int, error)
StateNetworkVersion(ctx context.Context, tok TipSetToken) (network.Version, error)
}
type AggregateInput struct {
@ -47,20 +54,20 @@ type CommitBatcher struct {
maddr address.Address
mctx context.Context
addrSel AddrSel
feeCfg FeeConfig
feeCfg config.MinerFeeConfig
getConfig GetSealingConfigFunc
prover ffiwrapper.Prover
deadlines map[abi.SectorNumber]time.Time
todo map[abi.SectorNumber]AggregateInput
waiting map[abi.SectorNumber][]chan sealiface.CommitBatchRes
cutoffs map[abi.SectorNumber]time.Time
todo map[abi.SectorNumber]AggregateInput
waiting map[abi.SectorNumber][]chan sealiface.CommitBatchRes
notify, stop, stopped chan struct{}
force chan chan []sealiface.CommitBatchRes
lk sync.Mutex
}
func NewCommitBatcher(mctx context.Context, maddr address.Address, api CommitBatcherApi, addrSel AddrSel, feeCfg FeeConfig, getConfig GetSealingConfigFunc, prov ffiwrapper.Prover) *CommitBatcher {
func NewCommitBatcher(mctx context.Context, maddr address.Address, api CommitBatcherApi, addrSel AddrSel, feeCfg config.MinerFeeConfig, getConfig GetSealingConfigFunc, prov ffiwrapper.Prover) *CommitBatcher {
b := &CommitBatcher{
api: api,
maddr: maddr,
@ -70,9 +77,9 @@ func NewCommitBatcher(mctx context.Context, maddr address.Address, api CommitBat
getConfig: getConfig,
prover: prov,
deadlines: map[abi.SectorNumber]time.Time{},
todo: map[abi.SectorNumber]AggregateInput{},
waiting: map[abi.SectorNumber][]chan sealiface.CommitBatchRes{},
cutoffs: map[abi.SectorNumber]time.Time{},
todo: map[abi.SectorNumber]AggregateInput{},
waiting: map[abi.SectorNumber][]chan sealiface.CommitBatchRes{},
notify: make(chan struct{}, 1),
force: make(chan chan []sealiface.CommitBatchRes),
@ -132,30 +139,30 @@ func (b *CommitBatcher) batchWait(maxWait, slack time.Duration) <-chan time.Time
return nil
}
var deadline time.Time
var cutoff time.Time
for sn := range b.todo {
sectorDeadline := b.deadlines[sn]
if deadline.IsZero() || (!sectorDeadline.IsZero() && sectorDeadline.Before(deadline)) {
deadline = sectorDeadline
sectorCutoff := b.cutoffs[sn]
if cutoff.IsZero() || (!sectorCutoff.IsZero() && sectorCutoff.Before(cutoff)) {
cutoff = sectorCutoff
}
}
for sn := range b.waiting {
sectorDeadline := b.deadlines[sn]
if deadline.IsZero() || (!sectorDeadline.IsZero() && sectorDeadline.Before(deadline)) {
deadline = sectorDeadline
sectorCutoff := b.cutoffs[sn]
if cutoff.IsZero() || (!sectorCutoff.IsZero() && sectorCutoff.Before(cutoff)) {
cutoff = sectorCutoff
}
}
if deadline.IsZero() {
if cutoff.IsZero() {
return time.After(maxWait)
}
deadline = deadline.Add(-slack)
if deadline.Before(now) {
cutoff = cutoff.Add(-slack)
if cutoff.Before(now) {
return time.After(time.Nanosecond) // can't return 0
}
wait := deadline.Sub(now)
wait := cutoff.Sub(now)
if wait > maxWait {
wait = maxWait
}
@ -208,7 +215,7 @@ func (b *CommitBatcher) maybeStartBatch(notif, after bool) ([]sealiface.CommitBa
delete(b.waiting, sn)
delete(b.todo, sn)
delete(b.deadlines, sn)
delete(b.cutoffs, sn)
}
}
@ -285,14 +292,29 @@ func (b *CommitBatcher) processBatch(cfg sealiface.Config) ([]sealiface.CommitBa
return []sealiface.CommitBatchRes{res}, xerrors.Errorf("couldn't get miner info: %w", err)
}
goodFunds := big.Add(b.feeCfg.MaxCommitGasFee, collateral)
maxFee := b.feeCfg.MaxCommitBatchGasFee.FeeForSectors(len(infos))
bf, err := b.api.ChainBaseFee(b.mctx, tok)
if err != nil {
return []sealiface.CommitBatchRes{res}, xerrors.Errorf("couldn't get base fee: %w", err)
}
nv, err := b.api.StateNetworkVersion(b.mctx, tok)
if err != nil {
log.Errorf("getting network version: %s", err)
return []sealiface.CommitBatchRes{res}, xerrors.Errorf("getting network version: %s", err)
}
aggFee := policy.AggregateNetworkFee(nv, len(infos), bf)
goodFunds := big.Add(maxFee, big.Add(collateral, aggFee))
from, _, err := b.addrSel(b.mctx, mi, api.CommitAddr, goodFunds, collateral)
if err != nil {
return []sealiface.CommitBatchRes{res}, xerrors.Errorf("no good address found: %w", err)
}
mcid, err := b.api.SendMsg(b.mctx, from, b.maddr, miner.Methods.ProveCommitAggregate, collateral, b.feeCfg.MaxCommitGasFee, enc.Bytes())
mcid, err := b.api.SendMsg(b.mctx, from, b.maddr, miner.Methods.ProveCommitAggregate, collateral, maxFee, enc.Bytes())
if err != nil {
return []sealiface.CommitBatchRes{res}, xerrors.Errorf("sending message failed: %w", err)
}
@ -352,14 +374,14 @@ func (b *CommitBatcher) processSingle(mi miner.MinerInfo, sn abi.SectorNumber, i
return cid.Undef, err
}
goodFunds := big.Add(collateral, b.feeCfg.MaxCommitGasFee)
goodFunds := big.Add(collateral, big.Int(b.feeCfg.MaxCommitGasFee))
from, _, err := b.addrSel(b.mctx, mi, api.CommitAddr, goodFunds, collateral)
if err != nil {
return cid.Undef, xerrors.Errorf("no good address to send commit message from: %w", err)
}
mcid, err := b.api.SendMsg(b.mctx, from, b.maddr, miner.Methods.ProveCommitSector, collateral, b.feeCfg.MaxCommitGasFee, enc.Bytes())
mcid, err := b.api.SendMsg(b.mctx, from, b.maddr, miner.Methods.ProveCommitSector, collateral, big.Int(b.feeCfg.MaxCommitGasFee), enc.Bytes())
if err != nil {
return cid.Undef, xerrors.Errorf("pushing message to mpool: %w", err)
}
@ -369,16 +391,15 @@ func (b *CommitBatcher) processSingle(mi miner.MinerInfo, sn abi.SectorNumber, i
// register commit, wait for batch message, return message CID
func (b *CommitBatcher) AddCommit(ctx context.Context, s SectorInfo, in AggregateInput) (res sealiface.CommitBatchRes, err error) {
_, curEpoch, err := b.api.ChainHead(b.mctx)
if err != nil {
log.Errorf("getting chain head: %s", err)
return sealiface.CommitBatchRes{}, nil
}
sn := s.SectorNumber
cu, err := b.getCommitCutoff(s)
if err != nil {
return sealiface.CommitBatchRes{}, err
}
b.lk.Lock()
b.deadlines[sn] = getSectorDeadline(curEpoch, s)
b.cutoffs[sn] = cu
b.todo[sn] = in
sent := make(chan sealiface.CommitBatchRes, 1)
@ -452,24 +473,43 @@ func (b *CommitBatcher) Stop(ctx context.Context) error {
}
}
func getSectorDeadline(curEpoch abi.ChainEpoch, si SectorInfo) time.Time {
deadlineEpoch := si.TicketEpoch + policy.MaxPreCommitRandomnessLookback
// TODO: If this returned epochs, it would make testing much easier
func (b *CommitBatcher) getCommitCutoff(si SectorInfo) (time.Time, error) {
tok, curEpoch, err := b.api.ChainHead(b.mctx)
if err != nil {
return time.Now(), xerrors.Errorf("getting chain head: %s", err)
}
nv, err := b.api.StateNetworkVersion(b.mctx, tok)
if err != nil {
log.Errorf("getting network version: %s", err)
return time.Now(), xerrors.Errorf("getting network version: %s", err)
}
pci, err := b.api.StateSectorPreCommitInfo(b.mctx, b.maddr, si.SectorNumber, tok)
if err != nil {
log.Errorf("getting precommit info: %s", err)
return time.Now(), err
}
cutoffEpoch := pci.PreCommitEpoch + policy.GetMaxProveCommitDuration(actors.VersionForNetwork(nv), si.SectorType)
for _, p := range si.Pieces {
if p.DealInfo == nil {
continue
}
startEpoch := p.DealInfo.DealSchedule.StartEpoch
if startEpoch < deadlineEpoch {
deadlineEpoch = startEpoch
if startEpoch < cutoffEpoch {
cutoffEpoch = startEpoch
}
}
if deadlineEpoch <= curEpoch {
return time.Now()
if cutoffEpoch <= curEpoch {
return time.Now(), nil
}
return time.Now().Add(time.Duration(deadlineEpoch-curEpoch) * time.Duration(build.BlockDelaySecs) * time.Second)
return time.Now().Add(time.Duration(cutoffEpoch-curEpoch) * time.Duration(build.BlockDelaySecs) * time.Second), nil
}
func (b *CommitBatcher) getSectorCollateral(sn abi.SectorNumber, tok TipSetToken) (abi.TokenAmount, error) {
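The funds math above, in short: an aggregated ProveCommit message must be coverable by the per-batch gas-fee cap (which scales with batch size) plus the total collateral plus the aggregation network fee. A minimal, self-contained Go sketch of that arithmetic follows — the batchFeeConfig shape (a flat base plus a per-sector increment) and all amounts are illustrative assumptions, not the Lotus types:

package main

import (
    "fmt"
    "math/big"
)

// batchFeeConfig is an assumed shape for a per-batch fee cap: a flat base
// plus an increment for every sector in the batch.
type batchFeeConfig struct {
    base      *big.Int
    perSector *big.Int
}

// feeForSectors computes the gas-fee cap for a batch of n sectors.
func (c batchFeeConfig) feeForSectors(n int) *big.Int {
    per := new(big.Int).Mul(big.NewInt(int64(n)), c.perSector)
    return new(big.Int).Add(c.base, per)
}

func main() {
    cfg := batchFeeConfig{base: big.NewInt(0), perSector: big.NewInt(30_000_000)}
    maxFee := cfg.feeForSectors(200)        // cap for a 200-sector batch
    collateral := big.NewInt(1_000_000_000) // placeholder total pledge
    aggFee := big.NewInt(500_000_000)       // placeholder aggregation network fee
    // goodFunds is the balance the sending address must be able to cover.
    goodFunds := new(big.Int).Add(maxFee, new(big.Int).Add(collateral, aggFee))
    fmt.Println(goodFunds) // 7500000000
}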

View File

@ -7,6 +7,9 @@ import (
"sync"
"time"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
@ -19,6 +22,7 @@ import (
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
"github.com/filecoin-project/lotus/node/config"
)
type PreCommitBatcherApi interface {
@ -37,19 +41,19 @@ type PreCommitBatcher struct {
maddr address.Address
mctx context.Context
addrSel AddrSel
feeCfg FeeConfig
feeCfg config.MinerFeeConfig
getConfig GetSealingConfigFunc
deadlines map[abi.SectorNumber]time.Time
todo map[abi.SectorNumber]*preCommitEntry
waiting map[abi.SectorNumber][]chan sealiface.PreCommitBatchRes
cutoffs map[abi.SectorNumber]time.Time
todo map[abi.SectorNumber]*preCommitEntry
waiting map[abi.SectorNumber][]chan sealiface.PreCommitBatchRes
notify, stop, stopped chan struct{}
force chan chan []sealiface.PreCommitBatchRes
lk sync.Mutex
}
func NewPreCommitBatcher(mctx context.Context, maddr address.Address, api PreCommitBatcherApi, addrSel AddrSel, feeCfg FeeConfig, getConfig GetSealingConfigFunc) *PreCommitBatcher {
func NewPreCommitBatcher(mctx context.Context, maddr address.Address, api PreCommitBatcherApi, addrSel AddrSel, feeCfg config.MinerFeeConfig, getConfig GetSealingConfigFunc) *PreCommitBatcher {
b := &PreCommitBatcher{
api: api,
maddr: maddr,
@ -58,9 +62,9 @@ func NewPreCommitBatcher(mctx context.Context, maddr address.Address, api PreCom
feeCfg: feeCfg,
getConfig: getConfig,
deadlines: map[abi.SectorNumber]time.Time{},
todo: map[abi.SectorNumber]*preCommitEntry{},
waiting: map[abi.SectorNumber][]chan sealiface.PreCommitBatchRes{},
cutoffs: map[abi.SectorNumber]time.Time{},
todo: map[abi.SectorNumber]*preCommitEntry{},
waiting: map[abi.SectorNumber][]chan sealiface.PreCommitBatchRes{},
notify: make(chan struct{}, 1),
force: make(chan chan []sealiface.PreCommitBatchRes),
@ -120,30 +124,30 @@ func (b *PreCommitBatcher) batchWait(maxWait, slack time.Duration) <-chan time.T
return nil
}
var deadline time.Time
var cutoff time.Time
for sn := range b.todo {
sectorDeadline := b.deadlines[sn]
if deadline.IsZero() || (!sectorDeadline.IsZero() && sectorDeadline.Before(deadline)) {
deadline = sectorDeadline
sectorCutoff := b.cutoffs[sn]
if cutoff.IsZero() || (!sectorCutoff.IsZero() && sectorCutoff.Before(cutoff)) {
cutoff = sectorCutoff
}
}
for sn := range b.waiting {
sectorDeadline := b.deadlines[sn]
if deadline.IsZero() || (!sectorDeadline.IsZero() && sectorDeadline.Before(deadline)) {
deadline = sectorDeadline
sectorCutoff := b.cutoffs[sn]
if cutoff.IsZero() || (!sectorCutoff.IsZero() && sectorCutoff.Before(cutoff)) {
cutoff = sectorCutoff
}
}
if deadline.IsZero() {
if cutoff.IsZero() {
return time.After(maxWait)
}
deadline = deadline.Add(-slack)
if deadline.Before(now) {
cutoff = cutoff.Add(-slack)
if cutoff.Before(now) {
return time.After(time.Nanosecond) // can't return 0
}
wait := deadline.Sub(now)
wait := cutoff.Sub(now)
if wait > maxWait {
wait = maxWait
}
@ -191,7 +195,7 @@ func (b *PreCommitBatcher) maybeStartBatch(notif, after bool) ([]sealiface.PreCo
delete(b.waiting, sn)
delete(b.todo, sn)
delete(b.deadlines, sn)
delete(b.cutoffs, sn)
}
}
@ -224,14 +228,15 @@ func (b *PreCommitBatcher) processBatch(cfg sealiface.Config) ([]sealiface.PreCo
return []sealiface.PreCommitBatchRes{res}, xerrors.Errorf("couldn't get miner info: %w", err)
}
goodFunds := big.Add(deposit, b.feeCfg.MaxPreCommitGasFee)
maxFee := b.feeCfg.MaxPreCommitBatchGasFee.FeeForSectors(len(params.Sectors))
goodFunds := big.Add(deposit, maxFee)
from, _, err := b.addrSel(b.mctx, mi, api.PreCommitAddr, goodFunds, deposit)
if err != nil {
return []sealiface.PreCommitBatchRes{res}, xerrors.Errorf("no good address found: %w", err)
}
mcid, err := b.api.SendMsg(b.mctx, from, b.maddr, miner.Methods.PreCommitSectorBatch, deposit, b.feeCfg.MaxPreCommitGasFee, enc.Bytes())
mcid, err := b.api.SendMsg(b.mctx, from, b.maddr, miner.Methods.PreCommitSectorBatch, deposit, maxFee, enc.Bytes())
if err != nil {
return []sealiface.PreCommitBatchRes{res}, xerrors.Errorf("sending message failed: %w", err)
}
@ -254,7 +259,7 @@ func (b *PreCommitBatcher) AddPreCommit(ctx context.Context, s SectorInfo, depos
sn := s.SectorNumber
b.lk.Lock()
b.deadlines[sn] = getSectorDeadline(curEpoch, s)
b.cutoffs[sn] = getPreCommitCutoff(curEpoch, s)
b.todo[sn] = &preCommitEntry{
deposit: deposit,
pci: in,
@ -330,3 +335,24 @@ func (b *PreCommitBatcher) Stop(ctx context.Context) error {
return ctx.Err()
}
}
// TODO: If this returned epochs, it would make testing much easier
func getPreCommitCutoff(curEpoch abi.ChainEpoch, si SectorInfo) time.Time {
    cutoffEpoch := si.TicketEpoch + policy.MaxPreCommitRandomnessLookback
    for _, p := range si.Pieces {
        if p.DealInfo == nil {
            continue
        }
        startEpoch := p.DealInfo.DealSchedule.StartEpoch
        if startEpoch < cutoffEpoch {
            cutoffEpoch = startEpoch
        }
    }
    if cutoffEpoch <= curEpoch {
        return time.Now()
    }
    return time.Now().Add(time.Duration(cutoffEpoch-curEpoch) * time.Duration(build.BlockDelaySecs) * time.Second)
}
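For reference, the cutoff-selection loop in batchWait (shown in fragments in the hunks above) reduces to: take the earliest non-zero cutoff across all pending sectors, pull it forward by the slack, and clamp the resulting wait between one nanosecond and maxWait. A self-contained sketch using only the standard library:

package main

import (
    "fmt"
    "time"
)

// nextWait mirrors the batchWait selection: earliest non-zero cutoff,
// minus slack, clamped to [1ns, maxWait].
func nextWait(cutoffs []time.Time, maxWait, slack time.Duration) time.Duration {
    var cutoff time.Time
    for _, c := range cutoffs {
        if cutoff.IsZero() || (!c.IsZero() && c.Before(cutoff)) {
            cutoff = c
        }
    }
    if cutoff.IsZero() {
        return maxWait // nothing scheduled; wait the full interval
    }
    cutoff = cutoff.Add(-slack)
    now := time.Now()
    if cutoff.Before(now) {
        return time.Nanosecond // can't return 0
    }
    if wait := cutoff.Sub(now); wait < maxWait {
        return wait
    }
    return maxWait
}

func main() {
    cs := []time.Time{time.Now().Add(2 * time.Hour), time.Now().Add(30 * time.Minute)}
    fmt.Println(nextWait(cs, time.Hour, 5*time.Minute)) // ~25m: earliest cutoff minus slack
}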

View File

@ -28,6 +28,7 @@ import (
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
"github.com/filecoin-project/lotus/node/config"
)
const SectorStorePrefix = "/sectors"
@ -66,6 +67,7 @@ type SealingAPI interface {
StateMinerPartitions(ctx context.Context, m address.Address, dlIdx uint64, tok TipSetToken) ([]api.Partition, error)
SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error)
ChainHead(ctx context.Context) (TipSetToken, abi.ChainEpoch, error)
ChainBaseFee(context.Context, TipSetToken) (abi.TokenAmount, error)
ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error)
ChainGetRandomnessFromBeacon(ctx context.Context, tok TipSetToken, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error)
ChainGetRandomnessFromTickets(ctx context.Context, tok TipSetToken, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error)
@ -78,7 +80,7 @@ type AddrSel func(ctx context.Context, mi miner.MinerInfo, use api.AddrUse, good
type Sealing struct {
api SealingAPI
feeCfg FeeConfig
feeCfg config.MinerFeeConfig
events Events
maddr address.Address
@ -112,12 +114,6 @@ type Sealing struct {
dealInfo *CurrentDealInfoManager
}
type FeeConfig struct {
MaxPreCommitGasFee abi.TokenAmount
MaxCommitGasFee abi.TokenAmount
MaxTerminateGasFee abi.TokenAmount
}
type openSector struct {
used abi.UnpaddedPieceSize // change to bitfield/rle when AddPiece gains offset support to better fill sectors
@ -134,7 +130,7 @@ type pendingPiece struct {
accepted func(abi.SectorNumber, abi.UnpaddedPieceSize, error)
}
func New(api SealingAPI, fc FeeConfig, events Events, maddr address.Address, ds datastore.Batching, sealer sectorstorage.SectorManager, sc SectorIDCounter, verif ffiwrapper.Verifier, prov ffiwrapper.Prover, pcp PreCommitPolicy, gc GetSealingConfigFunc, notifee SectorStateNotifee, as AddrSel) *Sealing {
func New(api SealingAPI, fc config.MinerFeeConfig, events Events, maddr address.Address, ds datastore.Batching, sealer sectorstorage.SectorManager, sc SectorIDCounter, verif ffiwrapper.Verifier, prov ffiwrapper.Prover, pcp PreCommitPolicy, gc GetSealingConfigFunc, notifee SectorStateNotifee, as AddrSel) *Sealing {
s := &Sealing{
api: api,
feeCfg: fc,

View File

@ -334,7 +334,7 @@ func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInf
return nil
}
goodFunds := big.Add(deposit, m.feeCfg.MaxPreCommitGasFee)
goodFunds := big.Add(deposit, big.Int(m.feeCfg.MaxPreCommitGasFee))
from, _, err := m.addrSel(ctx.Context(), mi, api.PreCommitAddr, goodFunds, deposit)
if err != nil {
@ -342,7 +342,7 @@ func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInf
}
log.Infof("submitting precommit for sector %d (deposit: %s): ", sector.SectorNumber, deposit)
mcid, err := m.api.SendMsg(ctx.Context(), from, m.maddr, miner.Methods.PreCommitSector, deposit, m.feeCfg.MaxPreCommitGasFee, enc.Bytes())
mcid, err := m.api.SendMsg(ctx.Context(), from, m.maddr, miner.Methods.PreCommitSector, deposit, big.Int(m.feeCfg.MaxPreCommitGasFee), enc.Bytes())
if err != nil {
if params.ReplaceCapacity {
m.remarkForUpgrade(params.ReplaceSectorNumber)
@ -566,7 +566,7 @@ func (m *Sealing) handleSubmitCommit(ctx statemachine.Context, sector SectorInfo
collateral = big.Zero()
}
goodFunds := big.Add(collateral, m.feeCfg.MaxCommitGasFee)
goodFunds := big.Add(collateral, big.Int(m.feeCfg.MaxCommitGasFee))
from, _, err := m.addrSel(ctx.Context(), mi, api.CommitAddr, goodFunds, collateral)
if err != nil {
@ -574,7 +574,7 @@ func (m *Sealing) handleSubmitCommit(ctx statemachine.Context, sector SectorInfo
}
// TODO: check seed / ticket / deals are up to date
mcid, err := m.api.SendMsg(ctx.Context(), from, m.maddr, miner.Methods.ProveCommitSector, collateral, m.feeCfg.MaxCommitGasFee, enc.Bytes())
mcid, err := m.api.SendMsg(ctx.Context(), from, m.maddr, miner.Methods.ProveCommitSector, collateral, big.Int(m.feeCfg.MaxCommitGasFee), enc.Bytes())
if err != nil {
return ctx.Send(SectorCommitFailed{xerrors.Errorf("pushing message to mpool: %w", err)})
}
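The recurring big.Int(...) conversions in this file exist because the fee fields now come from the node config as a FIL-denominated wrapper type rather than a raw abi.TokenAmount. A hedged sketch of the pattern — the FIL definition below is an assumption for illustration, not the actual Lotus types package:

package main

import (
    "fmt"
    "math/big"
)

// FIL is an assumed config-side wrapper over a big integer; defining it
// directly over big.Int keeps the two types convertible.
type FIL big.Int

func main() {
    maxFee := FIL(*big.NewInt(1_000_000_000))
    // APIs that expect a raw big.Int need an explicit conversion back:
    asInt := big.Int(maxFee)
    fmt.Println(asInt.String()) // 1000000000
}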

View File

@ -19,6 +19,7 @@ import (
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/node/config"
)
type TerminateBatcherApi interface {
@ -34,7 +35,7 @@ type TerminateBatcher struct {
maddr address.Address
mctx context.Context
addrSel AddrSel
feeCfg FeeConfig
feeCfg config.MinerFeeConfig
getConfig GetSealingConfigFunc
todo map[SectorLocation]*bitfield.BitField // MinerSectorLocation -> BitField
@ -46,7 +47,7 @@ type TerminateBatcher struct {
lk sync.Mutex
}
func NewTerminationBatcher(mctx context.Context, maddr address.Address, api TerminateBatcherApi, addrSel AddrSel, feeCfg FeeConfig, getConfig GetSealingConfigFunc) *TerminateBatcher {
func NewTerminationBatcher(mctx context.Context, maddr address.Address, api TerminateBatcherApi, addrSel AddrSel, feeCfg config.MinerFeeConfig, getConfig GetSealingConfigFunc) *TerminateBatcher {
b := &TerminateBatcher{
api: api,
maddr: maddr,
@ -214,12 +215,12 @@ func (b *TerminateBatcher) processBatch(notif, after bool) (*cid.Cid, error) {
return nil, xerrors.Errorf("couldn't get miner info: %w", err)
}
from, _, err := b.addrSel(b.mctx, mi, api.TerminateSectorsAddr, b.feeCfg.MaxTerminateGasFee, b.feeCfg.MaxTerminateGasFee)
from, _, err := b.addrSel(b.mctx, mi, api.TerminateSectorsAddr, big.Int(b.feeCfg.MaxTerminateGasFee), big.Int(b.feeCfg.MaxTerminateGasFee))
if err != nil {
return nil, xerrors.Errorf("no good address found: %w", err)
}
mcid, err := b.api.SendMsg(b.mctx, from, b.maddr, miner.Methods.TerminateSectors, big.Zero(), b.feeCfg.MaxTerminateGasFee, enc.Bytes())
mcid, err := b.api.SendMsg(b.mctx, from, b.maddr, miner.Methods.TerminateSectors, big.Zero(), big.Int(b.feeCfg.MaxTerminateGasFee), enc.Bytes())
if err != nil {
return nil, xerrors.Errorf("sending message failed: %w", err)
}

gateway/handler.go (new file, 48 lines added)
View File

@ -0,0 +1,48 @@
package gateway

import (
    "net/http"

    "contrib.go.opencensus.io/exporter/prometheus"
    "github.com/filecoin-project/go-jsonrpc"
    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/api/v0api"
    "github.com/filecoin-project/lotus/api/v1api"
    "github.com/filecoin-project/lotus/metrics"
    "github.com/gorilla/mux"
    promclient "github.com/prometheus/client_golang/prometheus"
)

// Handler returns a gateway http.Handler, to be mounted as-is on the server.
func Handler(a api.Gateway, opts ...jsonrpc.ServerOption) (http.Handler, error) {
    m := mux.NewRouter()

    serveRpc := func(path string, hnd interface{}) {
        rpcServer := jsonrpc.NewServer(opts...)
        rpcServer.Register("Filecoin", hnd)
        m.Handle(path, rpcServer)
    }

    ma := metrics.MetricedGatewayAPI(a)

    serveRpc("/rpc/v1", ma)
    serveRpc("/rpc/v0", api.Wrap(new(v1api.FullNodeStruct), new(v0api.WrapperV1Full), ma))

    registry := promclient.DefaultRegisterer.(*promclient.Registry)
    exporter, err := prometheus.NewExporter(prometheus.Options{
        Registry:  registry,
        Namespace: "lotus_gw",
    })
    if err != nil {
        return nil, err
    }
    m.Handle("/debug/metrics", exporter)

    m.PathPrefix("/").Handler(http.DefaultServeMux)

    /*ah := &auth.Handler{
        Verify: nodeApi.AuthVerify,
        Next:   mux.ServeHTTP,
    }*/

    return m, nil
}
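A usage sketch for the new handler: build it from an api.Gateway implementation and mount it on a plain HTTP server. The newGatewayAPI constructor below is a placeholder (it returns nil, so RPC calls would fail at runtime); in a real deployment the gateway API would be backed by a full-node client:

package main

import (
    "log"
    "net/http"

    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/gateway"
)

// newGatewayAPI is a placeholder; a real node wires in a gateway
// implementation backed by a full-node client here.
func newGatewayAPI() api.Gateway { return nil }

func main() {
    h, err := gateway.Handler(newGatewayAPI())
    if err != nil {
        log.Fatalf("building gateway handler: %s", err)
    }
    // /rpc/v0, /rpc/v1 and /debug/metrics are all routed by the returned mux.
    log.Fatal(http.ListenAndServe(":2346", h))
}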

go.mod (34 changes)
View File

@ -48,7 +48,7 @@ require (
github.com/filecoin-project/specs-actors/v2 v2.3.5
github.com/filecoin-project/specs-actors/v3 v3.1.1
github.com/filecoin-project/specs-actors/v4 v4.0.1
github.com/filecoin-project/specs-actors/v5 v5.0.0-20210602024058-0c296bb386bf
github.com/filecoin-project/specs-actors/v5 v5.0.0-20210609212542-73e0409ac77c
github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506
github.com/filecoin-project/test-vectors/schema v0.0.5
github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1
@ -89,7 +89,7 @@ require (
github.com/ipfs/go-ipfs-util v0.0.2
github.com/ipfs/go-ipld-cbor v0.0.5
github.com/ipfs/go-ipld-format v0.2.0
github.com/ipfs/go-log/v2 v2.1.2
github.com/ipfs/go-log/v2 v2.1.3
github.com/ipfs/go-merkledag v0.3.2
github.com/ipfs/go-metrics-interface v0.0.1
github.com/ipfs/go-metrics-prometheus v0.0.2
@ -102,21 +102,21 @@ require (
github.com/lib/pq v1.7.0
github.com/libp2p/go-buffer-pool v0.0.2
github.com/libp2p/go-eventbus v0.2.1
github.com/libp2p/go-libp2p v0.12.0
github.com/libp2p/go-libp2p v0.14.2
github.com/libp2p/go-libp2p-connmgr v0.2.4
github.com/libp2p/go-libp2p-core v0.7.0
github.com/libp2p/go-libp2p-core v0.8.5
github.com/libp2p/go-libp2p-discovery v0.5.0
github.com/libp2p/go-libp2p-kad-dht v0.11.0
github.com/libp2p/go-libp2p-mplex v0.3.0
github.com/libp2p/go-libp2p-noise v0.1.2
github.com/libp2p/go-libp2p-peerstore v0.2.6
github.com/libp2p/go-libp2p-mplex v0.4.1
github.com/libp2p/go-libp2p-noise v0.2.0
github.com/libp2p/go-libp2p-peerstore v0.2.7
github.com/libp2p/go-libp2p-pubsub v0.4.2-0.20210212194758-6c1addf493eb
github.com/libp2p/go-libp2p-quic-transport v0.9.0
github.com/libp2p/go-libp2p-quic-transport v0.10.0
github.com/libp2p/go-libp2p-record v0.1.3
github.com/libp2p/go-libp2p-routing-helpers v0.2.3
github.com/libp2p/go-libp2p-swarm v0.3.1
github.com/libp2p/go-libp2p-swarm v0.5.0
github.com/libp2p/go-libp2p-tls v0.1.3
github.com/libp2p/go-libp2p-yamux v0.4.1
github.com/libp2p/go-libp2p-yamux v0.5.4
github.com/libp2p/go-maddr-filter v0.1.0
github.com/mattn/go-colorable v0.1.6 // indirect
github.com/mattn/go-isatty v0.0.12
@ -124,10 +124,9 @@ require (
github.com/mitchellh/go-homedir v1.1.0
github.com/multiformats/go-base32 v0.0.3
github.com/multiformats/go-multiaddr v0.3.1
github.com/multiformats/go-multiaddr-dns v0.2.0
github.com/multiformats/go-multiaddr-dns v0.3.1
github.com/multiformats/go-multibase v0.0.3
github.com/multiformats/go-multihash v0.0.14
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333
github.com/opentracing/opentracing-go v1.2.0
github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a
@ -145,18 +144,17 @@ require (
github.com/whyrusleeping/pubsub v0.0.0-20190708150250-92bcb0691325
github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542
go.etcd.io/bbolt v1.3.4
go.opencensus.io v0.22.5
go.opencensus.io v0.23.0
go.uber.org/dig v1.10.0 // indirect
go.uber.org/fx v1.9.0
go.uber.org/multierr v1.6.0
go.uber.org/zap v1.16.0
golang.org/x/net v0.0.0-20201022231255-08b38378de70
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68
golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
golang.org/x/sys v0.0.0-20210426080607-c94f62235c83
golang.org/x/time v0.0.0-20191024005414-555d28b269f0
golang.org/x/tools v0.0.0-20201112185108-eeaa07dd7696
golang.org/x/tools v0.0.0-20210106214847-113979e3529a
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
gopkg.in/cheggaaa/pb.v1 v1.0.28
gotest.tools v2.2.0+incompatible
honnef.co/go/tools v0.0.1-2020.1.3 // indirect

go.sum (157 changes)
View File

@ -107,14 +107,18 @@ github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dm
github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI=
github.com/btcsuite/btcd v0.0.0-20190605094302-a0d1e3e36d50/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI=
github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI=
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btcd v0.21.0-beta h1:At9hIZdJW0s9E/fAz28nrz6AmcNlSVucCH796ZteX1M=
github.com/btcsuite/btcd v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts=
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I=
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/buger/goterm v0.0.0-20200322175922-2f3e71b85129 h1:gfAMKE626QEuKG3si0pdTRcr/YEbBoxY+3GOH3gWvl4=
@ -186,8 +190,10 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4=
github.com/davidlazar/go-crypto v0.0.0-20190912175916-7055855a373f h1:BOaYiTvg8p9vBUXpklC22XSK/mifLF7lG9jtmYYi3Tc=
github.com/davidlazar/go-crypto v0.0.0-20190912175916-7055855a373f/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4=
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU=
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U=
github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e h1:lj77EKYUpYXTd8CD/+QMIf8b6OIOTsfEBSXiAzuEHTU=
github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e/go.mod h1:3ZQK6DMPSz/QZ73jlWxBtUhNA8xZx7LzUFSq/OfP8vk=
github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ=
@ -325,15 +331,16 @@ github.com/filecoin-project/specs-actors/v4 v4.0.0/go.mod h1:TkHXf/l7Wyw4ZejyXIP
github.com/filecoin-project/specs-actors/v4 v4.0.1 h1:AiWrtvJZ63MHGe6rn7tPu4nSUY8bA1KDNszqJaD5+Fg=
github.com/filecoin-project/specs-actors/v4 v4.0.1/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng=
github.com/filecoin-project/specs-actors/v5 v5.0.0-20210512015452-4fe3889fff57/go.mod h1:283yBMMUSDB2abcjP/hhrwTkhb9h3sfM6KGrep/ZlBI=
github.com/filecoin-project/specs-actors/v5 v5.0.0-20210602024058-0c296bb386bf h1:xt9A1omyhSDbQvpVk7Na1J15a/n8y0y4GQDLeiWLpFs=
github.com/filecoin-project/specs-actors/v5 v5.0.0-20210602024058-0c296bb386bf/go.mod h1:b/btpRl84Q9SeDKlyIoORBQwe2OTmq14POrYrVvBWCM=
github.com/filecoin-project/specs-actors/v5 v5.0.0-20210609212542-73e0409ac77c h1:GnDJ6q3QEm2ytTKjPFQSvczAltgCSb3j9F1FeynwvPA=
github.com/filecoin-project/specs-actors/v5 v5.0.0-20210609212542-73e0409ac77c/go.mod h1:b/btpRl84Q9SeDKlyIoORBQwe2OTmq14POrYrVvBWCM=
github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 h1:Ur/l2+6qN+lQiqjozWWc5p9UDaAMDZKTlDS98oRnlIw=
github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g=
github.com/filecoin-project/test-vectors/schema v0.0.5 h1:w3zHQhzM4pYxJDl21avXjOKBLF8egrvwUwjpT8TquDg=
github.com/filecoin-project/test-vectors/schema v0.0.5/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6 h1:u/UEqS66A5ckRmS4yNpjmVH56sVtS/RfclBAYocb4as=
github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ=
github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ=
github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
@ -398,8 +405,9 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/gogo/status v1.0.3/go.mod h1:SavQ51ycCLnc7dGyJxp8YAmudx8xqiVrRf+6IXRsugc=
github.com/gogo/status v1.1.0 h1:+eIkrewn5q6b30y+g/BJINVVdi2xH7je5MPJ3ZPK3JA=
github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM=
@ -430,8 +438,9 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf h1:gFVkHXmVAhEbxZVDln5V9GKrLaluNoFHDbrZwAWZgws=
@ -443,14 +452,16 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3 h1:x95R7cp+rSeeqAMI2knLtQ0DKlaBhv2NrtrOvafPHRo=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM=
github.com/google/gopacket v1.1.18 h1:lum7VRA9kdlvBi7/v2p7/zcbkduHaCH/SVVyurs7OpY=
github.com/google/gopacket v1.1.18/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM=
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
@ -691,8 +702,9 @@ github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscw
github.com/ipfs/go-log/v2 v2.0.8/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw=
github.com/ipfs/go-log/v2 v2.1.1/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM=
github.com/ipfs/go-log/v2 v2.1.2-0.20200626104915-0016c0b4b3e4/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM=
github.com/ipfs/go-log/v2 v2.1.2 h1:a0dRiL098zY23vay1h3dimx6y94XchEUyt5h0l4VvQU=
github.com/ipfs/go-log/v2 v2.1.2/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM=
github.com/ipfs/go-log/v2 v2.1.3 h1:1iS3IU7aXRlbgUpN8yTTpJ53NXYjAe37vcI5+5nYrzk=
github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g=
github.com/ipfs/go-merkledag v0.0.3/go.mod h1:Oc5kIXLHokkE1hWGMBHw+oxehkAaTOqtEb7Zbh6BhLA=
github.com/ipfs/go-merkledag v0.0.6/go.mod h1:QYPdnlvkOg7GnQRofu9XZimC5ZW5Wi3bKys/4GQQfto=
github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk=
@ -793,6 +805,7 @@ github.com/kilic/bls12-381 v0.0.0-20200820230200-6b2c19996391 h1:51kHw7l/dUDdOdW
github.com/kilic/bls12-381 v0.0.0-20200820230200-6b2c19996391/go.mod h1:XXfR6YFCRSrkEXbNlIyDsgXVNJWVUV30m/ebkVy9n6s=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@ -823,8 +836,9 @@ github.com/libp2p/go-conn-security v0.0.1/go.mod h1:bGmu51N0KU9IEjX7kl2PQjgZa40J
github.com/libp2p/go-conn-security-multistream v0.0.1/go.mod h1:nc9vud7inQ+d6SO0I/6dSWrdMnHnzZNHeyUQqrAJulE=
github.com/libp2p/go-conn-security-multistream v0.0.2/go.mod h1:nc9vud7inQ+d6SO0I/6dSWrdMnHnzZNHeyUQqrAJulE=
github.com/libp2p/go-conn-security-multistream v0.1.0/go.mod h1:aw6eD7LOsHEX7+2hJkDxw1MteijaVcI+/eP2/x3J1xc=
github.com/libp2p/go-conn-security-multistream v0.2.0 h1:uNiDjS58vrvJTg9jO6bySd1rMKejieG7v45ekqHbZ1M=
github.com/libp2p/go-conn-security-multistream v0.2.0/go.mod h1:hZN4MjlNetKD3Rq5Jb/P5ohUnFLNzEAR4DLSzpn2QLU=
github.com/libp2p/go-conn-security-multistream v0.2.1 h1:ft6/POSK7F+vl/2qzegnHDaXFU0iWB4yVTYrioC6Zy0=
github.com/libp2p/go-conn-security-multistream v0.2.1/go.mod h1:cR1d8gA0Hr59Fj6NhaTpFhJZrjSYuNmhpT2r25zYR70=
github.com/libp2p/go-eventbus v0.0.2/go.mod h1:Hr/yGlwxA/stuLnpMiu82lpNKpvRy3EaJxPu40XYOwk=
github.com/libp2p/go-eventbus v0.1.0/go.mod h1:vROgu5cs5T7cv7POWlWxBaVLxfSegC5UGQf8A2eEmx4=
github.com/libp2p/go-eventbus v0.2.1 h1:VanAdErQnpTioN2TowqNcOijf6YwhuODe4pPKSDpxGc=
@ -847,8 +861,9 @@ github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qD
github.com/libp2p/go-libp2p v0.8.3/go.mod h1:EsH1A+8yoWK+L4iKcbPYu6MPluZ+CHWI9El8cTaefiM=
github.com/libp2p/go-libp2p v0.9.2/go.mod h1:cunHNLDVus66Ct9iXXcjKRLdmHdFdHVe1TAnbubJQqQ=
github.com/libp2p/go-libp2p v0.10.0/go.mod h1:yBJNpb+mGJdgrwbKAKrhPU0u3ogyNFTfjJ6bdM+Q/G8=
github.com/libp2p/go-libp2p v0.12.0 h1:+xai9RQnQ9l5elFOKvp5wRyjyWisSwEx+6nU2+onpUA=
github.com/libp2p/go-libp2p v0.12.0/go.mod h1:FpHZrfC1q7nA8jitvdjKBDF31hguaC676g/nT9PgQM0=
github.com/libp2p/go-libp2p v0.14.2 h1:qs0ABtjjNjS+RIXT1uM7sMJEvIc0pq2nKR0VQxFXhHI=
github.com/libp2p/go-libp2p v0.14.2/go.mod h1:0PQMADQEjCM2l8cSMYDpTgsb8gr6Zq7i4LUgq1mlW2E=
github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052 h1:BM7aaOF7RpmNn9+9g6uTjGJ0cTzWr5j9i9IKeun2M8U=
github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052/go.mod h1:nRMRTab+kZuk0LnKZpxhOVH/ndsdr2Nr//Zltc/vwgo=
github.com/libp2p/go-libp2p-autonat v0.0.2/go.mod h1:fs71q5Xk+pdnKU014o2iq1RhMs9/PMaG5zXRFNnIIT4=
@ -859,8 +874,9 @@ github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQ
github.com/libp2p/go-libp2p-autonat v0.2.1/go.mod h1:MWtAhV5Ko1l6QBsHQNSuM6b1sRkXrpk0/LqCr+vCVxI=
github.com/libp2p/go-libp2p-autonat v0.2.2/go.mod h1:HsM62HkqZmHR2k1xgX34WuWDzk/nBwNHoeyyT4IWV6A=
github.com/libp2p/go-libp2p-autonat v0.2.3/go.mod h1:2U6bNWCNsAG9LEbwccBDQbjzQ8Krdjge1jLTE9rdoMM=
github.com/libp2p/go-libp2p-autonat v0.4.0 h1:3y8XQbpr+ssX8QfZUHekjHCYK64sj6/4hnf/awD4+Ug=
github.com/libp2p/go-libp2p-autonat v0.4.0/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk=
github.com/libp2p/go-libp2p-autonat v0.4.2 h1:YMp7StMi2dof+baaxkbxaizXjY1RPvU71CXfxExzcUU=
github.com/libp2p/go-libp2p-autonat v0.4.2/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk=
github.com/libp2p/go-libp2p-autonat-svc v0.1.0/go.mod h1:fqi8Obl/z3R4PFVLm8xFtZ6PBL9MlV/xumymRFkKq5A=
github.com/libp2p/go-libp2p-blankhost v0.0.1/go.mod h1:Ibpbw/7cPPYwFb7PACIWdvxxv0t0XCCI10t7czjAjTc=
github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro=
@ -907,8 +923,12 @@ github.com/libp2p/go-libp2p-core v0.5.6/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX
github.com/libp2p/go-libp2p-core v0.5.7/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo=
github.com/libp2p/go-libp2p-core v0.6.0/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo=
github.com/libp2p/go-libp2p-core v0.6.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8=
github.com/libp2p/go-libp2p-core v0.7.0 h1:4a0TMjrWNTZlNvcqxZmrMRDi/NQWrhwO2pkTuLSQ/IQ=
github.com/libp2p/go-libp2p-core v0.7.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8=
github.com/libp2p/go-libp2p-core v0.8.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8=
github.com/libp2p/go-libp2p-core v0.8.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8=
github.com/libp2p/go-libp2p-core v0.8.2/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8=
github.com/libp2p/go-libp2p-core v0.8.5 h1:aEgbIcPGsKy6zYcC+5AJivYFedhYa4sW7mIpWpUaLKw=
github.com/libp2p/go-libp2p-core v0.8.5/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8=
github.com/libp2p/go-libp2p-crypto v0.0.1/go.mod h1:yJkNyDmO341d5wwXxDUGO0LykUVT72ImHNUqh5D/dBE=
github.com/libp2p/go-libp2p-crypto v0.0.2/go.mod h1:eETI5OUfBnvARGOHrJz2eWNyTUxEGZnBxMcbUjfIj4I=
github.com/libp2p/go-libp2p-crypto v0.1.0 h1:k9MFy+o2zGDNGsaoZl0MA3iZ75qXxr9OOoAZF+sD5OQ=
@ -943,8 +963,10 @@ github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3
github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE=
github.com/libp2p/go-libp2p-mplex v0.2.2/go.mod h1:74S9eum0tVQdAfFiKxAyKzNdSuLqw5oadDq7+L/FELo=
github.com/libp2p/go-libp2p-mplex v0.2.3/go.mod h1:CK3p2+9qH9x+7ER/gWWDYJ3QW5ZxWDkm+dVvjfuG3ek=
github.com/libp2p/go-libp2p-mplex v0.3.0 h1:CZyqqKP0BSGQyPLvpRQougbfXaaaJZdGgzhCpJNuNSk=
github.com/libp2p/go-libp2p-mplex v0.3.0/go.mod h1:l9QWxRbbb5/hQMECEb908GbS9Sm2UAR2KFZKUJEynEs=
github.com/libp2p/go-libp2p-mplex v0.4.0/go.mod h1:yCyWJE2sc6TBTnFpjvLuEJgTSw/u+MamvzILKdX7asw=
github.com/libp2p/go-libp2p-mplex v0.4.1 h1:/pyhkP1nLwjG3OM+VuaNJkQT/Pqq73WzB3aDN3Fx1sc=
github.com/libp2p/go-libp2p-mplex v0.4.1/go.mod h1:cmy+3GfqfM1PceHTLL7zQzAAYaryDu6iPSC+CIb094g=
github.com/libp2p/go-libp2p-nat v0.0.2/go.mod h1:QrjXQSD5Dj4IJOdEcjHRkWTSomyxRo6HnUkf/TfQpLQ=
github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY=
github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE=
@ -956,8 +978,8 @@ github.com/libp2p/go-libp2p-netutil v0.0.1/go.mod h1:GdusFvujWZI9Vt0X5BKqwWWmZFx
github.com/libp2p/go-libp2p-netutil v0.1.0 h1:zscYDNVEcGxyUpMd0JReUZTrpMfia8PmLKcKF72EAMQ=
github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU=
github.com/libp2p/go-libp2p-noise v0.1.1/go.mod h1:QDFLdKX7nluB7DEnlVPbz7xlLHdwHFA9HiohJRr3vwM=
github.com/libp2p/go-libp2p-noise v0.1.2 h1:IH9GRihQJTx56obm+GnpdPX4KeVIlvpXrP6xnJ0wxWk=
github.com/libp2p/go-libp2p-noise v0.1.2/go.mod h1:9B10b7ueo7TIxZHHcjcDCo5Hd6kfKT2m77by82SFRfE=
github.com/libp2p/go-libp2p-noise v0.2.0 h1:wmk5nhB9a2w2RxMOyvsoKjizgJOEaJdfAakr0jN8gds=
github.com/libp2p/go-libp2p-noise v0.2.0/go.mod h1:IEbYhBBzGyvdLBoxxULL/SGbJARhUeqlO8lVSREYu2Q=
github.com/libp2p/go-libp2p-peer v0.0.1/go.mod h1:nXQvOBbwVqoP+T5Y5nCjeH4sP9IX/J0AMzcDUVruVoo=
github.com/libp2p/go-libp2p-peer v0.1.1/go.mod h1:jkF12jGB4Gk/IOo+yomm+7oLWxF278F7UnrYUQ1Q8es=
github.com/libp2p/go-libp2p-peer v0.2.0 h1:EQ8kMjaCUwt/Y5uLgjT8iY2qg0mGUT0N1zUjer50DsY=
@ -972,8 +994,9 @@ github.com/libp2p/go-libp2p-peerstore v0.2.1/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRj
github.com/libp2p/go-libp2p-peerstore v0.2.2/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA=
github.com/libp2p/go-libp2p-peerstore v0.2.3/go.mod h1:K8ljLdFn590GMttg/luh4caB/3g0vKuY01psze0upRw=
github.com/libp2p/go-libp2p-peerstore v0.2.4/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s=
github.com/libp2p/go-libp2p-peerstore v0.2.6 h1:2ACefBX23iMdJU9Ke+dcXt3w86MIryes9v7In4+Qq3U=
github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s=
github.com/libp2p/go-libp2p-peerstore v0.2.7 h1:83JoLxyR9OYTnNfB5vvFqvMUv/xDNa6NoPHnENhBsGw=
github.com/libp2p/go-libp2p-peerstore v0.2.7/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s=
github.com/libp2p/go-libp2p-pnet v0.2.0 h1:J6htxttBipJujEjz1y0a5+eYoiPcFHhSYHH6na5f0/k=
github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA=
github.com/libp2p/go-libp2p-protocol v0.0.1/go.mod h1:Af9n4PiruirSDjHycM1QuiMi/1VZNHYcK8cLgFJLZ4s=
@ -984,8 +1007,8 @@ github.com/libp2p/go-libp2p-pubsub v0.4.2-0.20210212194758-6c1addf493eb h1:HExLc
github.com/libp2p/go-libp2p-pubsub v0.4.2-0.20210212194758-6c1addf493eb/go.mod h1:izkeMLvz6Ht8yAISXjx60XUQZMq9ZMe5h2ih4dLIBIQ=
github.com/libp2p/go-libp2p-quic-transport v0.1.1/go.mod h1:wqG/jzhF3Pu2NrhJEvE+IE0NTHNXslOPn9JQzyCAxzU=
github.com/libp2p/go-libp2p-quic-transport v0.5.0/go.mod h1:IEcuC5MLxvZ5KuHKjRu+dr3LjCT1Be3rcD/4d8JrX8M=
github.com/libp2p/go-libp2p-quic-transport v0.9.0 h1:WPuq5nV/chmIZIzvrkC2ulSdAQ0P0BDvgvAhZFOZ59E=
github.com/libp2p/go-libp2p-quic-transport v0.9.0/go.mod h1:xyY+IgxL0qsW7Kiutab0+NlxM0/p9yRtrGTYsuMWf70=
github.com/libp2p/go-libp2p-quic-transport v0.10.0 h1:koDCbWD9CCHwcHZL3/WEvP2A+e/o5/W5L3QS/2SPMA0=
github.com/libp2p/go-libp2p-quic-transport v0.10.0/go.mod h1:RfJbZ8IqXIhxBRm5hqUEJqjiiY8xmEuq3HUDS993MkA=
github.com/libp2p/go-libp2p-record v0.0.1/go.mod h1:grzqg263Rug/sRex85QrDOLntdFAymLDLm7lxMgU79Q=
github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q=
github.com/libp2p/go-libp2p-record v0.1.1/go.mod h1:VRgKajOyMVgP/F0L5g3kH7SVskp17vFi2xheb5uMJtg=
@ -1012,8 +1035,9 @@ github.com/libp2p/go-libp2p-swarm v0.2.4/go.mod h1:/xIpHFPPh3wmSthtxdGbkHZ0OET1h
github.com/libp2p/go-libp2p-swarm v0.2.7/go.mod h1:ZSJ0Q+oq/B1JgfPHJAT2HTall+xYRNYp1xs4S2FBWKA=
github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM=
github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk=
github.com/libp2p/go-libp2p-swarm v0.3.1 h1:UTobu+oQHGdXTOGpZ4RefuVqYoJXcT0EBtSR74m2LkI=
github.com/libp2p/go-libp2p-swarm v0.3.1/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk=
github.com/libp2p/go-libp2p-swarm v0.5.0 h1:HIK0z3Eqoo8ugmN8YqWAhD2RORgR+3iNXYG4U2PFd1E=
github.com/libp2p/go-libp2p-swarm v0.5.0/go.mod h1:sU9i6BoHE0Ve5SKz3y9WfKrh8dUat6JknzUehFx8xW4=
github.com/libp2p/go-libp2p-testing v0.0.1/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
@ -1021,8 +1045,9 @@ github.com/libp2p/go-libp2p-testing v0.0.4/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MB
github.com/libp2p/go-libp2p-testing v0.1.0/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0=
github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0=
github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8/go.mod h1:Qy8sAncLKpwXtS2dSnDOP8ktexIAHKu+J+pnZOFZLTc=
github.com/libp2p/go-libp2p-testing v0.3.0 h1:ZiBYstPamsi7y6NJZebRudUzsYmVkt998hltyLqf8+g=
github.com/libp2p/go-libp2p-testing v0.3.0/go.mod h1:efZkql4UZ7OVsEfaxNHZPzIehtsBXMrXnCfJIgDti5g=
github.com/libp2p/go-libp2p-testing v0.4.0 h1:PrwHRi0IGqOwVQWR3xzgigSlhlLfxgfXgkHxr77EghQ=
github.com/libp2p/go-libp2p-testing v0.4.0/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0=
github.com/libp2p/go-libp2p-tls v0.1.3 h1:twKMhMu44jQO+HgQK9X8NHO5HkeJu2QbhLzLJpa8oNM=
github.com/libp2p/go-libp2p-tls v0.1.3/go.mod h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M=
github.com/libp2p/go-libp2p-transport v0.0.1/go.mod h1:UzbUs9X+PHOSw7S3ZmeOxfnwaQY5vGDzZmKPod3N3tk=
@ -1032,8 +1057,9 @@ github.com/libp2p/go-libp2p-transport-upgrader v0.0.1/go.mod h1:NJpUAgQab/8K6K0m
github.com/libp2p/go-libp2p-transport-upgrader v0.0.4/go.mod h1:RGq+tupk+oj7PzL2kn/m1w6YXxcIAYJYeI90h6BGgUc=
github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA=
github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns=
github.com/libp2p/go-libp2p-transport-upgrader v0.3.0 h1:q3ULhsknEQ34eVDhv4YwKS8iet69ffs9+Fir6a7weN4=
github.com/libp2p/go-libp2p-transport-upgrader v0.3.0/go.mod h1:i+SKzbRnvXdVbU3D1dwydnTmKRPXiAR/fyvi1dXuL4o=
github.com/libp2p/go-libp2p-transport-upgrader v0.4.2 h1:4JsnbfJzgZeRS9AWN7B9dPqn/LY/HoQTlO9gtdJTIYM=
github.com/libp2p/go-libp2p-transport-upgrader v0.4.2/go.mod h1:NR8ne1VwfreD5VIWIU62Agt/J18ekORFU/j1i2y8zvk=
github.com/libp2p/go-libp2p-yamux v0.1.2/go.mod h1:xUoV/RmYkg6BW/qGxA9XJyg+HzXFYkeXbnhjmnYzKp8=
github.com/libp2p/go-libp2p-yamux v0.1.3/go.mod h1:VGSQVrqkh6y4nm0189qqxMtvyBft44MOYYPpYKXiVt4=
github.com/libp2p/go-libp2p-yamux v0.2.0/go.mod h1:Db2gU+XfLpm6E4rG5uGCFX6uXA8MEXOxFcRoXUODaK8=
@ -1043,8 +1069,9 @@ github.com/libp2p/go-libp2p-yamux v0.2.5/go.mod h1:Zpgj6arbyQrmZ3wxSZxfBmbdnWtbZ
github.com/libp2p/go-libp2p-yamux v0.2.7/go.mod h1:X28ENrBMU/nm4I3Nx4sZ4dgjZ6VhLEn0XhIoZ5viCwU=
github.com/libp2p/go-libp2p-yamux v0.2.8/go.mod h1:/t6tDqeuZf0INZMTgd0WxIRbtK2EzI2h7HbFm9eAKI4=
github.com/libp2p/go-libp2p-yamux v0.4.0/go.mod h1:+DWDjtFMzoAwYLVkNZftoucn7PelNoy5nm3tZ3/Zw30=
github.com/libp2p/go-libp2p-yamux v0.4.1 h1:TJxRVPY9SjH7TNrNC80l1OJMBiWhs1qpKmeB+1Ug3xU=
github.com/libp2p/go-libp2p-yamux v0.4.1/go.mod h1:FA/NjRYRVNjqOzpGuGqcruH7jAU2mYIjtKBicVOL3dc=
github.com/libp2p/go-libp2p-yamux v0.5.0/go.mod h1:AyR8k5EzyM2QN9Bbdg6X1SkVVuqLwTGf0L4DFq9g6po=
github.com/libp2p/go-libp2p-yamux v0.5.4 h1:/UOPtT/6DHPtr3TtKXBHa6g0Le0szYuI33Xc/Xpd7fQ=
github.com/libp2p/go-libp2p-yamux v0.5.4/go.mod h1:tfrXbyaTqqSU654GTvK3ocnSZL3BuHoeTSqhcel1wsE=
github.com/libp2p/go-maddr-filter v0.0.1/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q=
github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q=
github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M=
@ -1056,8 +1083,9 @@ github.com/libp2p/go-mplex v0.0.4/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTW
github.com/libp2p/go-mplex v0.1.0/go.mod h1:SXgmdki2kwCUlCCbfGLEgHjC4pFqhTp0ZoV6aiKgxDU=
github.com/libp2p/go-mplex v0.1.1/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk=
github.com/libp2p/go-mplex v0.1.2/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk=
github.com/libp2p/go-mplex v0.2.0 h1:Ov/D+8oBlbRkjBs1R1Iua8hJ8cUfbdiW8EOdZuxcgaI=
github.com/libp2p/go-mplex v0.2.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ=
github.com/libp2p/go-mplex v0.3.0 h1:U1T+vmCYJaEoDJPV1aq31N56hS+lJgb397GsylNSgrU=
github.com/libp2p/go-mplex v0.3.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ=
github.com/libp2p/go-msgio v0.0.1/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ=
github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ=
github.com/libp2p/go-msgio v0.0.3/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ=
@ -1069,8 +1097,9 @@ github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/
github.com/libp2p/go-nat v0.0.5 h1:qxnwkco8RLKqVh1NmjQ+tJ8p8khNLFxuElYG/TwqW4Q=
github.com/libp2p/go-nat v0.0.5/go.mod h1:B7NxsVNPZmRLvMOwiEO1scOSyjA56zxYAGv1yQgRkEU=
github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk=
github.com/libp2p/go-netroute v0.1.3 h1:1ngWRx61us/EpaKkdqkMjKk/ufr/JlIFYQAxV2XX8Ig=
github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk=
github.com/libp2p/go-netroute v0.1.6 h1:ruPJStbYyXVYGQ81uzEDzuvbYRLKRrLvTYd33yomC38=
github.com/libp2p/go-netroute v0.1.6/go.mod h1:AqhkMh0VuWmfgtxKPp3Oc1LdU5QSWS7wl0QLhSZqXxQ=
github.com/libp2p/go-openssl v0.0.2/go.mod h1:v8Zw2ijCSWBQi8Pq5GAixw6DbFfa9u6VIYDXnvOXkc0=
github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc=
github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc=
@ -1086,8 +1115,9 @@ github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2
github.com/libp2p/go-reuseport-transport v0.0.4 h1:OZGz0RB620QDGpv300n1zaOcKGGAoGVf8h9txtt/1uM=
github.com/libp2p/go-reuseport-transport v0.0.4/go.mod h1:trPa7r/7TJK/d+0hdBLOCGvpQQVOU74OXbNCIMkufGw=
github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k=
github.com/libp2p/go-sockaddr v0.1.0 h1:Y4s3/jNoryVRKEBrkJ576F17CPOaMIzUeCsg7dlTDj0=
github.com/libp2p/go-sockaddr v0.1.0/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k=
github.com/libp2p/go-sockaddr v0.1.1 h1:yD80l2ZOdGksnOyHrhxDdTDFrf7Oy+v3FMVArIRgZxQ=
github.com/libp2p/go-sockaddr v0.1.1/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k=
github.com/libp2p/go-stream-muxer v0.0.1/go.mod h1:bAo8x7YkSpadMTbtTaxGVHWUQsR/l5MEaHbKaliuT14=
github.com/libp2p/go-stream-muxer v0.1.0/go.mod h1:8JAVsjeRBCWwPoZeH0W1imLOcriqXJyFvB0mR4A04sQ=
github.com/libp2p/go-stream-muxer-multistream v0.1.1/go.mod h1:zmGdfkQ1AzOECIAcccoL8L//laqawOsO03zX8Sa+eGw=
@ -1109,8 +1139,9 @@ github.com/libp2p/go-ws-transport v0.1.0/go.mod h1:rjw1MG1LU9YDC6gzmwObkPd/Sqwhw
github.com/libp2p/go-ws-transport v0.1.2/go.mod h1:dsh2Ld8F+XNmzpkaAijmg5Is+e9l6/1tK/6VFOdN69Y=
github.com/libp2p/go-ws-transport v0.2.0/go.mod h1:9BHJz/4Q5A9ludYWKoGCFC5gUElzlHoKzu0yY9p/klM=
github.com/libp2p/go-ws-transport v0.3.0/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk=
github.com/libp2p/go-ws-transport v0.3.1 h1:ZX5rWB8nhRRJVaPO6tmkGI/Xx8XNboYX20PW5hXIscw=
github.com/libp2p/go-ws-transport v0.3.1/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk=
github.com/libp2p/go-ws-transport v0.4.0 h1:9tvtQ9xbws6cA5LvqdE6Ne3vcmGB4f1z9SByggk4s0k=
github.com/libp2p/go-ws-transport v0.4.0/go.mod h1:EcIEKqf/7GDjth6ksuS/6p7R49V4CBY6/E7R/iyhYUA=
github.com/libp2p/go-yamux v1.2.1/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow=
github.com/libp2p/go-yamux v1.2.2/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow=
github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow=
@ -1122,12 +1153,14 @@ github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/h
github.com/libp2p/go-yamux v1.4.0/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE=
github.com/libp2p/go-yamux v1.4.1 h1:P1Fe9vF4th5JOxxgQvfbOHkrGqIZniTLf+ddhZp8YTI=
github.com/libp2p/go-yamux v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE=
github.com/libp2p/go-yamux/v2 v2.2.0 h1:RwtpYZ2/wVviZ5+3pjC8qdQ4TKnrak0/E01N1UWoAFU=
github.com/libp2p/go-yamux/v2 v2.2.0/go.mod h1:3So6P6TV6r75R9jiBpiIKgU/66lOarCZjqROGxzPpPQ=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/lucas-clemente/quic-go v0.11.2/go.mod h1:PpMmPfPKO9nKJ/psF49ESTAGQSdfXxlg1otPbEB2nOw=
github.com/lucas-clemente/quic-go v0.16.0/go.mod h1:I0+fcNTdb9eS1ZcjQZbDVPGchJ86chcIxPALn9lEJqE=
github.com/lucas-clemente/quic-go v0.18.1 h1:DMR7guC0NtVS8zNZR3IO7NARZvZygkSC56GGtC6cyys=
github.com/lucas-clemente/quic-go v0.18.1/go.mod h1:yXttHsSNxQi8AWijC/vLP+OJczXqzHSOcJrM5ITUlCg=
github.com/lucas-clemente/quic-go v0.19.3 h1:eCDQqvGBB+kCTkA0XrAFtNe81FMa0/fn4QSoeAbmiF4=
github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8=
github.com/lucasb-eyer/go-colorful v1.0.3 h1:QIbQXiugsb+q10B+MI+7DI1oQLdmnep86tWFlaaUAac=
github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg=
@ -1142,13 +1175,13 @@ github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7
github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/marten-seemann/qpack v0.1.0/go.mod h1:LFt1NU/Ptjip0C2CPkhimBz5CGE3WGDAUWqna+CNTrI=
github.com/marten-seemann/qpack v0.2.0/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc=
github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc=
github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk=
github.com/marten-seemann/qtls v0.9.1/go.mod h1:T1MmAdDPyISzxlK6kjRr0pcZFBVd1OZbBb/j3cvzHhk=
github.com/marten-seemann/qtls v0.10.0 h1:ECsuYUKalRL240rRD4Ri33ISb7kAQ3qGDlrrl55b2pc=
github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs=
github.com/marten-seemann/qtls-go1-15 v0.1.0 h1:i/YPXVxz8q9umso/5y474CNcHmTpA+5DH+mFPjx6PZg=
github.com/marten-seemann/qtls-go1-15 v0.1.0/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I=
github.com/marten-seemann/qtls-go1-15 v0.1.1 h1:LIH6K34bPVttyXnUWixk0bzH6/N07VxbSabxn5A5gZQ=
github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
@ -1182,6 +1215,8 @@ github.com/miekg/dns v1.1.4/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nr
github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g=
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
@ -1228,8 +1263,9 @@ github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/94
github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q=
github.com/multiformats/go-multiaddr-dns v0.0.3/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q=
github.com/multiformats/go-multiaddr-dns v0.1.0/go.mod h1:01k2RAqtoXIuPa3DCavAE9/6jc6nM0H3EgZyfUhN2oY=
github.com/multiformats/go-multiaddr-dns v0.2.0 h1:YWJoIDwLePniH7OU5hBnDZV6SWuvJqJ0YtN6pLeH9zA=
github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0=
github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A=
github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk=
github.com/multiformats/go-multiaddr-fmt v0.0.1/go.mod h1:aBYjqL4T/7j4Qx+R73XSv/8JsgnRFlf0w2KGLCmXl3Q=
github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo=
@ -1259,8 +1295,10 @@ github.com/multiformats/go-multistream v0.0.1/go.mod h1:fJTiDfXJVmItycydCnNx4+wS
github.com/multiformats/go-multistream v0.0.4/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg=
github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg=
github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38=
github.com/multiformats/go-multistream v0.2.0 h1:6AuNmQVKUkRnddw2YiDjt5Elit40SFxMJkVnhmETXtU=
github.com/multiformats/go-multistream v0.2.0/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k=
github.com/multiformats/go-multistream v0.2.1/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k=
github.com/multiformats/go-multistream v0.2.2 h1:TCYu1BHTDr1F/Qm75qwYISQdzGcRdC21nFgQW7l7GBo=
github.com/multiformats/go-multistream v0.2.2/go.mod h1:UIcnm7Zuo8HKG+HkWgfQsGL+/MIEhyTqbODbIUwSXKs=
github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
@ -1277,8 +1315,6 @@ github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OS
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
github.com/ngdinhtoan/glide-cleanup v0.2.0/go.mod h1:UQzsmiDOb8YV3nOsCxK/c9zPpCZVNoHScRE3EO9pVMM=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nikkolasg/hexjson v0.0.0-20181101101858-78e39397e00c h1:5bFTChQxSKNwy8ALwOebjekYExl9HTT9urdawqC95tA=
github.com/nikkolasg/hexjson v0.0.0-20181101101858-78e39397e00c/go.mod h1:7qN3Y0BvzRUf4LofcoJplQL10lsFDb4PYlePTVwrP28=
github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229 h1:E2B8qYyeSgv5MXpmzZXRNp8IAQ4vjxIjhpAf5hv/tAg=
@ -1296,6 +1332,7 @@ github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
@ -1580,6 +1617,7 @@ github.com/xorcare/golden v0.6.0/go.mod h1:7T39/ZMvaSEZlBPoYfVFmsBLmUl3uz9IuzWj/
github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542 h1:oWgZJmC1DorFZDpfMfWg7xk29yEOZiXmo/wZl+utTI8=
github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542/go.mod h1:7T39/ZMvaSEZlBPoYfVFmsBLmUl3uz9IuzWj/U6FtvQ=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/zondax/hid v0.9.0 h1:eiT3P6vNxAEVxXMw66eZUAAnU2zD33JBkfG/EnfAKl8=
@ -1607,8 +1645,8 @@ go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5 h1:dntmOdLpSpHlVqbW5Eay97DelsZHe+55D+xC6i0dDS0=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
@ -1661,16 +1699,19 @@ golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 h1:It14KIkyBFYkHkwZ7k45minvA9aorojkyjGk9KJ5B/w=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/exp v0.0.0-20181106170214-d68db9428509/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@ -1693,8 +1734,9 @@ golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHl
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367 h1:0IiAsCRByjO2QjX7ZPkw5oU9x+n1YqRL802rjC0c3Aw=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
@ -1705,6 +1747,7 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180524181706-dfa909b99c79/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -1740,6 +1783,7 @@ golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200519113804-d87ec0cfa476/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
@ -1747,8 +1791,11 @@ golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201022231255-08b38378de70 h1:Z6x4N9mAi4oF0TbHweCsH618MO6OI6UFgV0FP5n0wBY=
golang.org/x/net v0.0.0-20201022231255-08b38378de70/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6 h1:0PC75Fz/kyMGhL0e1QnypqK2kQMqKt9csD1GnMJR+Zk=
golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@ -1766,8 +1813,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180202135801-37707fdb30a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -1834,16 +1881,21 @@ golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200812155832-6a926be9bd1d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200926100807-9d91bd62050c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210426080607-c94f62235c83 h1:kHSDPqCtsHZOg0nVylfTo20DDhE9gG4Y0jn7hKQ0QAM=
golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf h1:MZ2shdL+ZM/XzY3ZGOnh4Nlpnxz5GSOhOmtHo3iPU6M=
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -1887,10 +1939,12 @@ golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200216192241-b320d3a0f5a2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200711155855-7342f9734a7d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20201112185108-eeaa07dd7696 h1:Bfazo+enXJET5SbHeh95NtxabJF6fJ9r/jpfRJgd3j4=
golang.org/x/tools v0.0.0-20201112185108-eeaa07dd7696/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a h1:CB3a9Nez8M13wwlr/E2YtwoU+qYHKfC+JrDa45RXXoQ=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -1958,8 +2012,9 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.31.1 h1:SfXqXS5hkufcdZ/mHtYCh53P2b+92WQq/DZcKLgsFRs=
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.2 h1:EQyQC3sa8M+p6Ulc8yy9SWSS2GVwyRc83gAbG8lrl4o=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@ -1975,8 +2030,8 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/cheggaaa/pb.v1 v1.0.28 h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk=
gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=

267
itests/api_test.go Normal file
View File

@ -0,0 +1,267 @@
package itests
import (
"context"
"strings"
"testing"
"time"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/big"
lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/node/impl"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestAPI(t *testing.T) {
t.Run("direct", func(t *testing.T) {
runAPITest(t, kit.Builder)
})
t.Run("rpc", func(t *testing.T) {
runAPITest(t, kit.RPCBuilder)
})
}
type apiSuite struct {
makeNodes kit.APIBuilder
}
// runAPITest is the entry point to the API test suite
func runAPITest(t *testing.T, b kit.APIBuilder) {
ts := apiSuite{
makeNodes: b,
}
t.Run("version", ts.testVersion)
t.Run("id", ts.testID)
t.Run("testConnectTwo", ts.testConnectTwo)
t.Run("testMining", ts.testMining)
t.Run("testMiningReal", ts.testMiningReal)
t.Run("testSearchMsg", ts.testSearchMsg)
t.Run("testNonGenesisMiner", ts.testNonGenesisMiner)
}
func (ts *apiSuite) testVersion(t *testing.T) {
lapi.RunningNodeType = lapi.NodeFull
t.Cleanup(func() {
lapi.RunningNodeType = lapi.NodeUnknown
})
ctx := context.Background()
apis, _ := ts.makeNodes(t, kit.OneFull, kit.OneMiner)
napi := apis[0]
v, err := napi.Version(ctx)
if err != nil {
t.Fatal(err)
}
versions := strings.Split(v.Version, "+")
if len(versions) == 0 {
t.Fatal("empty version")
}
require.Equal(t, versions[0], build.BuildVersion)
}
func (ts *apiSuite) testSearchMsg(t *testing.T) {
apis, miners := ts.makeNodes(t, kit.OneFull, kit.OneMiner)
api := apis[0]
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
senderAddr, err := api.WalletDefaultAddress(ctx)
if err != nil {
t.Fatal(err)
}
msg := &types.Message{
From: senderAddr,
To: senderAddr,
Value: big.Zero(),
}
bm := kit.NewBlockMiner(t, miners[0])
bm.MineBlocks(ctx, 100*time.Millisecond)
defer bm.Stop()
sm, err := api.MpoolPushMessage(ctx, msg, nil)
if err != nil {
t.Fatal(err)
}
res, err := api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
if err != nil {
t.Fatal(err)
}
if res.Receipt.ExitCode != 0 {
t.Fatal("did not successfully send message")
}
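// StateSearchMsg looks the message up on chain without blocking,
// unlike StateWaitMsg above; both should resolve to the same
// execution tipset.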
searchRes, err := api.StateSearchMsg(ctx, types.EmptyTSK, sm.Cid(), lapi.LookbackNoLimit, true)
if err != nil {
t.Fatal(err)
}
if searchRes.TipSet != res.TipSet {
t.Fatalf("search ts: %s, different from wait ts: %s", searchRes.TipSet, res.TipSet)
}
}
func (ts *apiSuite) testID(t *testing.T) {
ctx := context.Background()
apis, _ := ts.makeNodes(t, kit.OneFull, kit.OneMiner)
api := apis[0]
id, err := api.ID(ctx)
if err != nil {
t.Fatal(err)
}
assert.Regexp(t, "^12", id.Pretty())
}
func (ts *apiSuite) testConnectTwo(t *testing.T) {
ctx := context.Background()
apis, _ := ts.makeNodes(t, kit.TwoFull, kit.OneMiner)
p, err := apis[0].NetPeers(ctx)
if err != nil {
t.Fatal(err)
}
if len(p) != 0 {
t.Error("Node 0 has a peer")
}
p, err = apis[1].NetPeers(ctx)
if err != nil {
t.Fatal(err)
}
if len(p) != 0 {
t.Error("Node 1 has a peer")
}
addrs, err := apis[1].NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := apis[0].NetConnect(ctx, addrs); err != nil {
t.Fatal(err)
}
p, err = apis[0].NetPeers(ctx)
if err != nil {
t.Fatal(err)
}
if len(p) != 1 {
t.Error("Node 0 doesn't have 1 peer")
}
p, err = apis[1].NetPeers(ctx)
if err != nil {
t.Fatal(err)
}
if len(p) != 1 {
t.Error("Node 0 doesn't have 1 peer")
}
}
func (ts *apiSuite) testMining(t *testing.T) {
ctx := context.Background()
fulls, miners := ts.makeNodes(t, kit.OneFull, kit.OneMiner)
api := fulls[0]
newHeads, err := api.ChainNotify(ctx)
require.NoError(t, err)
initHead := (<-newHeads)[0]
baseHeight := initHead.Val.Height()
h1, err := api.ChainHead(ctx)
require.NoError(t, err)
require.Equal(t, int64(h1.Height()), int64(baseHeight))
bm := kit.NewBlockMiner(t, miners[0])
bm.MineUntilBlock(ctx, fulls[0], nil)
require.NoError(t, err)
<-newHeads
h2, err := api.ChainHead(ctx)
require.NoError(t, err)
require.Greater(t, int64(h2.Height()), int64(h1.Height()))
}
func (ts *apiSuite) testMiningReal(t *testing.T) {
build.InsecurePoStValidation = false
defer func() {
build.InsecurePoStValidation = true
}()
ctx := context.Background()
fulls, miners := ts.makeNodes(t, kit.OneFull, kit.OneMiner)
api := fulls[0]
newHeads, err := api.ChainNotify(ctx)
require.NoError(t, err)
at := (<-newHeads)[0].Val.Height()
h1, err := api.ChainHead(ctx)
require.NoError(t, err)
require.Equal(t, int64(at), int64(h1.Height()))
bm := kit.NewBlockMiner(t, miners[0])
bm.MineUntilBlock(ctx, fulls[0], nil)
require.NoError(t, err)
<-newHeads
h2, err := api.ChainHead(ctx)
require.NoError(t, err)
require.Greater(t, int64(h2.Height()), int64(h1.Height()))
bm.MineUntilBlock(ctx, fulls[0], nil)
require.NoError(t, err)
<-newHeads
h3, err := api.ChainHead(ctx)
require.NoError(t, err)
require.Greater(t, int64(h3.Height()), int64(h2.Height()))
}
func (ts *apiSuite) testNonGenesisMiner(t *testing.T) {
ctx := context.Background()
n, sn := ts.makeNodes(t,
[]kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(-1)},
[]kit.StorageMiner{{Full: 0, Preseal: kit.PresealGenesis}},
)
full, ok := n[0].FullNode.(*impl.FullNodeAPI)
if !ok {
t.Skip("not testing with a full node")
return
}
genesisMiner := sn[0]
bm := kit.NewBlockMiner(t, genesisMiner)
bm.MineBlocks(ctx, 4*time.Millisecond)
t.Cleanup(bm.Stop)
gaa, err := genesisMiner.ActorAddress(ctx)
require.NoError(t, err)
gmi, err := full.StateMinerInfo(ctx, gaa, types.EmptyTSK)
require.NoError(t, err)
testm := n[0].Stb(ctx, t, kit.TestSpt, gmi.Owner)
ta, err := testm.ActorAddress(ctx)
require.NoError(t, err)
tid, err := address.IDFromAddress(ta)
require.NoError(t, err)
require.Equal(t, uint64(1001), tid)
}
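Each case above runs twice, in-process and over RPC, because runAPITest is parameterized on kit.APIBuilder. Extending the suite is just a method plus a t.Run registration; a minimal sketch (testChainHead is a hypothetical name, everything else appears above):

	// Hypothetical extra case; register it inside runAPITest with
	// t.Run("testChainHead", ts.testChainHead).
	func (ts *apiSuite) testChainHead(t *testing.T) {
		ctx := context.Background()

		apis, _ := ts.makeNodes(t, kit.OneFull, kit.OneMiner)

		head, err := apis[0].ChainHead(ctx)
		require.NoError(t, err)
		require.NotNil(t, head) // at minimum the genesis tipset
	}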

140
itests/batch_deal_test.go Normal file
View File

@ -0,0 +1,140 @@
package itests
import (
"context"
"fmt"
"sort"
"testing"
"time"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/markets/storageadapter"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/impl"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/stretchr/testify/require"
)
func TestBatchDealInput(t *testing.T) {
kit.QuietMiningLogs()
var (
blockTime = 10 * time.Millisecond
// For these tests where the block time is artificially short, just use
// a deal start epoch that is guaranteed to be far enough in the future
// so that the deal starts sealing in time
dealStartEpoch = abi.ChainEpoch(2 << 12)
)
run := func(piece, deals, expectSectors int) func(t *testing.T) {
return func(t *testing.T) {
publishPeriod := 10 * time.Second
maxDealsPerMsg := uint64(deals)
// Set max deals per publish deals message to maxDealsPerMsg
minerDef := []kit.StorageMiner{{
Full: 0,
Opts: node.Options(
node.Override(
new(*storageadapter.DealPublisher),
storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
Period: publishPeriod,
MaxDealsPerMsg: maxDealsPerMsg,
})),
node.Override(new(dtypes.GetSealingConfigFunc), func() (dtypes.GetSealingConfigFunc, error) {
return func() (sealiface.Config, error) {
return sealiface.Config{
MaxWaitDealsSectors: 2,
MaxSealingSectors: 1,
MaxSealingSectorsForDeals: 3,
AlwaysKeepUnsealedCopy: true,
WaitDealsDelay: time.Hour,
}, nil
}, nil
}),
),
Preseal: kit.PresealGenesis,
}}
// Create a connect client and miner node
n, sn := kit.MockMinerBuilder(t, kit.OneFull, minerDef)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
blockMiner := kit.ConnectAndStartMining(t, blockTime, miner, client)
t.Cleanup(blockMiner.Stop)
dh := kit.NewDealHarness(t, client, miner)
ctx := context.Background()
err := miner.MarketSetAsk(ctx, big.Zero(), big.Zero(), 200, 128, 32<<30)
require.NoError(t, err)
checkNoPadding := func() {
sl, err := sn[0].SectorsList(ctx)
require.NoError(t, err)
sort.Slice(sl, func(i, j int) bool {
return sl[i] < sl[j]
})
for _, snum := range sl {
si, err := sn[0].SectorsStatus(ctx, snum, false)
require.NoError(t, err)
// fmt.Printf("S %d: %+v %s\n", snum, si.Deals, si.State)
for _, deal := range si.Deals {
if deal == 0 {
fmt.Printf("sector %d had a padding piece!\n", snum)
}
}
}
}
// Starts a deal and waits until it's published
runDealTillSeal := func(rseed int) {
res, _, _, err := kit.CreateImportFile(ctx, client, rseed, piece)
require.NoError(t, err)
deal := dh.StartDeal(ctx, res.Root, false, dealStartEpoch)
dh.WaitDealSealed(ctx, deal, false, true, checkNoPadding)
}
// Run maxDealsPerMsg deals in parallel
done := make(chan struct{}, maxDealsPerMsg)
for rseed := 0; rseed < int(maxDealsPerMsg); rseed++ {
rseed := rseed
go func() {
runDealTillSeal(rseed)
done <- struct{}{}
}()
}
// Wait for maxDealsPerMsg of the deals to be published
for i := 0; i < int(maxDealsPerMsg); i++ {
<-done
}
checkNoPadding()
sl, err := sn[0].SectorsList(ctx)
require.NoError(t, err)
require.Equal(t, len(sl), expectSectors)
}
}
t.Run("4-p1600B", run(1600, 4, 4))
t.Run("4-p513B", run(513, 4, 2))
if !testing.Short() {
t.Run("32-p257B", run(257, 32, 8))
t.Run("32-p10B", run(10, 32, 2))
// fixme: this appears to break data-transfer / markets in some really creative ways
// t.Run("128-p10B", run(10, 128, 8))
}
}
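The miner definition is where all the batching behaviour lives: both the deal publisher and the sealing limits are swapped in through node.Options, without touching the miner code itself. The publisher override in isolation, as a sketch (period and batch size are illustrative; the types all appear above):

	// A miner that publishes at most 4 deals per PublishStorageDeals
	// message, flushing the queue every 5 seconds.
	batchingMiner := kit.StorageMiner{
		Full:    0,
		Preseal: kit.PresealGenesis,
		Opts: node.Override(
			new(*storageadapter.DealPublisher),
			storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
				Period:         5 * time.Second,
				MaxDealsPerMsg: 4,
			}),
		),
	}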

View File

@ -1,4 +1,4 @@
package test
package itests
import (
"context"
@ -7,6 +7,7 @@ import (
"testing"
"time"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-state-types/abi"
@ -15,7 +16,9 @@ import (
"github.com/filecoin-project/lotus/node/impl"
)
func TestCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) {
func TestCCUpgrade(t *testing.T) {
kit.QuietMiningLogs()
for _, height := range []abi.ChainEpoch{
-1, // before
162, // while sealing
@ -24,14 +27,14 @@ func TestCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) {
} {
height := height // make linters happy by copying
t.Run(fmt.Sprintf("upgrade-%d", height), func(t *testing.T) {
testCCUpgrade(t, b, blocktime, height)
runTestCCUpgrade(t, kit.MockMinerBuilder, 5*time.Millisecond, height)
})
}
}
func testCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration, upgradeHeight abi.ChainEpoch) {
func runTestCCUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Duration, upgradeHeight abi.ChainEpoch) {
ctx := context.Background()
n, sn := b(t, []FullNodeOpts{FullNodeWithLatestActorsAt(upgradeHeight)}, OneMiner)
n, sn := b(t, []kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(upgradeHeight)}, kit.OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
@ -51,7 +54,7 @@ func testCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration, upgradeH
defer close(done)
for atomic.LoadInt64(&mine) == 1 {
time.Sleep(blocktime)
if err := sn[0].MineOne(ctx, MineNext); err != nil {
if err := sn[0].MineOne(ctx, kit.MineNext); err != nil {
t.Error(err)
}
}
@ -62,10 +65,10 @@ func testCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration, upgradeH
t.Fatal(err)
}
CC := abi.SectorNumber(GenesisPreseals + 1)
CC := abi.SectorNumber(kit.GenesisPreseals + 1)
Upgraded := CC + 1
pledgeSectors(t, ctx, miner, 1, 0, nil)
kit.PledgeSectors(t, ctx, miner, 1, 0, nil)
sl, err := miner.SectorsList(ctx)
if err != nil {
@ -89,7 +92,9 @@ func testCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration, upgradeH
t.Fatal(err)
}
MakeDeal(t, ctx, 6, client, miner, false, false, 0)
dh := kit.NewDealHarness(t, client, miner)
dh.MakeFullDeal(context.Background(), 6, false, false, 0)
// Validate upgrade

22
itests/cli_test.go Normal file
View File

@ -0,0 +1,22 @@
package itests
import (
"context"
"os"
"testing"
"time"
"github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/itests/kit"
)
// TestClient does a basic test to exercise the client CLI commands.
func TestClient(t *testing.T) {
_ = os.Setenv("BELLMAN_NO_GPU", "1")
kit.QuietMiningLogs()
blocktime := 5 * time.Millisecond
ctx := context.Background()
clientNode, _ := kit.StartOneNodeOneMiner(ctx, t, blocktime)
kit.RunClientTest(t, cli.Commands, clientNode)
}

View File

@ -1,26 +1,20 @@
package test
package itests
import (
"bytes"
"context"
"fmt"
"os"
"testing"
"time"
"github.com/filecoin-project/lotus/api"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/go-state-types/network"
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors"
@ -28,7 +22,13 @@ import (
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/extern/sector-storage/mock"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/node/impl"
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
logging "github.com/ipfs/go-log/v2"
"github.com/stretchr/testify/require"
)
// TestDeadlineToggling:
@ -54,16 +54,28 @@ import (
// * goes through another PP
// * asserts that miner B loses power
// * asserts that miner D loses power, is inactive
func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) {
var upgradeH abi.ChainEpoch = 4000
var provingPeriod abi.ChainEpoch = 2880
func TestDeadlineToggling(t *testing.T) {
if os.Getenv("LOTUS_TEST_DEADLINE_TOGGLING") != "1" {
t.Skip("this takes a few minutes, set LOTUS_TEST_DEADLINE_TOGGLING=1 to run")
}
_ = logging.SetLogLevel("miner", "ERROR")
_ = logging.SetLogLevel("chainstore", "ERROR")
_ = logging.SetLogLevel("chain", "ERROR")
_ = logging.SetLogLevel("sub", "ERROR")
_ = logging.SetLogLevel("storageminer", "FATAL")
const sectorsC, sectorsD, sectersB = 10, 9, 8
const sectorsC, sectorsD, sectorsB = 10, 9, 8
var (
upgradeH abi.ChainEpoch = 4000
provingPeriod abi.ChainEpoch = 2880
blocktime = 2 * time.Millisecond
)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n, sn := b(t, []FullNodeOpts{FullNodeWithNetworkUpgradeAt(network.Version12, upgradeH)}, OneMiner)
n, sn := kit.MockMinerBuilder(t, []kit.FullNodeOpts{kit.FullNodeWithNetworkUpgradeAt(network.Version12, upgradeH)}, kit.OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
minerA := sn[0]
@ -92,7 +104,7 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) {
defer close(done)
for ctx.Err() == nil {
build.Clock.Sleep(blocktime)
if err := minerA.MineOne(ctx, MineNext); err != nil {
if err := minerA.MineOne(ctx, kit.MineNext); err != nil {
if ctx.Err() != nil {
// context was canceled, ignore the error.
return
@ -106,8 +118,8 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) {
<-done
}()
minerB := n[0].Stb(ctx, t, TestSpt, defaultFrom)
minerC := n[0].Stb(ctx, t, TestSpt, defaultFrom)
minerB := n[0].Stb(ctx, t, kit.TestSpt, defaultFrom)
minerC := n[0].Stb(ctx, t, kit.TestSpt, defaultFrom)
maddrB, err := minerB.ActorAddress(ctx)
require.NoError(t, err)
@ -119,7 +131,7 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) {
// pledge sectors on C, go through a PP, check for power
{
pledgeSectors(t, ctx, minerC, sectorsC, 0, nil)
kit.PledgeSectors(t, ctx, minerC, sectorsC, 0, nil)
di, err := client.StateMinerProvingDeadline(ctx, maddrC, types.EmptyTSK)
require.NoError(t, err)
@ -204,8 +216,8 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) {
require.NoError(t, err)
require.GreaterOrEqual(t, nv, network.Version12)
minerD := n[0].Stb(ctx, t, TestSpt, defaultFrom)
minerE := n[0].Stb(ctx, t, TestSpt, defaultFrom)
minerD := n[0].Stb(ctx, t, kit.TestSpt, defaultFrom)
minerE := n[0].Stb(ctx, t, kit.TestSpt, defaultFrom)
maddrD, err := minerD.ActorAddress(ctx)
require.NoError(t, err)
@ -213,7 +225,7 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) {
require.NoError(t, err)
// first round of miner checks
checkMiner(maddrA, types.NewInt(uint64(ssz)*GenesisPreseals), true, true, types.EmptyTSK)
checkMiner(maddrA, types.NewInt(uint64(ssz)*kit.GenesisPreseals), true, true, types.EmptyTSK)
checkMiner(maddrC, types.NewInt(uint64(ssz)*sectorsC), true, true, types.EmptyTSK)
checkMiner(maddrB, types.NewInt(0), false, false, types.EmptyTSK)
@ -221,10 +233,10 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) {
checkMiner(maddrE, types.NewInt(0), false, false, types.EmptyTSK)
// pledge sectors on minerB/minerD, stop post on minerC
pledgeSectors(t, ctx, minerB, sectersB, 0, nil)
kit.PledgeSectors(t, ctx, minerB, sectorsB, 0, nil)
checkMiner(maddrB, types.NewInt(0), true, true, types.EmptyTSK)
pledgeSectors(t, ctx, minerD, sectorsD, 0, nil)
kit.PledgeSectors(t, ctx, minerD, sectorsD, 0, nil)
checkMiner(maddrD, types.NewInt(0), true, true, types.EmptyTSK)
minerC.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).Fail()
@ -240,7 +252,7 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) {
params := &miner.SectorPreCommitInfo{
Expiration: 2880 * 300,
SectorNumber: 22,
SealProof: TestSpt,
SealProof: kit.TestSpt,
SealedCID: cr,
SealRandEpoch: head.Height() - 200,
@ -290,9 +302,9 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) {
}
// second round of miner checks
checkMiner(maddrA, types.NewInt(uint64(ssz)*GenesisPreseals), true, true, types.EmptyTSK)
checkMiner(maddrA, types.NewInt(uint64(ssz)*kit.GenesisPreseals), true, true, types.EmptyTSK)
checkMiner(maddrC, types.NewInt(0), true, true, types.EmptyTSK)
checkMiner(maddrB, types.NewInt(uint64(ssz)*sectersB), true, true, types.EmptyTSK)
checkMiner(maddrB, types.NewInt(uint64(ssz)*sectorsB), true, true, types.EmptyTSK)
checkMiner(maddrD, types.NewInt(uint64(ssz)*sectorsD), true, true, types.EmptyTSK)
checkMiner(maddrE, types.NewInt(0), false, false, types.EmptyTSK)
@ -361,8 +373,7 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) {
build.Clock.Sleep(blocktime)
}
// third round of miner checks
checkMiner(maddrA, types.NewInt(uint64(ssz)*GenesisPreseals), true, true, types.EmptyTSK)
checkMiner(maddrA, types.NewInt(uint64(ssz)*kit.GenesisPreseals), true, true, types.EmptyTSK)
checkMiner(maddrC, types.NewInt(0), true, true, types.EmptyTSK)
checkMiner(maddrB, types.NewInt(0), true, true, types.EmptyTSK)
checkMiner(maddrD, types.NewInt(0), false, false, types.EmptyTSK)
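Bringing a new miner online in this scenario always uses the same two kit helpers: Stb binds a fresh storage miner to the full node, and kit.PledgeSectors gives it sectors. A sketch in isolation (minerF and the sector count are hypothetical; the helpers appear in the hunks above):

	minerF := n[0].Stb(ctx, t, kit.TestSpt, defaultFrom)
	kit.PledgeSectors(t, ctx, minerF, 5, 0, nil)
	maddrF, err := minerF.ActorAddress(ctx)
	require.NoError(t, err)
	// power stays zero until the miner proves through a proving period
	checkMiner(maddrF, types.NewInt(0), true, true, types.EmptyTSK)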

605
itests/deals_test.go Normal file
View File

@ -0,0 +1,605 @@
package itests
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"sync/atomic"
"testing"
"time"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/markets/storageadapter"
"github.com/filecoin-project/lotus/miner"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/impl"
market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
"github.com/stretchr/testify/require"
)
func TestDealCycle(t *testing.T) {
kit.QuietMiningLogs()
blockTime := 10 * time.Millisecond
// For these tests where the block time is artificially short, just use
// a deal start epoch that is guaranteed to be far enough in the future
// so that the deal starts sealing in time
dealStartEpoch := abi.ChainEpoch(2 << 12)
t.Run("TestFullDealCycle_Single", func(t *testing.T) {
runFullDealCycles(t, 1, kit.MockMinerBuilder, blockTime, false, false, dealStartEpoch)
})
t.Run("TestFullDealCycle_Two", func(t *testing.T) {
runFullDealCycles(t, 2, kit.MockMinerBuilder, blockTime, false, false, dealStartEpoch)
})
t.Run("WithExportedCAR", func(t *testing.T) {
runFullDealCycles(t, 1, kit.MockMinerBuilder, blockTime, true, false, dealStartEpoch)
})
t.Run("TestFastRetrievalDealCycle", func(t *testing.T) {
runFastRetrievalDealFlowT(t, kit.MockMinerBuilder, blockTime, dealStartEpoch)
})
t.Run("TestZeroPricePerByteRetrievalDealFlow", func(t *testing.T) {
runZeroPricePerByteRetrievalDealFlow(t, kit.MockMinerBuilder, blockTime, dealStartEpoch)
})
}
func TestAPIDealFlowReal(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
kit.QuietMiningLogs()
// TODO: just set this globally?
oldDelay := policy.GetPreCommitChallengeDelay()
policy.SetPreCommitChallengeDelay(5)
t.Cleanup(func() {
policy.SetPreCommitChallengeDelay(oldDelay)
})
t.Run("basic", func(t *testing.T) {
runFullDealCycles(t, 1, kit.Builder, time.Second, false, false, 0)
})
t.Run("fast-retrieval", func(t *testing.T) {
runFullDealCycles(t, 1, kit.Builder, time.Second, false, true, 0)
})
t.Run("retrieval-second", func(t *testing.T) {
runSecondDealRetrievalTest(t, kit.Builder, time.Second)
})
t.Run("quote-price-for-non-unsealed-retrieval", func(t *testing.T) {
runQuotePriceForUnsealedRetrieval(t, kit.Builder, time.Second, 0)
})
}
func runQuotePriceForUnsealedRetrieval(t *testing.T, b kit.APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
ctx := context.Background()
fulls, miners := b(t, kit.OneFull, kit.OneMiner)
client, miner := fulls[0].FullNode.(*impl.FullNodeAPI), miners[0]
kit.ConnectAndStartMining(t, blocktime, miner, client)
ppb := int64(1)
unsealPrice := int64(77)
// Set unsealed price to non-zero
ask, err := miner.MarketGetRetrievalAsk(ctx)
require.NoError(t, err)
ask.PricePerByte = abi.NewTokenAmount(ppb)
ask.UnsealPrice = abi.NewTokenAmount(unsealPrice)
err = miner.MarketSetRetrievalAsk(ctx, ask)
require.NoError(t, err)
dh := kit.NewDealHarness(t, client, miner)
_, info, fcid := dh.MakeFullDealNoRetrieval(ctx, 6, false, startEpoch)
// one more storage deal for the same data
_, _, fcid2 := dh.MakeFullDealNoRetrieval(ctx, 6, false, startEpoch)
require.Equal(t, fcid, fcid2)
// fetch quote -> zero for unseal price since an unsealed file already exists.
offers, err := client.ClientFindData(ctx, fcid, &info.PieceCID)
require.NoError(t, err)
require.Len(t, offers, 2)
require.Equal(t, offers[0], offers[1])
require.Equal(t, uint64(0), offers[0].UnsealPrice.Uint64())
require.Equal(t, info.Size*uint64(ppb), offers[0].MinPrice.Uint64())
// remove ONLY one unsealed file
ss, err := miner.StorageList(context.Background())
require.NoError(t, err)
_, err = miner.SectorsList(ctx)
require.NoError(t, err)
iLoop:
for storeID, sd := range ss {
for _, sector := range sd {
require.NoError(t, miner.StorageDropSector(ctx, storeID, sector.SectorID, storiface.FTUnsealed))
// remove ONLY one
break iLoop
}
}
// get retrieval quote -> zero for unseal price, as an unsealed file still exists.
offers, err = client.ClientFindData(ctx, fcid, &info.PieceCID)
require.NoError(t, err)
require.Len(t, offers, 2)
require.Equal(t, offers[0], offers[1])
require.Equal(t, uint64(0), offers[0].UnsealPrice.Uint64())
require.Equal(t, info.Size*uint64(ppb), offers[0].MinPrice.Uint64())
// remove the other unsealed file as well
ss, err = miner.StorageList(context.Background())
require.NoError(t, err)
_, err = miner.SectorsList(ctx)
require.NoError(t, err)
for storeID, sd := range ss {
for _, sector := range sd {
require.NoError(t, miner.StorageDropSector(ctx, storeID, sector.SectorID, storiface.FTUnsealed))
}
}
// fetch quote -> non-zero for unseal price, as no unsealed files remain.
offers, err = client.ClientFindData(ctx, fcid, &info.PieceCID)
require.NoError(t, err)
require.Len(t, offers, 2)
require.Equal(t, offers[0], offers[1])
require.Equal(t, uint64(unsealPrice), offers[0].UnsealPrice.Uint64())
total := (info.Size * uint64(ppb)) + uint64(unsealPrice)
require.Equal(t, total, offers[0].MinPrice.Uint64())
}
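// Working the quote arithmetic above with illustrative numbers: at
// PricePerByte = 1 and UnsealPrice = 77 (attoFIL), a 2048-byte piece
// quotes MinPrice = 2048*1 + 0 = 2048 while an unsealed copy exists,
// and 2048*1 + 77 = 2125 once every unsealed copy has been dropped.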
func TestPublishDealsBatching(t *testing.T) {
ctx := context.Background()
kit.QuietMiningLogs()
b := kit.MockMinerBuilder
blocktime := 10 * time.Millisecond
startEpoch := abi.ChainEpoch(2 << 12)
publishPeriod := 10 * time.Second
maxDealsPerMsg := uint64(2)
// Set max deals per publish deals message to 2
minerDef := []kit.StorageMiner{{
Full: 0,
Opts: node.Override(
new(*storageadapter.DealPublisher),
storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
Period: publishPeriod,
MaxDealsPerMsg: maxDealsPerMsg,
})),
Preseal: kit.PresealGenesis,
}}
// Create a connect client and miner node
n, sn := b(t, kit.OneFull, minerDef)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
kit.ConnectAndStartMining(t, blocktime, miner, client)
dh := kit.NewDealHarness(t, client, miner)
// Starts a deal and waits until it's published
runDealTillPublish := func(rseed int) {
res, _, _, err := kit.CreateImportFile(ctx, client, rseed, 0)
require.NoError(t, err)
upds, err := client.ClientGetDealUpdates(ctx)
require.NoError(t, err)
dh.StartDeal(ctx, res.Root, false, startEpoch)
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
time.Sleep(time.Second)
done := make(chan struct{})
go func() {
for upd := range upds {
if upd.DataRef.Root == res.Root && upd.State == storagemarket.StorageDealAwaitingPreCommit {
done <- struct{}{}
}
}
}()
<-done
}
// Run three deals in parallel
done := make(chan struct{}, maxDealsPerMsg+1)
for rseed := 1; rseed <= 3; rseed++ {
rseed := rseed
go func() {
runDealTillPublish(rseed)
done <- struct{}{}
}()
}
// Wait for two of the deals to be published
for i := 0; i < int(maxDealsPerMsg); i++ {
<-done
}
// Expect a single PublishStorageDeals message that includes the first two deals
msgCids, err := client.StateListMessages(ctx, &api.MessageMatch{To: market.Address}, types.EmptyTSK, 1)
require.NoError(t, err)
count := 0
for _, msgCid := range msgCids {
msg, err := client.ChainGetMessage(ctx, msgCid)
require.NoError(t, err)
if msg.Method == market.Methods.PublishStorageDeals {
count++
var pubDealsParams market2.PublishStorageDealsParams
err = pubDealsParams.UnmarshalCBOR(bytes.NewReader(msg.Params))
require.NoError(t, err)
require.Len(t, pubDealsParams.Deals, int(maxDealsPerMsg))
}
}
require.Equal(t, 1, count)
// The third deal should be published once the publish period expires.
// Allow a little padding as it takes a moment for the state change to
// be noticed by the client.
padding := 10 * time.Second
select {
case <-time.After(publishPeriod + padding):
require.Fail(t, "Expected 3rd deal to be published once publish period elapsed")
case <-done: // Success
}
}
func TestDealMining(t *testing.T) {
// test making a deal with a fresh miner, and see if it starts to mine.
if testing.Short() {
t.Skip("skipping test in short mode")
}
kit.QuietMiningLogs()
b := kit.MockMinerBuilder
blocktime := 50 * time.Millisecond
ctx := context.Background()
fulls, miners := b(t,
kit.OneFull,
[]kit.StorageMiner{
{Full: 0, Preseal: kit.PresealGenesis},
{Full: 0, Preseal: 0}, // TODO: Add support for miners on non-first full node
})
client := fulls[0].FullNode.(*impl.FullNodeAPI)
genesisMiner := miners[0]
provider := miners[1]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := provider.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
if err := genesisMiner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
time.Sleep(time.Second)
data := make([]byte, 600)
rand.New(rand.NewSource(5)).Read(data)
r := bytes.NewReader(data)
fcid, err := client.ClientImportLocal(ctx, r)
if err != nil {
t.Fatal(err)
}
fmt.Println("FILE CID: ", fcid)
var mine int32 = 1
done := make(chan struct{})
minedTwo := make(chan struct{})
m2addr, err := miners[1].ActorAddress(context.TODO())
if err != nil {
t.Fatal(err)
}
go func() {
defer close(done)
complChan := minedTwo
for atomic.LoadInt32(&mine) != 0 {
wait := make(chan int)
mdone := func(mined bool, _ abi.ChainEpoch, err error) {
n := 0
if mined {
n = 1
}
wait <- n
}
if err := miners[0].MineOne(ctx, miner.MineReq{Done: mdone}); err != nil {
t.Error(err)
}
if err := miners[1].MineOne(ctx, miner.MineReq{Done: mdone}); err != nil {
t.Error(err)
}
expect := <-wait
expect += <-wait
time.Sleep(blocktime)
if expect == 0 {
// null block
continue
}
var minerTwoMined bool
for _, node := range miners {
mb, err := node.MiningBase(ctx)
if err != nil {
t.Error(err)
return
}
for _, b := range mb.Blocks() {
if b.Miner == m2addr {
minerTwoMined = true
break
}
}
}
if minerTwoMined && complChan != nil {
close(complChan)
complChan = nil
}
}
}()
dh := kit.NewDealHarness(t, client, provider)
deal := dh.StartDeal(ctx, fcid, false, 0)
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
time.Sleep(time.Second)
dh.WaitDealSealed(ctx, deal, false, false, nil)
<-minedTwo
atomic.StoreInt32(&mine, 0)
fmt.Println("shutting down mining")
<-done
}
func TestOfflineDealFlow(t *testing.T) {
blocktime := 10 * time.Millisecond
// For these tests where the block time is artificially short, just use
// a deal start epoch that is guaranteed to be far enough in the future
// so that the deal starts sealing in time
startEpoch := abi.ChainEpoch(2 << 12)
runTest := func(t *testing.T, fastRet bool) {
ctx := context.Background()
fulls, miners := kit.MockMinerBuilder(t, kit.OneFull, kit.OneMiner)
client, miner := fulls[0].FullNode.(*impl.FullNodeAPI), miners[0]
kit.ConnectAndStartMining(t, blocktime, miner, client)
dh := kit.NewDealHarness(t, client, miner)
// Create a random file and import on the client.
res, path, data, err := kit.CreateImportFile(ctx, client, 1, 0)
require.NoError(t, err)
// Get the piece size and commP
fcid := res.Root
pieceInfo, err := client.ClientDealPieceCID(ctx, fcid)
require.NoError(t, err)
fmt.Println("FILE CID: ", fcid)
// Create a storage deal with the miner
maddr, err := miner.ActorAddress(ctx)
require.NoError(t, err)
addr, err := client.WalletDefaultAddress(ctx)
require.NoError(t, err)
// Manual storage deal (offline deal)
dataRef := &storagemarket.DataRef{
TransferType: storagemarket.TTManual,
Root: fcid,
PieceCid: &pieceInfo.PieceCID,
PieceSize: pieceInfo.PieceSize.Unpadded(),
}
proposalCid, err := client.ClientStartDeal(ctx, &api.StartDealParams{
Data: dataRef,
Wallet: addr,
Miner: maddr,
EpochPrice: types.NewInt(1000000),
DealStartEpoch: startEpoch,
MinBlocksDuration: uint64(build.MinDealDuration),
FastRetrieval: fastRet,
})
require.NoError(t, err)
// Wait for the deal to reach StorageDealCheckForAcceptance on the client
cd, err := client.ClientGetDealInfo(ctx, *proposalCid)
require.NoError(t, err)
require.Eventually(t, func() bool {
cd, _ := client.ClientGetDealInfo(ctx, *proposalCid)
return cd.State == storagemarket.StorageDealCheckForAcceptance
}, 30*time.Second, 1*time.Second, "actual deal status is %s", storagemarket.DealStates[cd.State])
// Create a CAR file from the raw file
carFileDir, err := ioutil.TempDir(os.TempDir(), "test-make-deal-car")
require.NoError(t, err)
carFilePath := filepath.Join(carFileDir, "out.car")
err = client.ClientGenCar(ctx, api.FileRef{Path: path}, carFilePath)
require.NoError(t, err)
// Import the CAR file on the miner - this is the equivalent to
// transferring the file across the wire in a normal (non-offline) deal
err = miner.DealsImportData(ctx, *proposalCid, carFilePath)
require.NoError(t, err)
// Wait for the deal to be published
dh.WaitDealPublished(ctx, proposalCid)
t.Logf("deal published, retrieving")
// Retrieve the deal
dh.TestRetrieval(ctx, fcid, &pieceInfo.PieceCID, false, data)
}
t.Run("NormalRetrieval", func(t *testing.T) {
runTest(t, false)
})
t.Run("FastRetrieval", func(t *testing.T) {
runTest(t, true)
})
}
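// The offline flow above reduces to four API calls, with the CAR file
// standing in for the wire transfer (sketch; root, path and carPath
// are assumed to be in scope):
//
//	pi, _ := client.ClientDealPieceCID(ctx, root) // precompute piece CID and size
//	pcid, _ := client.ClientStartDeal(ctx, &api.StartDealParams{
//		Data: &storagemarket.DataRef{
//			TransferType: storagemarket.TTManual,
//			Root:         root,
//			PieceCid:     &pi.PieceCID,
//			PieceSize:    pi.PieceSize.Unpadded(),
//		},
//		// remaining params as in the test above
//	})
//	_ = client.ClientGenCar(ctx, api.FileRef{Path: path}, carPath) // out-of-band transfer
//	_ = miner.DealsImportData(ctx, *pcid, carPath)                 // miner ingests the CAR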
func runFullDealCycles(t *testing.T, n int, b kit.APIBuilder, blocktime time.Duration, carExport, fastRet bool, startEpoch abi.ChainEpoch) {
fulls, miners := b(t, kit.OneFull, kit.OneMiner)
client, miner := fulls[0].FullNode.(*impl.FullNodeAPI), miners[0]
kit.ConnectAndStartMining(t, blocktime, miner, client)
dh := kit.NewDealHarness(t, client, miner)
baseseed := 6
for i := 0; i < n; i++ {
dh.MakeFullDeal(context.Background(), baseseed+i, carExport, fastRet, startEpoch)
}
}
func runFastRetrievalDealFlowT(t *testing.T, b kit.APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
ctx := context.Background()
fulls, miners := b(t, kit.OneFull, kit.OneMiner)
client, miner := fulls[0].FullNode.(*impl.FullNodeAPI), miners[0]
kit.ConnectAndStartMining(t, blocktime, miner, client)
dh := kit.NewDealHarness(t, client, miner)
data := make([]byte, 1600)
rand.New(rand.NewSource(int64(8))).Read(data)
r := bytes.NewReader(data)
fcid, err := client.ClientImportLocal(ctx, r)
if err != nil {
t.Fatal(err)
}
fmt.Println("FILE CID: ", fcid)
deal := dh.StartDeal(ctx, fcid, true, startEpoch)
dh.WaitDealPublished(ctx, deal)
fmt.Println("deal published, retrieving")
// Retrieval
info, err := client.ClientGetDealInfo(ctx, *deal)
require.NoError(t, err)
dh.TestRetrieval(ctx, fcid, &info.PieceCID, false, data)
}
func runSecondDealRetrievalTest(t *testing.T, b kit.APIBuilder, blocktime time.Duration) {
ctx := context.Background()
fulls, miners := b(t, kit.OneFull, kit.OneMiner)
client, miner := fulls[0].FullNode.(*impl.FullNodeAPI), miners[0]
kit.ConnectAndStartMining(t, blocktime, miner, client)
dh := kit.NewDealHarness(t, client, miner)
{
data1 := make([]byte, 800)
rand.New(rand.NewSource(int64(3))).Read(data1)
r := bytes.NewReader(data1)
fcid1, err := client.ClientImportLocal(ctx, r)
if err != nil {
t.Fatal(err)
}
data2 := make([]byte, 800)
rand.New(rand.NewSource(int64(9))).Read(data2)
r2 := bytes.NewReader(data2)
fcid2, err := client.ClientImportLocal(ctx, r2)
if err != nil {
t.Fatal(err)
}
deal1 := dh.StartDeal(ctx, fcid1, true, 0)
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
time.Sleep(time.Second)
dh.WaitDealSealed(ctx, deal1, true, false, nil)
deal2 := dh.StartDeal(ctx, fcid2, true, 0)
time.Sleep(time.Second)
dh.WaitDealSealed(ctx, deal2, false, false, nil)
// Retrieval
info, err := client.ClientGetDealInfo(ctx, *deal2)
require.NoError(t, err)
rf, _ := miner.SectorsRefs(ctx)
fmt.Printf("refs: %+v\n", rf)
dh.TestRetrieval(ctx, fcid2, &info.PieceCID, false, data2)
}
}
func runZeroPricePerByteRetrievalDealFlow(t *testing.T, b kit.APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
ctx := context.Background()
fulls, miners := b(t, kit.OneFull, kit.OneMiner)
client, miner := fulls[0].FullNode.(*impl.FullNodeAPI), miners[0]
kit.ConnectAndStartMining(t, blocktime, miner, client)
dh := kit.NewDealHarness(t, client, miner)
// Set price-per-byte to zero
ask, err := miner.MarketGetRetrievalAsk(ctx)
require.NoError(t, err)
ask.PricePerByte = abi.NewTokenAmount(0)
err = miner.MarketSetRetrievalAsk(ctx, ask)
require.NoError(t, err)
dh.MakeFullDeal(ctx, 6, false, false, startEpoch)
}
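Every flow in this file is a variation on one harness sequence: import data, start the deal, wait for sealing, retrieve. Condensed into a single sketch (the seed and start epoch are illustrative; signatures are those used above):

	func runOneDeal(t *testing.T, client *impl.FullNodeAPI, miner kit.TestMiner) {
		ctx := context.Background()
		dh := kit.NewDealHarness(t, client, miner)

		res, _, data, err := kit.CreateImportFile(ctx, client, 5, 0)
		require.NoError(t, err)

		deal := dh.StartDeal(ctx, res.Root, false, abi.ChainEpoch(2<<12))
		dh.WaitDealSealed(ctx, deal, false, false, nil)

		info, err := client.ClientGetDealInfo(ctx, *deal)
		require.NoError(t, err)
		dh.TestRetrieval(ctx, res.Root, &info.PieceCID, false, data)
	}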

2
itests/doc.go Normal file
View File

@ -0,0 +1,2 @@
// Package itests contains integration tests for Lotus.
package itests

View File

@ -1,4 +1,4 @@
package main
package itests
import (
"bytes"
@ -10,30 +10,25 @@ import (
"time"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/cli"
clitest "github.com/filecoin-project/lotus/cli/test"
"github.com/filecoin-project/lotus/gateway"
init2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/init"
multisig2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig"
"github.com/stretchr/testify/require"
"golang.org/x/xerrors"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-jsonrpc"
"github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/client"
"github.com/filecoin-project/lotus/api/test"
"github.com/filecoin-project/lotus/api/v0api"
"github.com/filecoin-project/lotus/api/v1api"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/gateway"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/node"
builder "github.com/filecoin-project/lotus/node/test"
init2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/init"
multisig2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig"
)
const (
@ -47,11 +42,11 @@ func init() {
policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
}
// TestWalletMsig tests that API calls to wallet and msig can be made on a lite
// TestGatewayWalletMsig tests that API calls to wallet and msig can be made on a lite
// node that is connected through a gateway to a full API node
func TestWalletMsig(t *testing.T) {
func TestGatewayWalletMsig(t *testing.T) {
_ = os.Setenv("BELLMAN_NO_GPU", "1")
clitest.QuietMiningLogs()
kit.QuietMiningLogs()
blocktime := 5 * time.Millisecond
ctx := context.Background()
@ -181,11 +176,11 @@ func TestWalletMsig(t *testing.T) {
require.True(t, approveReturn.Applied)
}
// TestMsigCLI tests that msig CLI calls can be made
// TestGatewayMsigCLI tests that msig CLI calls can be made
// on a lite node that is connected through a gateway to a full API node
func TestMsigCLI(t *testing.T) {
func TestGatewayMsigCLI(t *testing.T) {
_ = os.Setenv("BELLMAN_NO_GPU", "1")
clitest.QuietMiningLogs()
kit.QuietMiningLogs()
blocktime := 5 * time.Millisecond
ctx := context.Background()
@ -193,12 +188,12 @@ func TestMsigCLI(t *testing.T) {
defer nodes.closer()
lite := nodes.lite
clitest.RunMultisigTest(t, cli.Commands, lite)
runMultisigTests(t, lite)
}
func TestDealFlow(t *testing.T) {
func TestGatewayDealFlow(t *testing.T) {
_ = os.Setenv("BELLMAN_NO_GPU", "1")
clitest.QuietMiningLogs()
kit.QuietMiningLogs()
blocktime := 5 * time.Millisecond
ctx := context.Background()
@ -209,25 +204,27 @@ func TestDealFlow(t *testing.T) {
// a deal start epoch that is guaranteed to be far enough in the future
// so that the deal starts sealing in time
dealStartEpoch := abi.ChainEpoch(2 << 12)
test.MakeDeal(t, ctx, 6, nodes.lite, nodes.miner, false, false, dealStartEpoch)
dh := kit.NewDealHarness(t, nodes.lite, nodes.miner)
dh.MakeFullDeal(ctx, 6, false, false, dealStartEpoch)
}
func TestCLIDealFlow(t *testing.T) {
func TestGatewayCLIDealFlow(t *testing.T) {
_ = os.Setenv("BELLMAN_NO_GPU", "1")
clitest.QuietMiningLogs()
kit.QuietMiningLogs()
blocktime := 5 * time.Millisecond
ctx := context.Background()
nodes := startNodesWithFunds(ctx, t, blocktime, maxLookbackCap, maxStateWaitLookbackLimit)
defer nodes.closer()
clitest.RunClientTest(t, cli.Commands, nodes.lite)
kit.RunClientTest(t, cli.Commands, nodes.lite)
}
type testNodes struct {
lite test.TestNode
full test.TestNode
miner test.TestStorageNode
lite kit.TestFullNode
full kit.TestFullNode
miner kit.TestMiner
closer jsonrpc.ClientCloser
}
@ -270,24 +267,23 @@ func startNodes(
// - Connect lite node -> gateway server -> full node
opts := append(
// Full node
test.OneFull,
kit.OneFull,
// Lite node
test.FullNodeOpts{
kit.FullNodeOpts{
Lite: true,
Opts: func(nodes []test.TestNode) node.Option {
Opts: func(nodes []kit.TestFullNode) node.Option {
fullNode := nodes[0]
// Create a gateway server in front of the full node
gapiImpl := gateway.NewNode(fullNode, lookbackCap, stateWaitLookbackLimit)
_, addr, err := builder.CreateRPCServer(t, map[string]interface{}{
"/rpc/v1": gapiImpl,
"/rpc/v0": api.Wrap(new(v1api.FullNodeStruct), new(v0api.WrapperV1Full), gapiImpl),
})
gwapi := gateway.NewNode(fullNode, lookbackCap, stateWaitLookbackLimit)
handler, err := gateway.Handler(gwapi)
require.NoError(t, err)
srv, _ := kit.CreateRPCServer(t, handler)
// Create a gateway client API that connects to the gateway server
var gapi api.Gateway
gapi, closer, err = client.NewGatewayRPCV1(ctx, addr+"/rpc/v1", nil)
gapi, closer, err = client.NewGatewayRPCV1(ctx, "ws://"+srv.Listener.Addr().String()+"/rpc/v1", nil)
require.NoError(t, err)
// Provide the gateway API to dependency injection
@ -295,7 +291,7 @@ func startNodes(
},
},
)
n, sn := builder.RPCMockSbBuilder(t, opts, test.OneMiner)
n, sn := kit.RPCMockMinerBuilder(t, opts, kit.OneMiner)
full := n[0]
lite := n[1]
@ -317,14 +313,14 @@ func startNodes(
require.NoError(t, err)
// Start mining blocks
bm := test.NewBlockMiner(ctx, t, miner, blocktime)
bm.MineBlocks()
bm := kit.NewBlockMiner(t, miner)
bm.MineBlocks(ctx, blocktime)
t.Cleanup(bm.Stop)
return &testNodes{lite: lite, full: full, miner: miner, closer: closer}
}
func sendFunds(ctx context.Context, fromNode test.TestNode, fromAddr address.Address, toAddr address.Address, amt types.BigInt) error {
func sendFunds(ctx context.Context, fromNode kit.TestFullNode, fromAddr address.Address, toAddr address.Address, amt types.BigInt) error {
msg := &types.Message{
From: fromAddr,
To: toAddr,

itests/kit/blockminer.go (new file, 124 lines)
@ -0,0 +1,124 @@
package kit
import (
"context"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/miner"
"github.com/stretchr/testify/require"
)
// BlockMiner is a utility that makes a test miner mine blocks on a timer.
type BlockMiner struct {
t *testing.T
miner TestMiner
nextNulls int64
wg sync.WaitGroup
cancel context.CancelFunc
}
func NewBlockMiner(t *testing.T, miner TestMiner) *BlockMiner {
return &BlockMiner{
t: t,
miner: miner,
cancel: func() {},
}
}
func (bm *BlockMiner) MineBlocks(ctx context.Context, blocktime time.Duration) {
time.Sleep(time.Second)
// wrap context in a cancellable context.
ctx, bm.cancel = context.WithCancel(ctx)
bm.wg.Add(1)
go func() {
defer bm.wg.Done()
for {
select {
case <-time.After(blocktime):
case <-ctx.Done():
return
}
nulls := atomic.SwapInt64(&bm.nextNulls, 0)
err := bm.miner.MineOne(ctx, miner.MineReq{
InjectNulls: abi.ChainEpoch(nulls),
Done: func(bool, abi.ChainEpoch, error) {},
})
switch {
case err == nil: // mined a block; keep looping
case ctx.Err() != nil: // context canceled; stop mining
return
default: // log error
bm.t.Error(err)
}
}
}()
}
// InjectNulls injects the specified number of null rounds into the upcoming
// mining rounds.
func (bm *BlockMiner) InjectNulls(rounds abi.ChainEpoch) {
atomic.AddInt64(&bm.nextNulls, int64(rounds))
}
func (bm *BlockMiner) MineUntilBlock(ctx context.Context, fn TestFullNode, cb func(abi.ChainEpoch)) {
for i := 0; i < 1000; i++ {
var (
success bool
err error
epoch abi.ChainEpoch
wait = make(chan struct{})
)
doneFn := func(win bool, ep abi.ChainEpoch, e error) {
success = win
err = e
epoch = ep
wait <- struct{}{}
}
mineErr := bm.miner.MineOne(ctx, miner.MineReq{Done: doneFn})
require.NoError(bm.t, mineErr)
<-wait
require.NoError(bm.t, err)
if success {
// Wait until it shows up on the given full node's ChainHead
nloops := 50
for i := 0; i < nloops; i++ {
ts, err := fn.ChainHead(ctx)
require.NoError(bm.t, err)
if ts.Height() == epoch {
break
}
require.NotEqual(bm.t, i, nloops-1, "block never managed to sync to node")
time.Sleep(time.Millisecond * 10)
}
if cb != nil {
cb(epoch)
}
return
}
bm.t.Log("did not Mine block, trying again", i)
}
bm.t.Fatal("failed to Mine 1000 times in a row...")
}
// Stop stops the block miner.
func (bm *BlockMiner) Stop() {
bm.t.Log("shutting down mining")
bm.cancel()
bm.wg.Wait()
}
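A minimal usage sketch for the new BlockMiner API (not part of this diff): it assumes a TestMiner obtained from one of the kit builders, and shows that construction no longer starts mining.

package kit_test

import (
    "context"
    "testing"
    "time"

    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/lotus/itests/kit"
)

func startMining(t *testing.T, miner kit.TestMiner) *kit.BlockMiner {
    ctx := context.Background()
    bm := kit.NewBlockMiner(t, miner)      // construct; nothing is mined yet
    bm.MineBlocks(ctx, 5*time.Millisecond) // start mining on a 5ms timer
    t.Cleanup(bm.Stop)                     // stop the mining goroutine on test exit
    bm.InjectNulls(abi.ChainEpoch(10))     // skip 10 epochs in the upcoming rounds
    return bm
}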

(modified file)

@ -1,9 +1,10 @@
package test
package kit
import (
"context"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"regexp"
@ -11,9 +12,7 @@ import (
"testing"
"time"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/api/test"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/specs-actors/v2/actors/builtin"
@ -21,8 +20,8 @@ import (
lcli "github.com/urfave/cli/v2"
)
// RunClientTest exercises some of the client CLI commands
func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode) {
// RunClientTest exercises some of the Client CLI commands
func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode TestFullNode) {
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
@ -30,7 +29,7 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode)
mockCLI := NewMockCLI(ctx, t, cmds)
clientCLI := mockCLI.Client(clientNode.ListenAddr)
// Get the miner address
// Get the Miner address
addrs, err := clientNode.StateListMiners(ctx, types.EmptyTSK)
require.NoError(t, err)
require.Len(t, addrs, 1)
@ -38,13 +37,14 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode)
minerAddr := addrs[0]
fmt.Println("Miner:", minerAddr)
// client query-ask <miner addr>
// client query-ask <Miner addr>
out := clientCLI.RunCmd("client", "query-ask", minerAddr.String())
require.Regexp(t, regexp.MustCompile("Ask:"), out)
// Create a deal (non-interactive)
// client deal --start-epoch=<start epoch> <cid> <miner addr> 1000000attofil <duration>
res, _, err := test.CreateClientFile(ctx, clientNode, 1, 0)
// client deal --start-epoch=<start epoch> <cid> <Miner addr> 1000000attofil <duration>
res, _, _, err := CreateImportFile(ctx, clientNode, 1, 0)
require.NoError(t, err)
startEpoch := fmt.Sprintf("--start-epoch=%d", 2<<12)
dataCid := res.Root
@ -58,9 +58,9 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode)
// <cid>
// <duration> (in days)
// <miner addr>
// "no" (verified client)
// "no" (verified Client)
// "yes" (confirm deal)
res, _, err = test.CreateClientFile(ctx, clientNode, 2, 0)
res, _, _, err = CreateImportFile(ctx, clientNode, 2, 0)
require.NoError(t, err)
dataCid2 := res.Root
duration = fmt.Sprintf("%d", build.MinDealDuration/builtin.EpochsInDay)
@ -91,16 +91,19 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode)
}
dealStatus = parts[3]
fmt.Println(" Deal status:", dealStatus)
if dealComplete(t, dealStatus) {
st := CategorizeDealState(dealStatus)
require.NotEqual(t, TestDealStateFailed, st)
if st == TestDealStateComplete {
break
}
time.Sleep(time.Second)
}
// Retrieve the first file from the miner
// Retrieve the first file from the Miner
// client retrieve <cid> <file path>
tmpdir, err := ioutil.TempDir(os.TempDir(), "test-cli-client")
tmpdir, err := ioutil.TempDir(os.TempDir(), "test-cli-Client")
require.NoError(t, err)
path := filepath.Join(tmpdir, "outfile.dat")
out = clientCLI.RunCmd("client", "retrieve", dataCid.String(), path)
@ -108,13 +111,36 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode)
require.Regexp(t, regexp.MustCompile("Success"), out)
}
func dealComplete(t *testing.T, dealStatus string) bool {
switch dealStatus {
case "StorageDealFailing", "StorageDealError":
t.Fatal(xerrors.Errorf("Storage deal failed with status: " + dealStatus))
case "StorageDealStaged", "StorageDealAwaitingPreCommit", "StorageDealSealing", "StorageDealActive", "StorageDealExpired", "StorageDealSlashed":
return true
func CreateImportFile(ctx context.Context, client api.FullNode, rseed int, size int) (res *api.ImportRes, path string, data []byte, err error) {
data, path, err = createRandomFile(rseed, size)
if err != nil {
return nil, "", nil, err
}
return false
res, err = client.ClientImport(ctx, api.FileRef{Path: path})
if err != nil {
return nil, "", nil, err
}
return res, path, data, nil
}
func createRandomFile(rseed, size int) ([]byte, string, error) {
if size == 0 {
size = 1600
}
data := make([]byte, size)
rand.New(rand.NewSource(int64(rseed))).Read(data)
dir, err := ioutil.TempDir(os.TempDir(), "test-make-deal-")
if err != nil {
return nil, "", err
}
path := filepath.Join(dir, "sourcefile.dat")
err = ioutil.WriteFile(path, data, 0644)
if err != nil {
return nil, "", err
}
return data, path, nil
}
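CreateImportFile replaces the old test.CreateClientFile helper and additionally returns the on-disk path and the raw bytes. A hypothetical call (same imports as the sketch above, plus github.com/stretchr/testify/require), with clientNode coming from a kit builder:

res, path, data, err := kit.CreateImportFile(ctx, clientNode, 5, 2048) // seed 5, 2048 bytes
require.NoError(t, err)
t.Logf("imported %d bytes from %s as root %s", len(data), path, res.Root)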

itests/kit/deals.go (new file, 335 lines)
@ -0,0 +1,335 @@
package kit
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
"github.com/ipfs/go-cid"
files "github.com/ipfs/go-ipfs-files"
"github.com/ipld/go-car"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
"github.com/filecoin-project/lotus/node/impl"
ipld "github.com/ipfs/go-ipld-format"
dag "github.com/ipfs/go-merkledag"
dstest "github.com/ipfs/go-merkledag/test"
unixfile "github.com/ipfs/go-unixfs/file"
)
type DealHarness struct {
t *testing.T
client api.FullNode
miner TestMiner
}
// NewDealHarness creates a test harness that contains testing utilities for deals.
func NewDealHarness(t *testing.T, client api.FullNode, miner TestMiner) *DealHarness {
return &DealHarness{
t: t,
client: client,
miner: miner,
}
}
func (dh *DealHarness) MakeFullDealNoRetrieval(ctx context.Context, rseed int, fastRet bool, startEpoch abi.ChainEpoch) ([]byte,
*api.DealInfo, cid.Cid) {
res, _, data, err := CreateImportFile(ctx, dh.client, rseed, 0)
if err != nil {
dh.t.Fatal(err)
}
fcid := res.Root
fmt.Println("FILE CID: ", fcid)
deal := dh.StartDeal(ctx, fcid, fastRet, startEpoch)
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore; we should fix this
time.Sleep(time.Second)
dh.WaitDealSealed(ctx, deal, false, false, nil)
// Retrieval
info, err := dh.client.ClientGetDealInfo(ctx, *deal)
require.NoError(dh.t, err)
return data, info, fcid
}
func (dh *DealHarness) MakeFullDeal(ctx context.Context, rseed int, carExport, fastRet bool, startEpoch abi.ChainEpoch) {
res, _, data, err := CreateImportFile(ctx, dh.client, rseed, 0)
if err != nil {
dh.t.Fatal(err)
}
fcid := res.Root
fmt.Println("FILE CID: ", fcid)
deal := dh.StartDeal(ctx, fcid, fastRet, startEpoch)
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore; we should fix this
time.Sleep(time.Second)
dh.WaitDealSealed(ctx, deal, false, false, nil)
// Retrieval
info, err := dh.client.ClientGetDealInfo(ctx, *deal)
require.NoError(dh.t, err)
dh.TestRetrieval(ctx, fcid, &info.PieceCID, carExport, data)
}
func (dh *DealHarness) StartDeal(ctx context.Context, fcid cid.Cid, fastRet bool, startEpoch abi.ChainEpoch) *cid.Cid {
maddr, err := dh.miner.ActorAddress(ctx)
if err != nil {
dh.t.Fatal(err)
}
addr, err := dh.client.WalletDefaultAddress(ctx)
if err != nil {
dh.t.Fatal(err)
}
deal, err := dh.client.ClientStartDeal(ctx, &api.StartDealParams{
Data: &storagemarket.DataRef{
TransferType: storagemarket.TTGraphsync,
Root: fcid,
},
Wallet: addr,
Miner: maddr,
EpochPrice: types.NewInt(1000000),
DealStartEpoch: startEpoch,
MinBlocksDuration: uint64(build.MinDealDuration),
FastRetrieval: fastRet,
})
if err != nil {
dh.t.Fatalf("%+v", err)
}
return deal
}
func (dh *DealHarness) WaitDealSealed(ctx context.Context, deal *cid.Cid, noseal, noSealStart bool, cb func()) {
loop:
for {
di, err := dh.client.ClientGetDealInfo(ctx, *deal)
require.NoError(dh.t, err)
switch di.State {
case storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing:
if noseal {
return
}
if !noSealStart {
dh.StartSealingWaiting(ctx)
}
case storagemarket.StorageDealProposalRejected:
dh.t.Fatal("deal rejected")
case storagemarket.StorageDealFailing:
dh.t.Fatal("deal failed")
case storagemarket.StorageDealError:
dh.t.Fatal("deal errored", di.Message)
case storagemarket.StorageDealActive:
fmt.Println("COMPLETE", di)
break loop
}
mds, err := dh.miner.MarketListIncompleteDeals(ctx)
require.NoError(dh.t, err)
var minerState storagemarket.StorageDealStatus
for _, md := range mds {
if md.DealID == di.DealID {
minerState = md.State
break
}
}
fmt.Printf("Deal %d state: client:%s provider:%s\n", di.DealID, storagemarket.DealStates[di.State], storagemarket.DealStates[minerState])
time.Sleep(time.Second / 2)
if cb != nil {
cb()
}
}
}
func (dh *DealHarness) WaitDealPublished(ctx context.Context, deal *cid.Cid) {
subCtx, cancel := context.WithCancel(ctx)
defer cancel()
updates, err := dh.miner.MarketGetDealUpdates(subCtx)
if err != nil {
dh.t.Fatal(err)
}
for {
select {
case <-ctx.Done():
dh.t.Fatal("context timeout")
case di := <-updates:
if deal.Equals(di.ProposalCid) {
switch di.State {
case storagemarket.StorageDealProposalRejected:
dh.t.Fatal("deal rejected")
case storagemarket.StorageDealFailing:
dh.t.Fatal("deal failed")
case storagemarket.StorageDealError:
dh.t.Fatal("deal errored", di.Message)
case storagemarket.StorageDealFinalizing, storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing, storagemarket.StorageDealActive:
fmt.Println("COMPLETE", di)
return
}
fmt.Println("Deal state: ", storagemarket.DealStates[di.State])
}
}
}
}
func (dh *DealHarness) StartSealingWaiting(ctx context.Context) {
snums, err := dh.miner.SectorsList(ctx)
require.NoError(dh.t, err)
for _, snum := range snums {
si, err := dh.miner.SectorsStatus(ctx, snum, false)
require.NoError(dh.t, err)
dh.t.Logf("Sector state: %s", si.State)
if si.State == api.SectorState(sealing.WaitDeals) {
require.NoError(dh.t, dh.miner.SectorStartSealing(ctx, snum))
}
flushSealingBatches(dh.t, ctx, dh.miner)
}
}
func (dh *DealHarness) TestRetrieval(ctx context.Context, fcid cid.Cid, piece *cid.Cid, carExport bool, expect []byte) {
offers, err := dh.client.ClientFindData(ctx, fcid, piece)
if err != nil {
dh.t.Fatal(err)
}
if len(offers) < 1 {
dh.t.Fatal("no offers")
}
rpath, err := ioutil.TempDir("", "lotus-retrieve-test-")
if err != nil {
dh.t.Fatal(err)
}
defer os.RemoveAll(rpath) //nolint:errcheck
caddr, err := dh.client.WalletDefaultAddress(ctx)
if err != nil {
dh.t.Fatal(err)
}
ref := &api.FileRef{
Path: filepath.Join(rpath, "ret"),
IsCAR: carExport,
}
updates, err := dh.client.ClientRetrieveWithEvents(ctx, offers[0].Order(caddr), ref)
if err != nil {
dh.t.Fatal(err)
}
for update := range updates {
if update.Err != "" {
dh.t.Fatalf("retrieval failed: %s", update.Err)
}
}
rdata, err := ioutil.ReadFile(filepath.Join(rpath, "ret"))
if err != nil {
dh.t.Fatal(err)
}
if carExport {
rdata = dh.ExtractCarData(ctx, rdata, rpath)
}
if !bytes.Equal(rdata, expect) {
dh.t.Fatal("wrong expect retrieved")
}
}
func (dh *DealHarness) ExtractCarData(ctx context.Context, rdata []byte, rpath string) []byte {
bserv := dstest.Bserv()
ch, err := car.LoadCar(bserv.Blockstore(), bytes.NewReader(rdata))
if err != nil {
dh.t.Fatal(err)
}
b, err := bserv.GetBlock(ctx, ch.Roots[0])
if err != nil {
dh.t.Fatal(err)
}
nd, err := ipld.Decode(b)
if err != nil {
dh.t.Fatal(err)
}
dserv := dag.NewDAGService(bserv)
fil, err := unixfile.NewUnixfsFile(ctx, dserv, nd)
if err != nil {
dh.t.Fatal(err)
}
outPath := filepath.Join(rpath, "retLoadedCAR")
if err := files.WriteTo(fil, outPath); err != nil {
dh.t.Fatal(err)
}
rdata, err = ioutil.ReadFile(outPath)
if err != nil {
dh.t.Fatal(err)
}
return rdata
}
type DealsScaffold struct {
Ctx context.Context
Client *impl.FullNodeAPI
Miner TestMiner
BlockMiner *BlockMiner
}
func ConnectAndStartMining(t *testing.T, blocktime time.Duration, miner TestMiner, clients ...api.FullNode) *BlockMiner {
ctx := context.Background()
for _, c := range clients {
addrinfo, err := c.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
}
time.Sleep(time.Second)
blockMiner := NewBlockMiner(t, miner)
blockMiner.MineBlocks(ctx, blocktime)
t.Cleanup(blockMiner.Stop)
return blockMiner
}
type TestDealState int
const (
TestDealStateFailed = TestDealState(-1)
TestDealStateInProgress = TestDealState(0)
TestDealStateComplete = TestDealState(1)
)
// CategorizeDealState categorizes deal states into one of three states:
// Complete, InProgress, Failed.
func CategorizeDealState(dealStatus string) TestDealState {
switch dealStatus {
case "StorageDealFailing", "StorageDealError":
return TestDealStateFailed
case "StorageDealStaged", "StorageDealAwaitingPreCommit", "StorageDealSealing", "StorageDealActive", "StorageDealExpired", "StorageDealSlashed":
return TestDealStateComplete
}
return TestDealStateInProgress
}
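DealHarness folds the old free-function flow (import a file, start a deal, wait for sealing, retrieve) into one object. A sketch of the intended call pattern, under the same assumptions as the sketches above, with client and miner obtained from a kit builder:

dh := kit.NewDealHarness(t, client, miner)
// seed 6, no CAR export, no fast retrieval; start epoch far enough out to seal in time
dh.MakeFullDeal(ctx, 6, false, false, abi.ChainEpoch(2<<12))

// Deal status strings can be bucketed without enumerating them at each call site:
require.Equal(t, kit.TestDealStateFailed, kit.CategorizeDealState("StorageDealError"))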

itests/kit/funds.go (new file, 39 lines)
@ -0,0 +1,39 @@
package kit
import (
"context"
"testing"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
)
// SendFunds sends funds from the default wallet of the specified sender node
// to the recipient address.
func SendFunds(ctx context.Context, t *testing.T, sender TestFullNode, recipient address.Address, amount abi.TokenAmount) {
senderAddr, err := sender.WalletDefaultAddress(ctx)
if err != nil {
t.Fatal(err)
}
msg := &types.Message{
From: senderAddr,
To: recipient,
Value: amount,
}
sm, err := sender.MpoolPushMessage(ctx, msg, nil)
if err != nil {
t.Fatal(err)
}
res, err := sender.StateWaitMsg(ctx, sm.Cid(), 3, api.LookbackNoLimit, true)
if err != nil {
t.Fatal(err)
}
if res.Receipt.ExitCode != 0 {
t.Fatal("did not successfully send money")
}
}
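For example, several tests in this diff register a second node's address on chain by sending it 1 FIL (1e18 attoFIL) from the first node's default wallet:

kit.SendFunds(ctx, t, fullNode1, fullNodeAddr2, abi.NewTokenAmount(1e18))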

itests/kit/init.go (new file, 32 lines)
@ -0,0 +1,32 @@
package kit
import (
"fmt"
"os"
"strings"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/policy"
logging "github.com/ipfs/go-log/v2"
)
func init() {
bin := os.Args[0]
if !strings.HasSuffix(bin, ".test") {
panic("package itests/kit must only be imported from tests")
}
_ = logging.SetLogLevel("*", "INFO")
policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
err := os.Setenv("BELLMAN_NO_GPU", "1")
if err != nil {
panic(fmt.Sprintf("failed to set BELLMAN_NO_GPU env variable: %s", err))
}
build.InsecurePoStValidation = true
}

itests/kit/log.go (new file, 19 lines)
@ -0,0 +1,19 @@
package kit
import (
"github.com/filecoin-project/lotus/lib/lotuslog"
logging "github.com/ipfs/go-log/v2"
)
func QuietMiningLogs() {
lotuslog.SetupLogLevels()
_ = logging.SetLogLevel("miner", "ERROR")
_ = logging.SetLogLevel("chainstore", "ERROR")
_ = logging.SetLogLevel("chain", "ERROR")
_ = logging.SetLogLevel("sub", "ERROR")
_ = logging.SetLogLevel("storageminer", "ERROR")
_ = logging.SetLogLevel("pubsub", "ERROR")
_ = logging.SetLogLevel("gen", "ERROR")
_ = logging.SetLogLevel("dht/RtRefreshManager", "ERROR")
}

(modified file)

@ -1,4 +1,4 @@
package test
package kit
import (
"bytes"

(modified file)

@ -1,4 +1,4 @@
package test
package kit
import (
"context"
@ -9,12 +9,10 @@ import (
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/api/test"
test2 "github.com/filecoin-project/lotus/node/test"
)
func StartOneNodeOneMiner(ctx context.Context, t *testing.T, blocktime time.Duration) (test.TestNode, address.Address) {
n, sn := test2.RPCMockSbBuilder(t, test.OneFull, test.OneMiner)
func StartOneNodeOneMiner(ctx context.Context, t *testing.T, blocktime time.Duration) (TestFullNode, address.Address) {
n, sn := RPCMockMinerBuilder(t, OneFull, OneMiner)
full := n[0]
miner := sn[0]
@ -30,8 +28,8 @@ func StartOneNodeOneMiner(ctx context.Context, t *testing.T, blocktime time.Dura
}
// Start mining blocks
bm := test.NewBlockMiner(ctx, t, miner, blocktime)
bm.MineBlocks()
bm := NewBlockMiner(t, miner)
bm.MineBlocks(ctx, blocktime)
t.Cleanup(bm.Stop)
// Get the full node's wallet address
@ -44,8 +42,8 @@ func StartOneNodeOneMiner(ctx context.Context, t *testing.T, blocktime time.Dura
return full, fullAddr
}
func StartTwoNodesOneMiner(ctx context.Context, t *testing.T, blocktime time.Duration) ([]test.TestNode, []address.Address) {
n, sn := test2.RPCMockSbBuilder(t, test.TwoFull, test.OneMiner)
func StartTwoNodesOneMiner(ctx context.Context, t *testing.T, blocktime time.Duration) ([]TestFullNode, []address.Address) {
n, sn := RPCMockMinerBuilder(t, TwoFull, OneMiner)
fullNode1 := n[0]
fullNode2 := n[1]
@ -66,8 +64,8 @@ func StartTwoNodesOneMiner(ctx context.Context, t *testing.T, blocktime time.Dur
}
// Start mining blocks
bm := test.NewBlockMiner(ctx, t, miner, blocktime)
bm.MineBlocks()
bm := NewBlockMiner(t, miner)
bm.MineBlocks(ctx, blocktime)
t.Cleanup(bm.Stop)
// Send some funds to register the second node
@ -76,7 +74,7 @@ func StartTwoNodesOneMiner(ctx context.Context, t *testing.T, blocktime time.Dur
t.Fatal(err)
}
test.SendFunds(ctx, t, fullNode1, fullNodeAddr2, abi.NewTokenAmount(1e18))
SendFunds(ctx, t, fullNode1, fullNodeAddr2, abi.NewTokenAmount(1e18))
// Get the first node's address
fullNodeAddr1, err := fullNode1.WalletDefaultAddress(ctx)

(modified file)

@ -1,32 +1,25 @@
package test
package kit
import (
"bytes"
"context"
"crypto/rand"
"io/ioutil"
"net"
"net/http"
"net/http/httptest"
"strings"
"sync"
"testing"
"time"
"github.com/filecoin-project/go-state-types/network"
"github.com/gorilla/mux"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-jsonrpc"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/go-storedcounter"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/client"
"github.com/filecoin-project/lotus/api/test"
"github.com/filecoin-project/lotus/api/v0api"
"github.com/filecoin-project/lotus/api/v1api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain"
@ -57,6 +50,7 @@ import (
"github.com/libp2p/go-libp2p-core/peer"
mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
"github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
"github.com/stretchr/testify/require"
)
@ -67,7 +61,7 @@ func init() {
messagepool.HeadChangeCoalesceMergeInterval = 100 * time.Nanosecond
}
func CreateTestStorageNode(ctx context.Context, t *testing.T, waddr address.Address, act address.Address, pk crypto.PrivKey, tnd test.TestNode, mn mocknet.Mocknet, opts node.Option) test.TestStorageNode {
func CreateTestStorageNode(ctx context.Context, t *testing.T, waddr address.Address, act address.Address, pk crypto.PrivKey, tnd TestFullNode, mn mocknet.Mocknet, opts node.Option) TestMiner {
r := repo.NewMemory(nil)
lr, err := r.Lock(repo.StorageMiner)
@ -91,7 +85,7 @@ func CreateTestStorageNode(ctx context.Context, t *testing.T, waddr address.Addr
require.NoError(t, err)
nic := storedcounter.New(ds, datastore.NewKey(modules.StorageCounterDSPrefix))
for i := 0; i < test.GenesisPreseals; i++ {
for i := 0; i < GenesisPreseals; i++ {
_, err := nic.Next()
require.NoError(t, err)
}
@ -142,10 +136,10 @@ func CreateTestStorageNode(ctx context.Context, t *testing.T, waddr address.Addr
t.Cleanup(func() { _ = stop(context.Background()) })
/*// Bootstrap with full node
remoteAddrs, err := tnd.NetAddrsListen(ctx)
remoteAddrs, err := tnd.NetAddrsListen(Ctx)
require.NoError(t, err)
err = minerapi.NetConnect(ctx, remoteAddrs)
err = minerapi.NetConnect(Ctx, remoteAddrs)
require.NoError(t, err)*/
mineOne := func(ctx context.Context, req lotusminer.MineReq) error {
select {
@ -156,11 +150,11 @@ func CreateTestStorageNode(ctx context.Context, t *testing.T, waddr address.Addr
}
}
return test.TestStorageNode{StorageMiner: minerapi, MineOne: mineOne, Stop: stop}
return TestMiner{StorageMiner: minerapi, MineOne: mineOne, Stop: stop}
}
func storageBuilder(parentNode test.TestNode, mn mocknet.Mocknet, opts node.Option) test.StorageBuilder {
return func(ctx context.Context, t *testing.T, spt abi.RegisteredSealProof, owner address.Address) test.TestStorageNode {
func storageBuilder(parentNode TestFullNode, mn mocknet.Mocknet, opts node.Option) MinerBuilder {
return func(ctx context.Context, t *testing.T, spt abi.RegisteredSealProof, owner address.Address) TestMiner {
pk, _, err := crypto.GenerateEd25519Key(rand.Reader)
require.NoError(t, err)
@ -202,31 +196,32 @@ func storageBuilder(parentNode test.TestNode, mn mocknet.Mocknet, opts node.Opti
}
}
func Builder(t *testing.T, fullOpts []test.FullNodeOpts, storage []test.StorageMiner) ([]test.TestNode, []test.TestStorageNode) {
func Builder(t *testing.T, fullOpts []FullNodeOpts, storage []StorageMiner) ([]TestFullNode, []TestMiner) {
return mockBuilderOpts(t, fullOpts, storage, false)
}
func MockSbBuilder(t *testing.T, fullOpts []test.FullNodeOpts, storage []test.StorageMiner) ([]test.TestNode, []test.TestStorageNode) {
return mockSbBuilderOpts(t, fullOpts, storage, false)
}
func RPCBuilder(t *testing.T, fullOpts []test.FullNodeOpts, storage []test.StorageMiner) ([]test.TestNode, []test.TestStorageNode) {
func RPCBuilder(t *testing.T, fullOpts []FullNodeOpts, storage []StorageMiner) ([]TestFullNode, []TestMiner) {
return mockBuilderOpts(t, fullOpts, storage, true)
}
func RPCMockSbBuilder(t *testing.T, fullOpts []test.FullNodeOpts, storage []test.StorageMiner) ([]test.TestNode, []test.TestStorageNode) {
return mockSbBuilderOpts(t, fullOpts, storage, true)
func MockMinerBuilder(t *testing.T, fullOpts []FullNodeOpts, storage []StorageMiner) ([]TestFullNode, []TestMiner) {
return mockMinerBuilderOpts(t, fullOpts, storage, false)
}
func mockBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []test.StorageMiner, rpc bool) ([]test.TestNode, []test.TestStorageNode) {
func RPCMockMinerBuilder(t *testing.T, fullOpts []FullNodeOpts, storage []StorageMiner) ([]TestFullNode, []TestMiner) {
return mockMinerBuilderOpts(t, fullOpts, storage, true)
}
func mockBuilderOpts(t *testing.T, fullOpts []FullNodeOpts, storage []StorageMiner, rpc bool) ([]TestFullNode, []TestMiner) {
ctx, cancel := context.WithCancel(context.Background())
t.Cleanup(cancel)
mn := mocknet.New(ctx)
fulls := make([]test.TestNode, len(fullOpts))
storers := make([]test.TestStorageNode, len(storage))
fulls := make([]TestFullNode, len(fullOpts))
miners := make([]TestMiner, len(storage))
// *****
pk, _, err := crypto.GenerateEd25519Key(rand.Reader)
require.NoError(t, err)
@ -238,13 +233,17 @@ func mockBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []test.
if len(storage) > 1 {
panic("need more peer IDs")
}
// *****
// PRESEAL SECTION, TRY TO REPLACE WITH BETTER IN THE FUTURE
// TODO: would be great if there was a better way to fake the preseals
var genms []genesis.Miner
var maddrs []address.Address
var genaccs []genesis.Actor
var keys []*wallet.Key
var (
genms []genesis.Miner
maddrs []address.Address
genaccs []genesis.Actor
keys []*wallet.Key
)
var presealDirs []string
for i := 0; i < len(storage); i++ {
@ -256,7 +255,7 @@ func mockBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []test.
if err != nil {
t.Fatal(err)
}
genm, k, err := seed.PreSeal(maddr, abi.RegisteredSealProof_StackedDrg2KiBV1, 0, test.GenesisPreseals, tdir, []byte("make genesis mem random"), nil, true)
genm, k, err := seed.PreSeal(maddr, abi.RegisteredSealProof_StackedDrg2KiBV1, 0, GenesisPreseals, tdir, []byte("make genesis mem random"), nil, true)
if err != nil {
t.Fatal(err)
}
@ -364,17 +363,17 @@ func mockBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []test.
if opts == nil {
opts = node.Options()
}
storers[i] = CreateTestStorageNode(ctx, t, wa, genMiner, pk, f, mn, opts)
if err := storers[i].StorageAddLocal(ctx, presealDirs[i]); err != nil {
miners[i] = CreateTestStorageNode(ctx, t, wa, genMiner, pk, f, mn, opts)
if err := miners[i].StorageAddLocal(ctx, presealDirs[i]); err != nil {
t.Fatalf("%+v", err)
}
/*
sma := storers[i].StorageMiner.(*impl.StorageMinerAPI)
sma := miners[i].StorageMiner.(*impl.StorageMinerAPI)
psd := presealDirs[i]
*/
if rpc {
storers[i] = storerRpc(t, storers[i])
miners[i] = storerRpc(t, miners[i])
}
}
@ -382,44 +381,49 @@ func mockBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []test.
t.Fatal(err)
}
if len(storers) > 0 {
if len(miners) > 0 {
// Mine 2 blocks to set up some CE stuff in some actors
var wait sync.Mutex
wait.Lock()
test.MineUntilBlock(ctx, t, fulls[0], storers[0], func(epoch abi.ChainEpoch) {
bm := NewBlockMiner(t, miners[0])
t.Cleanup(bm.Stop)
bm.MineUntilBlock(ctx, fulls[0], func(epoch abi.ChainEpoch) {
wait.Unlock()
})
wait.Lock()
test.MineUntilBlock(ctx, t, fulls[0], storers[0], func(epoch abi.ChainEpoch) {
bm.MineUntilBlock(ctx, fulls[0], func(epoch abi.ChainEpoch) {
wait.Unlock()
})
wait.Lock()
}
return fulls, storers
return fulls, miners
}
func mockSbBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []test.StorageMiner, rpc bool) ([]test.TestNode, []test.TestStorageNode) {
func mockMinerBuilderOpts(t *testing.T, fullOpts []FullNodeOpts, storage []StorageMiner, rpc bool) ([]TestFullNode, []TestMiner) {
ctx, cancel := context.WithCancel(context.Background())
t.Cleanup(cancel)
mn := mocknet.New(ctx)
fulls := make([]test.TestNode, len(fullOpts))
storers := make([]test.TestStorageNode, len(storage))
fulls := make([]TestFullNode, len(fullOpts))
miners := make([]TestMiner, len(storage))
var genbuf bytes.Buffer
// PRESEAL SECTION, TRY TO REPLACE WITH BETTER IN THE FUTURE
// TODO: would be great if there was a better way to fake the preseals
var genms []genesis.Miner
var genaccs []genesis.Actor
var maddrs []address.Address
var keys []*wallet.Key
var pidKeys []crypto.PrivKey
var (
genms []genesis.Miner
genaccs []genesis.Actor
maddrs []address.Address
keys []*wallet.Key
pidKeys []crypto.PrivKey
)
for i := 0; i < len(storage); i++ {
maddr, err := address.NewIDAddress(genesis2.MinerStart + uint64(i))
if err != nil {
@ -427,8 +431,8 @@ func mockSbBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []tes
}
preseals := storage[i].Preseal
if preseals == test.PresealGenesis {
preseals = test.GenesisPreseals
if preseals == PresealGenesis {
preseals = GenesisPreseals
}
genm, k, err := mockstorage.PreSeal(abi.RegisteredSealProof_StackedDrg2KiBV1, maddr, preseals)
@ -568,7 +572,7 @@ func mockSbBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []tes
if opts == nil {
opts = node.Options()
}
storers[i] = CreateTestStorageNode(ctx, t, genms[i].Worker, maddrs[i], pidKeys[i], f, mn, node.Options(
miners[i] = CreateTestStorageNode(ctx, t, genms[i].Worker, maddrs[i], pidKeys[i], f, mn, node.Options(
node.Override(new(*mock.SectorMgr), func() (*mock.SectorMgr, error) {
return mock.NewMockSectorMgr(sectors), nil
}),
@ -584,7 +588,7 @@ func mockSbBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []tes
))
if rpc {
storers[i] = storerRpc(t, storers[i])
miners[i] = storerRpc(t, miners[i])
}
}
@ -592,99 +596,63 @@ func mockSbBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []tes
t.Fatal(err)
}
if len(storers) > 0 {
bm := NewBlockMiner(t, miners[0])
if len(miners) > 0 {
// Mine 2 blocks to set up some CE stuff in some actors
var wait sync.Mutex
wait.Lock()
test.MineUntilBlock(ctx, t, fulls[0], storers[0], func(abi.ChainEpoch) {
bm.MineUntilBlock(ctx, fulls[0], func(abi.ChainEpoch) {
wait.Unlock()
})
wait.Lock()
test.MineUntilBlock(ctx, t, fulls[0], storers[0], func(abi.ChainEpoch) {
bm.MineUntilBlock(ctx, fulls[0], func(abi.ChainEpoch) {
wait.Unlock()
})
wait.Lock()
}
return fulls, storers
return fulls, miners
}
func fullRpc(t *testing.T, nd test.TestNode) test.TestNode {
ma, listenAddr, err := CreateRPCServer(t, map[string]interface{}{
"/rpc/v1": nd,
"/rpc/v0": &v0api.WrapperV1Full{FullNode: nd},
})
require.NoError(t, err)
var stop func()
var full test.TestNode
full.FullNode, stop, err = client.NewFullNodeRPCV1(context.Background(), listenAddr+"/rpc/v1", nil)
require.NoError(t, err)
t.Cleanup(stop)
full.ListenAddr = ma
return full
}
func storerRpc(t *testing.T, nd test.TestStorageNode) test.TestStorageNode {
ma, listenAddr, err := CreateRPCServer(t, map[string]interface{}{
"/rpc/v0": nd,
})
require.NoError(t, err)
var stop func()
var storer test.TestStorageNode
storer.StorageMiner, stop, err = client.NewStorageMinerRPCV0(context.Background(), listenAddr+"/rpc/v0", nil)
require.NoError(t, err)
t.Cleanup(stop)
storer.ListenAddr = ma
storer.MineOne = nd.MineOne
return storer
}
func CreateRPCServer(t *testing.T, handlers map[string]interface{}) (multiaddr.Multiaddr, string, error) {
m := mux.NewRouter()
for path, handler := range handlers {
rpcServer := jsonrpc.NewServer()
rpcServer.Register("Filecoin", handler)
m.Handle(path, rpcServer)
}
testServ := httptest.NewServer(m) // todo: close
func CreateRPCServer(t *testing.T, handler http.Handler) (*httptest.Server, multiaddr.Multiaddr) {
testServ := httptest.NewServer(handler)
t.Cleanup(testServ.Close)
t.Cleanup(testServ.CloseClientConnections)
addr := testServ.Listener.Addr()
listenAddr := "ws://" + addr.String()
ma, err := parseWSMultiAddr(addr)
if err != nil {
return nil, "", err
}
return ma, listenAddr, err
maddr, err := manet.FromNetAddr(addr)
require.NoError(t, err)
return testServ, maddr
}
func parseWSMultiAddr(addr net.Addr) (multiaddr.Multiaddr, error) {
host, port, err := net.SplitHostPort(addr.String())
if err != nil {
return nil, err
}
ma, err := multiaddr.NewMultiaddr("/ip4/" + host + "/" + addr.Network() + "/" + port + "/ws")
if err != nil {
return nil, err
}
return ma, nil
func fullRpc(t *testing.T, nd TestFullNode) TestFullNode {
handler, err := node.FullNodeHandler(nd.FullNode, false)
require.NoError(t, err)
srv, maddr := CreateRPCServer(t, handler)
var ret TestFullNode
cl, stop, err := client.NewFullNodeRPCV1(context.Background(), "ws://"+srv.Listener.Addr().String()+"/rpc/v1", nil)
require.NoError(t, err)
t.Cleanup(stop)
ret.ListenAddr, ret.FullNode = maddr, cl
return ret
}
func WSMultiAddrToString(addr multiaddr.Multiaddr) (string, error) {
parts := strings.Split(addr.String(), "/")
if len(parts) != 6 || parts[0] != "" {
return "", xerrors.Errorf("Malformed ws multiaddr %s", addr)
}
func storerRpc(t *testing.T, nd TestMiner) TestMiner {
handler, err := node.MinerHandler(nd.StorageMiner, false)
require.NoError(t, err)
host := parts[2]
port := parts[4]
proto := parts[5]
srv, maddr := CreateRPCServer(t, handler)
return proto + "://" + host + ":" + port + "/rpc/v0", nil
var ret TestMiner
cl, stop, err := client.NewStorageMinerRPCV0(context.Background(), "ws://"+srv.Listener.Addr().String()+"/rpc/v0", nil)
require.NoError(t, err)
t.Cleanup(stop)
ret.ListenAddr, ret.StorageMiner, ret.MineOne = maddr, cl, nd.MineOne
return ret
}
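With the renamed builders, a test switches between in-process API handles and real JSON-RPC clients with a one-word change. A sketch under the same assumptions as the sketches above:

// in-process handles:
n, sn := kit.MockMinerBuilder(t, kit.OneFull, kit.OneMiner)
// or websocket JSON-RPC clients, wrapped through fullRpc/storerRpc:
n, sn = kit.RPCMockMinerBuilder(t, kit.OneFull, kit.OneMiner)
full, miner := n[0], sn[0]

// connect the nodes and mine on a 5ms timer; ConnectAndStartMining
// registers its own t.Cleanup, so no explicit Stop is needed here.
_ = kit.ConnectAndStartMining(t, 5*time.Millisecond, miner, full)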

itests/kit/nodes.go (new file, 153 lines)
@ -0,0 +1,153 @@
package kit
import (
"context"
"testing"
"github.com/multiformats/go-multiaddr"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/network"
lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/v1api"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/miner"
"github.com/filecoin-project/lotus/node"
)
type MinerBuilder func(context.Context, *testing.T, abi.RegisteredSealProof, address.Address) TestMiner
type TestFullNode struct {
v1api.FullNode
// ListenAddr is the address on which an API server is listening, if an
// API server is created for this node.
ListenAddr multiaddr.Multiaddr
Stb MinerBuilder
}
type TestMiner struct {
lapi.StorageMiner
// ListenAddr is the address on which an API server is listening, if an
// API server is created for this node.
ListenAddr multiaddr.Multiaddr
MineOne func(context.Context, miner.MineReq) error
Stop func(context.Context) error
}
var PresealGenesis = -1
const GenesisPreseals = 2
const TestSpt = abi.RegisteredSealProof_StackedDrg2KiBV1_1
// StorageMiner defines options for setting up a mock storage miner.
type StorageMiner struct {
Full int
Opts node.Option
Preseal int
}
type OptionGenerator func([]TestFullNode) node.Option
// FullNodeOpts defines options for setting up a mock full node.
type FullNodeOpts struct {
Lite bool // run node in "lite" mode
Opts OptionGenerator // generate dependency injection options
}
// APIBuilder is a function invoked by the test suite to provide
// test nodes and networks.
//
// The fullOpts array defines options for each full node; the storage array
// defines the storage miners, where the Full field of each entry is the index
// of the full node that miner 'belongs' to.
type APIBuilder func(t *testing.T, full []FullNodeOpts, storage []StorageMiner) ([]TestFullNode, []TestMiner)
func DefaultFullOpts(nFull int) []FullNodeOpts {
full := make([]FullNodeOpts, nFull)
for i := range full {
full[i] = FullNodeOpts{
Opts: func(nodes []TestFullNode) node.Option {
return node.Options()
},
}
}
return full
}
var OneMiner = []StorageMiner{{Full: 0, Preseal: PresealGenesis}}
var OneFull = DefaultFullOpts(1)
var TwoFull = DefaultFullOpts(2)
var FullNodeWithLatestActorsAt = func(upgradeHeight abi.ChainEpoch) FullNodeOpts {
// Attention: Update this when introducing new actor versions or your tests will be sad
return FullNodeWithNetworkUpgradeAt(network.Version13, upgradeHeight)
}
var FullNodeWithNetworkUpgradeAt = func(version network.Version, upgradeHeight abi.ChainEpoch) FullNodeOpts {
fullSchedule := stmgr.UpgradeSchedule{{
// prepare for upgrade.
Network: network.Version9,
Height: 1,
Migration: stmgr.UpgradeActorsV2,
}, {
Network: network.Version10,
Height: 2,
Migration: stmgr.UpgradeActorsV3,
}, {
Network: network.Version12,
Height: 3,
Migration: stmgr.UpgradeActorsV4,
}, {
Network: network.Version13,
Height: 4,
Migration: stmgr.UpgradeActorsV5,
}}
schedule := stmgr.UpgradeSchedule{}
for _, upgrade := range fullSchedule {
if upgrade.Network > version {
break
}
schedule = append(schedule, upgrade)
}
if upgradeHeight > 0 {
schedule[len(schedule)-1].Height = upgradeHeight
}
return FullNodeOpts{
Opts: func(nodes []TestFullNode) node.Option {
return node.Override(new(stmgr.UpgradeSchedule), schedule)
},
}
}
var FullNodeWithSDRAt = func(calico, persian abi.ChainEpoch) FullNodeOpts {
return FullNodeOpts{
Opts: func(nodes []TestFullNode) node.Option {
return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
Network: network.Version6,
Height: 1,
Migration: stmgr.UpgradeActorsV2,
}, {
Network: network.Version7,
Height: calico,
Migration: stmgr.UpgradeCalico,
}, {
Network: network.Version8,
Height: persian,
}})
},
}
}
var MineNext = miner.MineReq{
InjectNulls: 0,
Done: func(bool, abi.ChainEpoch, error) {},
}
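These option helpers compose directly with the builders. For instance, a full node that has completed the actors v5 migration (network.Version13) by epoch 10 would be set up like this (a sketch, not taken from this diff; network is the go-state-types import above):

opts := []kit.FullNodeOpts{kit.FullNodeWithNetworkUpgradeAt(network.Version13, 10)}
n, sn := kit.MockMinerBuilder(t, opts, kit.OneMiner)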

itests/kit/pledge.go (new file, 88 lines)
@ -0,0 +1,88 @@
package kit
import (
"context"
"fmt"
"strings"
"testing"
"time"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
"github.com/stretchr/testify/require"
)
func PledgeSectors(t *testing.T, ctx context.Context, miner TestMiner, n, existing int, blockNotif <-chan struct{}) { //nolint:golint
toCheck := StartPledge(t, ctx, miner, n, existing, blockNotif)
for len(toCheck) > 0 {
flushSealingBatches(t, ctx, miner)
states := map[api.SectorState]int{}
for n := range toCheck {
st, err := miner.SectorsStatus(ctx, n, false)
require.NoError(t, err)
states[st.State]++
if st.State == api.SectorState(sealing.Proving) {
delete(toCheck, n)
}
if strings.Contains(string(st.State), "Fail") {
t.Fatal("sector in a failed state", st.State)
}
}
build.Clock.Sleep(100 * time.Millisecond)
fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states)
}
}
func flushSealingBatches(t *testing.T, ctx context.Context, miner TestMiner) { //nolint:golint
pcb, err := miner.SectorPreCommitFlush(ctx)
require.NoError(t, err)
if pcb != nil {
fmt.Printf("PRECOMMIT BATCH: %+v\n", pcb)
}
cb, err := miner.SectorCommitFlush(ctx)
require.NoError(t, err)
if cb != nil {
fmt.Printf("COMMIT BATCH: %+v\n", cb)
}
}
func StartPledge(t *testing.T, ctx context.Context, miner TestMiner, n, existing int, blockNotif <-chan struct{}) map[abi.SectorNumber]struct{} { //nolint:golint
for i := 0; i < n; i++ {
if i%3 == 0 && blockNotif != nil {
<-blockNotif
t.Log("WAIT")
}
t.Logf("PLEDGING %d", i)
_, err := miner.PledgeSector(ctx)
require.NoError(t, err)
}
for {
s, err := miner.SectorsList(ctx) // Note - the test builder doesn't import genesis sectors into FSM
require.NoError(t, err)
fmt.Printf("Sectors: %d\n", len(s))
if len(s) >= n+existing {
break
}
build.Clock.Sleep(100 * time.Millisecond)
}
fmt.Printf("All sectors is fsm\n")
s, err := miner.SectorsList(ctx)
require.NoError(t, err)
toCheck := map[abi.SectorNumber]struct{}{}
for _, number := range s {
toCheck[number] = struct{}{}
}
return toCheck
}
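PledgeSectors blocks until every newly pledged sector reaches Proving, flushing precommit and commit batches as it polls. For example, pledging three sectors with no gating channel, mirroring the tests below:

kit.PledgeSectors(t, ctx, miner, 3, 0, nil) // 3 new sectors, 0 already in the sealing FSM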

(modified file)

@ -1,24 +1,37 @@
package test
package itests
import (
"context"
"fmt"
"os"
"regexp"
"strings"
"testing"
"time"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/api/test"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/stretchr/testify/require"
lcli "github.com/urfave/cli/v2"
)
func RunMultisigTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode) {
ctx := context.Background()
// TestMultisig does a basic test to exercise the multisig CLI commands
func TestMultisig(t *testing.T) {
_ = os.Setenv("BELLMAN_NO_GPU", "1")
kit.QuietMiningLogs()
blocktime := 5 * time.Millisecond
ctx := context.Background()
clientNode, _ := kit.StartOneNodeOneMiner(ctx, t, blocktime)
runMultisigTests(t, clientNode)
}
func runMultisigTests(t *testing.T, clientNode kit.TestFullNode) {
// Create mock CLI
mockCLI := NewMockCLI(ctx, t, cmds)
ctx := context.Background()
mockCLI := kit.NewMockCLI(ctx, t, cli.Commands)
clientCLI := mockCLI.Client(clientNode.ListenAddr)
// Create some wallets on the node to use for testing multisig
@ -29,7 +42,7 @@ func RunMultisigTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNod
walletAddrs = append(walletAddrs, addr)
test.SendFunds(ctx, t, clientNode, addr, types.NewInt(1e15))
kit.SendFunds(ctx, t, clientNode, addr, types.NewInt(1e15))
}
// Create an msig with three of the addresses and threshold of two sigs

(modified file)

@ -1,14 +1,14 @@
package test
package itests
import (
"context"
"fmt"
"sync/atomic"
"testing"
"time"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/go-address"
@ -26,9 +26,11 @@ import (
"github.com/filecoin-project/lotus/chain/types"
)
func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) {
func TestPaymentChannelsAPI(t *testing.T) {
kit.QuietMiningLogs()
ctx := context.Background()
n, sn := b(t, TwoFull, OneMiner)
n, sn := kit.MockMinerBuilder(t, kit.TwoFull, kit.OneMiner)
paymentCreator := n[0]
paymentReceiver := n[1]
@ -49,8 +51,9 @@ func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) {
}
// start mining blocks
bm := NewBlockMiner(ctx, t, miner, blocktime)
bm.MineBlocks()
bm := kit.NewBlockMiner(t, miner)
bm.MineBlocks(ctx, 5*time.Millisecond)
t.Cleanup(bm.Stop)
// send some funds to register the receiver
receiverAddr, err := paymentReceiver.WalletNew(ctx, types.KTSecp256k1)
@ -58,7 +61,7 @@ func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) {
t.Fatal(err)
}
SendFunds(ctx, t, paymentCreator, receiverAddr, abi.NewTokenAmount(1e18))
kit.SendFunds(ctx, t, paymentCreator, receiverAddr, abi.NewTokenAmount(1e18))
// setup the payment channel
createrAddr, err := paymentCreator.WalletDefaultAddress(ctx)
@ -173,7 +176,7 @@ func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) {
select {
case <-finished:
case <-time.After(time.Second):
case <-time.After(10 * time.Second):
t.Fatal("Timed out waiting for receiver to submit vouchers")
}
@ -265,7 +268,7 @@ func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) {
bm.Stop()
}
func waitForBlocks(ctx context.Context, t *testing.T, bm *BlockMiner, paymentReceiver TestNode, receiverAddr address.Address, count int) {
func waitForBlocks(ctx context.Context, t *testing.T, bm *kit.BlockMiner, paymentReceiver kit.TestFullNode, receiverAddr address.Address, count int) {
// We need to add null blocks in batches; if we add too many at once, the chain can't sync
batchSize := 60
for i := 0; i < count; i += batchSize {
@ -274,8 +277,8 @@ func waitForBlocks(ctx context.Context, t *testing.T, bm *BlockMiner, paymentRec
size = count - i
}
// Add a batch of null blocks
atomic.StoreInt64(&bm.nulls, int64(size-1))
// Add a batch of null blocks to advance the chain quicker through finalities.
bm.InjectNulls(abi.ChainEpoch(size - 1))
// Add a real block
m, err := paymentReceiver.MpoolPushMessage(ctx, &types.Message{
@ -294,7 +297,7 @@ func waitForBlocks(ctx context.Context, t *testing.T, bm *BlockMiner, paymentRec
}
}
func waitForMessage(ctx context.Context, t *testing.T, paymentCreator TestNode, msgCid cid.Cid, duration time.Duration, desc string) *api.MsgLookup {
func waitForMessage(ctx context.Context, t *testing.T, paymentCreator kit.TestFullNode, msgCid cid.Cid, duration time.Duration, desc string) *api.MsgLookup {
ctx, cancel := context.WithTimeout(ctx, duration)
defer cancel()

(modified file)

@ -1,4 +1,4 @@
package cli
package itests
import (
"context"
@ -10,7 +10,8 @@ import (
"testing"
"time"
clitest "github.com/filecoin-project/lotus/cli/test"
"github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
@ -20,7 +21,6 @@ import (
cbor "github.com/ipfs/go-ipld-cbor"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/lotus/api/test"
"github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/events"
@ -35,20 +35,20 @@ func init() {
// TestPaymentChannels does a basic test to exercise the payment channel CLI
// commands
func TestPaymentChannels(t *testing.T) {
func TestPaymentChannelsBasic(t *testing.T) {
_ = os.Setenv("BELLMAN_NO_GPU", "1")
clitest.QuietMiningLogs()
kit.QuietMiningLogs()
blocktime := 5 * time.Millisecond
ctx := context.Background()
nodes, addrs := clitest.StartTwoNodesOneMiner(ctx, t, blocktime)
nodes, addrs := kit.StartTwoNodesOneMiner(ctx, t, blocktime)
paymentCreator := nodes[0]
paymentReceiver := nodes[1]
creatorAddr := addrs[0]
receiverAddr := addrs[1]
// Create mock CLI
mockCLI := clitest.NewMockCLI(ctx, t, Commands)
mockCLI := kit.NewMockCLI(ctx, t, cli.Commands)
creatorCLI := mockCLI.Client(paymentCreator.ListenAddr)
receiverCLI := mockCLI.Client(paymentReceiver.ListenAddr)
@ -89,17 +89,17 @@ type voucherSpec struct {
// TestPaymentChannelStatus tests the payment channel status CLI command
func TestPaymentChannelStatus(t *testing.T) {
_ = os.Setenv("BELLMAN_NO_GPU", "1")
clitest.QuietMiningLogs()
kit.QuietMiningLogs()
blocktime := 5 * time.Millisecond
ctx := context.Background()
nodes, addrs := clitest.StartTwoNodesOneMiner(ctx, t, blocktime)
nodes, addrs := kit.StartTwoNodesOneMiner(ctx, t, blocktime)
paymentCreator := nodes[0]
creatorAddr := addrs[0]
receiverAddr := addrs[1]
// Create mock CLI
mockCLI := clitest.NewMockCLI(ctx, t, Commands)
mockCLI := kit.NewMockCLI(ctx, t, cli.Commands)
creatorCLI := mockCLI.Client(paymentCreator.ListenAddr)
// creator: paych status-by-from-to <creator> <receiver>
@ -168,18 +168,18 @@ func TestPaymentChannelStatus(t *testing.T) {
// channel voucher commands
func TestPaymentChannelVouchers(t *testing.T) {
_ = os.Setenv("BELLMAN_NO_GPU", "1")
clitest.QuietMiningLogs()
kit.QuietMiningLogs()
blocktime := 5 * time.Millisecond
ctx := context.Background()
nodes, addrs := clitest.StartTwoNodesOneMiner(ctx, t, blocktime)
nodes, addrs := kit.StartTwoNodesOneMiner(ctx, t, blocktime)
paymentCreator := nodes[0]
paymentReceiver := nodes[1]
creatorAddr := addrs[0]
receiverAddr := addrs[1]
// Create mock CLI
mockCLI := clitest.NewMockCLI(ctx, t, Commands)
mockCLI := kit.NewMockCLI(ctx, t, cli.Commands)
creatorCLI := mockCLI.Client(paymentCreator.ListenAddr)
receiverCLI := mockCLI.Client(paymentReceiver.ListenAddr)
@ -300,17 +300,17 @@ func TestPaymentChannelVouchers(t *testing.T) {
// is greater than what's left in the channel, voucher create fails
func TestPaymentChannelVoucherCreateShortfall(t *testing.T) {
_ = os.Setenv("BELLMAN_NO_GPU", "1")
clitest.QuietMiningLogs()
kit.QuietMiningLogs()
blocktime := 5 * time.Millisecond
ctx := context.Background()
nodes, addrs := clitest.StartTwoNodesOneMiner(ctx, t, blocktime)
nodes, addrs := kit.StartTwoNodesOneMiner(ctx, t, blocktime)
paymentCreator := nodes[0]
creatorAddr := addrs[0]
receiverAddr := addrs[1]
// Create mock CLI
mockCLI := clitest.NewMockCLI(ctx, t, Commands)
mockCLI := kit.NewMockCLI(ctx, t, cli.Commands)
creatorCLI := mockCLI.Client(paymentCreator.ListenAddr)
// creator: paych add-funds <creator> <receiver> <amount>
@ -378,7 +378,7 @@ func checkVoucherOutput(t *testing.T, list string, vouchers []voucherSpec) {
}
// waitForHeight waits for the node to reach the given chain epoch
func waitForHeight(ctx context.Context, t *testing.T, node test.TestNode, height abi.ChainEpoch) {
func waitForHeight(ctx context.Context, t *testing.T, node kit.TestFullNode, height abi.ChainEpoch) {
atHeight := make(chan struct{})
chainEvents := events.NewEvents(ctx, node)
err := chainEvents.ChainAt(func(ctx context.Context, ts *types.TipSet, curH abi.ChainEpoch) error {
@ -396,7 +396,7 @@ func waitForHeight(ctx context.Context, t *testing.T, node test.TestNode, height
}
// getPaychState gets the state of the payment channel with the given address
func getPaychState(ctx context.Context, t *testing.T, node test.TestNode, chAddr address.Address) paych.State {
func getPaychState(ctx context.Context, t *testing.T, node kit.TestFullNode, chAddr address.Address) paych.State {
act, err := node.StateGetActor(ctx, chAddr, types.EmptyTSK)
require.NoError(t, err)

itests/sdr_upgrade_test.go (new file, 112 lines)
@ -0,0 +1,112 @@
package itests
import (
"context"
"sort"
"sync/atomic"
"testing"
"time"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/itests/kit"
bminer "github.com/filecoin-project/lotus/miner"
"github.com/filecoin-project/lotus/node/impl"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestSDRUpgrade(t *testing.T) {
kit.QuietMiningLogs()
// oldDelay := policy.GetPreCommitChallengeDelay()
// policy.SetPreCommitChallengeDelay(5)
// t.Cleanup(func() {
// policy.SetPreCommitChallengeDelay(oldDelay)
// })
blocktime := 50 * time.Millisecond
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n, sn := kit.MockMinerBuilder(t, []kit.FullNodeOpts{kit.FullNodeWithSDRAt(500, 1000)}, kit.OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
build.Clock.Sleep(time.Second)
pledge := make(chan struct{})
mine := int64(1)
done := make(chan struct{})
go func() {
defer close(done)
round := 0
for atomic.LoadInt64(&mine) != 0 {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
}}); err != nil {
t.Error(err)
}
// 3 sealing rounds: before, during, and after the upgrade.
if round >= 3 {
continue
}
head, err := client.ChainHead(ctx)
assert.NoError(t, err)
// rounds happen every 500 blocks, with a 50 block offset.
if head.Height() >= abi.ChainEpoch(round*500+50) {
round++
pledge <- struct{}{}
ver, err := client.StateNetworkVersion(ctx, head.Key())
assert.NoError(t, err)
switch round {
case 1:
assert.Equal(t, network.Version6, ver)
case 2:
assert.Equal(t, network.Version7, ver)
case 3:
assert.Equal(t, network.Version8, ver)
}
}
}
}()
// before.
kit.PledgeSectors(t, ctx, miner, 9, 0, pledge)
s, err := miner.SectorsList(ctx)
require.NoError(t, err)
sort.Slice(s, func(i, j int) bool {
return s[i] < s[j]
})
for i, id := range s {
info, err := miner.SectorsStatus(ctx, id, true)
require.NoError(t, err)
expectProof := abi.RegisteredSealProof_StackedDrg2KiBV1
if i >= 3 {
// after
expectProof = abi.RegisteredSealProof_StackedDrg2KiBV1_1
}
assert.Equal(t, expectProof, info.SealProof, "sector %d, id %d", i, id)
}
atomic.StoreInt64(&mine, 0)
<-done
}

(new file)

@ -0,0 +1,261 @@
package itests
import (
"context"
"fmt"
"strings"
"sync/atomic"
"testing"
"time"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/stmgr"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
"github.com/filecoin-project/lotus/itests/kit"
bminer "github.com/filecoin-project/lotus/miner"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/impl"
"github.com/stretchr/testify/require"
)
func TestPledgeSectors(t *testing.T) {
kit.QuietMiningLogs()
runTest := func(t *testing.T, b kit.APIBuilder, blocktime time.Duration, nSectors int) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n, sn := b(t, kit.OneFull, kit.OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
build.Clock.Sleep(time.Second)
mine := int64(1)
done := make(chan struct{})
go func() {
defer close(done)
for atomic.LoadInt64(&mine) != 0 {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
}}); err != nil {
t.Error(err)
}
}
}()
kit.PledgeSectors(t, ctx, miner, nSectors, 0, nil)
atomic.StoreInt64(&mine, 0)
<-done
}
t.Run("1", func(t *testing.T) {
runTest(t, kit.MockMinerBuilder, 50*time.Millisecond, 1)
})
t.Run("100", func(t *testing.T) {
runTest(t, kit.MockMinerBuilder, 50*time.Millisecond, 100)
})
t.Run("1000", func(t *testing.T) {
if testing.Short() { // takes ~16s
t.Skip("skipping test in short mode")
}
runTest(t, kit.MockMinerBuilder, 50*time.Millisecond, 1000)
})
}
func TestPledgeBatching(t *testing.T) {
runTest := func(t *testing.T, b kit.APIBuilder, blocktime time.Duration, nSectors int) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n, sn := b(t, []kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(-1)}, kit.OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
build.Clock.Sleep(time.Second)
mine := int64(1)
done := make(chan struct{})
go func() {
defer close(done)
for atomic.LoadInt64(&mine) != 0 {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
}}); err != nil {
t.Error(err)
}
}
}()
for {
h, err := client.ChainHead(ctx)
require.NoError(t, err)
if h.Height() > 10 {
break
}
}
toCheck := kit.StartPledge(t, ctx, miner, nSectors, 0, nil)
for len(toCheck) > 0 {
states := map[api.SectorState]int{}
for n := range toCheck {
st, err := miner.SectorsStatus(ctx, n, false)
require.NoError(t, err)
states[st.State]++
if st.State == api.SectorState(sealing.Proving) {
delete(toCheck, n)
}
if strings.Contains(string(st.State), "Fail") {
t.Fatal("sector in a failed state", st.State)
}
}
if states[api.SectorState(sealing.SubmitPreCommitBatch)] == nSectors ||
(states[api.SectorState(sealing.SubmitPreCommitBatch)] > 0 && states[api.SectorState(sealing.PreCommit1)] == 0 && states[api.SectorState(sealing.PreCommit2)] == 0) {
pcb, err := miner.SectorPreCommitFlush(ctx)
require.NoError(t, err)
if pcb != nil {
fmt.Printf("PRECOMMIT BATCH: %+v\n", pcb)
}
}
if states[api.SectorState(sealing.SubmitCommitAggregate)] == nSectors ||
(states[api.SectorState(sealing.SubmitCommitAggregate)] > 0 && states[api.SectorState(sealing.WaitSeed)] == 0 && states[api.SectorState(sealing.Committing)] == 0) {
cb, err := miner.SectorCommitFlush(ctx)
require.NoError(t, err)
if cb != nil {
fmt.Printf("COMMIT BATCH: %+v\n", cb)
}
}
build.Clock.Sleep(100 * time.Millisecond)
fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states)
}
atomic.StoreInt64(&mine, 0)
<-done
}
t.Run("100", func(t *testing.T) {
runTest(t, kit.MockMinerBuilder, 50*time.Millisecond, 100)
})
}
func TestPledgeBeforeNv13(t *testing.T) {
runTest := func(t *testing.T, b kit.APIBuilder, blocktime time.Duration, nSectors int) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
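// Schedule upgrades so actors v2-v4 activate within the first few epochs,
// while nv13 (actors v5) sits far in the future; pledging therefore runs on
// the pre-nv13, non-batched path this test is meant to exercise.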
n, sn := b(t, []kit.FullNodeOpts{
{
Opts: func(nodes []kit.TestFullNode) node.Option {
return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
Network: network.Version9,
Height: 1,
Migration: stmgr.UpgradeActorsV2,
}, {
Network: network.Version10,
Height: 2,
Migration: stmgr.UpgradeActorsV3,
}, {
Network: network.Version12,
Height: 3,
Migration: stmgr.UpgradeActorsV4,
}, {
Network: network.Version13,
Height: 1000000000,
Migration: stmgr.UpgradeActorsV5,
}})
},
},
}, kit.OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
build.Clock.Sleep(time.Second)
mine := int64(1)
done := make(chan struct{})
go func() {
defer close(done)
for atomic.LoadInt64(&mine) != 0 {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
}}); err != nil {
t.Error(err)
}
}
}()
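// Let the chain advance past the scheduled upgrade heights before pledging.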
for {
h, err := client.ChainHead(ctx)
require.NoError(t, err)
if h.Height() > 10 {
break
}
}
toCheck := kit.StartPledge(t, ctx, miner, nSectors, 0, nil)
for len(toCheck) > 0 {
states := map[api.SectorState]int{}
for n := range toCheck {
st, err := miner.SectorsStatus(ctx, n, false)
require.NoError(t, err)
states[st.State]++
if st.State == api.SectorState(sealing.Proving) {
delete(toCheck, n)
}
if strings.Contains(string(st.State), "Fail") {
t.Fatal("sector in a failed state", st.State)
}
}
build.Clock.Sleep(100 * time.Millisecond)
fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states)
}
atomic.StoreInt64(&mine, 0)
<-done
}
t.Run("100-before-nv13", func(t *testing.T) {
runTest(t, kit.MockMinerBuilder, 50*time.Millisecond, 100)
})
}

View File

@ -0,0 +1,201 @@
package itests
import (
"context"
"fmt"
"os"
"testing"
"time"
"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/node/impl"
"github.com/stretchr/testify/require"
)
func TestTerminate(t *testing.T) {
if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" {
t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
}
kit.QuietMiningLogs()
const blocktime = 2 * time.Millisecond
nSectors := uint64(2)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n, sn := kit.MockMinerBuilder(t,
[]kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(-1)},
[]kit.StorageMiner{{Full: 0, Preseal: int(nSectors)}},
)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
build.Clock.Sleep(time.Second)
done := make(chan struct{})
go func() {
defer close(done)
for ctx.Err() == nil {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, kit.MineNext); err != nil {
if ctx.Err() != nil {
// context was canceled, ignore the error.
return
}
t.Error(err)
}
}
}()
defer func() {
cancel()
<-done
}()
maddr, err := miner.ActorAddress(ctx)
require.NoError(t, err)
ssz, err := miner.ActorSectorSize(ctx, maddr)
require.NoError(t, err)
p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
require.Equal(t, p.MinerPower, p.TotalPower)
require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*nSectors))
fmt.Printf("Seal a sector\n")
kit.PledgeSectors(t, ctx, miner, 1, 0, nil)
fmt.Printf("wait for power\n")
{
// Wait until proven.
di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
waitUntil := di.PeriodStart + di.WPoStProvingPeriod + 2
fmt.Printf("End for head.Height > %d\n", waitUntil)
for {
head, err := client.ChainHead(ctx)
require.NoError(t, err)
if head.Height() > waitUntil {
fmt.Printf("Now head.Height = %d\n", head.Height())
break
}
}
}
nSectors++
p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
require.Equal(t, p.MinerPower, p.TotalPower)
require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*nSectors))
fmt.Println("Terminate a sector")
toTerminate := abi.SectorNumber(3)
err = miner.SectorTerminate(ctx, toTerminate)
require.NoError(t, err)
msgTriggered := false
loop:
for {
si, err := miner.SectorsStatus(ctx, toTerminate, false)
require.NoError(t, err)
fmt.Println("state: ", si.State, msgTriggerred)
switch sealing.SectorState(si.State) {
case sealing.Terminating:
if !msgTriggered {
{
p, err := miner.SectorTerminatePending(ctx)
require.NoError(t, err)
require.Len(t, p, 1)
require.Equal(t, abi.SectorNumber(3), p[0].Number)
}
c, err := miner.SectorTerminateFlush(ctx)
require.NoError(t, err)
if c != nil {
msgTriggered = true
fmt.Println("terminate message:", c)
{
p, err := miner.SectorTerminatePending(ctx)
require.NoError(t, err)
require.Len(t, p, 0)
}
}
}
case sealing.TerminateWait, sealing.TerminateFinality, sealing.Removed:
break loop
}
time.Sleep(100 * time.Millisecond)
}
// check power decreased
p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
require.Equal(t, p.MinerPower, p.TotalPower)
require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*(nSectors-1)))
// check in terminated set
{
parts, err := client.StateMinerPartitions(ctx, maddr, 1, types.EmptyTSK)
require.NoError(t, err)
require.Greater(t, len(parts), 0)
bflen := func(b bitfield.BitField) uint64 {
l, err := b.Count()
require.NoError(t, err)
return l
}
require.Equal(t, uint64(1), bflen(parts[0].AllSectors))
require.Equal(t, uint64(0), bflen(parts[0].LiveSectors))
}
di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
for {
head, err := client.ChainHead(ctx)
require.NoError(t, err)
if head.Height() > di.PeriodStart+di.WPoStProvingPeriod+2 {
fmt.Printf("Now head.Height = %d\n", head.Height())
break
}
build.Clock.Sleep(blocktime)
}
require.NoError(t, err)
fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2)
p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
require.Equal(t, p.MinerPower, p.TotalPower)
require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*(nSectors-1)))
}

View File

@ -1,4 +1,4 @@
package test
package itests
import (
"context"
@ -11,18 +11,24 @@ import (
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/stmgr"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/impl"
"github.com/stretchr/testify/require"
)
func TestTapeFix(t *testing.T, b APIBuilder, blocktime time.Duration) {
func TestTapeFix(t *testing.T) {
kit.QuietMiningLogs()
var blocktime = 2 * time.Millisecond
// The "before" case is disabled, because we need the builder to mock 32 GiB sectors to accurately repro this case
// TODO: Make the mock sector size configurable and reenable this
//t.Run("before", func(t *testing.T) { testTapeFix(t, b, blocktime, false) })
t.Run("after", func(t *testing.T) { testTapeFix(t, b, blocktime, true) })
// t.Run("before", func(t *testing.T) { testTapeFix(t, b, blocktime, false) })
t.Run("after", func(t *testing.T) { testTapeFix(t, kit.MockMinerBuilder, blocktime, true) })
}
func testTapeFix(t *testing.T, b APIBuilder, blocktime time.Duration, after bool) {
func testTapeFix(t *testing.T, b kit.APIBuilder, blocktime time.Duration, after bool) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@ -38,9 +44,9 @@ func testTapeFix(t *testing.T, b APIBuilder, blocktime time.Duration, after bool
})
}
n, sn := b(t, []FullNodeOpts{{Opts: func(_ []TestNode) node.Option {
n, sn := b(t, []kit.FullNodeOpts{{Opts: func(_ []kit.TestFullNode) node.Option {
return node.Override(new(stmgr.UpgradeSchedule), upgradeSchedule)
}}}, OneMiner)
}}}, kit.OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
@ -60,7 +66,7 @@ func testTapeFix(t *testing.T, b APIBuilder, blocktime time.Duration, after bool
defer close(done)
for ctx.Err() == nil {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, MineNext); err != nil {
if err := sn[0].MineOne(ctx, kit.MineNext); err != nil {
if ctx.Err() != nil {
// context was canceled, ignore the error.
return
@ -97,5 +103,4 @@ func testTapeFix(t *testing.T, b APIBuilder, blocktime time.Duration, after bool
build.Clock.Sleep(100 * time.Millisecond)
fmt.Println("WaitSeal")
}
}

View File

@ -1,10 +1,13 @@
package test
package itests
import (
"context"
"strings"
"testing"
"time"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/itests/kit"
lapi "github.com/filecoin-project/lotus/api"
@ -13,18 +16,14 @@ import (
"github.com/filecoin-project/lotus/node/impl"
verifreg4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/verifreg"
"testing"
"time"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/chain/types"
)
func AddVerifiedClient(t *testing.T, b APIBuilder) {
func TestVerifiedClientTopUp(t *testing.T) {
test := func(nv network.Version, shouldWork bool) func(*testing.T) {
return func(t *testing.T) {
nodes, miners := b(t, []FullNodeOpts{FullNodeWithNetworkUpgradeAt(nv, -1)}, OneMiner)
nodes, miners := kit.MockMinerBuilder(t, []kit.FullNodeOpts{kit.FullNodeWithNetworkUpgradeAt(nv, -1)}, kit.OneMiner)
api := nodes[0].FullNode.(*impl.FullNodeAPI)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@ -53,9 +52,9 @@ func AddVerifiedClient(t *testing.T, b APIBuilder) {
Value: big.Zero(),
}
bm := NewBlockMiner(ctx, t, miners[0], 100*time.Millisecond)
bm.MineBlocks()
defer bm.Stop()
bm := kit.NewBlockMiner(t, miners[0])
bm.MineBlocks(ctx, 100*time.Millisecond)
t.Cleanup(bm.Stop)
sm, err := api.MpoolPushMessage(ctx, msg, nil)
if err != nil {

View File

@ -0,0 +1,458 @@
package itests
import (
"context"
"fmt"
"os"
"testing"
"time"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/dline"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors"
minerActor "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/node/impl"
proof3 "github.com/filecoin-project/specs-actors/v3/actors/runtime/proof"
"github.com/stretchr/testify/require"
)
func TestWindowPostDispute(t *testing.T) {
if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" {
t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
}
kit.QuietMiningLogs()
b := kit.MockMinerBuilder
blocktime := 2 * time.Millisecond
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// First, we configure two miners. After sealing, we're going to turn off the first miner so
// it doesn't submit proofs.
//
// Then we're going to manually submit bad proofs.
n, sn := b(t,
[]kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(-1)},
[]kit.StorageMiner{
{Full: 0, Preseal: kit.PresealGenesis},
{Full: 0},
})
client := n[0].FullNode.(*impl.FullNodeAPI)
chainMiner := sn[0]
evilMiner := sn[1]
{
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := chainMiner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
if err := evilMiner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
}
defaultFrom, err := client.WalletDefaultAddress(ctx)
require.NoError(t, err)
build.Clock.Sleep(time.Second)
// Mine with the chain miner (the good one).
done := make(chan struct{})
go func() {
defer close(done)
for ctx.Err() == nil {
build.Clock.Sleep(blocktime)
if err := chainMiner.MineOne(ctx, kit.MineNext); err != nil {
if ctx.Err() != nil {
// context was canceled, ignore the error.
return
}
t.Error(err)
}
}
}()
defer func() {
cancel()
<-done
}()
// Give the chain miner enough sectors to win every block.
kit.PledgeSectors(t, ctx, chainMiner, 10, 0, nil)
// And the evil one 1 sector. No cookie for you.
kit.PledgeSectors(t, ctx, evilMiner, 1, 0, nil)
// Let the evil miner's sectors gain power.
evilMinerAddr, err := evilMiner.ActorAddress(ctx)
require.NoError(t, err)
di, err := client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK)
require.NoError(t, err)
fmt.Printf("Running one proving period\n")
fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod*2)
for {
head, err := client.ChainHead(ctx)
require.NoError(t, err)
if head.Height() > di.PeriodStart+di.WPoStProvingPeriod*2 {
fmt.Printf("Now head.Height = %d\n", head.Height())
break
}
build.Clock.Sleep(blocktime)
}
p, err := client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK)
require.NoError(t, err)
ssz, err := evilMiner.ActorSectorSize(ctx, evilMinerAddr)
require.NoError(t, err)
// make sure it has gained power.
require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)))
evilSectors, err := evilMiner.SectorsList(ctx)
require.NoError(t, err)
evilSectorNo := evilSectors[0] // only one.
evilSectorLoc, err := client.StateSectorPartition(ctx, evilMinerAddr, evilSectorNo, types.EmptyTSK)
require.NoError(t, err)
fmt.Println("evil miner stopping")
// Now stop the evil miner, and start manually submitting bad proofs.
require.NoError(t, evilMiner.Stop(ctx))
fmt.Println("evil miner stopped")
// Wait until we need to prove our sector.
for {
di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK)
require.NoError(t, err)
if di.Index == evilSectorLoc.Deadline && di.CurrentEpoch-di.PeriodStart > 1 {
break
}
build.Clock.Sleep(blocktime)
}
err = submitBadProof(ctx, client, evilMinerAddr, di, evilSectorLoc.Deadline, evilSectorLoc.Partition)
require.NoError(t, err, "evil proof not accepted")
// Wait until after the proving period.
for {
di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK)
require.NoError(t, err)
if di.Index != evilSectorLoc.Deadline {
break
}
build.Clock.Sleep(blocktime)
}
fmt.Println("accepted evil proof")
// Make sure the evil node didn't lose any power.
p, err = client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK)
require.NoError(t, err)
require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)))
// OBJECTION! The good miner files a DISPUTE!!!!
{
params := &minerActor.DisputeWindowedPoStParams{
Deadline: evilSectorLoc.Deadline,
PoStIndex: 0,
}
enc, aerr := actors.SerializeParams(params)
require.NoError(t, aerr)
msg := &types.Message{
To: evilMinerAddr,
Method: minerActor.Methods.DisputeWindowedPoSt,
Params: enc,
Value: types.NewInt(0),
From: defaultFrom,
}
sm, err := client.MpoolPushMessage(ctx, msg, nil)
require.NoError(t, err)
fmt.Println("waiting dispute")
rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
require.NoError(t, err)
require.Zero(t, rec.Receipt.ExitCode, "dispute not accepted: %s", rec.Receipt.ExitCode.Error())
}
// Objection SUSTAINED!
// Make sure the evil node lost power.
p, err = client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK)
require.NoError(t, err)
require.True(t, p.MinerPower.RawBytePower.IsZero())
// Now we begin the redemption arc.
// First, recover the sector.
{
minerInfo, err := client.StateMinerInfo(ctx, evilMinerAddr, types.EmptyTSK)
require.NoError(t, err)
params := &minerActor.DeclareFaultsRecoveredParams{
Recoveries: []minerActor.RecoveryDeclaration{{
Deadline: evilSectorLoc.Deadline,
Partition: evilSectorLoc.Partition,
Sectors: bitfield.NewFromSet([]uint64{uint64(evilSectorNo)}),
}},
}
enc, aerr := actors.SerializeParams(params)
require.NoError(t, aerr)
msg := &types.Message{
To: evilMinerAddr,
Method: minerActor.Methods.DeclareFaultsRecovered,
Params: enc,
Value: types.FromFil(30), // repay debt.
From: minerInfo.Owner,
}
sm, err := client.MpoolPushMessage(ctx, msg, nil)
require.NoError(t, err)
rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
require.NoError(t, err)
require.Zero(t, rec.Receipt.ExitCode, "recovery not accepted: %s", rec.Receipt.ExitCode.Error())
}
// Then wait for the deadline.
for {
di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK)
require.NoError(t, err)
if di.Index == evilSectorLoc.Deadline {
break
}
build.Clock.Sleep(blocktime)
}
// Now try to be evil again
err = submitBadProof(ctx, client, evilMinerAddr, di, evilSectorLoc.Deadline, evilSectorLoc.Partition)
require.Error(t, err)
require.Contains(t, err.Error(), "message execution failed: exit 16, reason: window post failed: invalid PoSt")
// It didn't work because we're recovering.
}
func TestWindowPostDisputeFails(t *testing.T) {
if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" {
t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
}
kit.QuietMiningLogs()
b := kit.MockMinerBuilder
blocktime := 2 * time.Millisecond
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n, sn := b(t, []kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(-1)}, kit.OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
{
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
}
defaultFrom, err := client.WalletDefaultAddress(ctx)
require.NoError(t, err)
maddr, err := miner.ActorAddress(ctx)
require.NoError(t, err)
build.Clock.Sleep(time.Second)
// Mine with the only miner in this test.
done := make(chan struct{})
go func() {
defer close(done)
for ctx.Err() == nil {
build.Clock.Sleep(blocktime)
if err := miner.MineOne(ctx, kit.MineNext); err != nil {
if ctx.Err() != nil {
// context was canceled, ignore the error.
return
}
t.Error(err)
}
}
}()
defer func() {
cancel()
<-done
}()
kit.PledgeSectors(t, ctx, miner, 10, 0, nil)
di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
fmt.Printf("Running one proving period\n")
fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod*2)
for {
head, err := client.ChainHead(ctx)
require.NoError(t, err)
if head.Height() > di.PeriodStart+di.WPoStProvingPeriod*2 {
fmt.Printf("Now head.Height = %d\n", head.Height())
break
}
build.Clock.Sleep(blocktime)
}
ssz, err := miner.ActorSectorSize(ctx, maddr)
require.NoError(t, err)
expectedPower := types.NewInt(uint64(ssz) * (kit.GenesisPreseals + 10))
p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
// make sure it has gained power.
require.Equal(t, p.MinerPower.RawBytePower, expectedPower)
// Wait until a proof has been submitted.
var targetDeadline uint64
waitForProof:
for {
deadlines, err := client.StateMinerDeadlines(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
for dlIdx, dl := range deadlines {
empty, err := dl.PostSubmissions.IsEmpty()
require.NoError(t, err)
// Break once this deadline actually has a PoSt submission recorded.
if !empty {
targetDeadline = uint64(dlIdx)
break waitForProof
}
}
build.Clock.Sleep(blocktime)
}
for {
di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
// wait until the deadline finishes.
if di.Index == ((targetDeadline + 1) % di.WPoStPeriodDeadlines) {
break
}
build.Clock.Sleep(blocktime)
}
// Try to object to the proof. This should fail.
{
params := &minerActor.DisputeWindowedPoStParams{
Deadline: targetDeadline,
PoStIndex: 0,
}
enc, aerr := actors.SerializeParams(params)
require.NoError(t, aerr)
msg := &types.Message{
To: maddr,
Method: minerActor.Methods.DisputeWindowedPoSt,
Params: enc,
Value: types.NewInt(0),
From: defaultFrom,
}
_, err := client.MpoolPushMessage(ctx, msg, nil)
require.Error(t, err)
require.Contains(t, err.Error(), "failed to dispute valid post (RetCode=16)")
}
}
func submitBadProof(
ctx context.Context,
client api.FullNode, maddr address.Address,
di *dline.Info, dlIdx, partIdx uint64,
) error {
head, err := client.ChainHead(ctx)
if err != nil {
return err
}
from, err := client.WalletDefaultAddress(ctx)
if err != nil {
return err
}
minerInfo, err := client.StateMinerInfo(ctx, maddr, head.Key())
if err != nil {
return err
}
commEpoch := di.Open
commRand, err := client.ChainGetRandomnessFromTickets(
ctx, head.Key(), crypto.DomainSeparationTag_PoStChainCommit,
commEpoch, nil,
)
if err != nil {
return err
}
params := &minerActor.SubmitWindowedPoStParams{
ChainCommitEpoch: commEpoch,
ChainCommitRand: commRand,
Deadline: dlIdx,
Partitions: []minerActor.PoStPartition{{Index: partIdx}},
Proofs: []proof3.PoStProof{{
PoStProof: minerInfo.WindowPoStProofType,
ProofBytes: []byte("I'm soooo very evil."),
}},
}
enc, aerr := actors.SerializeParams(params)
if aerr != nil {
return aerr
}
msg := &types.Message{
To: maddr,
Method: minerActor.Methods.SubmitWindowedPoSt,
Params: enc,
Value: types.NewInt(0),
From: from,
}
sm, err := client.MpoolPushMessage(ctx, msg, nil)
if err != nil {
return err
}
rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
if err != nil {
return err
}
if rec.Receipt.ExitCode.IsError() {
return rec.Receipt.ExitCode
}
return nil
}

374
itests/wdpost_test.go Normal file
View File

@ -0,0 +1,374 @@
package itests
import (
"context"
"fmt"
"os"
"testing"
"time"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/extern/sector-storage/mock"
"github.com/filecoin-project/specs-storage/storage"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/node/impl"
)
func TestWindowedPost(t *testing.T) {
if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" {
t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
}
kit.QuietMiningLogs()
var (
blocktime = 2 * time.Millisecond
nSectors = 10
)
for _, height := range []abi.ChainEpoch{
-1, // before
162, // while sealing
5000, // while proving
} {
height := height // copy to satisfy lints
t.Run(fmt.Sprintf("upgrade-%d", height), func(t *testing.T) {
testWindowPostUpgrade(t, kit.MockMinerBuilder, blocktime, nSectors, height)
})
}
}
func testWindowPostUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Duration, nSectors int, upgradeHeight abi.ChainEpoch) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n, sn := b(t, []kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(upgradeHeight)}, kit.OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
build.Clock.Sleep(time.Second)
done := make(chan struct{})
go func() {
defer close(done)
for ctx.Err() == nil {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, kit.MineNext); err != nil {
if ctx.Err() != nil {
// context was canceled, ignore the error.
return
}
t.Error(err)
}
}
}()
defer func() {
cancel()
<-done
}()
kit.PledgeSectors(t, ctx, miner, nSectors, 0, nil)
maddr, err := miner.ActorAddress(ctx)
require.NoError(t, err)
di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
mid, err := address.IDFromAddress(maddr)
require.NoError(t, err)
fmt.Printf("Running one proving period\n")
fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2)
for {
head, err := client.ChainHead(ctx)
require.NoError(t, err)
if head.Height() > di.PeriodStart+di.WPoStProvingPeriod+2 {
fmt.Printf("Now head.Height = %d\n", head.Height())
break
}
build.Clock.Sleep(blocktime)
}
p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
ssz, err := miner.ActorSectorSize(ctx, maddr)
require.NoError(t, err)
require.Equal(t, p.MinerPower, p.TotalPower)
require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*uint64(nSectors+kit.GenesisPreseals)))
fmt.Printf("Drop some sectors\n")
// Drop 2 sectors from deadline 2 partition 0 (full partition / deadline)
{
parts, err := client.StateMinerPartitions(ctx, maddr, 2, types.EmptyTSK)
require.NoError(t, err)
require.Greater(t, len(parts), 0)
secs := parts[0].AllSectors
n, err := secs.Count()
require.NoError(t, err)
require.Equal(t, uint64(2), n)
// Drop the partition
err = secs.ForEach(func(sid uint64) error {
return miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkCorrupted(storage.SectorRef{
ID: abi.SectorID{
Miner: abi.ActorID(mid),
Number: abi.SectorNumber(sid),
},
}, true)
})
require.NoError(t, err)
}
var s storage.SectorRef
// Drop 1 sectors from deadline 3 partition 0
{
parts, err := client.StateMinerPartitions(ctx, maddr, 3, types.EmptyTSK)
require.NoError(t, err)
require.Greater(t, len(parts), 0)
secs := parts[0].AllSectors
n, err := secs.Count()
require.NoError(t, err)
require.Equal(t, uint64(2), n)
// Drop the sector
sn, err := secs.First()
require.NoError(t, err)
all, err := secs.All(2)
require.NoError(t, err)
fmt.Println("the sectors", all)
s = storage.SectorRef{
ID: abi.SectorID{
Miner: abi.ActorID(mid),
Number: abi.SectorNumber(sn),
},
}
err = miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkFailed(s, true)
require.NoError(t, err)
}
di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
fmt.Printf("Go through another PP, wait for sectors to become faulty\n")
fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2)
for {
head, err := client.ChainHead(ctx)
require.NoError(t, err)
if head.Height() > di.PeriodStart+(di.WPoStProvingPeriod)+2 {
fmt.Printf("Now head.Height = %d\n", head.Height())
break
}
build.Clock.Sleep(blocktime)
}
p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
require.Equal(t, p.MinerPower, p.TotalPower)
sectors := p.MinerPower.RawBytePower.Uint64() / uint64(ssz)
require.Equal(t, nSectors+kit.GenesisPreseals-3, int(sectors)) // minus the 3 sectors we just dropped
fmt.Printf("Recover one sector\n")
err = miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkFailed(s, false)
require.NoError(t, err)
di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2)
for {
head, err := client.ChainHead(ctx)
require.NoError(t, err)
if head.Height() > di.PeriodStart+di.WPoStProvingPeriod+2 {
fmt.Printf("Now head.Height = %d\n", head.Height())
break
}
build.Clock.Sleep(blocktime)
}
p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
require.Equal(t, p.MinerPower, p.TotalPower)
sectors = p.MinerPower.RawBytePower.Uint64() / uint64(ssz)
require.Equal(t, nSectors+kit.GenesisPreseals-2, int(sectors)) // minus the 2 sectors still not recovered
// pledge a sector after recovery
kit.PledgeSectors(t, ctx, miner, 1, nSectors, nil)
{
// Wait until proven.
di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
waitUntil := di.PeriodStart + di.WPoStProvingPeriod + 2
fmt.Printf("End for head.Height > %d\n", waitUntil)
for {
head, err := client.ChainHead(ctx)
require.NoError(t, err)
if head.Height() > waitUntil {
fmt.Printf("Now head.Height = %d\n", head.Height())
break
}
}
}
p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
require.Equal(t, p.MinerPower, p.TotalPower)
sectors = p.MinerPower.RawBytePower.Uint64() / uint64(ssz)
require.Equal(t, nSectors+kit.GenesisPreseals-2+1, int(sectors)) // minus the 2 unrecovered sectors, plus the 1 just pledged
}
func TestWindowPostBaseFeeNoBurn(t *testing.T) {
if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" {
t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
}
kit.QuietMiningLogs()
var (
blocktime = 2 * time.Millisecond
nSectors = 10
)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
och := build.UpgradeClausHeight
build.UpgradeClausHeight = 10
n, sn := kit.MockMinerBuilder(t, kit.DefaultFullOpts(1), kit.OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
bm := kit.ConnectAndStartMining(t, blocktime, miner, client)
t.Cleanup(bm.Stop)
maddr, err := miner.ActorAddress(ctx)
require.NoError(t, err)
mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
kit.PledgeSectors(t, ctx, miner, nSectors, 0, nil)
wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
require.NoError(t, err)
en := wact.Nonce
// wait for a new message to be sent from worker address, it will be a PoSt
waitForProof:
for {
wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
require.NoError(t, err)
if wact.Nonce > en {
break waitForProof
}
build.Clock.Sleep(blocktime)
}
slm, err := client.StateListMessages(ctx, &api.MessageMatch{To: maddr}, types.EmptyTSK, 0)
require.NoError(t, err)
pmr, err := client.StateReplay(ctx, types.EmptyTSK, slm[0])
require.NoError(t, err)
require.Equal(t, pmr.GasCost.BaseFeeBurn, big.Zero())
build.UpgradeClausHeight = och
}
func TestWindowPostBaseFeeBurn(t *testing.T) {
if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" {
t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
}
kit.QuietMiningLogs()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
blocktime := 2 * time.Millisecond
n, sn := kit.MockMinerBuilder(t, []kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(-1)}, kit.OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
bm := kit.ConnectAndStartMining(t, blocktime, miner, client)
t.Cleanup(bm.Stop)
maddr, err := miner.ActorAddress(ctx)
require.NoError(t, err)
mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
kit.PledgeSectors(t, ctx, miner, 10, 0, nil)
wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
require.NoError(t, err)
en := wact.Nonce
// wait for a new message to be sent from worker address, it will be a PoSt
waitForProof:
for {
wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
require.NoError(t, err)
if wact.Nonce > en {
break waitForProof
}
build.Clock.Sleep(blocktime)
}
slm, err := client.StateListMessages(ctx, &api.MessageMatch{To: maddr}, types.EmptyTSK, 0)
require.NoError(t, err)
pmr, err := client.StateReplay(ctx, types.EmptyTSK, slm[0])
require.NoError(t, err)
require.NotEqual(t, pmr.GasCost.BaseFeeBurn, big.Zero())
}

View File

@ -6,6 +6,8 @@ import (
"github.com/ipfs/go-cid"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
@ -122,7 +124,7 @@ type SealingConfig struct {
MinPreCommitBatch int
// how long to wait before submitting a batch after crossing the minimum batch size
PreCommitBatchWait Duration
// time buffer for forceful batch submission before sectors in batch would start expiring
// time buffer for forceful batch submission before sectors/deals in batch would start expiring
PreCommitBatchSlack Duration
// enable / disable commit aggregation (takes effect after nv13)
@ -132,7 +134,7 @@ type SealingConfig struct {
MaxCommitBatch int
// how long to wait before submitting a batch after crossing the minimum batch size
CommitBatchWait Duration
// time buffer for forceful batch submission before sectors in batch would start expiring
// time buffer for forceful batch submission before sectors/deals in batch would start expiring
CommitBatchSlack Duration
TerminateBatchMax uint64
@ -145,9 +147,23 @@ type SealingConfig struct {
// todo TargetSectors - stop auto-pledging new sectors after this many sectors are sealed, default CC upgrade for deals sectors if above
}
type BatchFeeConfig struct {
Base types.FIL
PerSector types.FIL
}
func (b *BatchFeeConfig) FeeForSectors(nSectors int) abi.TokenAmount {
return big.Add(big.Int(b.Base), big.Mul(big.NewInt(int64(nSectors)), big.Int(b.PerSector)))
}
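// For illustration, with the defaults below (Base = 0.025 FIL, PerSector =
// 0.025 FIL), a 10-sector precommit batch caps the message fee at
// 0.025 + 10*0.025 = 0.275 FIL.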
type MinerFeeConfig struct {
MaxPreCommitGasFee types.FIL
MaxCommitGasFee types.FIL
MaxPreCommitGasFee types.FIL
MaxCommitGasFee types.FIL
// maxBatchFee = maxBase + maxPerSector * nSectors
MaxPreCommitBatchGasFee BatchFeeConfig
MaxCommitBatchGasFee BatchFeeConfig
MaxTerminateGasFee types.FIL
MaxWindowPoStGasFee types.FIL
MaxPublishDealsFee types.FIL
@ -296,16 +312,16 @@ func DefaultStorageMiner() *StorageMiner {
AlwaysKeepUnsealedCopy: true,
BatchPreCommits: true,
MinPreCommitBatch: 1, // we must have at least one proof to aggregate
MaxPreCommitBatch: miner5.PreCommitSectorBatchMaxSize, //
PreCommitBatchWait: Duration(24 * time.Hour), // this can be up to 6 days
PreCommitBatchSlack: Duration(3 * time.Hour),
MinPreCommitBatch: 1, // we must have at least one precommit to batch
MaxPreCommitBatch: miner5.PreCommitSectorBatchMaxSize, // up to 256 sectors
PreCommitBatchWait: Duration(24 * time.Hour), // this should be less than 31.5 hours, which is the expiration of a precommit ticket
PreCommitBatchSlack: Duration(3 * time.Hour), // time buffer for forceful batch submission before sectors/deals in batch would start expiring, higher value will lower the chances for message fail due to expiration
AggregateCommits: true,
MinCommitBatch: miner5.MinAggregatedSectors, // we must have at least four proofs to aggregate
MaxCommitBatch: miner5.MaxAggregatedSectors, // this is the maximum aggregation per FIP13
CommitBatchWait: Duration(24 * time.Hour), // this can be up to 6 days
CommitBatchSlack: Duration(1 * time.Hour),
MinCommitBatch: miner5.MinAggregatedSectors, // per FIP13, we must have at least four proofs to aggregate; 4 is the crossover point where aggregation wins out over single ProveCommit gas costs
MaxCommitBatch: miner5.MaxAggregatedSectors, // maximum 819 sectors, this is the maximum aggregation per FIP13
CommitBatchWait: Duration(24 * time.Hour), // this can be up to 30 days
CommitBatchSlack: Duration(1 * time.Hour), // time buffer for forceful batch submission before sectors/deals in batch would start expiring, higher value will lower the chances for message fail due to expiration
TerminateBatchMin: 1,
TerminateBatchMax: 100,
@ -350,8 +366,18 @@ func DefaultStorageMiner() *StorageMiner {
},
Fees: MinerFeeConfig{
MaxPreCommitGasFee: types.MustParseFIL("0.025"),
MaxCommitGasFee: types.MustParseFIL("0.05"),
MaxPreCommitGasFee: types.MustParseFIL("0.025"),
MaxCommitGasFee: types.MustParseFIL("0.05"),
MaxPreCommitBatchGasFee: BatchFeeConfig{
Base: types.MustParseFIL("0.025"), // TODO: update before v1.10.0
PerSector: types.MustParseFIL("0.025"), // TODO: update before v1.10.0
},
MaxCommitBatchGasFee: BatchFeeConfig{
Base: types.MustParseFIL("0.05"), // TODO: update before v1.10.0
PerSector: types.MustParseFIL("0.05"), // TODO: update before v1.10.0
},
MaxTerminateGasFee: types.MustParseFIL("0.5"),
MaxWindowPoStGasFee: types.MustParseFIL("5"),
MaxPublishDealsFee: types.MustParseFIL("0.05"),

View File

@ -5,7 +5,7 @@ import (
"time"
"github.com/filecoin-project/go-state-types/abi"
xerrors "golang.org/x/xerrors"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/big"
"github.com/ipfs/go-cid"
@ -13,7 +13,7 @@ import (
"github.com/libp2p/go-libp2p-core/host"
inet "github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/peer"
protocol "github.com/libp2p/go-libp2p-core/protocol"
"github.com/libp2p/go-libp2p-core/protocol"
cborutil "github.com/filecoin-project/go-cbor-util"
"github.com/filecoin-project/lotus/build"
@ -23,6 +23,8 @@ import (
"github.com/filecoin-project/lotus/lib/peermgr"
)
// TODO(TEST): missing test coverage.
const ProtocolID = "/fil/hello/1.0.0"
var log = logging.Logger("hello")
@ -33,12 +35,14 @@ type HelloMessage struct {
HeaviestTipSetWeight big.Int
GenesisHash cid.Cid
}
type LatencyMessage struct {
TArrival int64
TSent int64
}
type NewStreamFunc func(context.Context, peer.ID, ...protocol.ID) (inet.Stream, error)
type Service struct {
h host.Host
@ -62,7 +66,6 @@ func NewHelloService(h host.Host, cs *store.ChainStore, syncer *chain.Syncer, pm
}
func (hs *Service) HandleStream(s inet.Stream) {
var hmsg HelloMessage
if err := cborutil.ReadCborRPC(s, &hmsg); err != nil {
log.Infow("failed to read hello message, disconnecting", "error", err)
@ -121,7 +124,6 @@ func (hs *Service) HandleStream(s inet.Stream) {
log.Debugf("Got new tipset through Hello: %s from %s", ts.Cids(), s.Conn().RemotePeer())
hs.syncer.InformNewHead(s.Conn().RemotePeer(), ts)
}
}
func (hs *Service) SayHello(ctx context.Context, pid peer.ID) error {

View File

@ -156,7 +156,7 @@ func (a *CommonAPI) NetFindPeer(ctx context.Context, p peer.ID) (peer.AddrInfo,
}
func (a *CommonAPI) NetAutoNatStatus(ctx context.Context) (i api.NatInfo, err error) {
autonat := a.RawHost.(*basichost.BasicHost).AutoNat
autonat := a.RawHost.(*basichost.BasicHost).GetAutoNat()
if autonat == nil {
return api.NatInfo{

View File

@ -1,323 +0,0 @@
package node_test
import (
"os"
"testing"
"time"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api/test"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/lib/lotuslog"
builder "github.com/filecoin-project/lotus/node/test"
logging "github.com/ipfs/go-log/v2"
)
func init() {
_ = logging.SetLogLevel("*", "INFO")
policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
}
func TestAPI(t *testing.T) {
test.TestApis(t, builder.Builder)
}
func TestAPIRPC(t *testing.T) {
test.TestApis(t, builder.RPCBuilder)
}
func TestAPIDealFlow(t *testing.T) {
logging.SetLogLevel("miner", "ERROR")
logging.SetLogLevel("chainstore", "ERROR")
logging.SetLogLevel("chain", "ERROR")
logging.SetLogLevel("sub", "ERROR")
logging.SetLogLevel("storageminer", "ERROR")
blockTime := 10 * time.Millisecond
// For these tests where the block time is artificially short, just use
// a deal start epoch that is guaranteed to be far enough in the future
// so that the deal starts sealing in time
dealStartEpoch := abi.ChainEpoch(2 << 12)
t.Run("TestDealFlow", func(t *testing.T) {
test.TestDealFlow(t, builder.MockSbBuilder, blockTime, false, false, dealStartEpoch)
})
t.Run("WithExportedCAR", func(t *testing.T) {
test.TestDealFlow(t, builder.MockSbBuilder, blockTime, true, false, dealStartEpoch)
})
t.Run("TestDoubleDealFlow", func(t *testing.T) {
test.TestDoubleDealFlow(t, builder.MockSbBuilder, blockTime, dealStartEpoch)
})
t.Run("TestFastRetrievalDealFlow", func(t *testing.T) {
test.TestFastRetrievalDealFlow(t, builder.MockSbBuilder, blockTime, dealStartEpoch)
})
t.Run("TestPublishDealsBatching", func(t *testing.T) {
test.TestPublishDealsBatching(t, builder.MockSbBuilder, blockTime, dealStartEpoch)
})
t.Run("TestOfflineDealFlow", func(t *testing.T) {
test.TestOfflineDealFlow(t, builder.MockSbBuilder, blockTime, dealStartEpoch, false)
})
t.Run("TestOfflineDealFlowFastRetrieval", func(t *testing.T) {
test.TestOfflineDealFlow(t, builder.MockSbBuilder, blockTime, dealStartEpoch, true)
})
}
func TestBatchDealInput(t *testing.T) {
logging.SetLogLevel("miner", "ERROR")
logging.SetLogLevel("chainstore", "ERROR")
logging.SetLogLevel("chain", "ERROR")
logging.SetLogLevel("sub", "ERROR")
logging.SetLogLevel("storageminer", "ERROR")
logging.SetLogLevel("sectors", "DEBUG")
blockTime := 10 * time.Millisecond
// For these tests where the block time is artificially short, just use
// a deal start epoch that is guaranteed to be far enough in the future
// so that the deal starts sealing in time
dealStartEpoch := abi.ChainEpoch(2 << 12)
test.TestBatchDealInput(t, builder.MockSbBuilder, blockTime, dealStartEpoch)
}
func TestAPIDealFlowReal(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
lotuslog.SetupLogLevels()
logging.SetLogLevel("miner", "ERROR")
logging.SetLogLevel("chainstore", "ERROR")
logging.SetLogLevel("chain", "ERROR")
logging.SetLogLevel("sub", "ERROR")
logging.SetLogLevel("storageminer", "ERROR")
// TODO: just set this globally?
oldDelay := policy.GetPreCommitChallengeDelay()
policy.SetPreCommitChallengeDelay(5)
t.Cleanup(func() {
policy.SetPreCommitChallengeDelay(oldDelay)
})
t.Run("basic", func(t *testing.T) {
test.TestDealFlow(t, builder.Builder, time.Second, false, false, 0)
})
t.Run("fast-retrieval", func(t *testing.T) {
test.TestDealFlow(t, builder.Builder, time.Second, false, true, 0)
})
t.Run("retrieval-second", func(t *testing.T) {
test.TestSecondDealRetrieval(t, builder.Builder, time.Second)
})
t.Run("zeroppb-retrieval", func(t *testing.T) {
test.TestZeroPricePerByteRetrievalDealFlow(t, builder.Builder, time.Second, 0)
})
t.Run("quote-price-for-non-unsealed-retrieval", func(t *testing.T) {
test.TestNonUnsealedRetrievalQuoteForDefaultPricing(t, builder.Builder, time.Second, 0)
})
}
func TestDealMining(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
logging.SetLogLevel("miner", "ERROR")
logging.SetLogLevel("chainstore", "ERROR")
logging.SetLogLevel("chain", "ERROR")
logging.SetLogLevel("sub", "ERROR")
logging.SetLogLevel("storageminer", "ERROR")
test.TestDealMining(t, builder.MockSbBuilder, 50*time.Millisecond, false)
}
func TestSDRUpgrade(t *testing.T) {
logging.SetLogLevel("miner", "ERROR")
logging.SetLogLevel("chainstore", "ERROR")
logging.SetLogLevel("chain", "ERROR")
logging.SetLogLevel("sub", "ERROR")
logging.SetLogLevel("storageminer", "ERROR")
oldDelay := policy.GetPreCommitChallengeDelay()
policy.SetPreCommitChallengeDelay(5)
t.Cleanup(func() {
policy.SetPreCommitChallengeDelay(oldDelay)
})
test.TestSDRUpgrade(t, builder.MockSbBuilder, 50*time.Millisecond)
}
func TestPledgeSectors(t *testing.T) {
logging.SetLogLevel("miner", "ERROR")
logging.SetLogLevel("chainstore", "ERROR")
logging.SetLogLevel("chain", "ERROR")
logging.SetLogLevel("sub", "ERROR")
logging.SetLogLevel("storageminer", "ERROR")
t.Run("1", func(t *testing.T) {
test.TestPledgeSector(t, builder.MockSbBuilder, 50*time.Millisecond, 1)
})
t.Run("100", func(t *testing.T) {
test.TestPledgeSector(t, builder.MockSbBuilder, 50*time.Millisecond, 100)
})
t.Run("1000", func(t *testing.T) {
if testing.Short() { // takes ~16s
t.Skip("skipping test in short mode")
}
test.TestPledgeSector(t, builder.MockSbBuilder, 50*time.Millisecond, 1000)
})
}
func TestPledgeBatching(t *testing.T) {
t.Run("100", func(t *testing.T) {
test.TestPledgeBatching(t, builder.MockSbBuilder, 50*time.Millisecond, 100)
})
t.Run("100-before-nv13", func(t *testing.T) {
test.TestPledgeBeforeNv13(t, builder.MockSbBuilder, 50*time.Millisecond, 100)
})
}
func TestTapeFix(t *testing.T) {
logging.SetLogLevel("miner", "ERROR")
logging.SetLogLevel("chainstore", "ERROR")
logging.SetLogLevel("chain", "ERROR")
logging.SetLogLevel("sub", "ERROR")
logging.SetLogLevel("storageminer", "ERROR")
test.TestTapeFix(t, builder.MockSbBuilder, 2*time.Millisecond)
}
func TestWindowedPost(t *testing.T) {
if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" {
t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
}
logging.SetLogLevel("miner", "ERROR")
logging.SetLogLevel("gen", "ERROR")
logging.SetLogLevel("chainstore", "ERROR")
logging.SetLogLevel("chain", "ERROR")
logging.SetLogLevel("sub", "ERROR")
logging.SetLogLevel("storageminer", "ERROR")
test.TestWindowPost(t, builder.MockSbBuilder, 2*time.Millisecond, 10)
}
func TestTerminate(t *testing.T) {
if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" {
t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
}
logging.SetLogLevel("miner", "ERROR")
logging.SetLogLevel("chainstore", "ERROR")
logging.SetLogLevel("chain", "ERROR")
logging.SetLogLevel("sub", "ERROR")
logging.SetLogLevel("storageminer", "ERROR")
test.TestTerminate(t, builder.MockSbBuilder, 2*time.Millisecond)
}
func TestCCUpgrade(t *testing.T) {
logging.SetLogLevel("miner", "ERROR")
logging.SetLogLevel("chainstore", "ERROR")
logging.SetLogLevel("chain", "ERROR")
logging.SetLogLevel("sub", "ERROR")
logging.SetLogLevel("storageminer", "ERROR")
test.TestCCUpgrade(t, builder.MockSbBuilder, 5*time.Millisecond)
}
func TestPaymentChannels(t *testing.T) {
logging.SetLogLevel("miner", "ERROR")
logging.SetLogLevel("chainstore", "ERROR")
logging.SetLogLevel("chain", "ERROR")
logging.SetLogLevel("sub", "ERROR")
logging.SetLogLevel("pubsub", "ERROR")
logging.SetLogLevel("storageminer", "ERROR")
test.TestPaymentChannels(t, builder.MockSbBuilder, 5*time.Millisecond)
}
func TestWindowPostDispute(t *testing.T) {
if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" {
t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
}
logging.SetLogLevel("miner", "ERROR")
logging.SetLogLevel("gen", "ERROR")
logging.SetLogLevel("chainstore", "ERROR")
logging.SetLogLevel("chain", "ERROR")
logging.SetLogLevel("sub", "ERROR")
logging.SetLogLevel("storageminer", "ERROR")
test.TestWindowPostDispute(t, builder.MockSbBuilder, 2*time.Millisecond)
}
func TestWindowPostDisputeFails(t *testing.T) {
if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" {
t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
}
logging.SetLogLevel("miner", "ERROR")
logging.SetLogLevel("gen", "ERROR")
logging.SetLogLevel("chainstore", "ERROR")
logging.SetLogLevel("chain", "ERROR")
logging.SetLogLevel("sub", "ERROR")
logging.SetLogLevel("storageminer", "ERROR")
test.TestWindowPostDisputeFails(t, builder.MockSbBuilder, 2*time.Millisecond)
}
func TestWindowPostBaseFeeNoBurn(t *testing.T) {
if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" {
t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
}
logging.SetLogLevel("miner", "ERROR")
logging.SetLogLevel("gen", "ERROR")
logging.SetLogLevel("chainstore", "ERROR")
logging.SetLogLevel("chain", "ERROR")
logging.SetLogLevel("sub", "ERROR")
logging.SetLogLevel("storageminer", "ERROR")
test.TestWindowPostBaseFeeNoBurn(t, builder.MockSbBuilder, 2*time.Millisecond)
}
func TestWindowPostBaseFeeBurn(t *testing.T) {
if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" {
t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
}
logging.SetLogLevel("miner", "ERROR")
logging.SetLogLevel("gen", "ERROR")
logging.SetLogLevel("chainstore", "ERROR")
logging.SetLogLevel("chain", "ERROR")
logging.SetLogLevel("sub", "ERROR")
logging.SetLogLevel("storageminer", "ERROR")
test.TestWindowPostBaseFeeBurn(t, builder.MockSbBuilder, 2*time.Millisecond)
}
func TestDeadlineToggling(t *testing.T) {
if os.Getenv("LOTUS_TEST_DEADLINE_TOGGLING") != "1" {
t.Skip("this takes a few minutes, set LOTUS_TEST_DEADLINE_TOGGLING=1 to run")
}
logging.SetLogLevel("miner", "ERROR")
logging.SetLogLevel("gen", "ERROR")
logging.SetLogLevel("chainstore", "ERROR")
logging.SetLogLevel("chain", "ERROR")
logging.SetLogLevel("sub", "ERROR")
logging.SetLogLevel("storageminer", "FATAL")
test.TestDeadlineToggling(t, builder.MockSbBuilder, 2*time.Millisecond)
}
func TestVerifiedClientTopUp(t *testing.T) {
logging.SetLogLevel("storageminer", "FATAL")
logging.SetLogLevel("chain", "ERROR")
test.AddVerifiedClient(t, builder.MockSbBuilder)
}

193
node/rpc.go Normal file
View File

@ -0,0 +1,193 @@
package node
import (
"context"
"encoding/json"
"net"
"net/http"
_ "net/http/pprof"
"runtime"
"strconv"
"github.com/gorilla/mux"
"github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log/v2"
"github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
"go.opencensus.io/tag"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-jsonrpc"
"github.com/filecoin-project/go-jsonrpc/auth"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/v0api"
"github.com/filecoin-project/lotus/api/v1api"
"github.com/filecoin-project/lotus/metrics"
"github.com/filecoin-project/lotus/node/impl"
)
var rpclog = logging.Logger("rpc")
// ServeRPC serves an HTTP handler over the supplied listen multiaddr.
//
// This function spawns a goroutine to run the server, and returns immediately.
// It returns the stop function to be called to terminate the endpoint.
//
// The supplied ID is used in tracing, by inserting a tag in the context.
func ServeRPC(h http.Handler, id string, addr multiaddr.Multiaddr) (StopFunc, error) {
// Start listening to the addr; if invalid or occupied, we will fail early.
lst, err := manet.Listen(addr)
if err != nil {
return nil, xerrors.Errorf("could not listen: %w", err)
}
// Instantiate the server and start listening.
srv := &http.Server{
Handler: h,
BaseContext: func(listener net.Listener) context.Context {
ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, id))
return ctx
},
}
go func() {
// Use a goroutine-local err to avoid a data race with the outer err.
if err := srv.Serve(manet.NetListener(lst)); err != http.ErrServerClosed {
rpclog.Warnf("rpc server failed: %s", err)
}
}()
return srv.Shutdown, nil
}
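// A minimal usage sketch (handler construction and the listen address are
// assumed; see FullNodeHandler below):
//
//	handler, _ := FullNodeHandler(fullNodeAPI, true)
//	maddr, _ := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/1234")
//	stop, err := ServeRPC(handler, "lotus-daemon", maddr)
//	// ... handle err ...
//	defer stop(context.Background()) // StopFunc wraps srv.Shutdown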
// FullNodeHandler returns a full node handler, to be mounted as-is on the server.
func FullNodeHandler(a v1api.FullNode, permissioned bool, opts ...jsonrpc.ServerOption) (http.Handler, error) {
m := mux.NewRouter()
serveRpc := func(path string, hnd interface{}) {
rpcServer := jsonrpc.NewServer(opts...)
rpcServer.Register("Filecoin", hnd)
var handler http.Handler = rpcServer
if permissioned {
handler = &auth.Handler{Verify: a.AuthVerify, Next: rpcServer.ServeHTTP}
}
m.Handle(path, handler)
}
fnapi := metrics.MetricedFullAPI(a)
if permissioned {
fnapi = api.PermissionedFullAPI(fnapi)
}
serveRpc("/rpc/v1", fnapi)
serveRpc("/rpc/v0", &v0api.WrapperV1Full{FullNode: fnapi})
// Import handler
handleImportFunc := handleImport(a.(*impl.FullNodeAPI))
if permissioned {
importAH := &auth.Handler{
Verify: a.AuthVerify,
Next: handleImportFunc,
}
m.Handle("/rest/v0/import", importAH)
} else {
m.HandleFunc("/rest/v0/import", handleImportFunc)
}
// debugging
m.Handle("/debug/metrics", metrics.Exporter())
m.Handle("/debug/pprof-set/block", handleFractionOpt("BlockProfileRate", runtime.SetBlockProfileRate))
m.Handle("/debug/pprof-set/mutex", handleFractionOpt("MutexProfileFraction", func(x int) {
runtime.SetMutexProfileFraction(x)
}))
m.PathPrefix("/").Handler(http.DefaultServeMux) // pprof
return m, nil
}
// MinerHandler returns a miner handler, to be mounted as-is on the server.
func MinerHandler(a api.StorageMiner, permissioned bool) (http.Handler, error) {
m := mux.NewRouter()
mapi := metrics.MetricedStorMinerAPI(a)
if permissioned {
mapi = api.PermissionedStorMinerAPI(mapi)
}
rpcServer := jsonrpc.NewServer()
rpcServer.Register("Filecoin", mapi)
m.Handle("/rpc/v0", rpcServer)
m.PathPrefix("/remote").HandlerFunc(a.(*impl.StorageMinerAPI).ServeRemote)
// debugging
m.Handle("/debug/metrics", metrics.Exporter())
m.PathPrefix("/").Handler(http.DefaultServeMux) // pprof
if !permissioned {
return rpcServer, nil
}
ah := &auth.Handler{
Verify: a.AuthVerify,
Next: m.ServeHTTP,
}
return ah, nil
}
func handleImport(a *impl.FullNodeAPI) func(w http.ResponseWriter, r *http.Request) {
return func(w http.ResponseWriter, r *http.Request) {
if r.Method != "PUT" {
w.WriteHeader(404)
return
}
if !auth.HasPerm(r.Context(), nil, api.PermWrite) {
w.WriteHeader(401)
_ = json.NewEncoder(w).Encode(struct{ Error string }{"unauthorized: missing write permission"})
return
}
c, err := a.ClientImportLocal(r.Context(), r.Body)
if err != nil {
w.WriteHeader(500)
_ = json.NewEncoder(w).Encode(struct{ Error string }{err.Error()})
return
}
w.WriteHeader(200)
err = json.NewEncoder(w).Encode(struct{ Cid cid.Cid }{c})
if err != nil {
rpclog.Errorf("/rest/v0/import: Writing response failed: %+v", err)
return
}
}
}
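// A hypothetical invocation (assumes a JWT with write permission in $TOKEN
// and the API server on 127.0.0.1:1234):
//
//	curl -X PUT -H "Authorization: Bearer $TOKEN" \
//	  --data-binary @data.car http://127.0.0.1:1234/rest/v0/import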
func handleFractionOpt(name string, setter func(int)) http.HandlerFunc {
return func(rw http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
http.Error(rw, "only POST allowed", http.StatusMethodNotAllowed)
return
}
if err := r.ParseForm(); err != nil {
http.Error(rw, err.Error(), http.StatusBadRequest)
return
}
asfr := r.Form.Get("x")
if len(asfr) == 0 {
http.Error(rw, "parameter 'x' must be set", http.StatusBadRequest)
return
}
fr, err := strconv.Atoi(asfr)
if err != nil {
http.Error(rw, err.Error(), http.StatusBadRequest)
return
}
rpclog.Infof("setting %s to %d", name, fr)
setter(fr)
}
}
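// For example, to set the block profile rate on a local node (an assumed
// invocation, not part of this change):
//
//	curl -X POST -d x=5 http://127.0.0.1:1234/debug/pprof-set/block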

56
node/shutdown.go Normal file
View File

@ -0,0 +1,56 @@
package node
import (
"context"
"os"
"os/signal"
"syscall"
)
type ShutdownHandler struct {
Component string
StopFunc StopFunc
}
// MonitorShutdown manages shutdown requests by watching signals and invoking
// the supplied handlers in order.
//
// It watches SIGTERM and SIGINT OS signals, as well as the trigger channel.
// When any of them fire, it calls the supplied handlers in order. If any of
// them errors, it merely logs the error.
//
// Once the shutdown has completed, it closes the returned channel. The caller
// can watch this channel to know when shutdown has completed.
func MonitorShutdown(triggerCh <-chan struct{}, handlers ...ShutdownHandler) <-chan struct{} {
sigCh := make(chan os.Signal, 2)
out := make(chan struct{})
go func() {
select {
case sig := <-sigCh:
log.Warnw("received shutdown", "signal", sig)
case <-triggerCh:
log.Warn("received shutdown")
}
log.Warn("Shutting down...")
// Call all the handlers, logging on failure and success.
for _, h := range handlers {
if err := h.StopFunc(context.TODO()); err != nil {
log.Errorf("shutting down %s failed: %s", h.Component, err)
continue
}
log.Infof("%s shut down successfully ", h.Component)
}
log.Warn("Graceful shutdown successful")
// Sync all loggers.
_ = log.Sync() //nolint:errcheck
close(out)
}()
signal.Notify(sigCh, syscall.SIGTERM, syscall.SIGINT)
return out
}
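// Typical wiring (a sketch; stopRPC and stopNode are assumed StopFuncs):
//
//	finished := MonitorShutdown(shutdownChan,
//		ShutdownHandler{Component: "rpc", StopFunc: stopRPC},
//		ShutdownHandler{Component: "node", StopFunc: stopNode},
//	)
//	<-finished // closed once every handler has run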

36
node/shutdown_test.go Normal file
View File

@ -0,0 +1,36 @@
package node
import (
"context"
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
)
func TestMonitorShutdown(t *testing.T) {
signalCh := make(chan struct{})
// Three shutdown handlers.
var wg sync.WaitGroup
wg.Add(3)
h := ShutdownHandler{
Component: "handler",
StopFunc: func(_ context.Context) error {
wg.Done()
return nil
},
}
finishCh := MonitorShutdown(signalCh, h, h, h)
// Nothing here after 10ms.
time.Sleep(10 * time.Millisecond)
require.Len(t, finishCh, 0)
// Now trigger the shutdown.
close(signalCh)
wg.Wait()
<-finishCh
}

View File

@ -96,6 +96,7 @@ func (pcs *paymentChannelSettler) messageHandler(msg *types.Message, rec *types.
msgLookup, err := pcs.api.StateWaitMsg(pcs.ctx, submitMessageCID, build.MessageConfidence, api.LookbackNoLimit, true)
if err != nil {
log.Errorf("submitting voucher: %s", err.Error())
return
}
if msgLookup.Receipt.ExitCode != 0 {
log.Errorf("failed submitting voucher: %+v", voucher)

View File

@ -360,6 +360,20 @@ func (s SealingAPIAdapter) ChainHead(ctx context.Context) (sealing.TipSetToken,
return head.Key().Bytes(), head.Height(), nil
}
func (s SealingAPIAdapter) ChainBaseFee(ctx context.Context, tok sealing.TipSetToken) (abi.TokenAmount, error) {
tsk, err := types.TipSetKeyFromBytes(tok)
if err != nil {
return big.Zero(), err
}
ts, err := s.delegate.ChainGetTipSet(ctx, tsk)
if err != nil {
return big.Zero(), err
}
return ts.Blocks()[0].ParentBaseFee, nil
}
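// All blocks in a tipset share the same parent base fee, so reading it from
// the first block is sufficient.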
func (s SealingAPIAdapter) ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) {
return s.delegate.ChainGetMessage(ctx, mc)
}

View File

@ -171,12 +171,6 @@ func (m *Miner) Run(ctx context.Context) error {
return xerrors.Errorf("getting miner info: %w", err)
}
fc := sealing.FeeConfig{
MaxPreCommitGasFee: abi.TokenAmount(m.feeCfg.MaxPreCommitGasFee),
MaxCommitGasFee: abi.TokenAmount(m.feeCfg.MaxCommitGasFee),
MaxTerminateGasFee: abi.TokenAmount(m.feeCfg.MaxTerminateGasFee),
}
var (
// consumer of chain head changes.
evts = events.NewEvents(ctx, m.api)
@ -205,7 +199,7 @@ func (m *Miner) Run(ctx context.Context) error {
)
// Instantiate the sealing FSM.
m.sealing = sealing.New(adaptedAPI, fc, evtsAdapter, m.maddr, m.ds, m.sealer, m.sc, m.verif, m.prover, &pcp, cfg, m.handleSealingNotifications, as)
m.sealing = sealing.New(adaptedAPI, m.feeCfg, evtsAdapter, m.maddr, m.ds, m.sealer, m.sc, m.verif, m.prover, &pcp, cfg, m.handleSealingNotifications, as)
// Run the sealing FSM.
go m.sealing.Run(ctx) //nolint:errcheck // logged intside the function