test: actors: manual CC onboarding and proving integration test (#12017)
* remove client CLI
* remove markets CLI from miner
* remove markets from all CLI
* remove client API
* update go mod
* remove EnableMarkets flag
* remove market subsystem
* remove dagstore
* remove index provider
* remove graphsync and data-transfer
* remove markets
* go mod tidy
* fix cbor gen deps
* remove deal making from config
* remove eol alert
* go mod tidy
* changes as per review
* make gen
* changes as per review
* test: actors: manual CC onboarding and proving integration test
* test: actors: manual CC onboarding itest with real proofs
* test: actors: fix lint issue, require proofs in CI
* test: actors: rename real proofs test, fix dispute window wait
* feat: add TestUnmanagedMiner in the itest kit for non-storage managed miners
* feat: test: improve UnmanagedMiner test harness
* feat: test: MineBlocksMustPost can watch for >1 miners (#12063)
* feat: test: MineBlocksMustPost can watch for >1 miners
* feat: test: wait for both sectors at the end of test
* feat: test: minor manual onboarding test fixups and speed up
* feat: test: handle case where miners have close deadline ends
* Implement snap deals test for manual sector onboarding (#12066)
* changes as per review
* thread safety
* test for snap deals
* remove extraneous change
* Apply suggestions from code review
* cancel CC PoSt after snap deals
* fix config

Co-authored-by: Rod Vagg <rod@vagg.org>
Co-authored-by: aarshkshah1992 <aarshkshah1992@gmail.com>
This commit is contained in:
parent e3deda0b2b
commit 59938414fc

.github/workflows/test.yml (vendored, 2 changes)
@@ -97,6 +97,7 @@ jobs:
 "itest-get_messages_in_ts": ["self-hosted", "linux", "x64", "xlarge"],
 "itest-lite_migration": ["self-hosted", "linux", "x64", "xlarge"],
 "itest-lookup_robust_address": ["self-hosted", "linux", "x64", "xlarge"],
+"itest-manual_onboarding": ["self-hosted", "linux", "x64", "xlarge"],
 "itest-mempool": ["self-hosted", "linux", "x64", "xlarge"],
 "itest-mpool_msg_uuid": ["self-hosted", "linux", "x64", "xlarge"],
 "itest-mpool_push_with_uuid": ["self-hosted", "linux", "x64", "xlarge"],
@@ -129,6 +130,7 @@ jobs:
 "itest-deals",
 "itest-direct_data_onboard_verified",
 "itest-direct_data_onboard",
+"itest-manual_onboarding",
 "itest-net",
 "itest-path_detach_redeclare",
 "itest-path_type_filters",
@@ -12,6 +12,7 @@ import (

 	"github.com/stretchr/testify/require"

+	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-bitfield"
 	"github.com/filecoin-project/go-jsonrpc"
 	"github.com/filecoin-project/go-state-types/abi"
@@ -20,6 +21,7 @@ import (
 	"github.com/filecoin-project/go-state-types/dline"

 	"github.com/filecoin-project/lotus/api"
+	"github.com/filecoin-project/lotus/api/v1api"
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/miner"
 )
@@ -29,11 +31,13 @@ type BlockMiner struct {
 	t     *testing.T
 	miner *TestMiner

-	nextNulls int64
-	pause     chan struct{}
-	unpause   chan struct{}
-	wg        sync.WaitGroup
-	cancel    context.CancelFunc
+	nextNulls         int64
+	postWatchMiners   []address.Address
+	postWatchMinersLk sync.Mutex
+	pause             chan struct{}
+	unpause           chan struct{}
+	wg                sync.WaitGroup
+	cancel            context.CancelFunc
 }

 func NewBlockMiner(t *testing.T, miner *TestMiner) *BlockMiner {
@@ -46,19 +50,58 @@ func NewBlockMiner(t *testing.T, miner *TestMiner) *BlockMiner {
 	}
 }

+type minerDeadline struct {
+	addr     address.Address
+	deadline dline.Info
+}
+
+type minerDeadlines []minerDeadline
+
+func (mds minerDeadlines) CloseList() []abi.ChainEpoch {
+	var ret []abi.ChainEpoch
+	for _, md := range mds {
+		ret = append(ret, md.deadline.Last())
+	}
+	return ret
+}
+
+func (mds minerDeadlines) MinerStringList() []string {
+	var ret []string
+	for _, md := range mds {
+		ret = append(ret, md.addr.String())
+	}
+	return ret
+}
+
+// FilterByLast returns a new minerDeadlines containing only the deadlines whose Last() epoch
+// is at or before last.
+func (mds minerDeadlines) FilterByLast(last abi.ChainEpoch) minerDeadlines {
+	var ret minerDeadlines
+	for _, md := range mds {
+		if last >= md.deadline.Last() {
+			ret = append(ret, md)
+		}
+	}
+	return ret
+}
+
 type partitionTracker struct {
+	minerAddr  address.Address
 	partitions []api.Partition
 	posted     bitfield.BitField
 }

-func newPartitionTracker(ctx context.Context, dlIdx uint64, bm *BlockMiner) *partitionTracker {
-	dlines, err := bm.miner.FullNode.StateMinerDeadlines(ctx, bm.miner.ActorAddr, types.EmptyTSK)
-	require.NoError(bm.t, err)
+// newPartitionTracker creates a new partitionTracker that tracks the deadline index dlIdx for the
+// given minerAddr. It uses the provided client to interact with the chain.
+func newPartitionTracker(ctx context.Context, t *testing.T, client v1api.FullNode, minerAddr address.Address, dlIdx uint64) *partitionTracker {
+	dlines, err := client.StateMinerDeadlines(ctx, minerAddr, types.EmptyTSK)
+	require.NoError(t, err)
 	dl := dlines[dlIdx]

-	parts, err := bm.miner.FullNode.StateMinerPartitions(ctx, bm.miner.ActorAddr, dlIdx, types.EmptyTSK)
-	require.NoError(bm.t, err)
+	parts, err := client.StateMinerPartitions(ctx, minerAddr, dlIdx, types.EmptyTSK)
+	require.NoError(t, err)
 	return &partitionTracker{
+		minerAddr:  minerAddr,
 		partitions: parts,
 		posted:     dl.PostSubmissions,
 	}
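To make the direction of the new filter concrete: as written, FilterByLast keeps the deadlines that close at or before the supplied epoch, which is exactly the set the block miner must force posts for. A hypothetical unit test, not part of this commit, could pin that down; it assumes it sits in the same kit package so the unexported types are reachable, and the miner IDs are arbitrary.

// Hypothetical unit test (not in this commit) for minerDeadlines.FilterByLast.
package kit

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/dline"
)

func TestMinerDeadlinesFilterByLast(t *testing.T) {
	addrA, err := address.NewIDAddress(1000) // arbitrary miner IDs
	require.NoError(t, err)
	addrB, err := address.NewIDAddress(1001)
	require.NoError(t, err)

	mds := minerDeadlines{
		{addr: addrA, deadline: dline.Info{Close: 100}}, // closes well before epoch 150
		{addr: addrB, deadline: dline.Info{Close: 200}}, // closes after epoch 150
	}

	// Only the deadline that ends at or before the target epoch is kept.
	closing := mds.FilterByLast(abi.ChainEpoch(150))
	require.Equal(t, []string{addrA.String()}, closing.MinerStringList())
}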
@@ -74,11 +117,11 @@ func (p *partitionTracker) done(t *testing.T) bool {
 	return uint64(len(p.partitions)) == p.count(t)
 }

-func (p *partitionTracker) recordIfPost(t *testing.T, bm *BlockMiner, msg *types.Message) (ret bool) {
+func (p *partitionTracker) recordIfPost(t *testing.T, msg *types.Message) (ret bool) {
 	defer func() {
 		ret = p.done(t)
 	}()
-	if !(msg.To == bm.miner.ActorAddr) {
+	if !(msg.To == p.minerAddr) {
 		return
 	}
 	if msg.Method != builtin.MethodsMiner.SubmitWindowedPoSt {
@@ -92,19 +135,18 @@ func (p *partitionTracker) recordIfPost(t *testing.T, bm *BlockMiner, msg *types
 	return
 }

-func (bm *BlockMiner) forcePoSt(ctx context.Context, ts *types.TipSet, dlinfo *dline.Info) {
-
-	tracker := newPartitionTracker(ctx, dlinfo.Index, bm)
+func (bm *BlockMiner) forcePoSt(ctx context.Context, ts *types.TipSet, minerAddr address.Address, dlinfo dline.Info) {
+	tracker := newPartitionTracker(ctx, bm.t, bm.miner.FullNode, minerAddr, dlinfo.Index)
 	if !tracker.done(bm.t) { // need to wait for post
 		bm.t.Logf("expect %d partitions proved but only see %d", len(tracker.partitions), tracker.count(bm.t))
-		poolEvts, err := bm.miner.FullNode.MpoolSub(ctx) //subscribe before checking pending so we don't miss any events
+		poolEvts, err := bm.miner.FullNode.MpoolSub(ctx) // subscribe before checking pending so we don't miss any events
 		require.NoError(bm.t, err)

 		// First check pending messages we'll mine this epoch
 		msgs, err := bm.miner.FullNode.MpoolPending(ctx, types.EmptyTSK)
 		require.NoError(bm.t, err)
 		for _, msg := range msgs {
-			if tracker.recordIfPost(bm.t, bm, &msg.Message) {
+			if tracker.recordIfPost(bm.t, &msg.Message) {
 				fmt.Printf("found post in mempool pending\n")
 			}
 		}
|
||||
msgs, err := bm.miner.FullNode.ChainGetBlockMessages(ctx, bc)
|
||||
require.NoError(bm.t, err)
|
||||
for _, msg := range msgs.BlsMessages {
|
||||
if tracker.recordIfPost(bm.t, bm, msg) {
|
||||
if tracker.recordIfPost(bm.t, msg) {
|
||||
fmt.Printf("found post in message of prev tipset\n")
|
||||
}
|
||||
|
||||
}
|
||||
for _, msg := range msgs.SecpkMessages {
|
||||
if tracker.recordIfPost(bm.t, bm, &msg.Message) {
|
||||
if tracker.recordIfPost(bm.t, &msg.Message) {
|
||||
fmt.Printf("found post in message of prev tipset\n")
|
||||
}
|
||||
}
|
||||
@@ -139,7 +181,7 @@ func (bm *BlockMiner) forcePoSt(ctx context.Context, ts *types.TipSet, dlinfo *d
 				bm.t.Logf("pool event: %d", evt.Type)
 				if evt.Type == api.MpoolAdd {
 					bm.t.Logf("incoming message %v", evt.Message)
-					if tracker.recordIfPost(bm.t, bm, &evt.Message.Message) {
+					if tracker.recordIfPost(bm.t, &evt.Message.Message) {
 						fmt.Printf("found post in mempool evt\n")
 						break POOL
 					}
@@ -151,11 +193,24 @@ func (bm *BlockMiner) forcePoSt(ctx context.Context, ts *types.TipSet, dlinfo *d
 	}
 }

+// WatchMinerForPost adds a miner to the list of miners that the BlockMiner will watch for window
+// post submissions when using MineBlocksMustPost. This is useful when we have more than just the
+// BlockMiner submitting posts, particularly in the case of UnmanagedMiners which don't participate
+// in block mining.
+func (bm *BlockMiner) WatchMinerForPost(minerAddr address.Address) {
+	bm.postWatchMinersLk.Lock()
+	bm.postWatchMiners = append(bm.postWatchMiners, minerAddr)
+	bm.postWatchMinersLk.Unlock()
+}
+
 // Like MineBlocks but refuses to mine until the window post scheduler has wdpost messages in the mempool
 // and everything shuts down if a post fails. It also enforces that every block mined succeeds
 func (bm *BlockMiner) MineBlocksMustPost(ctx context.Context, blocktime time.Duration) {
 	time.Sleep(time.Second)

+	// watch for our own window posts
+	bm.WatchMinerForPost(bm.miner.ActorAddr)
+
 	// wrap context in a cancellable context.
 	ctx, bm.cancel = context.WithCancel(ctx)
 	bm.wg.Add(1)
@@ -182,11 +237,25 @@ func (bm *BlockMiner) MineBlocksMustPost(ctx context.Context, blocktime time.Dur
 			ts, err := bm.miner.FullNode.ChainHead(ctx)
 			require.NoError(bm.t, err)

-			dlinfo, err := bm.miner.FullNode.StateMinerProvingDeadline(ctx, bm.miner.ActorAddr, ts.Key())
-			require.NoError(bm.t, err)
-			if ts.Height()+5+abi.ChainEpoch(nulls) >= dlinfo.Last() { // Next block brings us past the last epoch in dline, we need to wait for miner to post
-				bm.t.Logf("forcing post to get in before deadline closes at %d", dlinfo.Last())
-				bm.forcePoSt(ctx, ts, dlinfo)
+			// Get current deadline information for all miners, then filter by the ones that are about to
+			// close so we can force a post for them.
+			bm.postWatchMinersLk.Lock()
+			var impendingDeadlines minerDeadlines
+			for _, minerAddr := range bm.postWatchMiners {
+				dlinfo, err := bm.miner.FullNode.StateMinerProvingDeadline(ctx, minerAddr, ts.Key())
+				require.NoError(bm.t, err)
+				require.NotNil(bm.t, dlinfo, "no deadline info for miner %s", minerAddr)
+				impendingDeadlines = append(impendingDeadlines, minerDeadline{addr: minerAddr, deadline: *dlinfo})
+			}
+			bm.postWatchMinersLk.Unlock()
+			impendingDeadlines = impendingDeadlines.FilterByLast(ts.Height() + 5 + abi.ChainEpoch(nulls))
+
+			if len(impendingDeadlines) > 0 {
+				// Next block brings us too close for at least one deadline, we need to wait for miners to post
+				bm.t.Logf("forcing post to get in if due before deadline closes at %v for %v", impendingDeadlines.CloseList(), impendingDeadlines.MinerStringList())
+				for _, md := range impendingDeadlines {
+					bm.forcePoSt(ctx, ts, md.addr, md.deadline)
+				}
 			}

 			var target abi.ChainEpoch
@@ -216,10 +285,13 @@ func (bm *BlockMiner) MineBlocksMustPost(ctx context.Context, blocktime time.Dur
 					return
 				}
 				if !success {
-					// if we are mining a new null block and it brings us past deadline boundary we need to wait for miner to post
-					if ts.Height()+5+abi.ChainEpoch(nulls+i) >= dlinfo.Last() {
-						bm.t.Logf("forcing post to get in before deadline closes at %d", dlinfo.Last())
-						bm.forcePoSt(ctx, ts, dlinfo)
+					// if we are mining a new null block and it brings us past deadline boundary we need to wait for miners to post
+					impendingDeadlines = impendingDeadlines.FilterByLast(ts.Height() + 5 + abi.ChainEpoch(nulls+i))
+					if len(impendingDeadlines) > 0 {
+						bm.t.Logf("forcing post to get in if due before deadline closes at %v for %v", impendingDeadlines.CloseList(), impendingDeadlines.MinerStringList())
+						for _, md := range impendingDeadlines {
+							bm.forcePoSt(ctx, ts, md.addr, md.deadline)
+						}
 					}
 				}
 			}
@@ -378,4 +450,7 @@ func (bm *BlockMiner) Stop() {
 		close(bm.pause)
 		bm.pause = nil
 	}
+	bm.postWatchMinersLk.Lock()
+	bm.postWatchMiners = nil
+	bm.postWatchMinersLk.Unlock()
 }
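Taken together, the new BlockMiner surface is meant to be driven from a test roughly as follows. This is a hypothetical, compressed sketch rather than code from the commit: it reuses the kit calls and the defaultSectorSize constant from itests/manual_onboarding_test.go (added later in this diff) and assumes mock proofs to stay cheap.

// Hypothetical usage sketch (not part of this commit); it follows the calls made in
// itests/manual_onboarding_test.go below.
package itests

import (
	"context"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/lotus/itests/kit"
)

func TestWatchUnmanagedMinerForPost(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	var (
		client kit.TestFullNode
		minerA kit.TestMiner // genesis miner, keeps the chain moving
	)
	ens := kit.NewEnsemble(t, kit.MockProofs()).
		FullNode(&client, kit.SectorSize(defaultSectorSize)).
		Miner(&minerA, &client, kit.PresealSectors(5), kit.SectorSize(defaultSectorSize), kit.WithAllSubsystems()).
		Start().
		InterconnectAll()
	blockMiner := ens.BeginMiningMustPost(2 * time.Millisecond)[0]

	// An unmanaged miner has no lotus-miner window-post scheduler, so the block miner
	// must be told explicitly to wait for its posts before crossing deadline boundaries.
	minerB, ens := ens.UnmanagedMiner(&client, kit.SectorSize(defaultSectorSize), kit.OwnerAddr(client.DefaultKey))
	ens.Start()

	_, respCh, _ := minerB.OnboardCCSectorWithMockProofs(ctx, kit.TestSpt)
	blockMiner.WatchMinerForPost(minerB.ActorAddr)

	// The first window post for the new sector is reported on respCh.
	resp := <-respCh
	require.NoError(t, resp.Error)
	require.True(t, resp.Posted)
}

The call to WatchMinerForPost is the important step: without it, MineBlocksMustPost only waits on its own miner's deadlines and can mine straight past the unmanaged miner's proving window.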
@@ -118,15 +118,17 @@ type Ensemble struct {
 	options *ensembleOpts

 	inactive struct {
-		fullnodes []*TestFullNode
-		miners    []*TestMiner
-		workers   []*TestWorker
+		fullnodes       []*TestFullNode
+		miners          []*TestMiner
+		unmanagedMiners []*TestUnmanagedMiner
+		workers         []*TestWorker
 	}
 	active struct {
-		fullnodes []*TestFullNode
-		miners    []*TestMiner
-		workers   []*TestWorker
-		bms       map[*TestMiner]*BlockMiner
+		fullnodes       []*TestFullNode
+		miners          []*TestMiner
+		unmanagedMiners []*TestUnmanagedMiner
+		workers         []*TestWorker
+		bms             map[*TestMiner]*BlockMiner
 	}
 	genesis struct {
 		version network.Version
@@ -239,9 +241,7 @@ func (n *Ensemble) MinerEnroll(minerNode *TestMiner, full *TestFullNode, opts ..
 	tdir, err := os.MkdirTemp("", "preseal-memgen")
 	require.NoError(n.t, err)

-	minerCnt := len(n.inactive.miners) + len(n.active.miners)
-
-	actorAddr, err := address.NewIDAddress(genesis2.MinerStart + uint64(minerCnt))
+	actorAddr, err := address.NewIDAddress(genesis2.MinerStart + n.minerCount())
 	require.NoError(n.t, err)

 	if options.mainMiner != nil {
@@ -313,12 +313,25 @@ func (n *Ensemble) AddInactiveMiner(m *TestMiner) {
 	n.inactive.miners = append(n.inactive.miners, m)
 }

+func (n *Ensemble) AddInactiveUnmanagedMiner(m *TestUnmanagedMiner) {
+	n.inactive.unmanagedMiners = append(n.inactive.unmanagedMiners, m)
+}
+
 func (n *Ensemble) Miner(minerNode *TestMiner, full *TestFullNode, opts ...NodeOpt) *Ensemble {
 	n.MinerEnroll(minerNode, full, opts...)
 	n.AddInactiveMiner(minerNode)
 	return n
 }

+func (n *Ensemble) UnmanagedMiner(full *TestFullNode, opts ...NodeOpt) (*TestUnmanagedMiner, *Ensemble) {
+	actorAddr, err := address.NewIDAddress(genesis2.MinerStart + n.minerCount())
+	require.NoError(n.t, err)
+
+	minerNode := NewTestUnmanagedMiner(n.t, full, actorAddr, opts...)
+	n.AddInactiveUnmanagedMiner(minerNode)
+	return minerNode, n
+}
+
 // Worker enrolls a new worker, using the provided full node for chain
 // interactions.
 func (n *Ensemble) Worker(minerNode *TestMiner, worker *TestWorker, opts ...NodeOpt) *Ensemble {
@@ -805,6 +818,79 @@ func (n *Ensemble) Start() *Ensemble {
 	// to active, so clear the slice.
 	n.inactive.miners = n.inactive.miners[:0]

+	// Create all inactive manual miners.
+	for _, m := range n.inactive.unmanagedMiners {
+		proofType, err := miner.WindowPoStProofTypeFromSectorSize(m.options.sectorSize, n.genesis.version)
+		require.NoError(n.t, err)
+
+		params, aerr := actors.SerializeParams(&power3.CreateMinerParams{
+			Owner:               m.OwnerKey.Address,
+			Worker:              m.OwnerKey.Address,
+			WindowPoStProofType: proofType,
+			Peer:                abi.PeerID(m.Libp2p.PeerID),
+		})
+		require.NoError(n.t, aerr)
+
+		createStorageMinerMsg := &types.Message{
+			From:  m.OwnerKey.Address,
+			To:    power.Address,
+			Value: big.Zero(),
+
+			Method: power.Methods.CreateMiner,
+			Params: params,
+		}
+		signed, err := m.FullNode.FullNode.MpoolPushMessage(ctx, createStorageMinerMsg, &api.MessageSendSpec{
+			MsgUuid: uuid.New(),
+		})
+		require.NoError(n.t, err)
+
+		mw, err := m.FullNode.FullNode.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
+		require.NoError(n.t, err)
+		require.Equal(n.t, exitcode.Ok, mw.Receipt.ExitCode)
+
+		var retval power3.CreateMinerReturn
+		err = retval.UnmarshalCBOR(bytes.NewReader(mw.Receipt.Return))
+		require.NoError(n.t, err, "failed to create miner")
+
+		m.ActorAddr = retval.IDAddress
+
+		has, err := m.FullNode.WalletHas(ctx, m.OwnerKey.Address)
+		require.NoError(n.t, err)
+
+		// Only import the owner's full key into our companion full node, if we
+		// don't have it still.
+		if !has {
+			_, err = m.FullNode.WalletImport(ctx, &m.OwnerKey.KeyInfo)
+			require.NoError(n.t, err)
+		}
+
+		enc, err := actors.SerializeParams(&miner2.ChangePeerIDParams{NewID: abi.PeerID(m.Libp2p.PeerID)})
+		require.NoError(n.t, err)
+
+		msg := &types.Message{
+			From:   m.OwnerKey.Address,
+			To:     m.ActorAddr,
+			Method: builtin.MethodsMiner.ChangePeerID,
+			Params: enc,
+			Value:  types.NewInt(0),
+		}
+
+		_, err2 := m.FullNode.MpoolPushMessage(ctx, msg, &api.MessageSendSpec{
+			MsgUuid: uuid.New(),
+		})
+		require.NoError(n.t, err2)
+
+		minerCopy := *m.FullNode
+		minerCopy.FullNode = modules.MakeUuidWrapper(minerCopy.FullNode)
+		m.FullNode = &minerCopy
+
+		n.active.unmanagedMiners = append(n.active.unmanagedMiners, m)
+	}
+
+	// If we are here, we have processed all inactive manual miners and moved them
+	// to active, so clear the slice.
+	n.inactive.unmanagedMiners = n.inactive.unmanagedMiners[:0]
+
 	// ---------------------
 	//  WORKERS
 	// ---------------------
@@ -1003,6 +1089,10 @@ func (n *Ensemble) BeginMining(blocktime time.Duration, miners ...*TestMiner) []
 	return bms
 }

+func (n *Ensemble) minerCount() uint64 {
+	return uint64(len(n.inactive.miners) + len(n.active.miners) + len(n.inactive.unmanagedMiners) + len(n.active.unmanagedMiners))
+}
+
 func (n *Ensemble) generateGenesis() *genesis.Template {
 	var verifRoot = gen.DefaultVerifregRootkeyActor
 	if k := n.options.verifiedRoot.key; k != nil {
@@ -12,6 +12,7 @@ import (
 	"github.com/multiformats/go-multiaddr"
 	"github.com/stretchr/testify/require"
 	cbg "github.com/whyrusleeping/cbor-gen"
+	"golang.org/x/xerrors"

 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-state-types/abi"
@@ -97,6 +98,30 @@ func (f *TestFullNode) WaitTillChain(ctx context.Context, pred ChainPredicate) *
 	return nil
 }

+// WaitTillChainOrError waits until a specified chain condition is met. It returns
+// the first tipset where the condition is met, or an error if it never is.
+func (f *TestFullNode) WaitTillChainOrError(ctx context.Context, pred ChainPredicate) (*types.TipSet, error) {
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	heads, err := f.ChainNotify(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	for chg := range heads {
+		for _, c := range chg {
+			if c.Type != "apply" {
+				continue
+			}
+			if ts := c.Val; pred(ts) {
+				return ts, nil
+			}
+		}
+	}
+	return nil, xerrors.New("chain condition not met")
+}
+
 func (f *TestFullNode) WaitForSectorActive(ctx context.Context, t *testing.T, sn abi.SectorNumber, maddr address.Address) {
 	for {
 		active, err := f.StateMinerActiveSectors(ctx, maddr, types.EmptyTSK)
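A small hypothetical usage sketch, not part of the commit: unlike WaitTillChain, the new helper returns an error instead of asserting, so callers can decide what a missed condition means. kit.HeightAtLeast is the existing predicate already used elsewhere in this diff.

// Hypothetical helper showing WaitTillChainOrError with the existing HeightAtLeast predicate.
package itests

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/lotus/itests/kit"
)

func waitFiveEpochsOrLog(ctx context.Context, t *testing.T, client *kit.TestFullNode) {
	head, err := client.ChainHead(ctx)
	require.NoError(t, err)

	ts, err := client.WaitTillChainOrError(ctx, kit.HeightAtLeast(head.Height()+5))
	if err != nil {
		// e.g. the context was cancelled before the chain advanced far enough
		t.Logf("chain did not advance: %v", err)
		return
	}
	t.Logf("reached height %d", ts.Height())
}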
itests/kit/node_unmanaged.go (new file, 1042 lines)
File diff suppressed because it is too large.
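Because that diff is collapsed, the following is a rough summary of the TestUnmanagedMiner surface this commit exercises, inferred only from the calls in manual_onboarding_test.go below; the interface name and exact signatures are hypothetical, and the real declarations live in itests/kit/node_unmanaged.go.

// Hypothetical summary interface (not in the commit) of what the test below uses.
package kit

import (
	"context"

	"github.com/filecoin-project/go-state-types/abi"
)

// manualOnboardingAPI is a made-up name; parameter and result types are inferred from the
// variables declared in manual_onboarding_test.go (kit.TestSpt is a seal proof type, respCh
// is a chan WindowPostResp, and the third result cancels the manual window-post loop).
type manualOnboardingAPI interface {
	// CC sector onboarding, with either mock or real proofs.
	OnboardCCSectorWithMockProofs(ctx context.Context, proofType abi.RegisteredSealProof) (abi.SectorNumber, chan WindowPostResp, context.CancelFunc)
	OnboardCCSectorWithRealProofs(ctx context.Context, proofType abi.RegisteredSealProof) (abi.SectorNumber, chan WindowPostResp, context.CancelFunc)

	// Sector onboarding with data pieces instead of a pure CC sector.
	OnboardSectorWithPiecesAndMockProofs(ctx context.Context, proofType abi.RegisteredSealProof) (abi.SectorNumber, chan WindowPostResp, context.CancelFunc)
	OnboardSectorWithPiecesAndRealProofs(ctx context.Context, proofType abi.RegisteredSealProof) (abi.SectorNumber, chan WindowPostResp, context.CancelFunc)

	// Snap-deal upgrade of an already activated CC sector.
	SnapDealWithRealProofs(ctx context.Context, proofType abi.RegisteredSealProof, sector abi.SectorNumber)

	// Power and window-post assertions used by the test below.
	AssertNoPower(ctx context.Context)
	AssertPower(ctx context.Context, raw uint64, qa uint64)
	SubmitPostDispute(ctx context.Context, sector abi.SectorNumber) error
}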
itests/manual_onboarding_test.go (new file, 174 lines)
package itests

import (
	"context"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/itests/kit"
)

const defaultSectorSize = abi.SectorSize(2 << 10) // 2KiB

// Manually onboard CC sectors, bypassing lotus-miner onboarding pathways
func TestManualSectorOnboarding(t *testing.T) {
	req := require.New(t)

	for _, withMockProofs := range []bool{true, false} {
		testName := "WithRealProofs"
		if withMockProofs {
			testName = "WithMockProofs"
		}
		t.Run(testName, func(t *testing.T) {
			if testName == "WithRealProofs" {
				kit.Expensive(t)
			}
			kit.QuietMiningLogs()
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()

			var (
				// need to pick a balanced value so that the test is not racy on CI by running through its WindowPostDeadlines too fast
				blocktime = 2 * time.Millisecond
				client    kit.TestFullNode
				minerA    kit.TestMiner // A is a standard genesis miner
			)

			// Setup and begin mining with a single miner (A)
			// Miner A will only be a genesis Miner with power allocated in the genesis block and will not onboard any sectors from here on
			kitOpts := []kit.EnsembleOpt{}
			if withMockProofs {
				kitOpts = append(kitOpts, kit.MockProofs())
			}
			ens := kit.NewEnsemble(t, kitOpts...).
				FullNode(&client, kit.SectorSize(defaultSectorSize)).
				// preseal more than the default number of sectors to ensure that the genesis miner has power
				// because our unmanaged miners won't produce blocks so we may get null rounds
				Miner(&minerA, &client, kit.PresealSectors(5), kit.SectorSize(defaultSectorSize), kit.WithAllSubsystems()).
				Start().
				InterconnectAll()
			blockMiners := ens.BeginMiningMustPost(blocktime)
			req.Len(blockMiners, 1)
			blockMiner := blockMiners[0]

			// Instantiate MinerB to manually handle sector onboarding and power acquisition through sector activation.
			// Unlike other miners managed by the Lotus Miner storage infrastructure, MinerB operates independently,
			// performing all related tasks manually. Managed by the TestKit, MinerB has the capability to utilize actual proofs
			// for the processes of sector onboarding and activation.
			nodeOpts := []kit.NodeOpt{kit.SectorSize(defaultSectorSize), kit.OwnerAddr(client.DefaultKey)}
			minerB, ens := ens.UnmanagedMiner(&client, nodeOpts...)
			// MinerC is similar to MinerB, but onboards pieces instead of a pure CC sector
			minerC, ens := ens.UnmanagedMiner(&client, nodeOpts...)

			ens.Start()

			build.Clock.Sleep(time.Second)

			t.Log("Checking initial power ...")

			// Miner A should have power as it has already onboarded sectors in the genesis block
			head, err := client.ChainHead(ctx)
			req.NoError(err)
			p, err := client.StateMinerPower(ctx, minerA.ActorAddr, head.Key())
			req.NoError(err)
			t.Logf("MinerA RBP: %v, QaP: %v", p.MinerPower.QualityAdjPower.String(), p.MinerPower.RawBytePower.String())

			// Miner B should have no power as it has yet to onboard and activate any sectors
			minerB.AssertNoPower(ctx)

			// Miner C should have no power as it has yet to onboard and activate any sectors
			minerC.AssertNoPower(ctx)

			// ---- Miner B onboards a CC sector
			var bSectorNum abi.SectorNumber
			var bRespCh chan kit.WindowPostResp
			var bWdPostCancelF context.CancelFunc

			if withMockProofs {
				bSectorNum, bRespCh, bWdPostCancelF = minerB.OnboardCCSectorWithMockProofs(ctx, kit.TestSpt)
			} else {
				bSectorNum, bRespCh, bWdPostCancelF = minerB.OnboardCCSectorWithRealProofs(ctx, kit.TestSpt)
			}
			// Miner B should still not have power as power can only be gained after sector is activated i.e. the first WindowPost is submitted for it
			minerB.AssertNoPower(ctx)
			// Ensure that the block miner checks for and waits for posts during the appropriate proving window from our new miner with a sector
			blockMiner.WatchMinerForPost(minerB.ActorAddr)

			// --- Miner C onboards sector with data/pieces
			var cSectorNum abi.SectorNumber
			var cRespCh chan kit.WindowPostResp

			if withMockProofs {
				cSectorNum, cRespCh, _ = minerC.OnboardSectorWithPiecesAndMockProofs(ctx, kit.TestSpt)
			} else {
				cSectorNum, cRespCh, _ = minerC.OnboardSectorWithPiecesAndRealProofs(ctx, kit.TestSpt)
			}
			// Miner C should still not have power as power can only be gained after sector is activated i.e. the first WindowPost is submitted for it
			minerC.AssertNoPower(ctx)
			// Ensure that the block miner checks for and waits for posts during the appropriate proving window from our new miner with a sector
			blockMiner.WatchMinerForPost(minerC.ActorAddr)

			// Wait till both miners' sectors have had their first post and are activated and check that this is reflected in miner power
			waitTillActivatedAndAssertPower(ctx, t, minerB, bRespCh, bSectorNum, uint64(defaultSectorSize), withMockProofs)
			waitTillActivatedAndAssertPower(ctx, t, minerC, cRespCh, cSectorNum, uint64(defaultSectorSize), withMockProofs)

			// Miner B has activated the CC sector -> upgrade it with snapdeals
			// Note: We can't activate a sector with mock proofs as the WdPost is successfully disputed and so no point
			// in snapping it as snapping is only for activated sectors
			if !withMockProofs {
				minerB.SnapDealWithRealProofs(ctx, kit.TestSpt, bSectorNum)
				// cancel the WdPost for the CC sector as the corresponding CommR is no longer valid
				bWdPostCancelF()
			}
		})
	}
}

func waitTillActivatedAndAssertPower(ctx context.Context, t *testing.T, miner *kit.TestUnmanagedMiner, respCh chan kit.WindowPostResp, sector abi.SectorNumber,
	sectorSize uint64, withMockProofs bool) {
	req := require.New(t)
	// wait till sector is activated
	select {
	case resp := <-respCh:
		req.NoError(resp.Error)
		req.True(resp.Posted)
	case <-ctx.Done():
		t.Fatal("timed out waiting for sector activation")
	}

	// Fetch on-chain sector properties
	head, err := miner.FullNode.ChainHead(ctx)
	req.NoError(err)

	soi, err := miner.FullNode.StateSectorGetInfo(ctx, miner.ActorAddr, sector, head.Key())
	req.NoError(err)
	t.Logf("Miner %s SectorOnChainInfo %d: %+v", miner.ActorAddr.String(), sector, soi)

	_ = miner.FullNode.WaitTillChain(ctx, kit.HeightAtLeast(head.Height()+5))

	t.Log("Checking power after PoSt ...")

	// The miner should now have power
	miner.AssertPower(ctx, sectorSize, sectorSize)

	if withMockProofs {
		// WindowPost Dispute should succeed as we are using mock proofs
		err := miner.SubmitPostDispute(ctx, sector)
		require.NoError(t, err)
	} else {
		// WindowPost Dispute should fail
		assertDisputeFails(ctx, t, miner, sector)
	}
}

func assertDisputeFails(ctx context.Context, t *testing.T, miner *kit.TestUnmanagedMiner, sector abi.SectorNumber) {
	err := miner.SubmitPostDispute(ctx, sector)
	require.Error(t, err)
	require.Contains(t, err.Error(), "failed to dispute valid post")
	require.Contains(t, err.Error(), "(RetCode=16)")
}