2021-06-18 18:45:29 +00:00
|
|
|
package kit
|
2021-06-11 17:26:25 +00:00
|
|
|
|
|
|
|
import (
|
2021-12-08 17:11:19 +00:00
|
|
|
"bytes"
|
2021-06-11 17:26:25 +00:00
|
|
|
"context"
|
2022-02-14 19:00:41 +00:00
|
|
|
"fmt"
|
2021-06-11 17:26:25 +00:00
|
|
|
"sync"
|
|
|
|
"sync/atomic"
|
|
|
|
"testing"
|
|
|
|
"time"
|
|
|
|
|
2022-06-14 15:00:51 +00:00
|
|
|
"github.com/stretchr/testify/require"
|
2022-04-20 21:34:28 +00:00
|
|
|
|
2021-12-08 17:11:19 +00:00
|
|
|
"github.com/filecoin-project/go-bitfield"
|
2021-06-11 17:26:25 +00:00
|
|
|
"github.com/filecoin-project/go-state-types/abi"
|
2022-06-14 15:00:51 +00:00
|
|
|
"github.com/filecoin-project/go-state-types/builtin"
|
|
|
|
minertypes "github.com/filecoin-project/go-state-types/builtin/v8/miner"
|
2022-02-14 14:47:18 +00:00
|
|
|
"github.com/filecoin-project/go-state-types/dline"
|
2022-06-14 15:00:51 +00:00
|
|
|
|
2021-12-08 17:11:19 +00:00
|
|
|
"github.com/filecoin-project/lotus/api"
|
|
|
|
"github.com/filecoin-project/lotus/chain/types"
|
2021-06-11 17:26:25 +00:00
|
|
|
"github.com/filecoin-project/lotus/miner"
|
|
|
|
)
|
|
|
|
|
|
|
|
// BlockMiner is a utility that makes a test miner Mine blocks on a timer.
type BlockMiner struct {
	t *testing.T
	// miner is the test miner whose MineOne is driven on a timer.
	miner *TestMiner

	// nextNulls is the number of null rounds to inject before the next
	// mined block; written via atomic.AddInt64 (InjectNulls) and drained
	// via atomic.SwapInt64 by the mining loops.
	nextNulls int64
	// wg tracks the background mining goroutine so Stop can wait for it.
	wg sync.WaitGroup
	// cancel stops the mining goroutine; initialized to a no-op so Stop
	// is safe even if mining was never started.
	cancel context.CancelFunc
}
|
|
|
|
|
|
|
|
func NewBlockMiner(t *testing.T, miner *TestMiner) *BlockMiner {
|
|
|
|
return &BlockMiner{
|
|
|
|
t: t,
|
|
|
|
miner: miner,
|
|
|
|
cancel: func() {},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-12-08 17:11:19 +00:00
|
|
|
// partitionTracker records which partitions of one proving deadline have
// had a window PoSt submitted, so callers can tell when every partition
// in the deadline is proven.
type partitionTracker struct {
	partitions []api.Partition   // all partitions in the tracked deadline
	posted     bitfield.BitField // indexes of partitions seen in SubmitWindowedPoSt messages
}
|
|
|
|
|
|
|
|
func newPartitionTracker(ctx context.Context, dlIdx uint64, bm *BlockMiner) *partitionTracker {
|
|
|
|
dlines, err := bm.miner.FullNode.StateMinerDeadlines(ctx, bm.miner.ActorAddr, types.EmptyTSK)
|
|
|
|
require.NoError(bm.t, err)
|
|
|
|
dl := dlines[dlIdx]
|
|
|
|
|
|
|
|
parts, err := bm.miner.FullNode.StateMinerPartitions(ctx, bm.miner.ActorAddr, dlIdx, types.EmptyTSK)
|
|
|
|
require.NoError(bm.t, err)
|
|
|
|
return &partitionTracker{
|
|
|
|
partitions: parts,
|
|
|
|
posted: dl.PostSubmissions,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (p *partitionTracker) count(t *testing.T) uint64 {
|
|
|
|
pCnt, err := p.posted.Count()
|
|
|
|
require.NoError(t, err)
|
|
|
|
return pCnt
|
|
|
|
}
|
|
|
|
|
|
|
|
func (p *partitionTracker) done(t *testing.T) bool {
|
|
|
|
return uint64(len(p.partitions)) == p.count(t)
|
|
|
|
}
|
|
|
|
|
2022-02-14 19:00:41 +00:00
|
|
|
func (p *partitionTracker) recordIfPost(t *testing.T, bm *BlockMiner, msg *types.Message) (ret bool) {
|
2021-12-08 17:11:19 +00:00
|
|
|
defer func() {
|
|
|
|
ret = p.done(t)
|
|
|
|
}()
|
|
|
|
if !(msg.To == bm.miner.ActorAddr) {
|
|
|
|
return
|
|
|
|
}
|
2022-04-20 21:34:28 +00:00
|
|
|
if msg.Method != builtin.MethodsMiner.SubmitWindowedPoSt {
|
2021-12-08 17:11:19 +00:00
|
|
|
return
|
|
|
|
}
|
2022-04-20 21:34:28 +00:00
|
|
|
params := minertypes.SubmitWindowedPoStParams{}
|
2021-12-08 17:11:19 +00:00
|
|
|
require.NoError(t, params.UnmarshalCBOR(bytes.NewReader(msg.Params)))
|
|
|
|
for _, part := range params.Partitions {
|
|
|
|
p.posted.Set(part.Index)
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2022-02-14 14:47:18 +00:00
|
|
|
// forcePoSt blocks until a window PoSt covering every partition of the
// deadline described by dlinfo has been observed — either already recorded
// on chain, pending in the mpool, included in ts's blocks, or arriving via
// an mpool subscription. Returns early if ctx is cancelled.
func (bm *BlockMiner) forcePoSt(ctx context.Context, ts *types.TipSet, dlinfo *dline.Info) {

	tracker := newPartitionTracker(ctx, dlinfo.Index, bm)
	if !tracker.done(bm.t) { // need to wait for post
		bm.t.Logf("expect %d partitions proved but only see %d", len(tracker.partitions), tracker.count(bm.t))
		// Order matters: subscribe before checking pending so we don't miss any events.
		poolEvts, err := bm.miner.FullNode.MpoolSub(ctx) //subscribe before checking pending so we don't miss any events
		require.NoError(bm.t, err)

		// First check pending messages we'll mine this epoch
		msgs, err := bm.miner.FullNode.MpoolPending(ctx, types.EmptyTSK)
		require.NoError(bm.t, err)
		for _, msg := range msgs {
			if tracker.recordIfPost(bm.t, bm, &msg.Message) {
				fmt.Printf("found post in mempool pending\n")
			}
		}

		// Account for included but not yet executed messages
		for _, bc := range ts.Cids() {
			msgs, err := bm.miner.FullNode.ChainGetBlockMessages(ctx, bc)
			require.NoError(bm.t, err)
			// BLS messages are bare types.Message; secp messages wrap one.
			for _, msg := range msgs.BlsMessages {
				if tracker.recordIfPost(bm.t, bm, msg) {
					fmt.Printf("found post in message of prev tipset\n")
				}

			}
			for _, msg := range msgs.SecpkMessages {
				if tracker.recordIfPost(bm.t, bm, &msg.Message) {
					fmt.Printf("found post in message of prev tipset\n")
				}
			}
		}

		// post not yet in mpool, wait for it
		if !tracker.done(bm.t) {
			bm.t.Logf("post missing from mpool, block mining suspended until it arrives")
		POOL:
			// Consume subscription events until the tracker is complete
			// (recordIfPost returns done-ness, not just a match).
			for {
				bm.t.Logf("mpool event wait loop at block height %d, ts: %s", ts.Height(), ts.Key())
				select {
				case <-ctx.Done():
					return
				case evt := <-poolEvts:
					bm.t.Logf("pool event: %d", evt.Type)
					if evt.Type == api.MpoolAdd {
						bm.t.Logf("incoming message %v", evt.Message)
						if tracker.recordIfPost(bm.t, bm, &evt.Message.Message) {
							fmt.Printf("found post in mempool evt\n")
							break POOL
						}
					}
				}
			}
			bm.t.Logf("done waiting on mpool")
		}
	}
}
|
|
|
|
|
2021-12-08 17:11:19 +00:00
|
|
|
// Like MineBlocks but refuses to mine until the window post scheduler has wdpost messages in the mempool
// and everything shuts down if a post fails. It also enforces that every block mined succeeds
func (bm *BlockMiner) MineBlocksMustPost(ctx context.Context, blocktime time.Duration) {
	// Give the network/miner a moment to settle before mining starts.
	time.Sleep(time.Second)

	// wrap context in a cancellable context.
	ctx, bm.cancel = context.WithCancel(ctx)
	bm.wg.Add(1)
	go func() {
		defer bm.wg.Done()

		ts, err := bm.miner.FullNode.ChainHead(ctx)
		require.NoError(bm.t, err)
		// wait carries the per-attempt mining result from reportSuccessFn.
		wait := make(chan bool)
		chg, err := bm.miner.FullNode.ChainNotify(ctx)
		require.NoError(bm.t, err)
		// read current out
		curr := <-chg
		// If another miner advanced the chain between ChainHead and here,
		// heights diverge — this mode assumes a single block producer.
		require.Equal(bm.t, ts.Height(), curr[0].Val.Height(), "failed sanity check: are multiple miners mining with must post?")
		for {
			select {
			case <-time.After(blocktime):
			case <-ctx.Done():
				return
			}
			// Drain any nulls requested via InjectNulls since last round.
			nulls := atomic.SwapInt64(&bm.nextNulls, 0)

			// Wake up and figure out if we are at the end of an active deadline
			ts, err := bm.miner.FullNode.ChainHead(ctx)
			require.NoError(bm.t, err)

			dlinfo, err := bm.miner.FullNode.StateMinerProvingDeadline(ctx, bm.miner.ActorAddr, ts.Key())
			require.NoError(bm.t, err)
			if ts.Height()+1+abi.ChainEpoch(nulls) >= dlinfo.Last() { // Next block brings us past the last epoch in dline, we need to wait for miner to post
				bm.forcePoSt(ctx, ts, dlinfo)
			}

			// target is set by the mining callback to the epoch of the
			// successfully mined block; used below to wait for sync.
			var target abi.ChainEpoch
			reportSuccessFn := func(success bool, epoch abi.ChainEpoch, err error) {
				// MustPost semantics: any mining error aborts the test.
				require.NoError(bm.t, err)
				target = epoch
				wait <- success
			}

			// Retry with one extra injected null per failed attempt until
			// a block is actually won.
			var success bool
			for i := int64(0); !success; i++ {
				err = bm.miner.MineOne(ctx, miner.MineReq{
					InjectNulls: abi.ChainEpoch(nulls + i),
					Done:        reportSuccessFn,
				})
				select {
				case success = <-wait:
				case <-ctx.Done():
					return
				}
				if !success {
					// if we are mining a new null block and it brings us past deadline boundary we need to wait for miner to post
					if ts.Height()+1+abi.ChainEpoch(nulls+i) >= dlinfo.Last() {
						bm.forcePoSt(ctx, ts, dlinfo)
					}
				}
			}

			// Wait until it shows up on the given full nodes ChainHead
			// TODO this replicates a flaky condition from MineUntil,
			// it would be better to use api to wait for sync,
			// but currently this is a bit difficult
			// and flaky failure is easy to debug and retry
			nloops := 200
			for i := 0; i < nloops; i++ {
				ts, err := bm.miner.FullNode.ChainHead(ctx)
				require.NoError(bm.t, err)

				if ts.Height() == target {
					break
				}

				require.NotEqual(bm.t, i, nloops-1, "block never managed to sync to node")
				time.Sleep(time.Millisecond * 10)
			}

			// err here is the last MineOne error from the outer scope.
			switch {
			case err == nil: // wrap around
			case ctx.Err() != nil: // context fired.
				return
			default: // log error
				bm.t.Error(err)
			}
		}
	}()

}
|
|
|
|
|
2021-06-11 17:26:25 +00:00
|
|
|
func (bm *BlockMiner) MineBlocks(ctx context.Context, blocktime time.Duration) {
|
|
|
|
time.Sleep(time.Second)
|
|
|
|
|
|
|
|
// wrap context in a cancellable context.
|
|
|
|
ctx, bm.cancel = context.WithCancel(ctx)
|
|
|
|
|
|
|
|
bm.wg.Add(1)
|
|
|
|
go func() {
|
|
|
|
defer bm.wg.Done()
|
|
|
|
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-time.After(blocktime):
|
|
|
|
case <-ctx.Done():
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
nulls := atomic.SwapInt64(&bm.nextNulls, 0)
|
|
|
|
err := bm.miner.MineOne(ctx, miner.MineReq{
|
|
|
|
InjectNulls: abi.ChainEpoch(nulls),
|
|
|
|
Done: func(bool, abi.ChainEpoch, error) {},
|
|
|
|
})
|
|
|
|
switch {
|
|
|
|
case err == nil: // wrap around
|
|
|
|
case ctx.Err() != nil: // context fired.
|
|
|
|
return
|
|
|
|
default: // log error
|
|
|
|
bm.t.Error(err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
}
|
|
|
|
|
|
|
|
// InjectNulls injects the specified amount of null rounds in the next
|
|
|
|
// mining rounds.
|
|
|
|
func (bm *BlockMiner) InjectNulls(rounds abi.ChainEpoch) {
|
|
|
|
atomic.AddInt64(&bm.nextNulls, int64(rounds))
|
|
|
|
}
|
|
|
|
|
|
|
|
// MineUntilBlock synchronously attempts to mine until one block succeeds
// (up to 1000 tries), waits for that block to appear on fn's ChainHead,
// then invokes cb (if non-nil) with the new block's epoch. Fails the test
// if no block is mined after 1000 attempts.
func (bm *BlockMiner) MineUntilBlock(ctx context.Context, fn *TestFullNode, cb func(abi.ChainEpoch)) {
	for i := 0; i < 1000; i++ {
		var (
			success bool
			err     error
			epoch   abi.ChainEpoch
			wait    = make(chan struct{})
		)

		// doneFn runs in the miner's goroutine; it fills the result vars
		// and then signals wait, so the reads below happen-after the writes.
		doneFn := func(win bool, ep abi.ChainEpoch, e error) {
			success = win
			err = e
			epoch = ep
			wait <- struct{}{}
		}

		mineErr := bm.miner.MineOne(ctx, miner.MineReq{Done: doneFn})
		require.NoError(bm.t, mineErr)
		<-wait

		require.NoError(bm.t, err)

		if success {
			// Wait until it shows up on the given full nodes ChainHead
			nloops := 200
			for i := 0; i < nloops; i++ {
				ts, err := fn.ChainHead(ctx)
				require.NoError(bm.t, err)

				if ts.Height() == epoch {
					break
				}

				// Fail rather than spin forever if the block never syncs.
				require.NotEqual(bm.t, i, nloops-1, "block never managed to sync to node")
				time.Sleep(time.Millisecond * 10)
			}

			if cb != nil {
				cb(epoch)
			}
			return
		}
		bm.t.Log("did not Mine block, trying again", i)
	}
	bm.t.Fatal("failed to Mine 1000 times in a row...")
}
|
|
|
|
|
|
|
|
// Stop stops the block miner.
func (bm *BlockMiner) Stop() {
	bm.t.Log("shutting down mining")
	// Cancel first so the mining goroutine exits its loop, then wait for
	// it to finish before returning.
	bm.cancel()
	bm.wg.Wait()
}
|