lotus/itests/kit/blockminer.go

package kit

import (
	"bytes"
	"context"
	"fmt"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-jsonrpc"
"github.com/filecoin-project/go-state-types/abi"
2022-06-14 15:00:51 +00:00
"github.com/filecoin-project/go-state-types/builtin"
minertypes "github.com/filecoin-project/go-state-types/builtin/v8/miner"
"github.com/filecoin-project/go-state-types/dline"
2022-06-14 15:00:51 +00:00
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/miner"
)
// BlockMiner is a utility that makes a test miner mine blocks on a timer.
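//
// A typical integration test drives it roughly like this (a minimal sketch;
// the ensemble setup that produces t, ctx and the TestMiner is assumed to
// come from this package's other helpers):
//
//	bm := NewBlockMiner(t, miner)
//	bm.MineBlocks(ctx, 100*time.Millisecond)
//	defer bm.Stop()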
type BlockMiner struct {
	t     *testing.T
	miner *TestMiner

	nextNulls int64
	pause     chan struct{}
	unpause   chan struct{}
	wg        sync.WaitGroup
	cancel    context.CancelFunc
}
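// NewBlockMiner creates a BlockMiner wired to the given test miner. Mining
// does not start until one of the Mine* methods is called.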
func NewBlockMiner(t *testing.T, miner *TestMiner) *BlockMiner {
	return &BlockMiner{
		t:       t,
		miner:   miner,
		cancel:  func() {},
		unpause: make(chan struct{}),
		pause:   make(chan struct{}),
	}
}
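// partitionTracker tracks which partitions of a single deadline have had a
// window PoSt recorded, starting from the on-chain PostSubmissions bitfield.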
type partitionTracker struct {
	partitions []api.Partition
	posted     bitfield.BitField
}
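// newPartitionTracker initialises a tracker for the deadline at dlIdx using
// the miner's current on-chain deadlines and partitions.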
func newPartitionTracker(ctx context.Context, dlIdx uint64, bm *BlockMiner) *partitionTracker {
	dlines, err := bm.miner.FullNode.StateMinerDeadlines(ctx, bm.miner.ActorAddr, types.EmptyTSK)
	require.NoError(bm.t, err)
	dl := dlines[dlIdx]

	parts, err := bm.miner.FullNode.StateMinerPartitions(ctx, bm.miner.ActorAddr, dlIdx, types.EmptyTSK)
	require.NoError(bm.t, err)
	return &partitionTracker{
		partitions: parts,
		posted:     dl.PostSubmissions,
	}
}
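// count returns the number of partitions recorded as posted so far.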
func (p *partitionTracker) count(t *testing.T) uint64 {
	pCnt, err := p.posted.Count()
	require.NoError(t, err)
	return pCnt
}
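// done reports whether every partition in the deadline has been posted.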
func (p *partitionTracker) done(t *testing.T) bool {
	return uint64(len(p.partitions)) == p.count(t)
}
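// recordIfPost marks the partitions proven by msg if it is a
// SubmitWindowedPoSt addressed to the tracked miner, and reports whether
// every partition in the deadline has now been posted.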
func (p *partitionTracker) recordIfPost(t *testing.T, bm *BlockMiner, msg *types.Message) (ret bool) {
	defer func() {
		ret = p.done(t)
	}()
	if msg.To != bm.miner.ActorAddr {
		return
	}
	if msg.Method != builtin.MethodsMiner.SubmitWindowedPoSt {
		return
	}
	params := minertypes.SubmitWindowedPoStParams{}
	require.NoError(t, params.UnmarshalCBOR(bytes.NewReader(msg.Params)))
	for _, part := range params.Partitions {
		p.posted.Set(part.Index)
	}
	return
}
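// forcePoSt ensures a window PoSt for the given deadline has been observed
// before mining continues: it checks the on-chain deadline state, pending
// mempool messages and the messages already included in ts, and otherwise
// blocks until a SubmitWindowedPoSt covering every partition arrives via
// mempool events (or the context is cancelled).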
func (bm *BlockMiner) forcePoSt(ctx context.Context, ts *types.TipSet, dlinfo *dline.Info) {
	tracker := newPartitionTracker(ctx, dlinfo.Index, bm)
	if !tracker.done(bm.t) { // need to wait for post
		bm.t.Logf("expect %d partitions proved but only see %d", len(tracker.partitions), tracker.count(bm.t))
		poolEvts, err := bm.miner.FullNode.MpoolSub(ctx) // subscribe before checking pending so we don't miss any events
		require.NoError(bm.t, err)

		// First check pending messages we'll mine this epoch
		msgs, err := bm.miner.FullNode.MpoolPending(ctx, types.EmptyTSK)
		require.NoError(bm.t, err)
		for _, msg := range msgs {
			if tracker.recordIfPost(bm.t, bm, &msg.Message) {
				fmt.Printf("found post in mempool pending\n")
			}
		}

		// Account for included but not yet executed messages
		for _, bc := range ts.Cids() {
			msgs, err := bm.miner.FullNode.ChainGetBlockMessages(ctx, bc)
			require.NoError(bm.t, err)
			for _, msg := range msgs.BlsMessages {
				if tracker.recordIfPost(bm.t, bm, msg) {
					fmt.Printf("found post in message of prev tipset\n")
				}
			}
			for _, msg := range msgs.SecpkMessages {
				if tracker.recordIfPost(bm.t, bm, &msg.Message) {
					fmt.Printf("found post in message of prev tipset\n")
				}
			}
		}

		// post not yet in mpool, wait for it
		if !tracker.done(bm.t) {
			bm.t.Logf("post missing from mpool, block mining suspended until it arrives")
		POOL:
			for {
				bm.t.Logf("mpool event wait loop at block height %d, ts: %s", ts.Height(), ts.Key())
				select {
				case <-ctx.Done():
					return
				case evt := <-poolEvts:
					bm.t.Logf("pool event: %d", evt.Type)
					if evt.Type == api.MpoolAdd {
						bm.t.Logf("incoming message %v", evt.Message)
						if tracker.recordIfPost(bm.t, bm, &evt.Message.Message) {
							fmt.Printf("found post in mempool evt\n")
							break POOL
						}
					}
				}
			}
			bm.t.Logf("done waiting on mpool")
		}
	}
}
// MineBlocksMustPost is like MineBlocks but refuses to mine until the window post scheduler has
// wdpost messages in the mempool, and everything shuts down if a post fails. It also enforces
// that every block mined succeeds.
func (bm *BlockMiner) MineBlocksMustPost(ctx context.Context, blocktime time.Duration) {
	time.Sleep(time.Second)

	// wrap context in a cancellable context.
	ctx, bm.cancel = context.WithCancel(ctx)
	bm.wg.Add(1)
	go func() {
		defer bm.wg.Done()

		ts, err := bm.miner.FullNode.ChainHead(ctx)
		require.NoError(bm.t, err)
		wait := make(chan bool)
		chg, err := bm.miner.FullNode.ChainNotify(ctx)
		require.NoError(bm.t, err)
		// read current out
		curr := <-chg
		require.Equal(bm.t, ts.Height(), curr[0].Val.Height(), "failed sanity check: are multiple miners mining with must post?")

		for {
			select {
			case <-time.After(blocktime):
			case <-ctx.Done():
				return
			}

			nulls := atomic.SwapInt64(&bm.nextNulls, 0)

			// Wake up and figure out if we are at the end of an active deadline
			ts, err := bm.miner.FullNode.ChainHead(ctx)
			require.NoError(bm.t, err)

			dlinfo, err := bm.miner.FullNode.StateMinerProvingDeadline(ctx, bm.miner.ActorAddr, ts.Key())
			require.NoError(bm.t, err)
			if ts.Height()+1+abi.ChainEpoch(nulls) >= dlinfo.Last() { // Next block brings us past the last epoch in dline, we need to wait for miner to post
				bm.forcePoSt(ctx, ts, dlinfo)
			}

			var target abi.ChainEpoch
			reportSuccessFn := func(success bool, epoch abi.ChainEpoch, err error) {
				// if api shuts down before mining, we may get an error which we should probably just ignore
				// (fixing it will require rewriting most of the mining loop)
				if err != nil && !strings.Contains(err.Error(), "websocket connection closed") && !api.ErrorIsIn(err, []error{new(jsonrpc.RPCConnectionError)}) {
					require.NoError(bm.t, err)
				}

				target = epoch
				wait <- success
			}

			var success bool
			for i := int64(0); !success; i++ {
				err = bm.miner.MineOne(ctx, miner.MineReq{
					InjectNulls: abi.ChainEpoch(nulls + i),
					Done:        reportSuccessFn,
				})
				select {
				case success = <-wait:
				case <-ctx.Done():
					return
				}
				if !success {
					// if we are mining a new null block and it brings us past deadline boundary we need to wait for miner to post
					if ts.Height()+1+abi.ChainEpoch(nulls+i) >= dlinfo.Last() {
						bm.forcePoSt(ctx, ts, dlinfo)
					}
				}
			}

			// Wait until it shows up on the given full node's ChainHead
			// TODO this replicates a flaky condition from MineUntil,
			// it would be better to use the api to wait for sync,
			// but currently this is a bit difficult
			// and flaky failure is easy to debug and retry
			nloops := 200
			for i := 0; i < nloops; i++ {
				ts, err := bm.miner.FullNode.ChainHead(ctx)
				require.NoError(bm.t, err)
				if ts.Height() == target {
					break
				}

				require.NotEqual(bm.t, i, nloops-1, "block never managed to sync to node")
				time.Sleep(time.Millisecond * 10)
			}

			switch {
			case err == nil: // wrap around
			case ctx.Err() != nil: // context fired.
				return
			default: // log error
				bm.t.Error(err)
			}
		}
	}()
}
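// MineBlocks mines one block per blocktime on a background goroutine until
// the context is cancelled or Stop is called. Null rounds requested via
// InjectNulls are injected into the next round, and Pause/Restart suspend
// and resume the loop.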
func (bm *BlockMiner) MineBlocks(ctx context.Context, blocktime time.Duration) {
	time.Sleep(time.Second)

	// wrap context in a cancellable context.
	ctx, bm.cancel = context.WithCancel(ctx)
	bm.wg.Add(1)
	go func() {
		defer bm.wg.Done()

		for {
			select {
			case <-bm.pause:
				select {
				case <-bm.unpause:
				case <-ctx.Done():
					return
				}
			case <-ctx.Done():
				return
			default:
			}

			select {
			case <-time.After(blocktime):
			case <-ctx.Done():
				return
			}

			nulls := atomic.SwapInt64(&bm.nextNulls, 0)
			err := bm.miner.MineOne(ctx, miner.MineReq{
				InjectNulls: abi.ChainEpoch(nulls),
				Done:        func(bool, abi.ChainEpoch, error) {},
			})
			switch {
			case err == nil: // wrap around
			case ctx.Err() != nil: // context fired.
				return
			default: // log error
				bm.t.Error(err)
			}
		}
	}()
}
// InjectNulls injects the specified number of null rounds into the next
// mining rounds.
func (bm *BlockMiner) InjectNulls(rounds abi.ChainEpoch) {
	atomic.AddInt64(&bm.nextNulls, int64(rounds))
}
// Pause compels the miner to wait for a signal to restart.
func (bm *BlockMiner) Pause() {
	bm.pause <- struct{}{}
}

// Restart continues mining after a pause. This will hang if called before Pause.
func (bm *BlockMiner) Restart() {
	bm.unpause <- struct{}{}
}
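// MineUntilBlock mines rounds until one block is successfully produced and
// visible on the given full node's ChainHead, then invokes cb (if non-nil)
// with the block's epoch. It fails the test after 1000 unsuccessful attempts.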
func (bm *BlockMiner) MineUntilBlock(ctx context.Context, fn *TestFullNode, cb func(abi.ChainEpoch)) {
	for i := 0; i < 1000; i++ {
		var (
			success bool
			err     error
			epoch   abi.ChainEpoch
			wait    = make(chan struct{})
		)

		doneFn := func(win bool, ep abi.ChainEpoch, e error) {
			success = win
			err = e
			epoch = ep
			wait <- struct{}{}
		}

		mineErr := bm.miner.MineOne(ctx, miner.MineReq{Done: doneFn})
		require.NoError(bm.t, mineErr)
		<-wait
		require.NoError(bm.t, err)

		if success {
			// Wait until it shows up on the given full node's ChainHead
			nloops := 200
			for i := 0; i < nloops; i++ {
				ts, err := fn.ChainHead(ctx)
				require.NoError(bm.t, err)
				if ts.Height() == epoch {
					break
				}

				require.NotEqual(bm.t, i, nloops-1, "block never managed to sync to node")
				time.Sleep(time.Millisecond * 10)
			}

			if cb != nil {
				cb(epoch)
			}
			return
		}
		bm.t.Log("did not mine a block, trying again", i)
	}
	bm.t.Fatal("failed to mine a block in 1000 attempts")
}
// Stop stops the block miner.
func (bm *BlockMiner) Stop() {
	bm.t.Log("shutting down mining")
	bm.cancel()
	bm.wg.Wait()
	if bm.unpause != nil {
		close(bm.unpause)
		bm.unpause = nil
	}
	if bm.pause != nil {
		close(bm.pause)
		bm.pause = nil
	}
}