// stm: #unit
package wdpost
|
2020-09-09 13:01:37 +00:00
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
|
|
|
"context"
|
|
|
|
"testing"
|
|
|
|
|
2022-06-14 15:00:51 +00:00
|
|
|
"github.com/ipfs/go-cid"
|
2020-09-09 13:01:37 +00:00
|
|
|
"github.com/stretchr/testify/require"
|
2021-01-22 21:02:54 +00:00
|
|
|
"golang.org/x/xerrors"
|
2020-09-09 13:01:37 +00:00
|
|
|
|
2020-09-21 07:52:57 +00:00
|
|
|
"github.com/filecoin-project/go-address"
|
|
|
|
"github.com/filecoin-project/go-bitfield"
|
2020-09-09 13:01:37 +00:00
|
|
|
"github.com/filecoin-project/go-state-types/abi"
|
2022-09-06 15:49:29 +00:00
|
|
|
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
2020-09-21 07:52:57 +00:00
|
|
|
"github.com/filecoin-project/go-state-types/big"
|
2022-06-14 15:00:51 +00:00
|
|
|
"github.com/filecoin-project/go-state-types/builtin"
|
2022-09-06 15:49:29 +00:00
|
|
|
minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner"
|
2020-09-09 13:01:37 +00:00
|
|
|
"github.com/filecoin-project/go-state-types/crypto"
|
2020-09-21 07:52:57 +00:00
|
|
|
"github.com/filecoin-project/go-state-types/dline"
|
2020-10-28 14:10:43 +00:00
|
|
|
"github.com/filecoin-project/go-state-types/network"
|
2022-06-14 15:00:51 +00:00
|
|
|
prooftypes "github.com/filecoin-project/go-state-types/proof"
|
|
|
|
tutils "github.com/filecoin-project/specs-actors/v2/support/testing"
|
2022-01-18 10:37:15 +00:00
|
|
|
|
2020-09-21 07:52:57 +00:00
|
|
|
"github.com/filecoin-project/lotus/api"
|
2020-10-28 14:10:43 +00:00
|
|
|
"github.com/filecoin-project/lotus/build"
|
2022-06-14 15:00:51 +00:00
|
|
|
"github.com/filecoin-project/lotus/chain/actors"
|
2021-06-16 23:39:23 +00:00
|
|
|
"github.com/filecoin-project/lotus/chain/actors/policy"
|
2020-09-21 07:52:57 +00:00
|
|
|
"github.com/filecoin-project/lotus/chain/types"
|
2020-10-09 19:52:04 +00:00
|
|
|
"github.com/filecoin-project/lotus/journal"
|
2022-06-14 17:32:29 +00:00
|
|
|
"github.com/filecoin-project/lotus/storage/ctladdr"
|
2022-06-14 18:03:38 +00:00
|
|
|
"github.com/filecoin-project/lotus/storage/sealer/storiface"
|
2020-09-09 13:01:37 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// mockStorageMinerAPI is a test double for the storage-miner node API used by
// the window PoST scheduler. Partitions returned by StateMinerPartitions are
// injected via setPartitions, and every message pushed through
// MpoolPushMessage is delivered on pushedMessages for the test to inspect.
type mockStorageMinerAPI struct {
	// partitions is returned verbatim by StateMinerPartitions.
	partitions []api.Partition
	// pushedMessages receives each message sent via MpoolPushMessage
	// (unbuffered — see newMockStorageMinerAPI).
	pushedMessages chan *types.Message
	// NodeAPI is embedded so unimplemented methods satisfy the interface;
	// calling one of them will use the embedded (likely nil) value.
	NodeAPI
}
|
|
|
|
|
|
|
|
func newMockStorageMinerAPI() *mockStorageMinerAPI {
|
|
|
|
return &mockStorageMinerAPI{
|
|
|
|
pushedMessages: make(chan *types.Message),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-04-20 21:34:28 +00:00
|
|
|
func (m *mockStorageMinerAPI) StateMinerInfo(ctx context.Context, a address.Address, key types.TipSetKey) (api.MinerInfo, error) {
|
|
|
|
return api.MinerInfo{
|
2020-09-21 07:52:57 +00:00
|
|
|
Worker: tutils.NewIDAddr(nil, 101),
|
|
|
|
Owner: tutils.NewIDAddr(nil, 101),
|
|
|
|
}, nil
|
2020-09-18 18:34:23 +00:00
|
|
|
}
|
|
|
|
|
2020-10-07 17:41:07 +00:00
|
|
|
// StateNetworkVersion always reports the newest network version known to this
// build, regardless of the tipset key.
func (m *mockStorageMinerAPI) StateNetworkVersion(ctx context.Context, key types.TipSetKey) (network.Version, error) {
	return build.NewestNetworkVersion, nil
}
|
|
|
|
|
2021-09-12 02:24:53 +00:00
|
|
|
// StateGetRandomnessFromTickets returns a fixed byte string so test output is
// deterministic; all parameters are ignored.
func (m *mockStorageMinerAPI) StateGetRandomnessFromTickets(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) {
	return abi.Randomness("ticket rand"), nil
}
|
|
|
|
|
2021-09-12 02:24:53 +00:00
|
|
|
// StateGetRandomnessFromBeacon returns a fixed byte string so test output is
// deterministic; all parameters are ignored.
func (m *mockStorageMinerAPI) StateGetRandomnessFromBeacon(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) {
	return abi.Randomness("beacon rand"), nil
}
|
|
|
|
|
2020-09-18 18:34:23 +00:00
|
|
|
func (m *mockStorageMinerAPI) setPartitions(ps []api.Partition) {
|
2020-09-09 13:01:37 +00:00
|
|
|
m.partitions = append(m.partitions, ps...)
|
|
|
|
}
|
|
|
|
|
2020-09-18 18:34:23 +00:00
|
|
|
// StateMinerPartitions returns the partitions previously injected via
// setPartitions; the deadline index and tipset key are ignored.
func (m *mockStorageMinerAPI) StateMinerPartitions(ctx context.Context, a address.Address, dlIdx uint64, tsk types.TipSetKey) ([]api.Partition, error) {
	return m.partitions, nil
}
|
|
|
|
|
2022-04-20 21:34:28 +00:00
|
|
|
func (m *mockStorageMinerAPI) StateMinerSectors(ctx context.Context, address address.Address, snos *bitfield.BitField, key types.TipSetKey) ([]*minertypes.SectorOnChainInfo, error) {
|
|
|
|
var sis []*minertypes.SectorOnChainInfo
|
2020-09-21 19:05:01 +00:00
|
|
|
if snos == nil {
|
|
|
|
panic("unsupported")
|
|
|
|
}
|
|
|
|
_ = snos.ForEach(func(i uint64) error {
|
2022-04-20 21:34:28 +00:00
|
|
|
sis = append(sis, &minertypes.SectorOnChainInfo{
|
2020-09-21 19:05:01 +00:00
|
|
|
SectorNumber: abi.SectorNumber(i),
|
2020-09-09 13:01:37 +00:00
|
|
|
})
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
return sis, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// MpoolPushMessage hands the message to the test via the pushedMessages
// channel and echoes it back wrapped in a SignedMessage (with an empty
// signature). The send blocks until the test receives, because the channel is
// unbuffered — tests must therefore drain pushedMessages concurrently.
func (m *mockStorageMinerAPI) MpoolPushMessage(ctx context.Context, message *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) {
	m.pushedMessages <- message
	return &types.SignedMessage{
		Message: *message,
	}, nil
}
|
|
|
|
|
2021-04-05 19:34:03 +00:00
|
|
|
func (m *mockStorageMinerAPI) StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) {
|
2020-09-09 13:01:37 +00:00
|
|
|
return &api.MsgLookup{
|
|
|
|
Receipt: types.MessageReceipt{
|
|
|
|
ExitCode: 0,
|
|
|
|
},
|
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
2020-11-19 17:57:43 +00:00
|
|
|
// GasEstimateGasPremium always estimates a zero premium, keeping test messages
// free of gas pricing effects.
func (m *mockStorageMinerAPI) GasEstimateGasPremium(_ context.Context, nblocksincl uint64, sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error) {
	return big.Zero(), nil
}
|
|
|
|
|
|
|
|
// GasEstimateFeeCap always estimates a zero fee cap, keeping test messages
// free of gas pricing effects.
func (m *mockStorageMinerAPI) GasEstimateFeeCap(context.Context, *types.Message, int64, types.TipSetKey) (types.BigInt, error) {
	return big.Zero(), nil
}
|
|
|
|
|
2020-09-09 13:01:37 +00:00
|
|
|
// mockProver is a stateless stub prover. Only GenerateWindowPoSt is
// implemented (returning a canned proof); the other methods panic if called.
type mockProver struct {
}
|
|
|
|
|
2022-04-20 21:34:28 +00:00
|
|
|
// GenerateWinningPoStWithVanilla is not implemented; it panics if called.
func (m *mockProver) GenerateWinningPoStWithVanilla(ctx context.Context, proofType abi.RegisteredPoStProof, minerID abi.ActorID, randomness abi.PoStRandomness, proofs [][]byte) ([]prooftypes.PoStProof, error) {
	panic("implement me")
}
|
|
|
|
|
2022-04-20 21:34:28 +00:00
|
|
|
// GenerateWindowPoStWithVanilla is not implemented; it panics if called.
func (m *mockProver) GenerateWindowPoStWithVanilla(ctx context.Context, proofType abi.RegisteredPoStProof, minerID abi.ActorID, randomness abi.PoStRandomness, proofs [][]byte, partitionIdx int) (prooftypes.PoStProof, error) {
	panic("implement me")
}
|
|
|
|
|
2022-04-20 21:34:28 +00:00
|
|
|
// GenerateWinningPoSt is not implemented; it panics if called.
func (m *mockProver) GenerateWinningPoSt(context.Context, abi.ActorID, []prooftypes.ExtendedSectorInfo, abi.PoStRandomness) ([]prooftypes.PoStProof, error) {
	panic("implement me")
}
|
|
|
|
|
2022-04-20 21:34:28 +00:00
|
|
|
func (m *mockProver) GenerateWindowPoSt(ctx context.Context, aid abi.ActorID, sis []prooftypes.ExtendedSectorInfo, pr abi.PoStRandomness) ([]prooftypes.PoStProof, []abi.SectorID, error) {
|
|
|
|
return []prooftypes.PoStProof{
|
2020-09-09 13:01:37 +00:00
|
|
|
{
|
|
|
|
PoStProof: abi.RegisteredPoStProof_StackedDrgWindow2KiBV1,
|
|
|
|
ProofBytes: []byte("post-proof"),
|
|
|
|
},
|
|
|
|
}, nil, nil
|
|
|
|
}
|
|
|
|
|
2021-01-22 21:02:54 +00:00
|
|
|
// mockVerif is a stateless stub verifier. Only VerifyWindowPoSt is
// implemented (accepting mockProver's canned proof); the other methods panic
// if called.
type mockVerif struct {
}
|
|
|
|
|
2022-04-20 21:34:28 +00:00
|
|
|
// VerifyWinningPoSt is not implemented; it panics if called.
func (m mockVerif) VerifyWinningPoSt(ctx context.Context, info prooftypes.WinningPoStVerifyInfo) (bool, error) {
	panic("implement me")
}
|
|
|
|
|
2022-04-20 21:34:28 +00:00
|
|
|
func (m mockVerif) VerifyWindowPoSt(ctx context.Context, info prooftypes.WindowPoStVerifyInfo) (bool, error) {
|
2021-01-22 21:02:54 +00:00
|
|
|
if len(info.Proofs) != 1 {
|
|
|
|
return false, xerrors.Errorf("expected 1 proof entry")
|
|
|
|
}
|
|
|
|
|
|
|
|
proof := info.Proofs[0]
|
|
|
|
|
|
|
|
if !bytes.Equal(proof.ProofBytes, []byte("post-proof")) {
|
|
|
|
return false, xerrors.Errorf("bad proof")
|
|
|
|
}
|
|
|
|
return true, nil
|
|
|
|
}
|
|
|
|
|
2022-04-20 21:34:28 +00:00
|
|
|
// VerifyAggregateSeals is not implemented; it panics if called.
func (m mockVerif) VerifyAggregateSeals(aggregate prooftypes.AggregateSealVerifyProofAndInfos) (bool, error) {
	panic("implement me")
}
|
|
|
|
|
2022-04-20 21:34:28 +00:00
|
|
|
// VerifyReplicaUpdate is not implemented; it panics if called.
func (m mockVerif) VerifyReplicaUpdate(update prooftypes.ReplicaUpdateInfo) (bool, error) {
	panic("implement me")
}
|
|
|
|
|
2022-04-20 21:34:28 +00:00
|
|
|
// VerifySeal is not implemented; it panics if called.
func (m mockVerif) VerifySeal(prooftypes.SealVerifyInfo) (bool, error) {
	panic("implement me")
}
|
|
|
|
|
|
|
|
// GenerateWinningPoStSectorChallenge is not implemented; it panics if called.
func (m mockVerif) GenerateWinningPoStSectorChallenge(context.Context, abi.RegisteredPoStProof, abi.ActorID, abi.PoStRandomness, uint64) ([]uint64, error) {
	panic("implement me")
}
|
|
|
|
|
2020-09-09 13:01:37 +00:00
|
|
|
// mockFaultTracker is a stub fault tracker whose CheckProvable reports every
// sector as provable.
type mockFaultTracker struct {
}
|
|
|
|
|
2022-06-17 11:31:05 +00:00
|
|
|
// CheckProvable reports unprovable ("bad") sectors keyed by sector ID.
// Returning an empty map declares every supplied sector provable.
func (m mockFaultTracker) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storiface.SectorRef, rg storiface.RGetter) (map[abi.SectorID]string, error) {
	// The result lists "bad" sectors, so an empty map means all sectors are good.
	return map[abi.SectorID]string{}, nil
}
|
|
|
|
|
2022-10-04 18:33:18 +00:00
|
|
|
func generatePartition(sectorCount uint64, recoverySectorCount uint64) api.Partition {
|
|
|
|
var partition api.Partition
|
|
|
|
sectors := bitfield.New()
|
|
|
|
recoverySectors := bitfield.New()
|
|
|
|
for s := uint64(0); s < sectorCount; s++ {
|
|
|
|
sectors.Set(s)
|
|
|
|
}
|
|
|
|
for s := uint64(0); s < recoverySectorCount; s++ {
|
|
|
|
recoverySectors.Set(s)
|
|
|
|
}
|
|
|
|
partition = api.Partition{
|
|
|
|
AllSectors: sectors,
|
|
|
|
FaultySectors: bitfield.New(),
|
|
|
|
RecoveringSectors: recoverySectors,
|
|
|
|
LiveSectors: sectors,
|
|
|
|
ActiveSectors: sectors,
|
|
|
|
}
|
|
|
|
return partition
|
|
|
|
}
|
|
|
|
|
2020-09-09 13:01:37 +00:00
|
|
|
// TestWDPostDoPost verifies that doPost will send the correct number of window
// PoST messages for a given number of partitions
func TestWDPostDoPost(t *testing.T) {
	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
	//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
	//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
	ctx := context.Background()
	expectedMsgCount := 5

	proofType := abi.RegisteredPoStProof_StackedDrgWindow2KiBV1
	postAct := tutils.NewIDAddr(t, 100)

	mockStgMinerAPI := newMockStorageMinerAPI()

	// Get the number of sectors allowed in a partition for this proof type
	sectorsPerPartition, err := builtin.PoStProofWindowPoStPartitionSectors(proofType)
	require.NoError(t, err)
	// Work out the number of partitions that can be included in a message
	// without exceeding the message sector limit

	//stm: @BLOCKCHAIN_POLICY_GET_MAX_POST_PARTITIONS_001
	partitionsPerMsg, err := policy.GetMaxPoStPartitions(network.Version13, proofType)
	require.NoError(t, err)
	if partitionsPerMsg > minertypes.AddressedPartitionsMax {
		partitionsPerMsg = minertypes.AddressedPartitionsMax
	}

	// Enough partitions to fill expectedMsgCount-1 messages
	partitionCount := (expectedMsgCount - 1) * partitionsPerMsg
	// Add an extra partition that should be included in the last message
	partitionCount++

	// Build fully-live, fault-free partitions, each packed with the maximum
	// number of sectors for this proof type.
	var partitions []api.Partition
	for p := 0; p < partitionCount; p++ {
		sectors := bitfield.New()
		for s := uint64(0); s < sectorsPerPartition; s++ {
			sectors.Set(s)
		}
		partitions = append(partitions, api.Partition{
			AllSectors:        sectors,
			FaultySectors:     bitfield.New(),
			RecoveringSectors: bitfield.New(),
			LiveSectors:       sectors,
			ActiveSectors:     sectors,
		})
	}
	mockStgMinerAPI.setPartitions(partitions)

	// Run window PoST
	scheduler := &WindowPoStScheduler{
		api:          mockStgMinerAPI,
		prover:       &mockProver{},
		verifier:     &mockVerif{},
		faultTracker: &mockFaultTracker{},
		proofType:    proofType,
		actor:        postAct,
		journal:      journal.NilJournal(),
		addrSel:      &ctladdr.AddressSelector{},
	}

	di := &dline.Info{
		WPoStPeriodDeadlines:   minertypes.WPoStPeriodDeadlines,
		WPoStProvingPeriod:     minertypes.WPoStProvingPeriod,
		WPoStChallengeWindow:   minertypes.WPoStChallengeWindow,
		WPoStChallengeLookback: minertypes.WPoStChallengeLookback,
		FaultDeclarationCutoff: minertypes.FaultDeclarationCutoff,
	}
	ts := mockTipSet(t)

	// Generate the PoSTs and submit them as they complete; submission runs
	// asynchronously while the loop below drains the pushed messages.
	scheduler.startGeneratePoST(ctx, ts, di, func(posts []minertypes.SubmitWindowedPoStParams, err error) {
		scheduler.startSubmitPoST(ctx, ts, di, posts, func(err error) {})
	})

	// Read the window PoST messages
	for i := 0; i < expectedMsgCount; i++ {
		msg := <-mockStgMinerAPI.pushedMessages
		require.Equal(t, builtin.MethodsMiner.SubmitWindowedPoSt, msg.Method)
		var params minertypes.SubmitWindowedPoStParams
		err := params.UnmarshalCBOR(bytes.NewReader(msg.Params))
		require.NoError(t, err)

		if i == expectedMsgCount-1 {
			// In the last message we only included a single partition (see above)
			require.Len(t, params.Partitions, 1)
		} else {
			// All previous messages should include the full number of partitions
			require.Len(t, params.Partitions, partitionsPerMsg)
		}
	}
}
|
|
|
|
|
2022-07-07 14:52:22 +00:00
|
|
|
// TestWDPostDoPostPartLimitConfig verifies that doPost will send the correct number of window
// PoST messages for a given number of partitions based on user config
func TestWDPostDoPostPartLimitConfig(t *testing.T) {
	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
	//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
	//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
	ctx := context.Background()
	expectedMsgCount := 364

	proofType := abi.RegisteredPoStProof_StackedDrgWindow2KiBV1
	postAct := tutils.NewIDAddr(t, 100)

	mockStgMinerAPI := newMockStorageMinerAPI()

	// Get the number of sectors allowed in a partition for this proof type
	sectorsPerPartition, err := builtin.PoStProofWindowPoStPartitionSectors(proofType)
	require.NoError(t, err)
	// Work out the number of partitions that can be included in a message
	// without exceeding the message sector limit

	//stm: @BLOCKCHAIN_POLICY_GET_MAX_POST_PARTITIONS_001
	partitionsPerMsg, err := policy.GetMaxPoStPartitions(network.Version13, proofType)
	require.NoError(t, err)
	if partitionsPerMsg > minertypes.AddressedPartitionsMax {
		partitionsPerMsg = minertypes.AddressedPartitionsMax
	}

	partitionCount := 4 * partitionsPerMsg

	// Assert that user config is less than network limit
	userPartLimit := 33
	lastMsgParts := 21
	require.Greater(t, partitionCount, userPartLimit)

	// Assert that the constants above are consistent: expectedMsgCount-1 full
	// messages of userPartLimit partitions plus a final message of
	// lastMsgParts partitions account for every partition.
	require.Equal(t, (expectedMsgCount-1)*userPartLimit+lastMsgParts, 4*partitionsPerMsg)

	// Build fully-live, fault-free partitions, each packed with the maximum
	// number of sectors for this proof type.
	var partitions []api.Partition
	for p := 0; p < partitionCount; p++ {
		sectors := bitfield.New()
		for s := uint64(0); s < sectorsPerPartition; s++ {
			sectors.Set(s)
		}
		partitions = append(partitions, api.Partition{
			AllSectors:        sectors,
			FaultySectors:     bitfield.New(),
			RecoveringSectors: bitfield.New(),
			LiveSectors:       sectors,
			ActiveSectors:     sectors,
		})
	}
	mockStgMinerAPI.setPartitions(partitions)

	// Run window PoST with a user-configured per-message partition limit.
	scheduler := &WindowPoStScheduler{
		api:          mockStgMinerAPI,
		prover:       &mockProver{},
		verifier:     &mockVerif{},
		faultTracker: &mockFaultTracker{},
		proofType:    proofType,
		actor:        postAct,
		journal:      journal.NilJournal(),
		addrSel:      &ctladdr.AddressSelector{},

		maxPartitionsPerPostMessage: userPartLimit,
	}

	di := &dline.Info{
		WPoStPeriodDeadlines:   minertypes.WPoStPeriodDeadlines,
		WPoStProvingPeriod:     minertypes.WPoStProvingPeriod,
		WPoStChallengeWindow:   minertypes.WPoStChallengeWindow,
		WPoStChallengeLookback: minertypes.WPoStChallengeLookback,
		FaultDeclarationCutoff: minertypes.FaultDeclarationCutoff,
	}
	ts := mockTipSet(t)

	// Generate the PoSTs and submit them as they complete; submission runs
	// asynchronously while the loop below drains the pushed messages.
	scheduler.startGeneratePoST(ctx, ts, di, func(posts []minertypes.SubmitWindowedPoStParams, err error) {
		scheduler.startSubmitPoST(ctx, ts, di, posts, func(err error) {})
	})

	// Read the window PoST messages
	for i := 0; i < expectedMsgCount; i++ {
		msg := <-mockStgMinerAPI.pushedMessages
		require.Equal(t, builtin.MethodsMiner.SubmitWindowedPoSt, msg.Method)
		var params minertypes.SubmitWindowedPoStParams
		err := params.UnmarshalCBOR(bytes.NewReader(msg.Params))
		require.NoError(t, err)

		if i == expectedMsgCount-1 {
			// The last message carries only the remaining lastMsgParts partitions
			require.Len(t, params.Partitions, lastMsgParts)
		} else {
			// All previous messages should include the full number of partitions
			require.Len(t, params.Partitions, userPartLimit)
		}
	}
}
|
|
|
|
|
2022-10-04 18:33:18 +00:00
|
|
|
// TestBatchPartitionsRecoverySectors tests if the batches with recovery sectors
// contain only single partitions while keeping all the partitions in order
func TestBatchPartitionsRecoverySectors(t *testing.T) {

	proofType := abi.RegisteredPoStProof_StackedDrgWindow2KiBV1
	postAct := tutils.NewIDAddr(t, 100)

	mockStgMinerAPI := newMockStorageMinerAPI()

	userPartLimit := 4

	// singleRecoveringPartitionPerPostMessage forces each partition that has
	// recovering sectors into its own batch.
	scheduler := &WindowPoStScheduler{
		api:          mockStgMinerAPI,
		prover:       &mockProver{},
		verifier:     &mockVerif{},
		faultTracker: &mockFaultTracker{},
		proofType:    proofType,
		actor:        postAct,
		journal:      journal.NilJournal(),
		addrSel:      &ctladdr.AddressSelector{},

		maxPartitionsPerPostMessage:             userPartLimit,
		singleRecoveringPartitionPerPostMessage: true,
	}

	// Layout: 4 healthy, 2 recovering, 6 healthy, 1 recovering — 13 in total.
	var partitions []api.Partition
	for p := 0; p < 4; p++ {
		partitions = append(partitions, generatePartition(100, 0))
	}
	for p := 0; p < 2; p++ {
		partitions = append(partitions, generatePartition(100, 10))
	}
	for p := 0; p < 6; p++ {
		partitions = append(partitions, generatePartition(100, 0))
	}
	partitions = append(partitions, generatePartition(100, 10))

	// Expected batching: the 4 healthy partitions batch together (limit 4);
	// each recovering partition is isolated; the 6 healthy ones split 4+2.
	expectedBatchLens := []int{4, 1, 1, 4, 2, 1}

	batches, err := scheduler.BatchPartitions(partitions, network.Version16)
	require.NoError(t, err)

	require.Equal(t, len(batches), 6)

	for i, batch := range batches {
		require.Equal(t, len(batch), expectedBatchLens[i])
	}
}
|
|
|
|
|
2022-07-07 14:52:22 +00:00
|
|
|
// TestWDPostDeclareRecoveriesPartLimitConfig verifies that declareRecoveries will send the correct number of
|
|
|
|
// DeclareFaultsRecovered messages for a given number of partitions based on user config
|
|
|
|
func TestWDPostDeclareRecoveriesPartLimitConfig(t *testing.T) {
|
|
|
|
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
|
|
|
|
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
|
|
|
|
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
|
|
|
|
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
|
|
|
|
ctx := context.Background()
|
|
|
|
|
|
|
|
proofType := abi.RegisteredPoStProof_StackedDrgWindow2KiBV1
|
|
|
|
postAct := tutils.NewIDAddr(t, 100)
|
|
|
|
|
|
|
|
mockStgMinerAPI := newMockStorageMinerAPI()
|
|
|
|
|
|
|
|
// Get the number of sectors allowed in a partition for this proof type
|
|
|
|
sectorsPerPartition, err := builtin.PoStProofWindowPoStPartitionSectors(proofType)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
// Let's have 11/20 partitions with faulty sectors, and a config of 3 partitions per message
|
|
|
|
userPartLimit := 3
|
|
|
|
partitionCount := 20
|
|
|
|
faultyPartitionCount := 11
|
|
|
|
|
|
|
|
var partitions []api.Partition
|
|
|
|
for p := 0; p < partitionCount; p++ {
|
|
|
|
sectors := bitfield.New()
|
|
|
|
for s := uint64(0); s < sectorsPerPartition; s++ {
|
|
|
|
sectors.Set(s)
|
|
|
|
}
|
|
|
|
|
|
|
|
partition := api.Partition{
|
|
|
|
AllSectors: sectors,
|
|
|
|
FaultySectors: bitfield.New(),
|
|
|
|
RecoveringSectors: bitfield.New(),
|
|
|
|
LiveSectors: sectors,
|
|
|
|
ActiveSectors: sectors,
|
|
|
|
}
|
|
|
|
|
|
|
|
if p < faultyPartitionCount {
|
|
|
|
partition.FaultySectors = sectors
|
|
|
|
}
|
|
|
|
|
|
|
|
partitions = append(partitions, partition)
|
|
|
|
}
|
|
|
|
|
|
|
|
mockStgMinerAPI.setPartitions(partitions)
|
|
|
|
|
|
|
|
// Run declareRecoverios
|
|
|
|
scheduler := &WindowPoStScheduler{
|
|
|
|
api: mockStgMinerAPI,
|
|
|
|
prover: &mockProver{},
|
|
|
|
verifier: &mockVerif{},
|
|
|
|
faultTracker: &mockFaultTracker{},
|
|
|
|
proofType: proofType,
|
|
|
|
actor: postAct,
|
|
|
|
journal: journal.NilJournal(),
|
|
|
|
addrSel: &ctladdr.AddressSelector{},
|
|
|
|
|
|
|
|
maxPartitionsPerRecoveryMessage: userPartLimit,
|
|
|
|
}
|
|
|
|
|
|
|
|
di := uint64(0)
|
|
|
|
ts := mockTipSet(t)
|
|
|
|
|
|
|
|
expectedMsgCount := faultyPartitionCount/userPartLimit + 1
|
|
|
|
lastMsgParts := faultyPartitionCount % userPartLimit
|
|
|
|
|
|
|
|
go func() {
|
|
|
|
batchedRecoveries, msgs, err := scheduler.declareRecoveries(ctx, di, partitions, ts.Key())
|
|
|
|
require.NoError(t, err, "failed to declare recoveries")
|
|
|
|
require.Equal(t, len(batchedRecoveries), len(msgs))
|
|
|
|
require.Equal(t, expectedMsgCount, len(msgs))
|
|
|
|
}()
|
|
|
|
|
|
|
|
// Read the DeclareFaultsRecovered messages pushed by declareRecoveries
|
|
|
|
for i := 0; i < expectedMsgCount; i++ {
|
|
|
|
msg := <-mockStgMinerAPI.pushedMessages
|
|
|
|
require.Equal(t, builtin.MethodsMiner.DeclareFaultsRecovered, msg.Method)
|
|
|
|
var params minertypes.DeclareFaultsRecoveredParams
|
|
|
|
err := params.UnmarshalCBOR(bytes.NewReader(msg.Params))
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
if i == expectedMsgCount-1 {
|
|
|
|
// The last message only includes the remaining partitions (faultyPartitionCount % userPartLimit)
|
|
|
|
require.Len(t, params.Recoveries, lastMsgParts)
|
|
|
|
} else {
|
|
|
|
// All previous messages should include the full number of partitions
|
|
|
|
require.Len(t, params.Recoveries, userPartLimit)
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-09-09 13:01:37 +00:00
|
|
|
func mockTipSet(t *testing.T) *types.TipSet {
|
|
|
|
minerAct := tutils.NewActorAddr(t, "miner")
|
|
|
|
c, err := cid.Decode("QmbFMke1KXqnYyBBWxB74N4c5SBnJMVAiMNRcGu6x1AwQH")
|
|
|
|
require.NoError(t, err)
|
|
|
|
blks := []*types.BlockHeader{
|
|
|
|
{
|
|
|
|
Miner: minerAct,
|
|
|
|
Height: abi.ChainEpoch(1),
|
|
|
|
ParentStateRoot: c,
|
|
|
|
ParentMessageReceipts: c,
|
|
|
|
Messages: c,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
ts, err := types.NewTipSet(blks)
|
|
|
|
require.NoError(t, err)
|
|
|
|
return ts
|
|
|
|
}
|
|
|
|
|
|
|
|
//
|
|
|
|
// All the mock methods below here are unused
|
|
|
|
//
|
|
|
|
|
|
|
|
func (m *mockStorageMinerAPI) StateMinerProvingDeadline(ctx context.Context, address address.Address, key types.TipSetKey) (*dline.Info, error) {
|
2020-09-21 07:52:57 +00:00
|
|
|
return &dline.Info{
|
|
|
|
CurrentEpoch: 0,
|
|
|
|
PeriodStart: 0,
|
|
|
|
Index: 0,
|
|
|
|
Open: 0,
|
|
|
|
Close: 0,
|
|
|
|
Challenge: 0,
|
|
|
|
FaultCutoff: 0,
|
2022-04-20 21:34:28 +00:00
|
|
|
WPoStPeriodDeadlines: minertypes.WPoStPeriodDeadlines,
|
|
|
|
WPoStProvingPeriod: minertypes.WPoStProvingPeriod,
|
|
|
|
WPoStChallengeWindow: minertypes.WPoStChallengeWindow,
|
|
|
|
WPoStChallengeLookback: minertypes.WPoStChallengeLookback,
|
|
|
|
FaultDeclarationCutoff: minertypes.FaultDeclarationCutoff,
|
2020-09-21 07:52:57 +00:00
|
|
|
}, nil
|
2020-09-09 13:01:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (m *mockStorageMinerAPI) StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error) {
|
2022-09-06 15:49:29 +00:00
|
|
|
code, ok := actors.GetActorCodeID(actorstypes.Version7, actors.MinerKey)
|
2022-06-29 16:54:14 +00:00
|
|
|
if !ok {
|
|
|
|
return nil, xerrors.Errorf("failed to get miner actor code ID for actors version %d", actors.Version7)
|
2022-04-20 21:34:28 +00:00
|
|
|
}
|
2020-09-18 18:34:23 +00:00
|
|
|
return &types.Actor{
|
2022-04-20 21:34:28 +00:00
|
|
|
Code: code,
|
2020-09-18 18:34:23 +00:00
|
|
|
}, nil
|
2020-09-09 13:01:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (m *mockStorageMinerAPI) StateAccountKey(ctx context.Context, address address.Address, key types.TipSetKey) (address.Address, error) {
|
2020-09-21 07:52:57 +00:00
|
|
|
return address, nil
|
2020-09-09 13:01:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (m *mockStorageMinerAPI) GasEstimateMessageGas(ctx context.Context, message *types.Message, spec *api.MessageSendSpec, key types.TipSetKey) (*types.Message, error) {
|
2020-09-21 07:52:57 +00:00
|
|
|
msg := *message
|
|
|
|
msg.GasFeeCap = big.NewInt(1)
|
|
|
|
msg.GasPremium = big.NewInt(1)
|
|
|
|
msg.GasLimit = 2
|
|
|
|
return &msg, nil
|
2020-09-09 13:01:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// ChainHead is part of the NodeAPI mock surface; per the note above these
// trailing methods it is not exercised by the tests, so it returns a nil
// tipset and no error.
func (m *mockStorageMinerAPI) ChainHead(ctx context.Context) (*types.TipSet, error) {
	return nil, nil
}
|
|
|
|
|
|
|
|
// WalletSign is a stub signer: it returns a nil signature and no error.
// Callers in these tests do not inspect the signature contents.
func (m *mockStorageMinerAPI) WalletSign(ctx context.Context, address address.Address, bytes []byte) (*crypto.Signature, error) {
	return nil, nil
}
|
|
|
|
|
|
|
|
func (m *mockStorageMinerAPI) WalletBalance(ctx context.Context, address address.Address) (types.BigInt, error) {
|
2020-09-21 07:52:57 +00:00
|
|
|
return big.NewInt(333), nil
|
2020-09-09 13:01:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// WalletHas pretends the wallet holds a key for every address, always
// returning true with no error.
func (m *mockStorageMinerAPI) WalletHas(ctx context.Context, address address.Address) (bool, error) {
	return true, nil
}
|
2020-09-18 18:34:23 +00:00
|
|
|
|
2022-06-14 17:33:57 +00:00
|
|
|
// Compile-time assertion that mockStorageMinerAPI satisfies NodeAPI.
var _ NodeAPI = (*mockStorageMinerAPI)(nil)
|