lotus/storage/wdpost/wdpost_run_test.go

// stm: #unit
package wdpost

import (
"bytes"
"context"
"testing"

"github.com/ipfs/go-cid"
"github.com/stretchr/testify/require"
"golang.org/x/xerrors"

"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-state-types/abi"
actorstypes "github.com/filecoin-project/go-state-types/actors"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/builtin"
minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/dline"
"github.com/filecoin-project/go-state-types/manifest"
"github.com/filecoin-project/go-state-types/network"
prooftypes "github.com/filecoin-project/go-state-types/proof"
tutils "github.com/filecoin-project/specs-actors/v2/support/testing"

"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/journal"
"github.com/filecoin-project/lotus/storage/ctladdr"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
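// mockStorageMinerAPI stubs the node API used by the WindowPoSt scheduler.
// The embedded NodeAPI satisfies the interface; any method not overridden
// below panics with a nil pointer dereference if called.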
type mockStorageMinerAPI struct {
partitions []api.Partition
pushedMessages chan *types.Message

NodeAPI
}
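// newMockStorageMinerAPI returns a mock whose pushedMessages channel is
// unbuffered: MpoolPushMessage blocks until the test reads the message, so
// each test drains exactly the number of messages it expects.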
func newMockStorageMinerAPI() *mockStorageMinerAPI {
return &mockStorageMinerAPI{
pushedMessages: make(chan *types.Message),
}
}
func (m *mockStorageMinerAPI) StateMinerInfo(ctx context.Context, a address.Address, key types.TipSetKey) (api.MinerInfo, error) {
return api.MinerInfo{
Worker: tutils.NewIDAddr(nil, 101),
Owner: tutils.NewIDAddr(nil, 101),
}, nil
}
func (m *mockStorageMinerAPI) StateNetworkVersion(ctx context.Context, key types.TipSetKey) (network.Version, error) {
return build.TestNetworkVersion, nil
}
func (m *mockStorageMinerAPI) StateGetRandomnessFromTickets(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) {
return abi.Randomness("ticket rand"), nil
}
func (m *mockStorageMinerAPI) StateGetRandomnessFromBeacon(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) {
return abi.Randomness("beacon rand"), nil
}
func (m *mockStorageMinerAPI) setPartitions(ps []api.Partition) {
m.partitions = append(m.partitions, ps...)
}
func (m *mockStorageMinerAPI) StateMinerPartitions(ctx context.Context, a address.Address, dlIdx uint64, tsk types.TipSetKey) ([]api.Partition, error) {
return m.partitions, nil
}
func (m *mockStorageMinerAPI) StateMinerSectors(ctx context.Context, address address.Address, snos *bitfield.BitField, key types.TipSetKey) ([]*minertypes.SectorOnChainInfo, error) {
var sis []*minertypes.SectorOnChainInfo
if snos == nil {
panic("unsupported")
}
_ = snos.ForEach(func(i uint64) error {
sis = append(sis, &minertypes.SectorOnChainInfo{
SectorNumber: abi.SectorNumber(i),
})
return nil
})
return sis, nil
}
func (m *mockStorageMinerAPI) MpoolPushMessage(ctx context.Context, message *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) {
m.pushedMessages <- message
return &types.SignedMessage{
Message: *message,
}, nil
}
func (m *mockStorageMinerAPI) StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) {
return &api.MsgLookup{
Receipt: types.MessageReceipt{
ExitCode: 0,
},
}, nil
}
func (m *mockStorageMinerAPI) GasEstimateGasPremium(_ context.Context, nblocksincl uint64, sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error) {
return big.Zero(), nil
}
func (m *mockStorageMinerAPI) GasEstimateFeeCap(context.Context, *types.Message, int64, types.TipSetKey) (types.BigInt, error) {
return big.Zero(), nil
}
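// mockProver panics on every method except GenerateWindowPoSt, the only
// prover call these tests are expected to reach.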
type mockProver struct {
}
func (m *mockProver) GenerateWinningPoStWithVanilla(ctx context.Context, proofType abi.RegisteredPoStProof, minerID abi.ActorID, randomness abi.PoStRandomness, proofs [][]byte) ([]prooftypes.PoStProof, error) {
panic("implement me")
}
func (m *mockProver) GenerateWindowPoStWithVanilla(ctx context.Context, proofType abi.RegisteredPoStProof, minerID abi.ActorID, randomness abi.PoStRandomness, proofs [][]byte, partitionIdx int) (prooftypes.PoStProof, error) {
panic("implement me")
}
func (m *mockProver) GenerateWinningPoSt(context.Context, abi.ActorID, []prooftypes.ExtendedSectorInfo, abi.PoStRandomness) ([]prooftypes.PoStProof, error) {
panic("implement me")
}
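// GenerateWindowPoSt returns a canned proof whose "post-proof" bytes are what
// mockVerif.VerifyWindowPoSt expects, so generated proofs always verify here.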
func (m *mockProver) GenerateWindowPoSt(ctx context.Context, aid abi.ActorID, ppt abi.RegisteredPoStProof, sis []prooftypes.ExtendedSectorInfo, pr abi.PoStRandomness) ([]prooftypes.PoStProof, []abi.SectorID, error) {
return []prooftypes.PoStProof{
{
PoStProof: ppt,
ProofBytes: []byte("post-proof"),
},
}, nil, nil
}
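// mockVerif accepts exactly the canned "post-proof" bytes produced by
// mockProver and panics on every other verifier method.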
type mockVerif struct {
}
func (m mockVerif) VerifyWinningPoSt(ctx context.Context, info prooftypes.WinningPoStVerifyInfo) (bool, error) {
panic("implement me")
}
func (m mockVerif) VerifyWindowPoSt(ctx context.Context, info prooftypes.WindowPoStVerifyInfo) (bool, error) {
if len(info.Proofs) != 1 {
return false, xerrors.Errorf("expected 1 proof entry")
}
proof := info.Proofs[0]
if !bytes.Equal(proof.ProofBytes, []byte("post-proof")) {
return false, xerrors.Errorf("bad proof")
}
return true, nil
}
func (m mockVerif) VerifyAggregateSeals(aggregate prooftypes.AggregateSealVerifyProofAndInfos) (bool, error) {
panic("implement me")
}
func (m mockVerif) VerifyReplicaUpdate(update prooftypes.ReplicaUpdateInfo) (bool, error) {
panic("implement me")
}
func (m mockVerif) VerifySeal(prooftypes.SealVerifyInfo) (bool, error) {
panic("implement me")
}
func (m mockVerif) GenerateWinningPoStSectorChallenge(context.Context, abi.RegisteredPoStProof, abi.ActorID, abi.PoStRandomness, uint64) ([]uint64, error) {
panic("implement me")
}
type mockFaultTracker struct {
}
func (m mockFaultTracker) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storiface.SectorRef, rg storiface.RGetter) (map[abi.SectorID]string, error) {
// CheckProvable reports bad sectors; returning an empty map means all
// sectors are provable.
return map[abi.SectorID]string{}, nil
}
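// generatePartition builds a partition with sectorCount live sectors, the
// first recoverySectorCount of which are marked as recovering.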
func generatePartition(sectorCount uint64, recoverySectorCount uint64) api.Partition {
var partition api.Partition
sectors := bitfield.New()
recoverySectors := bitfield.New()
for s := uint64(0); s < sectorCount; s++ {
sectors.Set(s)
}
for s := uint64(0); s < recoverySectorCount; s++ {
recoverySectors.Set(s)
}
partition = api.Partition{
AllSectors: sectors,
FaultySectors: bitfield.New(),
RecoveringSectors: recoverySectors,
LiveSectors: sectors,
ActiveSectors: sectors,
}
return partition
}
// TestWDPostDoPost verifies that doPost will send the correct number of window
// PoST messages for a given number of partitions
func TestWDPostDoPost(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
ctx := context.Background()
expectedMsgCount := 5
proofType := abi.RegisteredPoStProof_StackedDrgWindow2KiBV1
postAct := tutils.NewIDAddr(t, 100)
mockStgMinerAPI := newMockStorageMinerAPI()
// Get the number of sectors allowed in a partition for this proof type
sectorsPerPartition, err := builtin.PoStProofWindowPoStPartitionSectors(proofType)
require.NoError(t, err)
// Work out the number of partitions that can be included in a message
// without exceeding the message sector limit
//stm: @BLOCKCHAIN_POLICY_GET_MAX_POST_PARTITIONS_001
partitionsPerMsg, err := policy.GetMaxPoStPartitions(network.Version13, proofType)
require.NoError(t, err)
if partitionsPerMsg > minertypes.AddressedPartitionsMax {
partitionsPerMsg = minertypes.AddressedPartitionsMax
}
// Enough partitions to fill expectedMsgCount-1 messages
partitionCount := (expectedMsgCount - 1) * partitionsPerMsg
// Add an extra partition that should be included in the last message
partitionCount++
var partitions []api.Partition
for p := 0; p < partitionCount; p++ {
sectors := bitfield.New()
for s := uint64(0); s < sectorsPerPartition; s++ {
sectors.Set(s)
}
partitions = append(partitions, api.Partition{
AllSectors: sectors,
FaultySectors: bitfield.New(),
RecoveringSectors: bitfield.New(),
LiveSectors: sectors,
ActiveSectors: sectors,
})
}
mockStgMinerAPI.setPartitions(partitions)
// Run window PoST
scheduler := &WindowPoStScheduler{
api: mockStgMinerAPI,
prover: &mockProver{},
verifier: &mockVerif{},
faultTracker: &mockFaultTracker{},
proofType: proofType,
actor: postAct,
journal: journal.NilJournal(),
addrSel: &ctladdr.AddressSelector{},
}
di := &dline.Info{
WPoStPeriodDeadlines: minertypes.WPoStPeriodDeadlines,
WPoStProvingPeriod: minertypes.WPoStProvingPeriod,
WPoStChallengeWindow: minertypes.WPoStChallengeWindow,
WPoStChallengeLookback: minertypes.WPoStChallengeLookback,
FaultDeclarationCutoff: minertypes.FaultDeclarationCutoff,
}
ts := mockTipSet(t)
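// Proof generation runs asynchronously; its completion callback kicks off
// submission, which pushes the PoSt messages that are read back below.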
scheduler.startGeneratePoST(ctx, ts, di, func(posts []minertypes.SubmitWindowedPoStParams, err error) {
scheduler.startSubmitPoST(ctx, ts, di, posts, func(err error) {})
})
// Read the window PoST messages
for i := 0; i < expectedMsgCount; i++ {
msg := <-mockStgMinerAPI.pushedMessages
require.Equal(t, builtin.MethodsMiner.SubmitWindowedPoSt, msg.Method)
var params minertypes.SubmitWindowedPoStParams
err := params.UnmarshalCBOR(bytes.NewReader(msg.Params))
require.NoError(t, err)
if i == expectedMsgCount-1 {
// In the last message we only included a single partition (see above)
require.Len(t, params.Partitions, 1)
} else {
// All previous messages should include the full number of partitions
require.Len(t, params.Partitions, partitionsPerMsg)
}
}
}
// TestWDPostDoPostPartLimitConfig verifies that doPost will send the correct number of window
// PoST messages for a given number of partitions based on user config
func TestWDPostDoPostPartLimitConfig(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
ctx := context.Background()
expectedMsgCount := 364
proofType := abi.RegisteredPoStProof_StackedDrgWindow2KiBV1
postAct := tutils.NewIDAddr(t, 100)
mockStgMinerAPI := newMockStorageMinerAPI()
// Get the number of sectors allowed in a partition for this proof type
sectorsPerPartition, err := builtin.PoStProofWindowPoStPartitionSectors(proofType)
require.NoError(t, err)
// Work out the number of partitions that can be included in a message
// without exceeding the message sector limit
//stm: @BLOCKCHAIN_POLICY_GET_MAX_POST_PARTITIONS_001
partitionsPerMsg, err := policy.GetMaxPoStPartitions(network.Version13, proofType)
require.NoError(t, err)
if partitionsPerMsg > minertypes.AddressedPartitionsMax {
partitionsPerMsg = minertypes.AddressedPartitionsMax
}
partitionCount := 4 * partitionsPerMsg
// Assert that user config is less than network limit
userPartLimit := 33
lastMsgParts := 21
require.Greater(t, partitionCount, userPartLimit)
// Assert that the chosen constants are consistent
require.Equal(t, (expectedMsgCount-1)*userPartLimit+lastMsgParts, 4*partitionsPerMsg)
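// 4*partitionsPerMsg partitions at 33 per message yield 363 full messages
// plus a final message carrying the remaining 21 partitions.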
var partitions []api.Partition
for p := 0; p < partitionCount; p++ {
sectors := bitfield.New()
for s := uint64(0); s < sectorsPerPartition; s++ {
sectors.Set(s)
}
partitions = append(partitions, api.Partition{
AllSectors: sectors,
FaultySectors: bitfield.New(),
RecoveringSectors: bitfield.New(),
LiveSectors: sectors,
ActiveSectors: sectors,
})
}
mockStgMinerAPI.setPartitions(partitions)
// Run window PoST
scheduler := &WindowPoStScheduler{
api: mockStgMinerAPI,
prover: &mockProver{},
verifier: &mockVerif{},
faultTracker: &mockFaultTracker{},
proofType: proofType,
actor: postAct,
journal: journal.NilJournal(),
addrSel: &ctladdr.AddressSelector{},
maxPartitionsPerPostMessage: userPartLimit,
}
di := &dline.Info{
WPoStPeriodDeadlines: minertypes.WPoStPeriodDeadlines,
WPoStProvingPeriod: minertypes.WPoStProvingPeriod,
WPoStChallengeWindow: minertypes.WPoStChallengeWindow,
WPoStChallengeLookback: minertypes.WPoStChallengeLookback,
FaultDeclarationCutoff: minertypes.FaultDeclarationCutoff,
}
ts := mockTipSet(t)
scheduler.startGeneratePoST(ctx, ts, di, func(posts []minertypes.SubmitWindowedPoStParams, err error) {
scheduler.startSubmitPoST(ctx, ts, di, posts, func(err error) {})
})
// Read the window PoST messages
for i := 0; i < expectedMsgCount; i++ {
msg := <-mockStgMinerAPI.pushedMessages
require.Equal(t, builtin.MethodsMiner.SubmitWindowedPoSt, msg.Method)
var params minertypes.SubmitWindowedPoStParams
err := params.UnmarshalCBOR(bytes.NewReader(msg.Params))
require.NoError(t, err)
if i == expectedMsgCount-1 {
// The last message should contain only the remaining 21 partitions
require.Len(t, params.Partitions, lastMsgParts)
} else {
// All previous messages should include the full number of partitions
require.Len(t, params.Partitions, userPartLimit)
}
}
}
// TestBatchPartitionsRecoverySectors verifies that batches containing recovery
// sectors hold only a single partition each, while keeping all partitions in order
func TestBatchPartitionsRecoverySectors(t *testing.T) {
proofType := abi.RegisteredPoStProof_StackedDrgWindow2KiBV1
postAct := tutils.NewIDAddr(t, 100)
mockStgMinerAPI := newMockStorageMinerAPI()
userPartLimit := 4
scheduler := &WindowPoStScheduler{
api: mockStgMinerAPI,
prover: &mockProver{},
verifier: &mockVerif{},
faultTracker: &mockFaultTracker{},
proofType: proofType,
actor: postAct,
journal: journal.NilJournal(),
addrSel: &ctladdr.AddressSelector{},
maxPartitionsPerPostMessage: userPartLimit,
singleRecoveringPartitionPerPostMessage: true,
}
var partitions []api.Partition
for p := 0; p < 4; p++ {
partitions = append(partitions, generatePartition(100, 0))
}
for p := 0; p < 2; p++ {
partitions = append(partitions, generatePartition(100, 10))
}
for p := 0; p < 6; p++ {
partitions = append(partitions, generatePartition(100, 0))
}
partitions = append(partitions, generatePartition(100, 10))
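// 13 partitions in order: 4 healthy, 2 recovering, 6 healthy, 1 recovering.
// With a limit of 4 per message and each recovering partition isolated, the
// 6 healthy partitions split 4+2, giving batch sizes 4, 1, 1, 4, 2, 1.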
expectedBatchLens := []int{4, 1, 1, 4, 2, 1}
batches, err := scheduler.BatchPartitions(partitions, network.Version16)
require.NoError(t, err)
require.Equal(t, len(batches), 6)
for i, batch := range batches {
require.Equal(t, len(batch), expectedBatchLens[i])
}
}
// TestWDPostDeclareRecoveriesPartLimitConfig verifies that declareRecoveries will send the correct number of
// DeclareFaultsRecovered messages for a given number of partitions based on user config
func TestWDPostDeclareRecoveriesPartLimitConfig(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
ctx := context.Background()
proofType := abi.RegisteredPoStProof_StackedDrgWindow2KiBV1
postAct := tutils.NewIDAddr(t, 100)
mockStgMinerAPI := newMockStorageMinerAPI()
// Get the number of sectors allowed in a partition for this proof type
sectorsPerPartition, err := builtin.PoStProofWindowPoStPartitionSectors(proofType)
require.NoError(t, err)
// Let's have 11/20 partitions with faulty sectors, and a config of 3 partitions per message
userPartLimit := 3
partitionCount := 20
faultyPartitionCount := 11
var partitions []api.Partition
for p := 0; p < partitionCount; p++ {
sectors := bitfield.New()
for s := uint64(0); s < sectorsPerPartition; s++ {
sectors.Set(s)
}
partition := api.Partition{
AllSectors: sectors,
FaultySectors: bitfield.New(),
RecoveringSectors: bitfield.New(),
LiveSectors: sectors,
ActiveSectors: sectors,
}
if p < faultyPartitionCount {
partition.FaultySectors = sectors
}
partitions = append(partitions, partition)
}
mockStgMinerAPI.setPartitions(partitions)
// Run declareRecoveries
scheduler := &WindowPoStScheduler{
api: mockStgMinerAPI,
prover: &mockProver{},
verifier: &mockVerif{},
faultTracker: &mockFaultTracker{},
proofType: proofType,
actor: postAct,
journal: journal.NilJournal(),
addrSel: &ctladdr.AddressSelector{},
maxPartitionsPerRecoveryMessage: userPartLimit,
}
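// Unlike the PoSt tests above, declareRecoveries takes a raw deadline index.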
di := uint64(0)
ts := mockTipSet(t)
expectedMsgCount := faultyPartitionCount/userPartLimit + 1
lastMsgParts := faultyPartitionCount % userPartLimit
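// 11 faulty partitions at 3 per message: 3 full messages plus a final
// message with the remaining 2 partitions.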
go func() {
batchedRecoveries, msgs, err := scheduler.declareRecoveries(ctx, di, partitions, ts.Key())
require.NoError(t, err, "failed to declare recoveries")
require.Equal(t, len(batchedRecoveries), len(msgs))
require.Equal(t, expectedMsgCount, len(msgs))
}()
// Read the DeclareFaultsRecovered messages
for i := 0; i < expectedMsgCount; i++ {
msg := <-mockStgMinerAPI.pushedMessages
require.Equal(t, builtin.MethodsMiner.DeclareFaultsRecovered, msg.Method)
var params minertypes.DeclareFaultsRecoveredParams
err := params.UnmarshalCBOR(bytes.NewReader(msg.Params))
require.NoError(t, err)
if i == expectedMsgCount-1 {
// The last message should contain only the remaining partitions
require.Len(t, params.Recoveries, lastMsgParts)
} else {
// All previous messages should include the full number of partitions
require.Len(t, params.Recoveries, userPartLimit)
}
}
}
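// mockTipSet builds a single-block tipset at height 1 with placeholder CIDs,
// just enough chain state for the scheduler calls above.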
func mockTipSet(t *testing.T) *types.TipSet {
minerAct := tutils.NewActorAddr(t, "miner")
c, err := cid.Decode("QmbFMke1KXqnYyBBWxB74N4c5SBnJMVAiMNRcGu6x1AwQH")
require.NoError(t, err)
blks := []*types.BlockHeader{
{
Miner: minerAct,
Height: abi.ChainEpoch(1),
ParentStateRoot: c,
ParentMessageReceipts: c,
Messages: c,
},
}
ts, err := types.NewTipSet(blks)
require.NoError(t, err)
return ts
}
//
// All the mock methods below here are unused
//
func (m *mockStorageMinerAPI) StateMinerProvingDeadline(ctx context.Context, address address.Address, key types.TipSetKey) (*dline.Info, error) {
return &dline.Info{
CurrentEpoch: 0,
PeriodStart: 0,
Index: 0,
Open: 0,
Close: 0,
Challenge: 0,
FaultCutoff: 0,
WPoStPeriodDeadlines: minertypes.WPoStPeriodDeadlines,
WPoStProvingPeriod: minertypes.WPoStProvingPeriod,
WPoStChallengeWindow: minertypes.WPoStChallengeWindow,
WPoStChallengeLookback: minertypes.WPoStChallengeLookback,
FaultDeclarationCutoff: minertypes.FaultDeclarationCutoff,
}, nil
}
func (m *mockStorageMinerAPI) StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error) {
code, ok := actors.GetActorCodeID(actorstypes.Version7, manifest.MinerKey)
if !ok {
return nil, xerrors.Errorf("failed to get miner actor code ID for actors version %d", actorstypes.Version7)
}
return &types.Actor{
Code: code,
}, nil
}
func (m *mockStorageMinerAPI) StateAccountKey(ctx context.Context, address address.Address, key types.TipSetKey) (address.Address, error) {
return address, nil
}
func (m *mockStorageMinerAPI) GasEstimateMessageGas(ctx context.Context, message *types.Message, spec *api.MessageSendSpec, key types.TipSetKey) (*types.Message, error) {
msg := *message
msg.GasFeeCap = big.NewInt(1)
msg.GasPremium = big.NewInt(1)
msg.GasLimit = 2
return &msg, nil
}
func (m *mockStorageMinerAPI) ChainHead(ctx context.Context) (*types.TipSet, error) {
return nil, nil
}
func (m *mockStorageMinerAPI) WalletSign(ctx context.Context, address address.Address, bytes []byte) (*crypto.Signature, error) {
return nil, nil
}
func (m *mockStorageMinerAPI) WalletBalance(ctx context.Context, address address.Address) (types.BigInt, error) {
return big.NewInt(333), nil
}
func (m *mockStorageMinerAPI) WalletHas(ctx context.Context, address address.Address) (bool, error) {
return true, nil
}
var _ NodeAPI = &mockStorageMinerAPI{}