d7f59b3d74
This is done with the intention of adding new {Get,Subscribe}ActorEvents in a future release (i.e. soon!) with both decoded values (dag-json represented) and a simplified form (no flags or codec). But because this comes with some trade-offs with respect to fidelity of information (e.g. likely needing to drop events with badly encoded values, and not retaining the original codec), we also need to have a Raw form of these APIs for consumers that want to take on the burden of consuming events as they are.
201 lines
6.0 KiB
Go
201 lines
6.0 KiB
Go
// stm: #integration
|
|
package itests
|
|
|
|
import (
|
|
"bytes"
|
|
"context"
|
|
"testing"
|
|
"time"
|
|
|
|
"github.com/ipld/go-ipld-prime"
|
|
"github.com/ipld/go-ipld-prime/codec/dagcbor"
|
|
"github.com/ipld/go-ipld-prime/node/basicnode"
|
|
"github.com/multiformats/go-multicodec"
|
|
"github.com/stretchr/testify/require"
|
|
|
|
"github.com/filecoin-project/go-bitfield"
|
|
"github.com/filecoin-project/go-state-types/abi"
|
|
|
|
"github.com/filecoin-project/lotus/chain/types"
|
|
"github.com/filecoin-project/lotus/itests/kit"
|
|
"github.com/filecoin-project/lotus/lib/must"
|
|
sealing "github.com/filecoin-project/lotus/storage/pipeline"
|
|
)
|
|
|
|
// TestTerminate exercises the sector-termination pipeline end to end on a
// mock-proof miner: it pledges an extra sector, terminates one sector, and
// then verifies that (1) raw-byte power is deducted, (2) the partition state
// shows the sector as tracked-but-not-live, and (3) a "sector-terminated"
// actor event with the expected dag-cbor entries was emitted.
func TestTerminate(t *testing.T) {
	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
	//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
	//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001

	//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
	kit.Expensive(t) // skip unless expensive/integration tests are enabled

	kit.QuietMiningLogs()

	var (
		blocktime = 2 * time.Millisecond // fast block production (mock proofs)
		nSectors  = 2                    // sectors presealed at genesis
		ctx       = context.Background()
	)

	// One client node + one miner, interconnected and mining continuously.
	client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.PresealSectors(nSectors))
	ens.InterconnectAll().BeginMining(blocktime)

	maddr, err := miner.ActorAddress(ctx)
	require.NoError(t, err)

	ssz, err := miner.ActorSectorSize(ctx, maddr)
	require.NoError(t, err)

	// Sanity-check initial power: the sole miner holds all network power,
	// equal to sector size times the number of presealed sectors.
	//stm: @CHAIN_STATE_MINER_POWER_001
	p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)
	require.Equal(t, p.MinerPower, p.TotalPower)
	require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*uint64(nSectors)))

	t.Log("Seal a sector")

	miner.PledgeSectors(ctx, 1, 0, nil)

	t.Log("wait for power")

	{
		//stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001
		// Wait until proven.
		di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
		require.NoError(t, err)

		waitUntil := di.Open + di.WPoStProvingPeriod
		t.Logf("End for head.Height > %d", waitUntil)

		ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
		t.Logf("Now head.Height = %d", ts.Height())
	}

	// Account for the newly pledged sector in the power checks below.
	nSectors++

	//stm: @CHAIN_STATE_MINER_POWER_001
	p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)
	require.Equal(t, p.MinerPower, p.TotalPower)
	require.Equal(t, types.NewInt(uint64(ssz)*uint64(nSectors)), p.MinerPower.RawBytePower)

	t.Log("Terminate a sector")

	// NOTE(review): hard-coded sector number — presumably the pledged sector,
	// numbered after the presealed ones; confirm against preseal numbering.
	toTerminate := abi.SectorNumber(3)

	//stm: @SECTOR_TERMINATE_001
	err = miner.SectorTerminate(ctx, toTerminate)
	require.NoError(t, err)

	// Poll the sealing pipeline until the sector moves past Terminating.
	// While in Terminating, verify the pending-termination queue, then
	// force-flush it; the flush may yield nil (nothing sent yet), in which
	// case we retry on the next iteration.
	// NOTE(review): "msgTriggerred" is misspelled (msgTriggered) — local only,
	// rename in a follow-up.
	msgTriggerred := false
loop:
	for {
		si, err := miner.SectorsStatus(ctx, toTerminate, false)
		require.NoError(t, err)

		t.Log("state: ", si.State, msgTriggerred)

		switch sealing.SectorState(si.State) {
		//stm: @SECTOR_TERMINATE_PENDING_001
		case sealing.Terminating:
			if !msgTriggerred {
				{
					// Exactly one termination should be queued: our sector.
					p, err := miner.SectorTerminatePending(ctx)
					require.NoError(t, err)
					require.Len(t, p, 1)
					require.Equal(t, abi.SectorNumber(3), p[0].Number)
				}

				c, err := miner.SectorTerminateFlush(ctx)
				require.NoError(t, err)
				if c != nil { // non-nil message CID: the batch was sent
					msgTriggerred = true
					t.Log("terminate message:", c)

					{
						// After the flush, nothing should remain pending.
						p, err := miner.SectorTerminatePending(ctx)
						require.NoError(t, err)
						require.Len(t, p, 0)
					}
				}
			}
		case sealing.TerminateWait, sealing.TerminateFinality, sealing.Removed:
			// Terminal-enough states: the message is on (or past) the chain.
			break loop
		}

		time.Sleep(100 * time.Millisecond)
	}

	// need to wait for message to be mined and applied.
	time.Sleep(5 * time.Second)

	//stm: @CHAIN_STATE_MINER_POWER_001
	// check power decreased
	p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)
	require.Equal(t, p.MinerPower, p.TotalPower)
	require.Equal(t, types.NewInt(uint64(ssz)*uint64(nSectors-1)), p.MinerPower.RawBytePower)

	// check in terminated set
	{
		//stm: @CHAIN_STATE_MINER_GET_PARTITIONS_001
		parts, err := client.StateMinerPartitions(ctx, maddr, 1, types.EmptyTSK)
		require.NoError(t, err)
		require.Greater(t, len(parts), 0)

		// bflen counts the set bits of a bitfield, failing the test on error.
		bflen := func(b bitfield.BitField) uint64 {
			l, err := b.Count()
			require.NoError(t, err)
			return l
		}

		// The terminated sector is still tracked (AllSectors) but no longer
		// live (LiveSectors).
		require.Equal(t, uint64(1), bflen(parts[0].AllSectors))
		require.Equal(t, uint64(0), bflen(parts[0].LiveSectors))
	}

	//stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001
	di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)

	waitUntil := di.PeriodStart + di.WPoStProvingPeriod + 20 // slack like above
	t.Logf("End for head.Height > %d", waitUntil)

	ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
	t.Logf("Now head.Height = %d", ts.Height())

	// Power must remain reduced after a further full proving period.
	//stm: @CHAIN_STATE_MINER_POWER_001
	p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)

	require.Equal(t, p.MinerPower, p.TotalPower)
	require.Equal(t, types.NewInt(uint64(ssz)*uint64(nSectors-1)), p.MinerPower.RawBytePower)

	// check "sector-terminated" actor event
	var epochZero abi.ChainEpoch
	allEvents, err := miner.FullNode.GetActorEventsRaw(ctx, &types.ActorEventFilter{
		FromHeight: &epochZero, // zero value: query from genesis
	})
	require.NoError(t, err)
	// Expect each sector-lifecycle event type to appear at least once; for
	// the termination event, also verify the exact dag-cbor-encoded entries.
	for _, key := range []string{"sector-precommitted", "sector-activated", "sector-terminated"} {
		var found bool
		// Event "$type" values are dag-cbor-encoded strings; encode the
		// expected key the same way for a byte-level comparison.
		keyBytes := must.One(ipld.Encode(basicnode.NewString(key), dagcbor.Encode))
		for _, event := range allEvents {
			for _, e := range event.Entries {
				if e.Key == "$type" && bytes.Equal(e.Value, keyBytes) {
					found = true
					if key == "sector-terminated" {
						expectedEntries := []types.EventEntry{
							{Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "$type", Value: keyBytes},
							{Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "sector", Value: must.One(ipld.Encode(basicnode.NewInt(int64(toTerminate)), dagcbor.Encode))},
						}
						require.ElementsMatch(t, expectedEntries, event.Entries)
					}
					break
				}
			}
		}
		require.True(t, found, "expected to find event %s", key)
	}
}
|