// stm: #unit
package chain_test

import (
	"context"
	"fmt"
	"os"
	"testing"
	"time"

	"github.com/ipfs/go-cid"
	ds "github.com/ipfs/go-datastore"
	logging "github.com/ipfs/go-log/v2"
	"github.com/libp2p/go-libp2p/core/peer"
	mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/network"
	prooftypes "github.com/filecoin-project/go-state-types/proof"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/actors/policy"
	"github.com/filecoin-project/lotus/chain/consensus/filcns"
	"github.com/filecoin-project/lotus/chain/gen"
	"github.com/filecoin-project/lotus/chain/gen/slashfilter"
	"github.com/filecoin-project/lotus/chain/stmgr"
	"github.com/filecoin-project/lotus/chain/store"
	"github.com/filecoin-project/lotus/chain/types"
	mocktypes "github.com/filecoin-project/lotus/chain/types/mock"
	"github.com/filecoin-project/lotus/node"
	"github.com/filecoin-project/lotus/node/impl"
	"github.com/filecoin-project/lotus/node/modules"
	"github.com/filecoin-project/lotus/node/repo"
)

func init() {
	build.InsecurePoStValidation = true
	err := os.Setenv("TRUST_PARAMS", "1")
	if err != nil {
		panic(err)
	}
	policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
	policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
	policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
}

const source = 0

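// repoWithChain pre-mines h tipsets with the shared ChainGen and returns the
// generator's repo, the genesis CAR bytes, and the mined tipsets.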
func (tu *syncTestUtil) repoWithChain(t testing.TB, h int) (repo.Repo, []byte, []*store.FullTipSet) {
	blks := make([]*store.FullTipSet, h)

	for i := 0; i < h; i++ {
		mts, err := tu.g.NextTipSet()
		require.NoError(t, err)

		blks[i] = mts.TipSet
	}

	r, err := tu.g.YieldRepo()
	require.NoError(t, err)

	genb, err := tu.g.GenesisCar()
	require.NoError(t, err)

	return r, genb, blks
}

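// syncTestUtil bundles the shared state for a sync test: a mock libp2p
// network, a chain generator, the genesis data, and the set of full nodes
// under test (the source node is always at index 0).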
type syncTestUtil struct {
	t testing.TB

	ctx    context.Context
	cancel func()

	mn mocknet.Mocknet

	g *gen.ChainGen

	genesis []byte
	blocks  []*store.FullTipSet

	nds []api.FullNode
	us  stmgr.UpgradeSchedule
}

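// prepSyncTest creates a syncTestUtil with the default upgrade schedule and a
// source node that has already mined h tipsets.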
func prepSyncTest(t testing.TB, h int) *syncTestUtil {
	logging.SetLogLevel("*", "INFO")

	g, err := gen.NewGenerator()
	if err != nil {
		t.Fatalf("%+v", err)
	}

	ctx, cancel := context.WithCancel(context.Background())

	tu := &syncTestUtil{
		t:      t,
		ctx:    ctx,
		cancel: cancel,

		mn: mocknet.New(),
		g:  g,
		us: filcns.DefaultUpgradeSchedule(),
	}

	tu.addSourceNode(h)

	//tu.checkHeight("source", source, h)

	// separate logs
	fmt.Println("\x1b[31m///////////////////////////////////////////////////\x1b[39b")

	return tu
}

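// prepSyncTestWithV5Height is like prepSyncTest, but uses a custom upgrade
// schedule that lands the actors v5 upgrade at the given epoch.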
func prepSyncTestWithV5Height(t testing.TB, h int, v5height abi.ChainEpoch) *syncTestUtil {
	logging.SetLogLevel("*", "INFO")

	sched := stmgr.UpgradeSchedule{{
		// prepare for upgrade.
		Network:   network.Version9,
		Height:    1,
		Migration: filcns.UpgradeActorsV2,
	}, {
		Network:   network.Version10,
		Height:    2,
		Migration: filcns.UpgradeActorsV3,
	}, {
		Network:   network.Version12,
		Height:    3,
		Migration: filcns.UpgradeActorsV4,
	}, {
		Network:   network.Version13,
		Height:    v5height,
		Migration: filcns.UpgradeActorsV5,
	}, {
		Network:   network.Version14,
		Height:    v5height + 10,
		Migration: filcns.UpgradeActorsV6,
	}, {
		Network:   network.Version15,
		Height:    v5height + 15,
		Migration: filcns.UpgradeActorsV7,
	}, {
		Network:   network.Version16,
		Height:    v5height + 20,
		Migration: filcns.UpgradeActorsV8,
	}}

	g, err := gen.NewGeneratorWithUpgradeSchedule(sched)

	if err != nil {
		t.Fatalf("%+v", err)
	}

	ctx, cancel := context.WithCancel(context.Background())

	tu := &syncTestUtil{
		t:      t,
		ctx:    ctx,
		cancel: cancel,

		mn: mocknet.New(),
		g:  g,
		us: sched,
	}

	tu.addSourceNode(h)
	//tu.checkHeight("source", source, h)

	// separate logs
	fmt.Println("\x1b[31m///////////////////////////////////////////////////\x1b[39b")
	return tu
}

func (tu *syncTestUtil) Shutdown() {
	tu.cancel()
}

func (tu *syncTestUtil) printHeads() {
	for i, n := range tu.nds {
		head, err := n.ChainHead(tu.ctx)
		if err != nil {
			tu.t.Fatal(err)
		}

		fmt.Printf("Node %d: %s\n", i, head.Cids())
	}
}

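// pushFtsAndWait submits every block of the tipset to node `to` and, if wait
// is set, blocks until that node's head equals the pushed tipset.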
func (tu *syncTestUtil) pushFtsAndWait(to int, fts *store.FullTipSet, wait bool) {
	// TODO: would be great if we could pass a whole tipset here...
	tu.pushTsExpectErr(to, fts, false)

	if wait {
		start := time.Now()
		h, err := tu.nds[to].ChainHead(tu.ctx)
		require.NoError(tu.t, err)
		for !h.Equals(fts.TipSet()) {
			time.Sleep(time.Millisecond * 50)
			h, err = tu.nds[to].ChainHead(tu.ctx)
			require.NoError(tu.t, err)

			if time.Since(start) > time.Second*10 {
				tu.t.Fatal("took too long waiting for block to be accepted")
			}
		}
	}
}

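// pushTsExpectErr stores the tipset's messages on node `to` and submits each
// block via SyncSubmitBlock, asserting an error when experr is true.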
func (tu *syncTestUtil) pushTsExpectErr(to int, fts *store.FullTipSet, experr bool) {
	ctx := context.TODO()
	for _, fb := range fts.Blocks {
		var b types.BlockMsg

		// -1 to match block.Height
		b.Header = fb.Header
		for _, msg := range fb.SecpkMessages {
			c, err := tu.nds[to].(*impl.FullNodeAPI).ChainAPI.Chain.PutMessage(ctx, msg)
			require.NoError(tu.t, err)

			b.SecpkMessages = append(b.SecpkMessages, c)
		}

		for _, msg := range fb.BlsMessages {
			c, err := tu.nds[to].(*impl.FullNodeAPI).ChainAPI.Chain.PutMessage(ctx, msg)
			require.NoError(tu.t, err)

			b.BlsMessages = append(b.BlsMessages, c)
		}

		err := tu.nds[to].SyncSubmitBlock(tu.ctx, &b)
		if experr {
			require.Error(tu.t, err, "expected submit block to fail")
		} else {
			require.NoError(tu.t, err)
		}
	}
}

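// mineOnBlock mines one tipset on top of blk with the given miners (all
// generator miners when nil), optionally including msgs and null rounds, and
// pushes the result to node `to` when push is set; fail selects whether the
// push is expected to be rejected.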
func (tu *syncTestUtil) mineOnBlock(blk *store.FullTipSet, to int, miners []int, wait, fail bool, msgs [][]*types.SignedMessage, nulls abi.ChainEpoch, push bool) *store.FullTipSet {
	if miners == nil {
		for i := range tu.g.Miners {
			miners = append(miners, i)
		}
	}

	var maddrs []address.Address
	for _, i := range miners {
		maddrs = append(maddrs, tu.g.Miners[i])
	}

	fmt.Println("Miner mining block: ", maddrs)

	var nts *store.FullTipSet
	var err error
	if msgs != nil {
		nts, err = tu.g.NextTipSetFromMinersWithMessagesAndNulls(blk.TipSet(), maddrs, msgs, nulls)
		require.NoError(tu.t, err)
	} else {
		mt, err := tu.g.NextTipSetFromMiners(blk.TipSet(), maddrs, nulls)
		require.NoError(tu.t, err)
		nts = mt.TipSet
	}

	if push {
		if fail {
			tu.pushTsExpectErr(to, nts, true)
		} else {
			tu.pushFtsAndWait(to, nts, wait)
		}
	}

	return nts
}

func (tu *syncTestUtil) mineNewBlock(src int, miners []int) {
	mts := tu.mineOnBlock(tu.g.CurTipset, src, miners, true, false, nil, 0, true)
	tu.g.CurTipset = mts
}

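// addSourceNode builds the source full node (index 0) from the pre-mined
// repo, loads its genesis, and registers the last mined blocks as its head.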
func (tu *syncTestUtil) addSourceNode(gen int) {
	if tu.genesis != nil {
		tu.t.Fatal("source node already exists")
	}

	sourceRepo, genesis, blocks := tu.repoWithChain(tu.t, gen)
	var out api.FullNode

	stop, err := node.New(tu.ctx,
		node.FullAPI(&out),
		node.Base(),
		node.Repo(sourceRepo),
		node.MockHost(tu.mn),
		node.Test(),

		node.Override(new(modules.Genesis), modules.LoadGenesis(genesis)),
		node.Override(new(stmgr.UpgradeSchedule), tu.us),
	)
	require.NoError(tu.t, err)
	tu.t.Cleanup(func() { _ = stop(context.Background()) })

	lastTs := blocks[len(blocks)-1].Blocks
	for _, lastB := range lastTs {
		cs := out.(*impl.FullNodeAPI).ChainAPI.Chain
		require.NoError(tu.t, cs.AddToTipSetTracker(context.Background(), lastB.Header))
		err = cs.AddBlock(tu.ctx, lastB.Header)
		require.NoError(tu.t, err)
	}

	tu.genesis = genesis
	tu.blocks = blocks
	tu.nds = append(tu.nds, out) // always at 0
}

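// addClientNode spins up an additional full node on the mock network, sharing
// the source's genesis, and returns its index in tu.nds.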
func (tu *syncTestUtil) addClientNode() int {
	if tu.genesis == nil {
		tu.t.Fatal("source doesn't exist")
	}

	var out api.FullNode

	r := repo.NewMemory(nil)
	stop, err := node.New(tu.ctx,
		node.FullAPI(&out),
		node.Base(),
		node.Repo(r),
		node.MockHost(tu.mn),
		node.Test(),

		node.Override(new(modules.Genesis), modules.LoadGenesis(tu.genesis)),
		node.Override(new(stmgr.UpgradeSchedule), tu.us),
	)
	require.NoError(tu.t, err)
	tu.t.Cleanup(func() { _ = stop(context.Background()) })

	tu.nds = append(tu.nds, out)
	return len(tu.nds) - 1
}

func (tu *syncTestUtil) pid(n int) peer.ID {
	nal, err := tu.nds[n].NetAddrsListen(tu.ctx)
	require.NoError(tu.t, err)

	return nal.ID
}

func (tu *syncTestUtil) connect(from, to int) {
	toPI, err := tu.nds[to].NetAddrsListen(tu.ctx)
	require.NoError(tu.t, err)

	err = tu.nds[from].NetConnect(tu.ctx, toPI)
	require.NoError(tu.t, err)
}

func (tu *syncTestUtil) disconnect(from, to int) {
	toPI, err := tu.nds[to].NetAddrsListen(tu.ctx)
	require.NoError(tu.t, err)

	err = tu.nds[from].NetDisconnect(tu.ctx, toPI.ID)
	require.NoError(tu.t, err)
}

func (tu *syncTestUtil) checkHeight(name string, n int, h int) {
	b, err := tu.nds[n].ChainHead(tu.ctx)
	require.NoError(tu.t, err)

	require.Equal(tu.t, uint64(h), b.Height())
	fmt.Printf("%s H: %d\n", name, b.Height())
}

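// compareSourceState asserts that node `with` has the same head as the source
// node and that every source wallet balance matches on both nodes.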
func (tu *syncTestUtil) compareSourceState(with int) {
	sourceHead, err := tu.nds[source].ChainHead(tu.ctx)
	require.NoError(tu.t, err)

	targetHead, err := tu.nds[with].ChainHead(tu.ctx)
	require.NoError(tu.t, err)

	if !sourceHead.Equals(targetHead) {
		fmt.Println("different chains: ", sourceHead.Height(), targetHead.Height())
		tu.t.Fatalf("nodes were not synced correctly: %s != %s", sourceHead.Cids(), targetHead.Cids())
	}

	sourceAccounts, err := tu.nds[source].WalletList(tu.ctx)
	require.NoError(tu.t, err)

	for _, addr := range sourceAccounts {
		sourceBalance, err := tu.nds[source].WalletBalance(tu.ctx, addr)
		require.NoError(tu.t, err)
		fmt.Printf("Source state check for %s, expect %s\n", addr, sourceBalance)

		actBalance, err := tu.nds[with].WalletBalance(tu.ctx, addr)
		require.NoError(tu.t, err)

		require.Equal(tu.t, sourceBalance, actBalance)
		fmt.Printf("Source state check <OK> for %s\n", addr)
	}
}

func (tu *syncTestUtil) assertBad(node int, ts *types.TipSet) {
	for _, blk := range ts.Cids() {
		rsn, err := tu.nds[node].SyncCheckBad(context.TODO(), blk)
		require.NoError(tu.t, err)
		require.True(tu.t, len(rsn) != 0)
	}
}

func (tu *syncTestUtil) getHead(node int) *types.TipSet {
	ts, err := tu.nds[node].ChainHead(context.TODO())
	require.NoError(tu.t, err)
	return ts
}

func (tu *syncTestUtil) checkpointTs(node int, tsk types.TipSetKey) {
	require.NoError(tu.t, tu.nds[node].SyncCheckpoint(context.TODO(), tsk))
}

func (tu *syncTestUtil) nodeHasTs(node int, tsk types.TipSetKey) bool {
	_, err := tu.nds[node].ChainGetTipSet(context.TODO(), tsk)
	return err == nil
}

func (tu *syncTestUtil) waitUntilNodeHasTs(node int, tsk types.TipSetKey) {
	for !tu.nodeHasTs(node, tsk) {
		// Time to allow for syncing and validation
		time.Sleep(10 * time.Millisecond)
	}

	// Time to allow for syncing and validation
	time.Sleep(2 * time.Second)
}

func (tu *syncTestUtil) waitUntilSync(from, to int) {
	target, err := tu.nds[from].ChainHead(tu.ctx)
	if err != nil {
		tu.t.Fatal(err)
	}

	tu.waitUntilSyncTarget(to, target)
}

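// waitUntilSyncTarget watches ChainNotify on node `to` until its head changes
// to the target tipset, failing the test after a 5 second timeout.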
func (tu *syncTestUtil) waitUntilSyncTarget(to int, target *types.TipSet) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	hc, err := tu.nds[to].ChainNotify(ctx)
	if err != nil {
		tu.t.Fatal(err)
	}

	timeout := time.After(5 * time.Second)

	for {
		select {
		case n := <-hc:
			for _, c := range n {
				if c.Val.Equals(target) {
					return
				}
			}
		case <-timeout:
			tu.t.Fatal("waitUntilSyncTarget timeout")
		}
	}
}

func TestSyncSimple(t *testing.T) {
	//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
	H := 50
	tu := prepSyncTest(t, H)

	client := tu.addClientNode()
	//tu.checkHeight("client", client, 0)

	require.NoError(t, tu.mn.LinkAll())
	tu.connect(1, 0)
	tu.waitUntilSync(0, client)

	//tu.checkHeight("client", client, H)

	tu.compareSourceState(client)
}

func TestSyncMining(t *testing.T) {
	//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
	H := 50
	tu := prepSyncTest(t, H)

	client := tu.addClientNode()
	//tu.checkHeight("client", client, 0)

	require.NoError(t, tu.mn.LinkAll())
	tu.connect(client, 0)
	tu.waitUntilSync(0, client)

	//tu.checkHeight("client", client, H)

	tu.compareSourceState(client)

	for i := 0; i < 5; i++ {
		tu.mineNewBlock(0, nil)
		tu.waitUntilSync(0, client)
		tu.compareSourceState(client)
	}
}

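// TestSyncBadTimestamp mines a block whose timestamp is too close to its
// parent's and checks that it is rejected, then mines a valid replacement on
// the same base and checks that the valid block becomes the head.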
func TestSyncBadTimestamp(t *testing.T) {
	//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
	H := 50
	tu := prepSyncTest(t, H)

	client := tu.addClientNode()

	require.NoError(t, tu.mn.LinkAll())
	tu.connect(client, 0)
	tu.waitUntilSync(0, client)

	base := tu.g.CurTipset
	tu.g.Timestamper = func(pts *types.TipSet, tl abi.ChainEpoch) uint64 {
		return pts.MinTimestamp() + (build.BlockDelaySecs / 2)
	}

	fmt.Println("BASE: ", base.Cids())
	tu.printHeads()

	a1 := tu.mineOnBlock(base, 0, nil, false, true, nil, 0, true)

	tu.g.Timestamper = nil
	require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet()))

	tu.nds[0].(*impl.FullNodeAPI).SlashFilter = slashfilter.New(ds.NewMapDatastore())

	fmt.Println("After mine bad block!")
	tu.printHeads()
	a2 := tu.mineOnBlock(base, 0, nil, true, false, nil, 0, true)

	tu.waitUntilSync(0, client)

	head, err := tu.nds[0].ChainHead(tu.ctx)
	require.NoError(t, err)

	if !head.Equals(a2.TipSet()) {
		t.Fatalf("expected head to be %s, but got %s", a2.Cids(), head.Cids())
	}
}

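// badWpp is a winning PoSt prover stub that always returns a bogus proof, used
// to exercise rejection of blocks with invalid winning PoSts.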
type badWpp struct{}

func (wpp badWpp) GenerateCandidates(context.Context, abi.PoStRandomness, uint64) ([]uint64, error) {
	return []uint64{1}, nil
}

func (wpp badWpp) ComputeProof(context.Context, []prooftypes.ExtendedSectorInfo, abi.PoStRandomness, abi.ChainEpoch, network.Version) ([]prooftypes.PoStProof, error) {
	return []prooftypes.PoStProof{
		{
			PoStProof:  abi.RegisteredPoStProof_StackedDrgWinning2KiBV1,
			ProofBytes: []byte("evil"),
		},
	}, nil
}

func TestSyncBadWinningPoSt(t *testing.T) {
	//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
	H := 15
	tu := prepSyncTest(t, H)

	client := tu.addClientNode()

	require.NoError(t, tu.mn.LinkAll())
	tu.connect(client, 0)
	tu.waitUntilSync(0, client)

	base := tu.g.CurTipset

	// both miners now produce invalid winning posts
	tu.g.SetWinningPoStProver(tu.g.Miners[0], &badWpp{})
	tu.g.SetWinningPoStProver(tu.g.Miners[1], &badWpp{})

	// now ensure that new blocks are not accepted
	tu.mineOnBlock(base, client, nil, false, true, nil, 0, true)
}

func (tu *syncTestUtil) loadChainToNode(to int) {
	// utility to simulate incoming blocks without miner process
	// TODO: should call syncer directly, this won't work correctly in all cases

	for i := 0; i < len(tu.blocks); i++ {
		tu.pushFtsAndWait(to, tu.blocks[i], true)
	}
}

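// TestSyncFork builds two competing chains on two client nodes, then connects
// them and checks that both converge on the heavier fork.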
func TestSyncFork(t *testing.T) {
	//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
	//stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
	H := 10
	tu := prepSyncTest(t, H)

	p1 := tu.addClientNode()
	p2 := tu.addClientNode()

	fmt.Println("GENESIS: ", tu.g.Genesis().Cid())
	tu.loadChainToNode(p1)
	tu.loadChainToNode(p2)

	printHead := func() {
		h1, err := tu.nds[1].ChainHead(tu.ctx)
		require.NoError(tu.t, err)

		h2, err := tu.nds[2].ChainHead(tu.ctx)
		require.NoError(tu.t, err)

		w1, err := tu.nds[1].(*impl.FullNodeAPI).ChainAPI.Chain.Weight(tu.ctx, h1)
		require.NoError(tu.t, err)
		w2, err := tu.nds[2].(*impl.FullNodeAPI).ChainAPI.Chain.Weight(tu.ctx, h2)
		require.NoError(tu.t, err)

		fmt.Println("Node 1: ", h1.Cids(), h1.Parents(), h1.Height(), w1)
		fmt.Println("Node 2: ", h2.Cids(), h2.Parents(), h2.Height(), w2)
		//time.Sleep(time.Second * 2)
		fmt.Println()
		fmt.Println()
		fmt.Println()
		fmt.Println()
	}

	printHead()

	base := tu.g.CurTipset
	fmt.Println("Mining base: ", base.TipSet().Cids(), base.TipSet().Height())

	// The two nodes fork at this point into 'a' and 'b'
	a1 := tu.mineOnBlock(base, p1, []int{0}, true, false, nil, 0, true)
	a := tu.mineOnBlock(a1, p1, []int{0}, true, false, nil, 0, true)
	a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil, 0, true)

	require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet()))
	// chain B will now be heaviest
	b := tu.mineOnBlock(base, p2, []int{1}, true, false, nil, 0, true)
	b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0, true)
	b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0, true)
	b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0, true)

	fmt.Println("A: ", a.Cids(), a.TipSet().Height())
	fmt.Println("B: ", b.Cids(), b.TipSet().Height())

	printHead()

	// Now for the fun part!!

	require.NoError(t, tu.mn.LinkAll())
	tu.connect(p1, p2)
	tu.waitUntilSyncTarget(p1, b.TipSet())
	tu.waitUntilSyncTarget(p2, b.TipSet())

	printHead()
}

// This test crafts a tipset with 2 blocks, A and B.
// A and B both include _different_ messages from sender X with nonce N (where N is the correct nonce for X).
// We can confirm that the state can be correctly computed, and that `MessagesForTipset` behaves as expected.
func TestDuplicateNonce(t *testing.T) {
	//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
	//stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
	H := 10
	tu := prepSyncTest(t, H)

	base := tu.g.CurTipset

	// Get the banker from computed tipset state, not the parent.
	st, _, err := tu.g.StateManager().TipSetState(context.TODO(), base.TipSet())
	require.NoError(t, err)
	ba, err := tu.g.StateManager().LoadActorRaw(context.TODO(), tu.g.Banker(), st)
	require.NoError(t, err)

	// Produce a message from the banker to the rcvr
	makeMsg := func(rcvr address.Address) *types.SignedMessage {
		msg := types.Message{
			To:   rcvr,
			From: tu.g.Banker(),

			Nonce: ba.Nonce,

			Value: types.NewInt(1),

			Method: 0,

			GasLimit:   100_000_000,
			GasFeeCap:  types.NewInt(0),
			GasPremium: types.NewInt(0),
		}

		sig, err := tu.g.Wallet().WalletSign(context.TODO(), tu.g.Banker(), msg.Cid().Bytes(), api.MsgMeta{})
		require.NoError(t, err)

		return &types.SignedMessage{
			Message:   msg,
			Signature: *sig,
		}
	}

	msgs := make([][]*types.SignedMessage, 2)
	// Each miner includes a message from the banker with the same nonce, but to different addresses
	for k := range msgs {
		msgs[k] = []*types.SignedMessage{makeMsg(tu.g.Miners[k])}
	}

	ts1 := tu.mineOnBlock(base, 0, []int{0, 1}, true, false, msgs, 0, true)

	tu.waitUntilSyncTarget(0, ts1.TipSet())

	// mine another tipset

	ts2 := tu.mineOnBlock(ts1, 0, []int{0, 1}, true, false, make([][]*types.SignedMessage, 2), 0, true)
	tu.waitUntilSyncTarget(0, ts2.TipSet())

	var includedMsg cid.Cid
	var skippedMsg cid.Cid
	//stm: @CHAIN_STATE_SEARCH_MSG_001
	r0, err0 := tu.nds[0].StateSearchMsg(context.TODO(), ts2.TipSet().Key(), msgs[0][0].Cid(), api.LookbackNoLimit, true)
	r1, err1 := tu.nds[0].StateSearchMsg(context.TODO(), ts2.TipSet().Key(), msgs[1][0].Cid(), api.LookbackNoLimit, true)

	if err0 == nil {
		require.Error(t, err1, "at least one of the StateSearchMsg calls should fail")
		require.True(t, r0.Receipt.ExitCode.IsSuccess())
		includedMsg = msgs[0][0].Message.Cid()
		skippedMsg = msgs[1][0].Message.Cid()
	} else {
		require.NoError(t, err1, "neither of the StateSearchMsg calls should fail")
		require.True(t, r1.Receipt.ExitCode.IsSuccess())
		includedMsg = msgs[1][0].Message.Cid()
		skippedMsg = msgs[0][0].Message.Cid()
	}

	_, rslts, err := tu.g.StateManager().ExecutionTrace(context.TODO(), ts1.TipSet())
	require.NoError(t, err)
	found := false
	for _, v := range rslts {
		if v.Msg.Cid() == skippedMsg {
			t.Fatal("skipped message should not be in exec trace")
		}

		if v.Msg.Cid() == includedMsg {
			found = true
		}
	}

	if !found {
		t.Fatal("included message should be in exec trace")
	}

	mft, err := tu.g.ChainStore().MessagesForTipset(context.TODO(), ts1.TipSet())
	require.NoError(t, err)
	require.True(t, len(mft) == 1, "only expecting one message for this tipset")
	require.Equal(t, includedMsg, mft[0].VMMessage().Cid(), "messages for tipset didn't contain expected message")
}

// This test asserts that a block that includes a message with a bad nonce can't be synced. A nonce is "bad" if it
// can't be applied on the parent state.
func TestBadNonce(t *testing.T) {
	//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
	//stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001
	//stm: @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001, @CHAIN_SYNCER_STOP_001
	H := 10
	tu := prepSyncTest(t, H)

	base := tu.g.CurTipset

	// Get the banker from computed tipset state, not the parent.
	st, _, err := tu.g.StateManager().TipSetState(context.TODO(), base.TipSet())
	require.NoError(t, err)
	ba, err := tu.g.StateManager().LoadActorRaw(context.TODO(), tu.g.Banker(), st)
	require.NoError(t, err)

	// Produce a message from the banker with a bad nonce
	makeBadMsg := func() *types.SignedMessage {
		msg := types.Message{
			To:   tu.g.Banker(),
			From: tu.g.Banker(),

			Nonce: ba.Nonce + 5,

			Value: types.NewInt(1),

			Method: 0,

			GasLimit:   100_000_000,
			GasFeeCap:  types.NewInt(0),
			GasPremium: types.NewInt(0),
		}

		sig, err := tu.g.Wallet().WalletSign(context.TODO(), tu.g.Banker(), msg.Cid().Bytes(), api.MsgMeta{})
		require.NoError(t, err)

		return &types.SignedMessage{
			Message:   msg,
			Signature: *sig,
		}
	}

	msgs := make([][]*types.SignedMessage, 1)
	msgs[0] = []*types.SignedMessage{makeBadMsg()}

	tu.mineOnBlock(base, 0, []int{0}, true, true, msgs, 0, true)
}

// This test introduces a block that has 2 messages, with the same sender, and same nonce.
// One of the messages uses the sender's robust address, the other uses the ID address.
// Such a block is invalid and should not sync.
func TestMismatchedNoncesRobustID(t *testing.T) {
	//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
	//stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001
	//stm: @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001, @CHAIN_SYNCER_STOP_001
	v5h := abi.ChainEpoch(4)
	tu := prepSyncTestWithV5Height(t, int(v5h+5), v5h)

	base := tu.g.CurTipset

	// Get the banker from computed tipset state, not the parent.
	st, _, err := tu.g.StateManager().TipSetState(context.TODO(), base.TipSet())
	require.NoError(t, err)
	ba, err := tu.g.StateManager().LoadActorRaw(context.TODO(), tu.g.Banker(), st)
	require.NoError(t, err)

	// Produce a message from the banker
	//stm: @CHAIN_STATE_LOOKUP_ID_001
	makeMsg := func(id bool) *types.SignedMessage {
		sender := tu.g.Banker()
		if id {
			s, err := tu.nds[0].StateLookupID(context.TODO(), sender, base.TipSet().Key())
			require.NoError(t, err)
			sender = s
		}

		msg := types.Message{
			To:   tu.g.Banker(),
			From: sender,

			Nonce: ba.Nonce,

			Value: types.NewInt(1),

			Method: 0,

			GasLimit:   100_000_000,
			GasFeeCap:  types.NewInt(0),
			GasPremium: types.NewInt(0),
		}

		sig, err := tu.g.Wallet().WalletSign(context.TODO(), tu.g.Banker(), msg.Cid().Bytes(), api.MsgMeta{})
		require.NoError(t, err)

		return &types.SignedMessage{
			Message:   msg,
			Signature: *sig,
		}
	}

	msgs := make([][]*types.SignedMessage, 1)
	msgs[0] = []*types.SignedMessage{makeMsg(false), makeMsg(true)}

	tu.mineOnBlock(base, 0, []int{0}, true, true, msgs, 0, true)
}

// This test introduces a block that has 2 messages, with the same sender, and nonces N and N+1 (so both can be included in a block)
// One of the messages uses the sender's robust address, the other uses the ID address.
// Such a block is valid and should sync.
func TestMatchedNoncesRobustID(t *testing.T) {
	//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
	//stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001
	//stm: @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001, @CHAIN_SYNCER_STOP_001
	v5h := abi.ChainEpoch(4)
	tu := prepSyncTestWithV5Height(t, int(v5h+5), v5h)

	base := tu.g.CurTipset

	// Get the banker from computed tipset state, not the parent.
	st, _, err := tu.g.StateManager().TipSetState(context.TODO(), base.TipSet())
	require.NoError(t, err)
	ba, err := tu.g.StateManager().LoadActorRaw(context.TODO(), tu.g.Banker(), st)
	require.NoError(t, err)

	// Produce a message from the banker with specified nonce
	//stm: @CHAIN_STATE_LOOKUP_ID_001
	makeMsg := func(n uint64, id bool) *types.SignedMessage {
		sender := tu.g.Banker()
		if id {
			s, err := tu.nds[0].StateLookupID(context.TODO(), sender, base.TipSet().Key())
			require.NoError(t, err)
			sender = s
		}

		msg := types.Message{
			To:   tu.g.Banker(),
			From: sender,

			Nonce: n,

			Value: types.NewInt(1),

			Method: 0,

			GasLimit:   100_000_000,
			GasFeeCap:  types.NewInt(0),
			GasPremium: types.NewInt(0),
		}

		sig, err := tu.g.Wallet().WalletSign(context.TODO(), tu.g.Banker(), msg.Cid().Bytes(), api.MsgMeta{})
		require.NoError(t, err)

		return &types.SignedMessage{
			Message:   msg,
			Signature: *sig,
		}
	}

	msgs := make([][]*types.SignedMessage, 1)
	msgs[0] = []*types.SignedMessage{makeMsg(ba.Nonce, false), makeMsg(ba.Nonce+1, true)}

	tu.mineOnBlock(base, 0, []int{0}, true, false, msgs, 0, true)
}

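// BenchmarkSyncBasic measures how long a fresh client node takes to sync a
// 100-tipset chain from the source node.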
func BenchmarkSyncBasic(b *testing.B) {
	for i := 0; i < b.N; i++ {
		runSyncBenchLength(b, 100)
	}
}

func runSyncBenchLength(b *testing.B, l int) {
	tu := prepSyncTest(b, l)

	client := tu.addClientNode()
	tu.checkHeight("client", client, 0)

	b.ResetTimer()

	require.NoError(b, tu.mn.LinkAll())
	tu.connect(1, 0)

	tu.waitUntilSync(0, client)
}

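// TestSyncInputs feeds obviously malformed blocks straight into the syncer's
// ValidateBlock and expects them to be rejected.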
func TestSyncInputs(t *testing.T) {
	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_VALIDATE_BLOCK_001,
	//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_STOP_001
	H := 10
	tu := prepSyncTest(t, H)

	p1 := tu.addClientNode()

	fn := tu.nds[p1].(*impl.FullNodeAPI)

	s := fn.SyncAPI.Syncer

	err := s.ValidateBlock(context.TODO(), &types.FullBlock{
		Header: &types.BlockHeader{},
	}, false)
	if err == nil {
		t.Fatal("should error on empty block")
	}

	h := mocktypes.MkBlock(nil, 123, 432)

	h.ElectionProof = nil

	err = s.ValidateBlock(context.TODO(), &types.FullBlock{Header: h}, false)
	if err == nil {
		t.Fatal("should error on block with nil election proof")
	}
}

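// TestSyncCheckpointHead checkpoints node p1's current head and verifies that
// a heavier fork from p2 is marked bad instead of being adopted, until the
// checkpoint is moved to the other fork.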
func TestSyncCheckpointHead(t *testing.T) {
	//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
	//stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001
	//stm: @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001, @CHAIN_SYNCER_STOP_001
	H := 10
	tu := prepSyncTest(t, H)

	p1 := tu.addClientNode()
	p2 := tu.addClientNode()

	fmt.Println("GENESIS: ", tu.g.Genesis().Cid())
	tu.loadChainToNode(p1)
	tu.loadChainToNode(p2)

	base := tu.g.CurTipset
	fmt.Println("Mining base: ", base.TipSet().Cids(), base.TipSet().Height())

	// The two nodes fork at this point into 'a' and 'b'
	a1 := tu.mineOnBlock(base, p1, []int{0}, true, false, nil, 0, true)
	a := tu.mineOnBlock(a1, p1, []int{0}, true, false, nil, 0, true)
	a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil, 0, true)

	tu.waitUntilSyncTarget(p1, a.TipSet())
	//stm: @CHAIN_SYNCER_CHECKPOINT_001
	tu.checkpointTs(p1, a.TipSet().Key())

	require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet()))
	// chain B will now be heaviest
	b := tu.mineOnBlock(base, p2, []int{1}, true, false, nil, 0, true)
	b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0, true)
	b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0, true)
	b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0, true)

	fmt.Println("A: ", a.Cids(), a.TipSet().Height())
	fmt.Println("B: ", b.Cids(), b.TipSet().Height())

	// Now for the fun part!! p1 should mark p2's head as BAD.

	require.NoError(t, tu.mn.LinkAll())
	tu.connect(p1, p2)
	tu.waitUntilNodeHasTs(p1, b.TipSet().Key())
	p1Head := tu.getHead(p1)
	require.True(tu.t, p1Head.Equals(a.TipSet()))
	//stm: @CHAIN_SYNCER_CHECK_BAD_001
	tu.assertBad(p1, b.TipSet())

	// Should be able to switch forks.
	//stm: @CHAIN_SYNCER_CHECKPOINT_001
	tu.checkpointTs(p1, b.TipSet().Key())
	p1Head = tu.getHead(p1)
	require.True(tu.t, p1Head.Equals(b.TipSet()))
}

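// TestSyncCheckpointEarlierThanHead is like TestSyncCheckpointHead, but the
// checkpoint is placed on an ancestor of p1's head rather than the head
// itself; the competing fork must still be rejected.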
func TestSyncCheckpointEarlierThanHead(t *testing.T) {
	//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
	//stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001
	//stm: @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001, @CHAIN_SYNCER_STOP_001
	H := 10
	tu := prepSyncTest(t, H)

	p1 := tu.addClientNode()
	p2 := tu.addClientNode()

	fmt.Println("GENESIS: ", tu.g.Genesis().Cid())
	tu.loadChainToNode(p1)
	tu.loadChainToNode(p2)

	base := tu.g.CurTipset
	fmt.Println("Mining base: ", base.TipSet().Cids(), base.TipSet().Height())

	// The two nodes fork at this point into 'a' and 'b'
	a1 := tu.mineOnBlock(base, p1, []int{0}, true, false, nil, 0, true)
	a := tu.mineOnBlock(a1, p1, []int{0}, true, false, nil, 0, true)
	a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil, 0, true)

	tu.waitUntilSyncTarget(p1, a.TipSet())
	//stm: @CHAIN_SYNCER_CHECKPOINT_001
	tu.checkpointTs(p1, a1.TipSet().Key())

	require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet()))
	// chain B will now be heaviest
	b := tu.mineOnBlock(base, p2, []int{1}, true, false, nil, 0, true)
	b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0, true)
	b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0, true)
	b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0, true)

	fmt.Println("A: ", a.Cids(), a.TipSet().Height())
	fmt.Println("B: ", b.Cids(), b.TipSet().Height())

	// Now for the fun part!! p1 should mark p2's head as BAD.

	require.NoError(t, tu.mn.LinkAll())
	tu.connect(p1, p2)
	tu.waitUntilNodeHasTs(p1, b.TipSet().Key())
	p1Head := tu.getHead(p1)
	require.True(tu.t, p1Head.Equals(a.TipSet()))
	//stm: @CHAIN_SYNCER_CHECK_BAD_001
	tu.assertBad(p1, b.TipSet())

	// Should be able to switch forks.
	//stm: @CHAIN_SYNCER_CHECKPOINT_001
	tu.checkpointTs(p1, b.TipSet().Key())
	p1Head = tu.getHead(p1)
	require.True(tu.t, p1Head.Equals(b.TipSet()))
}

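// TestInvalidHeight mines several tipsets without pushing them, then mines a
// block with an invalid height relative to its parent (via a negative
// null-rounds value) and expects the push to be rejected.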
func TestInvalidHeight(t *testing.T) {
	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
	//stm: @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
	H := 50
	tu := prepSyncTest(t, H)

	client := tu.addClientNode()

	require.NoError(t, tu.mn.LinkAll())
	tu.connect(client, 0)
	tu.waitUntilSync(0, client)

	base := tu.g.CurTipset

	for i := 0; i < 5; i++ {
		base = tu.mineOnBlock(base, 0, nil, false, false, nil, 0, false)
	}

	tu.mineOnBlock(base, 0, nil, false, true, nil, -1, true)
}

// TestIncomingBlocks mines new blocks and checks if the incoming channel streams new block headers properly
func TestIncomingBlocks(t *testing.T) {
	H := 50
	tu := prepSyncTest(t, H)

	client := tu.addClientNode()
	require.NoError(t, tu.mn.LinkAll())

	clientNode := tu.nds[client]
	//stm: @CHAIN_SYNCER_INCOMING_BLOCKS_001
	incoming, err := clientNode.SyncIncomingBlocks(tu.ctx)
	require.NoError(tu.t, err)

	tu.connect(client, 0)
	tu.waitUntilSync(0, client)
	tu.compareSourceState(client)

	timeout := time.After(10 * time.Second)

	for i := 0; i < 5; i++ {
		tu.mineNewBlock(0, nil)
		tu.waitUntilSync(0, client)
		tu.compareSourceState(client)

		// just in case, so we don't get deadlocked
		select {
		case <-incoming:
		case <-timeout:
			tu.t.Fatal("TestIncomingBlocks timeout")
		}
	}
}

// TestSyncManualBadTS tests manually marking and unmarking blocks in the bad TS cache
func TestSyncManualBadTS(t *testing.T) {
	// Test setup:
	// - source node is fully synced,
	// - client node is unsynced
	// - client manually marked source's head and its parent as bad
	H := 50
	tu := prepSyncTest(t, H)

	client := tu.addClientNode()
	require.NoError(t, tu.mn.LinkAll())

	sourceHead, err := tu.nds[source].ChainHead(tu.ctx)
	require.NoError(tu.t, err)

	clientHead, err := tu.nds[client].ChainHead(tu.ctx)
	require.NoError(tu.t, err)

	require.True(tu.t, !sourceHead.Equals(clientHead), "source and client should be out of sync in test setup")

	//stm: @CHAIN_SYNCER_MARK_BAD_001
	err = tu.nds[client].SyncMarkBad(tu.ctx, sourceHead.Cids()[0])
	require.NoError(tu.t, err)

	sourceHeadParent := sourceHead.Parents().Cids()[0]
	err = tu.nds[client].SyncMarkBad(tu.ctx, sourceHeadParent)
	require.NoError(tu.t, err)

	//stm: @CHAIN_SYNCER_CHECK_BAD_001
	reason, err := tu.nds[client].SyncCheckBad(tu.ctx, sourceHead.Cids()[0])
	require.NoError(tu.t, err)
	require.NotEqual(tu.t, "", reason, "block is not bad after manually marking")

	reason, err = tu.nds[client].SyncCheckBad(tu.ctx, sourceHeadParent)
	require.NoError(tu.t, err)
	require.NotEqual(tu.t, "", reason, "block is not bad after manually marking")

	// Assertion 1:
	// - client shouldn't be synced after timeout, because the source TS is marked bad.
	// - bad block is the first block that should be synced, 1sec should be enough
	tu.connect(1, 0)
	timeout := time.After(1 * time.Second)
	<-timeout

	clientHead, err = tu.nds[client].ChainHead(tu.ctx)
	require.NoError(tu.t, err)
	require.True(tu.t, !sourceHead.Equals(clientHead), "source and client should be out of sync if source head is bad")

	// Assertion 2:
	// - after unmarking blocks as bad and reconnecting, source & client should be in sync
	//stm: @CHAIN_SYNCER_UNMARK_BAD_001
	err = tu.nds[client].SyncUnmarkBad(tu.ctx, sourceHead.Cids()[0])
	require.NoError(tu.t, err)

	reason, err = tu.nds[client].SyncCheckBad(tu.ctx, sourceHead.Cids()[0])
	require.NoError(tu.t, err)
	require.Equal(tu.t, "", reason, "block is still bad after manually unmarking")

	err = tu.nds[client].SyncUnmarkAllBad(tu.ctx)
	require.NoError(tu.t, err)

	reason, err = tu.nds[client].SyncCheckBad(tu.ctx, sourceHeadParent)
	require.NoError(tu.t, err)
	require.Equal(tu.t, "", reason, "block is still bad after manually unmarking")

	tu.disconnect(1, 0)
	tu.connect(1, 0)

	tu.waitUntilSync(0, client)
	tu.compareSourceState(client)
}

// TestSyncState tests fetching the sync worker state before, during & after the sync
func TestSyncState(t *testing.T) {
	H := 50
	tu := prepSyncTest(t, H)

	client := tu.addClientNode()
	require.NoError(t, tu.mn.LinkAll())
	clientNode := tu.nds[client]
	sourceHead, err := tu.nds[source].ChainHead(tu.ctx)
	require.NoError(tu.t, err)

	// sync state should be empty before the sync
	state, err := clientNode.SyncState(tu.ctx)
	require.NoError(tu.t, err)
	require.Equal(tu.t, len(state.ActiveSyncs), 0)

	tu.connect(client, 0)

	// wait until sync starts, or at most `timeout` seconds
	timeout := time.After(5 * time.Second)
	activeSyncs := []api.ActiveSync{}

	for len(activeSyncs) == 0 {
		//stm: @CHAIN_SYNCER_STATE_001
		state, err = clientNode.SyncState(tu.ctx)
		require.NoError(tu.t, err)
		activeSyncs = state.ActiveSyncs

		sleep := time.After(100 * time.Millisecond)
		select {
		case <-sleep:
		case <-timeout:
			tu.t.Fatal("TestSyncState timeout")
		}
	}

	// check state during sync
	require.Equal(tu.t, len(activeSyncs), 1)
	require.True(tu.t, activeSyncs[0].Target.Equals(sourceHead))

	tu.waitUntilSync(0, client)
	tu.compareSourceState(client)

	// check state after sync
	state, err = clientNode.SyncState(tu.ctx)
	require.NoError(tu.t, err)
	require.Equal(tu.t, len(state.ActiveSyncs), 1)
	require.Equal(tu.t, state.ActiveSyncs[0].Stage, api.StageSyncComplete)
}