Few more changes
parent a1f2fdb706
commit 4171be0b98
@@ -187,7 +187,7 @@ func TestMessageSignerSignMessage(t *testing.T) {
 			mpool.setNonce(m.msg.From, m.mpoolNonce[0])
 		}
 		merr := m.cbErr
-		smsg, err := ms.SignMessage(ctx, m.msg, func(message *types.SignedMessage) error {
+		smsg, err := ms.SignMessage(ctx, m.msg, nil, func(message *types.SignedMessage) error {
 			return merr
 		})
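Note on the hunk above: the tests now pass a nil third argument to SignMessage. The parameter's type is not visible in this hunk, so the sketch below only assumes it is an optional, nilable spec; every name other than SignMessage is hypothetical.

```go
package main

import (
	"context"
	"fmt"
)

// Hypothetical stand-ins; the hunk only shows tests passing nil,
// so the spec type here is an assumption.
type Message struct{ Body string }
type SendSpec struct{ MaxFee int64 }

// SignMessage takes an optional spec: nil means "use defaults",
// which is exactly what the updated test passes.
func SignMessage(ctx context.Context, msg *Message, spec *SendSpec, cb func(string) error) (string, error) {
	fee := int64(0)
	if spec != nil {
		fee = spec.MaxFee
	}
	signed := fmt.Sprintf("signed(%s, maxfee=%d)", msg.Body, fee)
	if err := cb(signed); err != nil {
		return "", err // the callback error path the test exercises via merr
	}
	return signed, nil
}

func main() {
	out, _ := SignMessage(context.Background(), &Message{Body: "hi"}, nil,
		func(string) error { return nil })
	fmt.Println(out)
}
```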
@@ -106,8 +106,8 @@ func (f *TestFullNode) WaitForSectorActive(ctx context.Context, t *testing.T, sn
 	}
 }
 
-func (t *TestFullNode) AssignPrivKey(pkey *Libp2p) {
-	t.Pkey = pkey
+func (f *TestFullNode) AssignPrivKey(pkey *Libp2p) {
+	f.Pkey = pkey
 }
 
 // ChainPredicate encapsulates a chain condition.
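The receiver rename above is consistency, not behavior: the type's other methods already use f (see WaitForSectorActive in the hunk header), and a receiver named t invites confusion with the ubiquitous t *testing.T in this test kit. A minimal, self-contained sketch of the convention, with hypothetical miniature types:

```go
package main

import "fmt"

// Hypothetical miniatures of the kit types.
type Libp2p struct{ PeerID string }
type TestFullNode struct{ Pkey *Libp2p }

// Receiver named f, matching the type's other methods; a receiver named t
// would shadow or collide with t *testing.T once a helper takes one.
func (f *TestFullNode) AssignPrivKey(pkey *Libp2p) {
	f.Pkey = pkey
}

func main() {
	var n TestFullNode
	n.AssignPrivKey(&Libp2p{PeerID: "12D3KooW-example"})
	fmt.Println(n.Pkey.PeerID)
}
```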
@@ -37,17 +37,17 @@ func generatePrivKey() (*kit.Libp2p, error) {
 		return nil, err
 	}
 
-	return &kit.Libp2p{peerId, privkey}, nil
+	return &kit.Libp2p{PeerID: peerId, PrivKey: privkey}, nil
 }
 
-func getRaftState(t *testing.T, ctx context.Context, node *kit.TestFullNode) consensus.RaftState {
+func getRaftState(ctx context.Context, t *testing.T, node *kit.TestFullNode) consensus.RaftState {
 	raftState, err := node.RaftState(ctx)
 	require.NoError(t, err)
 	rstate := raftState.(consensus.RaftState)
 	return rstate
 }
 
-func setup(t *testing.T, ctx context.Context, node0 *kit.TestFullNode, node1 *kit.TestFullNode, node2 *kit.TestFullNode, miner *kit.TestMiner) *kit.Ensemble {
+func setup(ctx context.Context, t *testing.T, node0 *kit.TestFullNode, node1 *kit.TestFullNode, node2 *kit.TestFullNode, miner *kit.TestMiner) *kit.Ensemble {
 
 	blockTime := 5 * time.Millisecond
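Two idioms land in this hunk: the kit.Libp2p literal gains field keys, which go vet's composites check expects for structs from other packages, and context.Context moves to the first parameter position, per standard Go style. A runnable sketch of both, with a hypothetical stand-in type:

```go
package main

import (
	"context"
	"fmt"
)

// Hypothetical stand-in for kit.Libp2p.
type Libp2p struct {
	PeerID  string
	PrivKey []byte
}

// ctx first: the Go convention is that context.Context, when present,
// is always the first parameter; t *testing.T and the rest follow.
func describe(ctx context.Context, l *Libp2p) string {
	_ = ctx // would bound RPC calls in the real helpers
	return fmt.Sprintf("peer %s (%d key bytes)", l.PeerID, len(l.PrivKey))
}

func main() {
	// Keyed fields survive reordering or insertion of new struct fields;
	// unkeyed literals of imported structs are flagged by `go vet`.
	l := &Libp2p{PeerID: "12D3KooW-example", PrivKey: []byte{0x01, 0x02}}
	fmt.Println(describe(context.Background(), l))
}
```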
@@ -112,7 +112,7 @@ func TestRaftState(t *testing.T) {
 		miner kit.TestMiner
 	)
 
-	setup(t, ctx, &node0, &node1, &node2, &miner)
+	setup(ctx, t, &node0, &node1, &node2, &miner)
 
 	fmt.Println(node0.WalletList(context.Background()))
 	fmt.Println(node1.WalletList(context.Background()))
@@ -135,9 +135,9 @@ func TestRaftState(t *testing.T) {
 	require.NoError(t, err)
 	require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode)
 
-	rstate0 := getRaftState(t, ctx, &node0)
-	rstate1 := getRaftState(t, ctx, &node1)
-	rstate2 := getRaftState(t, ctx, &node2)
+	rstate0 := getRaftState(ctx, t, &node0)
+	rstate1 := getRaftState(ctx, t, &node1)
+	rstate2 := getRaftState(ctx, t, &node2)
 
 	require.True(t, reflect.DeepEqual(rstate0, rstate1))
 	require.True(t, reflect.DeepEqual(rstate0, rstate2))
@@ -158,7 +158,7 @@ func TestRaftStateLeaderDisconnects(t *testing.T) {
 	nodes := []*kit.TestFullNode{&node0, &node1, &node2}
 
-	setup(t, ctx, &node0, &node1, &node2, &miner)
+	setup(ctx, t, &node0, &node1, &node2, &miner)
 
 	peerToNode := make(map[peer.ID]*kit.TestFullNode)
 	for _, n := range nodes {
@@ -182,9 +182,9 @@ func TestRaftStateLeaderDisconnects(t *testing.T) {
 	require.NoError(t, err)
 	require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode)
 
-	rstate0 := getRaftState(t, ctx, &node0)
-	rstate1 := getRaftState(t, ctx, &node1)
-	rstate2 := getRaftState(t, ctx, &node2)
+	rstate0 := getRaftState(ctx, t, &node0)
+	rstate1 := getRaftState(ctx, t, &node1)
+	rstate2 := getRaftState(ctx, t, &node2)
 
 	require.True(t, reflect.DeepEqual(rstate0, rstate1))
 	require.True(t, reflect.DeepEqual(rstate0, rstate2))
@@ -193,7 +193,8 @@ func TestRaftStateLeaderDisconnects(t *testing.T) {
 	require.NoError(t, err)
 	leaderNode := peerToNode[leader]
 
-	leaderNode.Stop(ctx)
+	err = leaderNode.Stop(ctx)
+	require.NoError(t, err)
 	oldLeaderNode := leaderNode
 
 	time.Sleep(5 * time.Second)
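This is the one behavioral change in the test: leaderNode.Stop's error was previously discarded, which is exactly what linters like errcheck flag, and now it is asserted. A minimal sketch of the pattern outside the test kit:

```go
package main

import (
	"context"
	"errors"
	"log"
)

type node struct{ stopped bool }

func (n *node) Stop(ctx context.Context) error {
	if n.stopped {
		return errors.New("already stopped")
	}
	n.stopped = true
	return nil
}

func main() {
	n := &node{}
	// Before: n.Stop(ctx) silently dropped a possible failure.
	// After: capture and assert it (the test uses require.NoError).
	if err := n.Stop(context.Background()); err != nil {
		log.Fatalf("stop: %v", err)
	}
	log.Println("node stopped cleanly")
}
```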
@@ -225,11 +226,11 @@ func TestRaftStateLeaderDisconnects(t *testing.T) {
 	require.NoError(t, err)
 	require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode)
 
-	rstate := getRaftState(t, ctx, leaderNode)
+	rstate := getRaftState(ctx, t, leaderNode)
 
 	for _, n := range nodes {
 		if n != oldLeaderNode {
-			rs := getRaftState(t, ctx, n)
+			rs := getRaftState(ctx, t, n)
 			require.True(t, reflect.DeepEqual(rs, rstate))
 		}
 	}
@@ -264,7 +264,7 @@ func (cfg *Config) ConfigKey() string {
 //}
 //
 
 // Default initializes this configuration with working defaults.
-func (cfg *Config) Default() error {
+func (cfg *Config) Default() {
 	cfg.DataFolder = "" // empty so it gets omitted
 	cfg.InitPeerset = []peer.ID{}
 	cfg.WaitForLeaderTimeout = DefaultWaitForLeaderTimeout
@@ -282,7 +282,6 @@ func (cfg *Config) Default() error {
 	// Set up logging
 	cfg.RaftConfig.LogOutput = ioutil.Discard
 	//cfg.RaftConfig.Logger = &hcLogToLogger{}
-	return nil
 }
 
 func NewDefaultConfig() *Config {
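Taken together, the two Config hunks remove an error return that could never be non-nil: Default's only return statement was return nil, so the signature shrinks and every call site loses a dead error check. A sketch of the shape of the change at a hypothetical caller:

```go
package main

import "fmt"

// Hypothetical miniature of the raft Config.
type Config struct{ DataFolder string }

// After the change: no error return, because no code path could fail.
func (cfg *Config) Default() {
	cfg.DataFolder = "" // empty so it gets omitted
}

func NewDefaultConfig() *Config {
	cfg := &Config{}
	// Before, callers were forced to write
	//   if err := cfg.Default(); err != nil { ... }
	// even though err was provably always nil.
	cfg.Default()
	return cfg
}

func main() {
	fmt.Printf("%+v\n", *NewDefaultConfig())
}
```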
@@ -149,7 +149,7 @@ func (a *MpoolAPI) MpoolPushMessage(ctx context.Context, msg *types.Message, spe
 	// Redirect to leader if current node is not leader. A single non raft based node is always the leader
 	if !a.MessageSigner.IsLeader(ctx) {
 		var signedMsg types.SignedMessage
-		redirected, err := a.MessageSigner.RedirectToLeader(ctx, "MpoolPushMessage", api.MpoolMessageWhole{msg, spec}, &signedMsg)
+		redirected, err := a.MessageSigner.RedirectToLeader(ctx, "MpoolPushMessage", api.MpoolMessageWhole{Msg: msg, Spec: spec}, &signedMsg)
 		if err != nil {
 			return nil, err
 		}
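The final hunk applies the same keyed-literal fix to the payload forwarded to the raft leader. The surrounding redirect-to-leader flow is only partly visible here, so the sketch below is a generic, self-contained rendering of that pattern, with hypothetical names throughout:

```go
package main

import (
	"context"
	"fmt"
)

// Hypothetical miniatures; the real Lotus types are richer.
type MessageWhole struct {
	Msg  string
	Spec string
}

type Signer struct{ leader bool }

func (s *Signer) IsLeader(ctx context.Context) bool { return s.leader }

// RedirectToLeader stands in for the RPC hop to the raft leader.
func (s *Signer) RedirectToLeader(ctx context.Context, method string, in MessageWhole, out *string) (bool, error) {
	*out = "signed-by-leader:" + in.Msg
	return true, nil
}

func push(ctx context.Context, s *Signer, msg, spec string) (string, error) {
	// Followers forward the whole request; keyed fields (Msg:, Spec:)
	// keep the literal valid if MessageWhole ever changes shape.
	if !s.IsLeader(ctx) {
		var signed string
		ok, err := s.RedirectToLeader(ctx, "MpoolPushMessage",
			MessageWhole{Msg: msg, Spec: spec}, &signed)
		if err != nil {
			return "", err
		}
		if ok {
			return signed, nil
		}
	}
	return "signed-locally:" + msg, nil // the leader signs locally
}

func main() {
	out, err := push(context.Background(), &Signer{leader: false}, "m1", "default-spec")
	fmt.Println(out, err)
}
```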