Address more review comments

This commit is contained in:
Shrenuj Bansal 2022-09-22 16:27:15 -04:00
parent 1fe4aa3467
commit 7470549199
13 changed files with 19 additions and 139 deletions

View File

@ -39,7 +39,7 @@ type MsgSigner interface {
SaveNonce(ctx context.Context, addr address.Address, nonce uint64) error
dstoreKey(addr address.Address) datastore.Key
IsLeader(ctx context.Context) bool
RaftLeader(ctx context.Context) (peer.ID, error)
Leader(ctx context.Context) (peer.ID, error)
RedirectToLeader(ctx context.Context, method string, arg interface{}, ret interface{}) (bool, error)
GetRaftState(ctx context.Context) (consensus.State, error)
}
@ -205,13 +205,13 @@ func (ms *MessageSigner) IsLeader(ctx context.Context) bool {
}
func (ms *MessageSigner) RedirectToLeader(ctx context.Context, method string, arg interface{}, ret interface{}) (bool, error) {
return false, xerrors.Errorf("Single node shouldn't have any redirects")
return false, xerrors.Errorf("single node shouldn't have any redirects")
}
func (ms *MessageSigner) GetRaftState(ctx context.Context) (consensus.State, error) {
return nil, xerrors.Errorf("This is a non raft consensus message signer")
return nil, xerrors.Errorf("this is a non raft consensus message signer")
}
func (ms *MessageSigner) RaftLeader(ctx context.Context) (peer.ID, error) {
return "", xerrors.Errorf("No leaders in non raft message signer")
func (ms *MessageSigner) Leader(ctx context.Context) (peer.ID, error) {
return "", xerrors.Errorf("no leaders in non raft message signer")
}

View File

@ -94,6 +94,6 @@ func (ms *MessageSignerConsensus) GetRaftState(ctx context.Context) (libp2pconse
return ms.consensus.State(ctx)
}
func (ms *MessageSignerConsensus) RaftLeader(ctx context.Context) (peer.ID, error) {
func (ms *MessageSignerConsensus) Leader(ctx context.Context) (peer.ID, error) {
return ms.consensus.Leader(ctx)
}

Binary file not shown.

View File

@ -69,7 +69,7 @@ func setup(ctx context.Context, t *testing.T, node0 *kit.TestFullNode, node1 *ki
node.Override(new(*messagesigner.MessageSignerConsensus), messagesigner.NewMessageSignerConsensus),
node.Override(new(messagesigner.MsgSigner), func(ms *messagesigner.MessageSignerConsensus) *messagesigner.MessageSignerConsensus { return ms }),
node.Override(new(*modules.RPCHandler), modules.NewRPCHandler),
node.Override(node.RPCServer, modules.NewRPCServer),
node.Override(node.GoRPCServer, modules.NewRPCServer),
)
//raftOps := kit.ConstructorOpts()
@ -140,9 +140,8 @@ func TestRaftState(t *testing.T) {
rstate1 := getRaftState(ctx, t, &node1)
rstate2 := getRaftState(ctx, t, &node2)
require.True(t, reflect.DeepEqual(rstate0, rstate1))
require.True(t, reflect.DeepEqual(rstate0, rstate2))
require.EqualValues(t, rstate0, rstate1)
require.EqualValues(t, rstate0, rstate2)
}
func TestRaftStateLeaderDisconnects(t *testing.T) {
@ -210,7 +209,6 @@ func TestRaftStateLeaderDisconnects(t *testing.T) {
}
require.NotEqual(t, newLeader, leader)
leaderNode = peerToNode[newLeader]
msg2 := &types.Message{

View File

@ -26,7 +26,7 @@ func TestSelfSentTxnV15(t *testing.T) {
kit.QuietMiningLogs()
client15, _, ens := kit.EnsembleMinimal(t, kit.MockProofs()) //, kit.GenesisNetworkVersion(network.Version15))
client15, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.GenesisNetworkVersion(network.Version15))
ens.InterconnectAll().BeginMining(10 * time.Millisecond)
bal, err := client15.WalletBalance(ctx, client15.DefaultKey.Address)

View File

@ -6,11 +6,11 @@ import (
"context"
"errors"
"fmt"
"golang.org/x/exp/slices"
"sort"
"time"
"github.com/google/uuid"
"golang.org/x/exp/slices"
addr "github.com/filecoin-project/go-address"
@ -457,7 +457,6 @@ func (cc *Consensus) State(ctx context.Context) (consensus.State, error) {
st, err := cc.consensus.GetLogHead()
if err == libp2praft.ErrNoState {
fmt.Println("Err no state return")
return newRaftState(), nil
}

View File

@ -1,112 +0,0 @@
{
"NetworkVersion": 16,
"Accounts": [
{
"Type": "account",
"Balance": "50000000000000000000000000",
"Meta": {
"Owner": "t3v26vdulzazde5z2fvxtyy7pm24qn2ztxp6jvqsfh3yqe3vkj44w4mlmvfbwuxp2c7n3yc44o7sbge2snbqha"
}
}
],
"Miners": [
{
"ID": "t01000",
"Owner": "t3v26vdulzazde5z2fvxtyy7pm24qn2ztxp6jvqsfh3yqe3vkj44w4mlmvfbwuxp2c7n3yc44o7sbge2snbqha",
"Worker": "t3v26vdulzazde5z2fvxtyy7pm24qn2ztxp6jvqsfh3yqe3vkj44w4mlmvfbwuxp2c7n3yc44o7sbge2snbqha",
"PeerId": "12D3KooWPp8ochucGCrspFfuYqEf26wz3DZUUWAd7dGSw5e1bqio",
"MarketBalance": "0",
"PowerBalance": "0",
"SectorSize": 2048,
"Sectors": [
{
"CommR": {
"/": "bagboea4b5abcbiikcvp43xiaxqwvzs56vmefz72hj5zpv4myynrsymnsfhv3luzh"
},
"CommD": {
"/": "baga6ea4seaqlaz34kjgrpb5l6isyjjsodxzs74rkq6ym5rsw67zqpk2hsxyruky"
},
"SectorID": 0,
"Deal": {
"PieceCID": {
"/": "baga6ea4seaqlaz34kjgrpb5l6isyjjsodxzs74rkq6ym5rsw67zqpk2hsxyruky"
},
"PieceSize": 2048,
"VerifiedDeal": false,
"Client": "t3v26vdulzazde5z2fvxtyy7pm24qn2ztxp6jvqsfh3yqe3vkj44w4mlmvfbwuxp2c7n3yc44o7sbge2snbqha",
"Provider": "t01000",
"Label": "0",
"StartEpoch": 0,
"EndEpoch": 9001,
"StoragePricePerEpoch": "0",
"ProviderCollateral": "0",
"ClientCollateral": "0"
},
"DealClientKey": {
"Type": "bls",
"PrivateKey": "jM6TtJBVoBleVAABOX7oQxbOqxiK2e0cjyEo34VOPjw=",
"PublicKey": "rr1R0XkGRk7nRa3njH3s1yDdZnd/k1hIp94gTdVJ5y3GLZUobUu/Qvt3gXOO/IJi",
"Address": "t3v26vdulzazde5z2fvxtyy7pm24qn2ztxp6jvqsfh3yqe3vkj44w4mlmvfbwuxp2c7n3yc44o7sbge2snbqha"
},
"ProofType": 5
},
{
"CommR": {
"/": "bagboea4b5abcazkra3xmise6fchdg3ownnmvsahz5gonqjiil2sgdgjy63rz4qkb"
},
"CommD": {
"/": "baga6ea4seaqbxdb2tltuyvx3i7o74ups4jn3vtmzpdwlh4oqzcfo7fxxapviqha"
},
"SectorID": 1,
"Deal": {
"PieceCID": {
"/": "baga6ea4seaqbxdb2tltuyvx3i7o74ups4jn3vtmzpdwlh4oqzcfo7fxxapviqha"
},
"PieceSize": 2048,
"VerifiedDeal": false,
"Client": "t3v26vdulzazde5z2fvxtyy7pm24qn2ztxp6jvqsfh3yqe3vkj44w4mlmvfbwuxp2c7n3yc44o7sbge2snbqha",
"Provider": "t01000",
"Label": "1",
"StartEpoch": 0,
"EndEpoch": 9001,
"StoragePricePerEpoch": "0",
"ProviderCollateral": "0",
"ClientCollateral": "0"
},
"DealClientKey": {
"Type": "bls",
"PrivateKey": "jM6TtJBVoBleVAABOX7oQxbOqxiK2e0cjyEo34VOPjw=",
"PublicKey": "rr1R0XkGRk7nRa3njH3s1yDdZnd/k1hIp94gTdVJ5y3GLZUobUu/Qvt3gXOO/IJi",
"Address": "t3v26vdulzazde5z2fvxtyy7pm24qn2ztxp6jvqsfh3yqe3vkj44w4mlmvfbwuxp2c7n3yc44o7sbge2snbqha"
},
"ProofType": 5
}
]
}
],
"NetworkName": "localnet-2602dfef-e23d-4d09-9893-3010f961e1b9",
"VerifregRootKey": {
"Type": "multisig",
"Balance": "0",
"Meta": {
"Signers": [
"t1ceb34gnsc6qk5dt6n7xg6ycwzasjhbxm3iylkiy"
],
"Threshold": 1,
"VestingDuration": 0,
"VestingStart": 0
}
},
"RemainderAccount": {
"Type": "multisig",
"Balance": "0",
"Meta": {
"Signers": [
"t1ceb34gnsc6qk5dt6n7xg6ycwzasjhbxm3iylkiy"
],
"Threshold": 1,
"VestingDuration": 0,
"VestingStart": 0
}
}
}

View File

@ -121,7 +121,7 @@ const (
SettlePaymentChannelsKey
RunPeerTaggerKey
SetupFallbackBlockstoresKey
RPCServer
GoRPCServer
SetApiEndpointKey

View File

@ -244,9 +244,9 @@ func ConfigFullNode(c interface{}) Option {
Override(new(*gorpc.Client), modules.NewRPCClient),
Override(new(*consensus.Consensus), consensus2.NewConsensusWithRPCClient(false)),
Override(new(*messagesigner.MessageSignerConsensus), messagesigner.NewMessageSignerConsensus),
Override(new(messagesigner.MsgSigner), func(ms *messagesigner.MessageSignerConsensus) *messagesigner.MessageSignerConsensus { return ms }),
Override(new(messagesigner.MsgSigner), From(new(*messagesigner.MessageSignerConsensus))),
Override(new(*modules.RPCHandler), modules.NewRPCHandler),
Override(RPCServer, modules.NewRPCServer),
Override(GoRPCServer, modules.NewRPCServer),
),
)
}

View File

@ -286,7 +286,7 @@ var (
DefaultNetworkTimeout = 100 * time.Second
DefaultCommitRetryDelay = 200 * time.Millisecond
DefaultBackupsRotate = 6
DefaultDatastoreNamespace = "/r" // from "/raft"
DefaultDatastoreNamespace = "/raft"
)
func DefaultClusterRaftConfig() *ClusterRaftConfig {

View File

@ -146,14 +146,14 @@ initialized or when starting in staging mode.`,
},
{
Num: "WaitForLeaderTimeout",
Type: "time.Duration",
Type: "Duration",
Comment: `LeaderTimeout specifies how long to wait for a leader before
failing an operation.`,
},
{
Num: "NetworkTimeout",
Type: "time.Duration",
Type: "Duration",
Comment: `NetworkTimeout specifies how long before a Raft network
operation is timed out`,
@ -167,7 +167,7 @@ we give up.`,
},
{
Num: "CommitRetryDelay",
Type: "time.Duration",
Type: "Duration",
Comment: `How long to wait between retries`,
},

View File

@ -295,5 +295,5 @@ func (a *MpoolAPI) GetRaftState(ctx context.Context) (consensus.State, error) {
}
func (a *MpoolAPI) RaftLeader(ctx context.Context) (peer.ID, error) {
return a.MessageSigner.RaftLeader(ctx)
return a.MessageSigner.Leader(ctx)
}

View File

@ -19,11 +19,6 @@ type RPCHandler struct {
cons *consensus.Consensus
}
//type ConsensusRPCAPI struct {
// cons *consensus.Consensus
// rpcHandler *RPCHandler
//}
func NewRPCHandler(mpoolAPI full.MpoolAPI, cons *consensus.Consensus) *RPCHandler {
return &RPCHandler{mpoolAPI, cons}
}