Merge pull request #9294 from filecoin-project/sbansal/nonce-coordination-and-consensus-for-chain-nodes
Raft consensus for lotus nodes in a cluster
This commit is contained in:
commit
04f0af5865
@ -915,6 +915,11 @@ workflows:
|
||||
suite: itest-mpool_msg_uuid
|
||||
target: "./itests/mpool_msg_uuid_test.go"
|
||||
|
||||
- test:
|
||||
name: test-itest-mpool_push_with_uuid
|
||||
suite: itest-mpool_push_with_uuid
|
||||
target: "./itests/mpool_push_with_uuid_test.go"
|
||||
|
||||
- test:
|
||||
name: test-itest-multisig
|
||||
suite: itest-multisig
|
||||
@ -955,6 +960,11 @@ workflows:
|
||||
suite: itest-pending_deal_allocation
|
||||
target: "./itests/pending_deal_allocation_test.go"
|
||||
|
||||
- test:
|
||||
name: test-itest-raft_messagesigner
|
||||
suite: itest-raft_messagesigner
|
||||
target: "./itests/raft_messagesigner_test.go"
|
||||
|
||||
- test:
|
||||
name: test-itest-remove_verifreg_datacap
|
||||
suite: itest-remove_verifreg_datacap
|
||||
|
@ -764,6 +764,9 @@ type FullNode interface {
|
||||
// LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that
|
||||
// the path specified when calling CreateBackup is within the base path
|
||||
CreateBackup(ctx context.Context, fpath string) error //perm:admin
|
||||
|
||||
RaftState(ctx context.Context) (*RaftStateData, error) //perm:read
|
||||
RaftLeader(ctx context.Context) (peer.ID, error) //perm:read
|
||||
}
|
||||
|
||||
type StorageAsk struct {
|
||||
|
@ -349,6 +349,10 @@ func init() {
|
||||
addExample(map[string]bitfield.BitField{
|
||||
"": bitfield.NewFromSet([]uint64{5, 6, 7, 10}),
|
||||
})
|
||||
addExample(&api.RaftStateData{
|
||||
NonceMap: make(map[address.Address]uint64),
|
||||
MsgUuids: make(map[uuid.UUID]*types.SignedMessage),
|
||||
})
|
||||
|
||||
addExample(http.Header{
|
||||
"Authorization": []string{"Bearer ey.."},
|
||||
|
@ -2244,6 +2244,36 @@ func (mr *MockFullNodeMockRecorder) PaychVoucherSubmit(arg0, arg1, arg2, arg3, a
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherSubmit", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherSubmit), arg0, arg1, arg2, arg3, arg4)
|
||||
}
|
||||
|
||||
// RaftLeader mocks base method.
|
||||
func (m *MockFullNode) RaftLeader(arg0 context.Context) (peer.ID, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "RaftLeader", arg0)
|
||||
ret0, _ := ret[0].(peer.ID)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// RaftLeader indicates an expected call of RaftLeader.
|
||||
func (mr *MockFullNodeMockRecorder) RaftLeader(arg0 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RaftLeader", reflect.TypeOf((*MockFullNode)(nil).RaftLeader), arg0)
|
||||
}
|
||||
|
||||
// RaftState mocks base method.
|
||||
func (m *MockFullNode) RaftState(arg0 context.Context) (*api.RaftStateData, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "RaftState", arg0)
|
||||
ret0, _ := ret[0].(*api.RaftStateData)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// RaftState indicates an expected call of RaftState.
|
||||
func (mr *MockFullNodeMockRecorder) RaftState(arg0 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RaftState", reflect.TypeOf((*MockFullNode)(nil).RaftState), arg0)
|
||||
}
|
||||
|
||||
// Session mocks base method.
|
||||
func (m *MockFullNode) Session(arg0 context.Context) (uuid.UUID, error) {
|
||||
m.ctrl.T.Helper()
|
||||
|
@ -342,6 +342,10 @@ type FullNodeStruct struct {
|
||||
|
||||
PaychVoucherSubmit func(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (cid.Cid, error) `perm:"sign"`
|
||||
|
||||
RaftLeader func(p0 context.Context) (peer.ID, error) `perm:"read"`
|
||||
|
||||
RaftState func(p0 context.Context) (*RaftStateData, error) `perm:"read"`
|
||||
|
||||
StateAccountKey func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) `perm:"read"`
|
||||
|
||||
StateActorCodeCIDs func(p0 context.Context, p1 abinetwork.Version) (map[string]cid.Cid, error) `perm:"read"`
|
||||
@ -2473,6 +2477,28 @@ func (s *FullNodeStub) PaychVoucherSubmit(p0 context.Context, p1 address.Address
|
||||
return *new(cid.Cid), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) RaftLeader(p0 context.Context) (peer.ID, error) {
|
||||
if s.Internal.RaftLeader == nil {
|
||||
return *new(peer.ID), ErrNotSupported
|
||||
}
|
||||
return s.Internal.RaftLeader(p0)
|
||||
}
|
||||
|
||||
func (s *FullNodeStub) RaftLeader(p0 context.Context) (peer.ID, error) {
|
||||
return *new(peer.ID), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) RaftState(p0 context.Context) (*RaftStateData, error) {
|
||||
if s.Internal.RaftState == nil {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
return s.Internal.RaftState(p0)
|
||||
}
|
||||
|
||||
func (s *FullNodeStub) RaftState(p0 context.Context) (*RaftStateData, error) {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) StateAccountKey(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) {
|
||||
if s.Internal.StateAccountKey == nil {
|
||||
return *new(address.Address), ErrNotSupported
|
||||
|
63
api/types.go
63
api/types.go
@ -59,6 +59,11 @@ type MessageSendSpec struct {
|
||||
MsgUuid uuid.UUID
|
||||
}
|
||||
|
||||
type MpoolMessageWhole struct {
|
||||
Msg *types.Message
|
||||
Spec *MessageSendSpec
|
||||
}
|
||||
|
||||
// GraphSyncDataTransfer provides diagnostics on a data transfer happening over graphsync
|
||||
type GraphSyncDataTransfer struct {
|
||||
// GraphSync request id for this transfer
|
||||
@ -334,3 +339,61 @@ type ForkUpgradeParams struct {
|
||||
UpgradeSkyrHeight abi.ChainEpoch
|
||||
UpgradeSharkHeight abi.ChainEpoch
|
||||
}
|
||||
|
||||
type NonceMapType map[address.Address]uint64
|
||||
type MsgUuidMapType map[uuid.UUID]*types.SignedMessage
|
||||
|
||||
type RaftStateData struct {
|
||||
NonceMap NonceMapType
|
||||
MsgUuids MsgUuidMapType
|
||||
}
|
||||
|
||||
func (n *NonceMapType) MarshalJSON() ([]byte, error) {
|
||||
marshalled := make(map[string]uint64)
|
||||
for a, n := range *n {
|
||||
marshalled[a.String()] = n
|
||||
}
|
||||
return json.Marshal(marshalled)
|
||||
}
|
||||
|
||||
func (n *NonceMapType) UnmarshalJSON(b []byte) error {
|
||||
unmarshalled := make(map[string]uint64)
|
||||
err := json.Unmarshal(b, &unmarshalled)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*n = make(map[address.Address]uint64)
|
||||
for saddr, nonce := range unmarshalled {
|
||||
a, err := address.NewFromString(saddr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
(*n)[a] = nonce
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MsgUuidMapType) MarshalJSON() ([]byte, error) {
|
||||
marshalled := make(map[string]*types.SignedMessage)
|
||||
for u, msg := range *m {
|
||||
marshalled[u.String()] = msg
|
||||
}
|
||||
return json.Marshal(marshalled)
|
||||
}
|
||||
|
||||
func (m *MsgUuidMapType) UnmarshalJSON(b []byte) error {
|
||||
unmarshalled := make(map[string]*types.SignedMessage)
|
||||
err := json.Unmarshal(b, &unmarshalled)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*m = make(map[uuid.UUID]*types.SignedMessage)
|
||||
for suid, msg := range unmarshalled {
|
||||
u, err := uuid.Parse(suid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
(*m)[u] = msg
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@ -82,6 +82,7 @@ var (
|
||||
ErrRBFTooLowPremium = errors.New("replace by fee has too low GasPremium")
|
||||
ErrTooManyPendingMessages = errors.New("too many pending messages for actor")
|
||||
ErrNonceGap = errors.New("unfulfilled nonce gap")
|
||||
ErrExistingNonce = errors.New("message with nonce already exists")
|
||||
)
|
||||
|
||||
const (
|
||||
@ -276,7 +277,7 @@ func (ms *msgSet) add(m *types.SignedMessage, mp *MessagePool, strict, untrusted
|
||||
}
|
||||
} else {
|
||||
return false, xerrors.Errorf("message from %s with nonce %d already in mpool: %w",
|
||||
m.Message.From, m.Message.Nonce, ErrSoftValidationFailure)
|
||||
m.Message.From, m.Message.Nonce, ErrExistingNonce)
|
||||
}
|
||||
|
||||
ms.requiredFunds.Sub(ms.requiredFunds, exms.Message.RequiredFunds().Int)
|
||||
@ -667,7 +668,9 @@ func (mp *MessagePool) verifyMsgBeforeAdd(ctx context.Context, m *types.SignedMe
|
||||
return publish, nil
|
||||
}
|
||||
|
||||
func (mp *MessagePool) Push(ctx context.Context, m *types.SignedMessage) (cid.Cid, error) {
|
||||
// Push checks the signed message for any violations, adds the message to the message pool and
|
||||
// publishes the message if the publish flag is set
|
||||
func (mp *MessagePool) Push(ctx context.Context, m *types.SignedMessage, publish bool) (cid.Cid, error) {
|
||||
done := metrics.Timer(ctx, metrics.MpoolPushDuration)
|
||||
defer done()
|
||||
|
||||
@ -683,14 +686,14 @@ func (mp *MessagePool) Push(ctx context.Context, m *types.SignedMessage) (cid.Ci
|
||||
}()
|
||||
|
||||
mp.curTsLk.Lock()
|
||||
publish, err := mp.addTs(ctx, m, mp.curTs, true, false)
|
||||
ok, err := mp.addTs(ctx, m, mp.curTs, true, false)
|
||||
if err != nil {
|
||||
mp.curTsLk.Unlock()
|
||||
return cid.Undef, err
|
||||
}
|
||||
mp.curTsLk.Unlock()
|
||||
|
||||
if publish {
|
||||
if ok && publish {
|
||||
msgb, err := m.Serialize()
|
||||
if err != nil {
|
||||
return cid.Undef, xerrors.Errorf("error serializing message: %w", err)
|
||||
@ -1583,3 +1586,8 @@ func getBaseFeeLowerBound(baseFee, factor types.BigInt) types.BigInt {
|
||||
|
||||
return baseFeeLowerBound
|
||||
}
|
||||
|
||||
type MpoolNonceAPI interface {
|
||||
GetNonce(context.Context, address.Address, types.TipSetKey) (uint64, error)
|
||||
GetActor(context.Context, address.Address, types.TipSetKey) (*types.Actor, error)
|
||||
}
|
||||
|
@ -545,7 +545,7 @@ func TestLoadLocal(t *testing.T) {
|
||||
for i := 0; i < 10; i++ {
|
||||
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
|
||||
//stm: @CHAIN_MEMPOOL_PUSH_001
|
||||
cid, err := mp.Push(context.TODO(), m)
|
||||
cid, err := mp.Push(context.TODO(), m, true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -618,7 +618,7 @@ func TestClearAll(t *testing.T) {
|
||||
for i := 0; i < 10; i++ {
|
||||
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
|
||||
//stm: @CHAIN_MEMPOOL_PUSH_001
|
||||
_, err := mp.Push(context.TODO(), m)
|
||||
_, err := mp.Push(context.TODO(), m, true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -676,7 +676,7 @@ func TestClearNonLocal(t *testing.T) {
|
||||
for i := 0; i < 10; i++ {
|
||||
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
|
||||
//stm: @CHAIN_MEMPOOL_PUSH_001
|
||||
_, err := mp.Push(context.TODO(), m)
|
||||
_, err := mp.Push(context.TODO(), m, true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -749,7 +749,7 @@ func TestUpdates(t *testing.T) {
|
||||
for i := 0; i < 10; i++ {
|
||||
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
|
||||
//stm: @CHAIN_MEMPOOL_PUSH_001
|
||||
_, err := mp.Push(context.TODO(), m)
|
||||
_, err := mp.Push(context.TODO(), m, true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -12,7 +12,6 @@ import (
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/network"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/messagesigner"
|
||||
"github.com/filecoin-project/lotus/chain/stmgr"
|
||||
"github.com/filecoin-project/lotus/chain/store"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
@ -42,7 +41,7 @@ type mpoolProvider struct {
|
||||
sm *stmgr.StateManager
|
||||
ps *pubsub.PubSub
|
||||
|
||||
lite messagesigner.MpoolNonceAPI
|
||||
lite MpoolNonceAPI
|
||||
}
|
||||
|
||||
var _ Provider = (*mpoolProvider)(nil)
|
||||
@ -51,7 +50,7 @@ func NewProvider(sm *stmgr.StateManager, ps *pubsub.PubSub) Provider {
|
||||
return &mpoolProvider{sm: sm, ps: ps}
|
||||
}
|
||||
|
||||
func NewProviderLite(sm *stmgr.StateManager, ps *pubsub.PubSub, noncer messagesigner.MpoolNonceAPI) Provider {
|
||||
func NewProviderLite(sm *stmgr.StateManager, ps *pubsub.PubSub, noncer MpoolNonceAPI) Provider {
|
||||
return &mpoolProvider{sm: sm, ps: ps, lite: noncer}
|
||||
}
|
||||
|
||||
|
@ -60,7 +60,7 @@ func TestRepubMessages(t *testing.T) {
|
||||
for i := 0; i < 10; i++ {
|
||||
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
|
||||
//stm: @CHAIN_MEMPOOL_PUSH_001
|
||||
_, err := mp.Push(context.TODO(), m)
|
||||
_, err := mp.Push(context.TODO(), m, true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -15,6 +15,7 @@ import (
|
||||
"github.com/filecoin-project/go-address"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/chain/messagepool"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
)
|
||||
@ -24,9 +25,12 @@ const dsKeyMsgUUIDSet = "MsgUuidSet"
|
||||
|
||||
var log = logging.Logger("messagesigner")
|
||||
|
||||
type MpoolNonceAPI interface {
|
||||
GetNonce(context.Context, address.Address, types.TipSetKey) (uint64, error)
|
||||
GetActor(context.Context, address.Address, types.TipSetKey) (*types.Actor, error)
|
||||
type MsgSigner interface {
|
||||
SignMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, cb func(*types.SignedMessage) error) (*types.SignedMessage, error)
|
||||
GetSignedMessage(ctx context.Context, uuid uuid.UUID) (*types.SignedMessage, error)
|
||||
StoreSignedMessage(ctx context.Context, uuid uuid.UUID, message *types.SignedMessage) error
|
||||
NextNonce(ctx context.Context, addr address.Address) (uint64, error)
|
||||
SaveNonce(ctx context.Context, addr address.Address, nonce uint64) error
|
||||
}
|
||||
|
||||
// MessageSigner keeps track of nonces per address, and increments the nonce
|
||||
@ -34,11 +38,11 @@ type MpoolNonceAPI interface {
|
||||
type MessageSigner struct {
|
||||
wallet api.Wallet
|
||||
lk sync.Mutex
|
||||
mpool MpoolNonceAPI
|
||||
mpool messagepool.MpoolNonceAPI
|
||||
ds datastore.Batching
|
||||
}
|
||||
|
||||
func NewMessageSigner(wallet api.Wallet, mpool MpoolNonceAPI, ds dtypes.MetadataDS) *MessageSigner {
|
||||
func NewMessageSigner(wallet api.Wallet, mpool messagepool.MpoolNonceAPI, ds dtypes.MetadataDS) *MessageSigner {
|
||||
ds = namespace.Wrap(ds, datastore.NewKey("/message-signer/"))
|
||||
return &MessageSigner{
|
||||
wallet: wallet,
|
||||
@ -49,12 +53,12 @@ func NewMessageSigner(wallet api.Wallet, mpool MpoolNonceAPI, ds dtypes.Metadata
|
||||
|
||||
// SignMessage increments the nonce for the message From address, and signs
|
||||
// the message
|
||||
func (ms *MessageSigner) SignMessage(ctx context.Context, msg *types.Message, cb func(*types.SignedMessage) error) (*types.SignedMessage, error) {
|
||||
func (ms *MessageSigner) SignMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, cb func(*types.SignedMessage) error) (*types.SignedMessage, error) {
|
||||
ms.lk.Lock()
|
||||
defer ms.lk.Unlock()
|
||||
|
||||
// Get the next message nonce
|
||||
nonce, err := ms.nextNonce(ctx, msg.From)
|
||||
nonce, err := ms.NextNonce(ctx, msg.From)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to create nonce: %w", err)
|
||||
}
|
||||
@ -72,7 +76,7 @@ func (ms *MessageSigner) SignMessage(ctx context.Context, msg *types.Message, cb
|
||||
Extra: mb.RawData(),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to sign message: %w", err)
|
||||
return nil, xerrors.Errorf("failed to sign message: %w, addr=%s", err, msg.From)
|
||||
}
|
||||
|
||||
// Callback with the signed message
|
||||
@ -80,13 +84,14 @@ func (ms *MessageSigner) SignMessage(ctx context.Context, msg *types.Message, cb
|
||||
Message: *msg,
|
||||
Signature: *sig,
|
||||
}
|
||||
|
||||
err = cb(smsg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If the callback executed successfully, write the nonce to the datastore
|
||||
if err := ms.saveNonce(ctx, msg.From, nonce); err != nil {
|
||||
if err := ms.SaveNonce(ctx, msg.From, nonce); err != nil {
|
||||
return nil, xerrors.Errorf("failed to save nonce: %w", err)
|
||||
}
|
||||
|
||||
@ -113,9 +118,9 @@ func (ms *MessageSigner) StoreSignedMessage(ctx context.Context, uuid uuid.UUID,
|
||||
return ms.ds.Put(ctx, key, serializedMsg)
|
||||
}
|
||||
|
||||
// nextNonce gets the next nonce for the given address.
|
||||
// NextNonce gets the next nonce for the given address.
|
||||
// If there is no nonce in the datastore, gets the nonce from the message pool.
|
||||
func (ms *MessageSigner) nextNonce(ctx context.Context, addr address.Address) (uint64, error) {
|
||||
func (ms *MessageSigner) NextNonce(ctx context.Context, addr address.Address) (uint64, error) {
|
||||
// Nonces used to be created by the mempool and we need to support nodes
|
||||
// that have mempool nonces, so first check the mempool for a nonce for
|
||||
// this address. Note that the mempool returns the actor state's nonce
|
||||
@ -159,9 +164,9 @@ func (ms *MessageSigner) nextNonce(ctx context.Context, addr address.Address) (u
|
||||
}
|
||||
}
|
||||
|
||||
// saveNonce increments the nonce for this address and writes it to the
|
||||
// SaveNonce increments the nonce for this address and writes it to the
|
||||
// datastore
|
||||
func (ms *MessageSigner) saveNonce(ctx context.Context, addr address.Address, nonce uint64) error {
|
||||
func (ms *MessageSigner) SaveNonce(ctx context.Context, addr address.Address, nonce uint64) error {
|
||||
// Increment the nonce
|
||||
nonce++
|
||||
|
||||
|
98
chain/messagesigner/messagesigner_consensus.go
Normal file
98
chain/messagesigner/messagesigner_consensus.go
Normal file
@ -0,0 +1,98 @@
|
||||
package messagesigner
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/ipfs/go-datastore"
|
||||
"github.com/ipfs/go-datastore/namespace"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/chain/messagepool"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
consensus "github.com/filecoin-project/lotus/lib/consensus/raft"
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
)
|
||||
|
||||
type MessageSignerConsensus struct {
|
||||
MsgSigner
|
||||
Consensus *consensus.Consensus
|
||||
}
|
||||
|
||||
func NewMessageSignerConsensus(
|
||||
wallet api.Wallet,
|
||||
mpool messagepool.MpoolNonceAPI,
|
||||
ds dtypes.MetadataDS,
|
||||
consensus *consensus.Consensus) *MessageSignerConsensus {
|
||||
|
||||
ds = namespace.Wrap(ds, datastore.NewKey("/message-signer-consensus/"))
|
||||
return &MessageSignerConsensus{
|
||||
MsgSigner: &MessageSigner{
|
||||
wallet: wallet,
|
||||
mpool: mpool,
|
||||
ds: ds,
|
||||
},
|
||||
Consensus: consensus,
|
||||
}
|
||||
}
|
||||
|
||||
func (ms *MessageSignerConsensus) IsLeader(ctx context.Context) bool {
|
||||
return ms.Consensus.IsLeader(ctx)
|
||||
}
|
||||
|
||||
func (ms *MessageSignerConsensus) RedirectToLeader(ctx context.Context, method string, arg interface{}, ret interface{}) (bool, error) {
|
||||
ok, err := ms.Consensus.RedirectToLeader(method, arg, ret.(*types.SignedMessage))
|
||||
if err != nil {
|
||||
return ok, err
|
||||
}
|
||||
return ok, nil
|
||||
}
|
||||
|
||||
func (ms *MessageSignerConsensus) SignMessage(
|
||||
ctx context.Context,
|
||||
msg *types.Message,
|
||||
spec *api.MessageSendSpec,
|
||||
cb func(*types.SignedMessage) error) (*types.SignedMessage, error) {
|
||||
|
||||
signedMsg, err := ms.MsgSigner.SignMessage(ctx, msg, spec, cb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
op := &consensus.ConsensusOp{
|
||||
Nonce: signedMsg.Message.Nonce,
|
||||
Uuid: spec.MsgUuid,
|
||||
Addr: signedMsg.Message.From,
|
||||
SignedMsg: signedMsg,
|
||||
}
|
||||
err = ms.Consensus.Commit(ctx, op)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return signedMsg, nil
|
||||
}
|
||||
|
||||
func (ms *MessageSignerConsensus) GetSignedMessage(ctx context.Context, uuid uuid.UUID) (*types.SignedMessage, error) {
|
||||
cstate, err := ms.Consensus.State(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
//cstate := state.(Consensus.RaftState)
|
||||
msg, ok := cstate.MsgUuids[uuid]
|
||||
if !ok {
|
||||
return nil, xerrors.Errorf("Msg with Uuid %s not available", uuid)
|
||||
}
|
||||
return msg, nil
|
||||
}
|
||||
|
||||
func (ms *MessageSignerConsensus) GetRaftState(ctx context.Context) (*consensus.RaftState, error) {
|
||||
return ms.Consensus.State(ctx)
|
||||
}
|
||||
|
||||
func (ms *MessageSignerConsensus) Leader(ctx context.Context) (peer.ID, error) {
|
||||
return ms.Consensus.Leader(ctx)
|
||||
}
|
@ -13,6 +13,7 @@ import (
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/messagepool"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/chain/wallet"
|
||||
)
|
||||
@ -22,7 +23,7 @@ type mockMpool struct {
|
||||
nonces map[address.Address]uint64
|
||||
}
|
||||
|
||||
var _ MpoolNonceAPI = (*mockMpool)(nil)
|
||||
var _ messagepool.MpoolNonceAPI = (*mockMpool)(nil)
|
||||
|
||||
func newMockMpool() *mockMpool {
|
||||
return &mockMpool{nonces: make(map[address.Address]uint64)}
|
||||
@ -187,7 +188,7 @@ func TestMessageSignerSignMessage(t *testing.T) {
|
||||
mpool.setNonce(m.msg.From, m.mpoolNonce[0])
|
||||
}
|
||||
merr := m.cbErr
|
||||
smsg, err := ms.SignMessage(ctx, m.msg, func(message *types.SignedMessage) error {
|
||||
smsg, err := ms.SignMessage(ctx, m.msg, nil, func(message *types.SignedMessage) error {
|
||||
return merr
|
||||
})
|
||||
|
||||
|
203
cli/util/api.go
203
cli/util/api.go
@ -8,8 +8,10 @@ import (
|
||||
"net/url"
|
||||
"os"
|
||||
"os/signal"
|
||||
"reflect"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/mitchellh/go-homedir"
|
||||
"github.com/urfave/cli/v2"
|
||||
@ -21,6 +23,7 @@ import (
|
||||
"github.com/filecoin-project/lotus/api/client"
|
||||
"github.com/filecoin-project/lotus/api/v0api"
|
||||
"github.com/filecoin-project/lotus/api/v1api"
|
||||
"github.com/filecoin-project/lotus/lib/retry"
|
||||
"github.com/filecoin-project/lotus/node/repo"
|
||||
)
|
||||
|
||||
@ -36,7 +39,7 @@ const (
|
||||
// 2. *_API_INFO environment variables
|
||||
// 3. deprecated *_API_INFO environment variables
|
||||
// 4. *-repo command line flags.
|
||||
func GetAPIInfo(ctx *cli.Context, t repo.RepoType) (APIInfo, error) {
|
||||
func GetAPIInfoMulti(ctx *cli.Context, t repo.RepoType) ([]APIInfo, error) {
|
||||
// Check if there was a flag passed with the listen address of the API
|
||||
// server (only used by the tests)
|
||||
for _, f := range t.APIFlags() {
|
||||
@ -46,7 +49,7 @@ func GetAPIInfo(ctx *cli.Context, t repo.RepoType) (APIInfo, error) {
|
||||
strma := ctx.String(f)
|
||||
strma = strings.TrimSpace(strma)
|
||||
|
||||
return APIInfo{Addr: strma}, nil
|
||||
return []APIInfo{{Addr: strma}}, nil
|
||||
}
|
||||
|
||||
//
|
||||
@ -56,14 +59,14 @@ func GetAPIInfo(ctx *cli.Context, t repo.RepoType) (APIInfo, error) {
|
||||
primaryEnv, fallbacksEnvs, deprecatedEnvs := t.APIInfoEnvVars()
|
||||
env, ok := os.LookupEnv(primaryEnv)
|
||||
if ok {
|
||||
return ParseApiInfo(env), nil
|
||||
return ParseApiInfoMulti(env), nil
|
||||
}
|
||||
|
||||
for _, env := range deprecatedEnvs {
|
||||
env, ok := os.LookupEnv(env)
|
||||
if ok {
|
||||
log.Warnf("Using deprecated env(%s) value, please use env(%s) instead.", env, primaryEnv)
|
||||
return ParseApiInfo(env), nil
|
||||
return ParseApiInfoMulti(env), nil
|
||||
}
|
||||
}
|
||||
|
||||
@ -76,26 +79,26 @@ func GetAPIInfo(ctx *cli.Context, t repo.RepoType) (APIInfo, error) {
|
||||
|
||||
p, err := homedir.Expand(path)
|
||||
if err != nil {
|
||||
return APIInfo{}, xerrors.Errorf("could not expand home dir (%s): %w", f, err)
|
||||
return []APIInfo{}, xerrors.Errorf("could not expand home dir (%s): %w", f, err)
|
||||
}
|
||||
|
||||
r, err := repo.NewFS(p)
|
||||
if err != nil {
|
||||
return APIInfo{}, xerrors.Errorf("could not open repo at path: %s; %w", p, err)
|
||||
return []APIInfo{}, xerrors.Errorf("could not open repo at path: %s; %w", p, err)
|
||||
}
|
||||
|
||||
exists, err := r.Exists()
|
||||
if err != nil {
|
||||
return APIInfo{}, xerrors.Errorf("repo.Exists returned an error: %w", err)
|
||||
return []APIInfo{}, xerrors.Errorf("repo.Exists returned an error: %w", err)
|
||||
}
|
||||
|
||||
if !exists {
|
||||
return APIInfo{}, errors.New("repo directory does not exist. Make sure your configuration is correct")
|
||||
return []APIInfo{}, errors.New("repo directory does not exist. Make sure your configuration is correct")
|
||||
}
|
||||
|
||||
ma, err := r.APIEndpoint()
|
||||
if err != nil {
|
||||
return APIInfo{}, xerrors.Errorf("could not get api endpoint: %w", err)
|
||||
return []APIInfo{}, xerrors.Errorf("could not get api endpoint: %w", err)
|
||||
}
|
||||
|
||||
token, err := r.APIToken()
|
||||
@ -103,38 +106,75 @@ func GetAPIInfo(ctx *cli.Context, t repo.RepoType) (APIInfo, error) {
|
||||
log.Warnf("Couldn't load CLI token, capabilities may be limited: %v", err)
|
||||
}
|
||||
|
||||
return APIInfo{
|
||||
return []APIInfo{{
|
||||
Addr: ma.String(),
|
||||
Token: token,
|
||||
}, nil
|
||||
}}, nil
|
||||
}
|
||||
|
||||
for _, env := range fallbacksEnvs {
|
||||
env, ok := os.LookupEnv(env)
|
||||
if ok {
|
||||
return ParseApiInfo(env), nil
|
||||
return ParseApiInfoMulti(env), nil
|
||||
}
|
||||
}
|
||||
|
||||
return APIInfo{}, fmt.Errorf("could not determine API endpoint for node type: %v", t.Type())
|
||||
return []APIInfo{}, fmt.Errorf("could not determine API endpoint for node type: %v", t.Type())
|
||||
}
|
||||
|
||||
func GetRawAPI(ctx *cli.Context, t repo.RepoType, version string) (string, http.Header, error) {
|
||||
ainfo, err := GetAPIInfo(ctx, t)
|
||||
if err != nil {
|
||||
return "", nil, xerrors.Errorf("could not get API info for %s: %w", t.Type(), err)
|
||||
func GetAPIInfo(ctx *cli.Context, t repo.RepoType) (APIInfo, error) {
|
||||
ainfos, err := GetAPIInfoMulti(ctx, t)
|
||||
if err != nil || len(ainfos) == 0 {
|
||||
return APIInfo{}, err
|
||||
}
|
||||
|
||||
addr, err := ainfo.DialArgs(version)
|
||||
if err != nil {
|
||||
return "", nil, xerrors.Errorf("could not get DialArgs: %w", err)
|
||||
if len(ainfos) > 1 {
|
||||
log.Warn("multiple API infos received when only one was expected")
|
||||
}
|
||||
|
||||
return ainfos[0], nil
|
||||
|
||||
}
|
||||
|
||||
type HttpHead struct {
|
||||
addr string
|
||||
header http.Header
|
||||
}
|
||||
|
||||
func GetRawAPIMulti(ctx *cli.Context, t repo.RepoType, version string) ([]HttpHead, error) {
|
||||
|
||||
var httpHeads []HttpHead
|
||||
ainfos, err := GetAPIInfoMulti(ctx, t)
|
||||
if err != nil || len(ainfos) == 0 {
|
||||
return httpHeads, xerrors.Errorf("could not get API info for %s: %w", t.Type(), err)
|
||||
}
|
||||
|
||||
for _, ainfo := range ainfos {
|
||||
addr, err := ainfo.DialArgs(version)
|
||||
if err != nil {
|
||||
return httpHeads, xerrors.Errorf("could not get DialArgs: %w", err)
|
||||
}
|
||||
httpHeads = append(httpHeads, HttpHead{addr: addr, header: ainfo.AuthHeader()})
|
||||
}
|
||||
|
||||
if IsVeryVerbose {
|
||||
_, _ = fmt.Fprintf(ctx.App.Writer, "using raw API %s endpoint: %s\n", version, addr)
|
||||
_, _ = fmt.Fprintf(ctx.App.Writer, "using raw API %s endpoint: %s\n", version, httpHeads[0].addr)
|
||||
}
|
||||
|
||||
return addr, ainfo.AuthHeader(), nil
|
||||
return httpHeads, nil
|
||||
}
|
||||
|
||||
func GetRawAPI(ctx *cli.Context, t repo.RepoType, version string) (string, http.Header, error) {
|
||||
heads, err := GetRawAPIMulti(ctx, t, version)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
if len(heads) > 1 {
|
||||
log.Warnf("More than 1 header received when expecting only one")
|
||||
}
|
||||
|
||||
return heads[0].addr, heads[0].header, nil
|
||||
}
|
||||
|
||||
func GetCommonAPI(ctx *cli.Context) (api.CommonNet, jsonrpc.ClientCloser, error) {
|
||||
@ -185,7 +225,72 @@ func GetFullNodeAPI(ctx *cli.Context) (v0api.FullNode, jsonrpc.ClientCloser, err
|
||||
return client.NewFullNodeRPCV0(ctx.Context, addr, headers)
|
||||
}
|
||||
|
||||
func GetFullNodeAPIV1(ctx *cli.Context) (v1api.FullNode, jsonrpc.ClientCloser, error) {
|
||||
type contextKey string
|
||||
|
||||
// Not thread safe
|
||||
func OnSingleNode(ctx context.Context) context.Context {
|
||||
return context.WithValue(ctx, contextKey("retry-node"), new(*int))
|
||||
}
|
||||
|
||||
func FullNodeProxy[T api.FullNode](ins []T, outstr *api.FullNodeStruct) {
|
||||
outs := api.GetInternalStructs(outstr)
|
||||
|
||||
var rins []reflect.Value
|
||||
for _, in := range ins {
|
||||
rins = append(rins, reflect.ValueOf(in))
|
||||
}
|
||||
|
||||
for _, out := range outs {
|
||||
rProxyInternal := reflect.ValueOf(out).Elem()
|
||||
|
||||
for f := 0; f < rProxyInternal.NumField(); f++ {
|
||||
field := rProxyInternal.Type().Field(f)
|
||||
|
||||
var fns []reflect.Value
|
||||
for _, rin := range rins {
|
||||
fns = append(fns, rin.MethodByName(field.Name))
|
||||
}
|
||||
|
||||
rProxyInternal.Field(f).Set(reflect.MakeFunc(field.Type, func(args []reflect.Value) (results []reflect.Value) {
|
||||
errorsToRetry := []error{&jsonrpc.RPCConnectionError{}, &jsonrpc.ErrClient{}}
|
||||
initialBackoff, err := time.ParseDuration("1s")
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
ctx := args[0].Interface().(context.Context)
|
||||
|
||||
curr := -1
|
||||
|
||||
// for calls that need to be performed on the same node
|
||||
// primarily for miner when calling create block and submit block subsequently
|
||||
key := contextKey("retry-node")
|
||||
if ctx.Value(key) != nil {
|
||||
if (*ctx.Value(key).(**int)) == nil {
|
||||
*ctx.Value(key).(**int) = &curr
|
||||
} else {
|
||||
curr = **ctx.Value(key).(**int) - 1
|
||||
}
|
||||
}
|
||||
|
||||
total := len(rins)
|
||||
result, err := retry.Retry(ctx, 5, initialBackoff, errorsToRetry, func() (results []reflect.Value, err2 error) {
|
||||
curr = (curr + 1) % total
|
||||
|
||||
result := fns[curr].Call(args)
|
||||
if result[len(result)-1].IsNil() {
|
||||
return result, nil
|
||||
}
|
||||
e := result[len(result)-1].Interface().(error)
|
||||
return result, e
|
||||
})
|
||||
return result
|
||||
}))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func GetFullNodeAPIV1Single(ctx *cli.Context) (v1api.FullNode, jsonrpc.ClientCloser, error) {
|
||||
if tn, ok := ctx.App.Metadata["testnode-full"]; ok {
|
||||
return tn.(v1api.FullNode), func() {}, nil
|
||||
}
|
||||
@ -214,6 +319,58 @@ func GetFullNodeAPIV1(ctx *cli.Context) (v1api.FullNode, jsonrpc.ClientCloser, e
|
||||
return v1API, closer, nil
|
||||
}
|
||||
|
||||
func GetFullNodeAPIV1(ctx *cli.Context) (v1api.FullNode, jsonrpc.ClientCloser, error) {
|
||||
if tn, ok := ctx.App.Metadata["testnode-full"]; ok {
|
||||
return tn.(v1api.FullNode), func() {}, nil
|
||||
}
|
||||
|
||||
heads, err := GetRawAPIMulti(ctx, repo.FullNode, "v1")
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if IsVeryVerbose {
|
||||
_, _ = fmt.Fprintln(ctx.App.Writer, "using full node API v1 endpoint:", heads[0].addr)
|
||||
}
|
||||
|
||||
var fullNodes []api.FullNode
|
||||
var closers []jsonrpc.ClientCloser
|
||||
|
||||
for _, head := range heads {
|
||||
v1api, closer, err := client.NewFullNodeRPCV1(ctx.Context, head.addr, head.header)
|
||||
if err != nil {
|
||||
log.Warnf("Not able to establish connection to node with addr: ", head.addr)
|
||||
continue
|
||||
}
|
||||
fullNodes = append(fullNodes, v1api)
|
||||
closers = append(closers, closer)
|
||||
}
|
||||
|
||||
// When running in cluster mode and trying to establish connections to multiple nodes, fail
|
||||
// if less than 2 lotus nodes are actually running
|
||||
if len(heads) > 1 && len(fullNodes) < 2 {
|
||||
return nil, nil, xerrors.Errorf("Not able to establish connection to more than a single node")
|
||||
}
|
||||
|
||||
finalCloser := func() {
|
||||
for _, c := range closers {
|
||||
c()
|
||||
}
|
||||
}
|
||||
|
||||
var v1API api.FullNodeStruct
|
||||
FullNodeProxy(fullNodes, &v1API)
|
||||
|
||||
v, err := v1API.Version(ctx.Context)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if !v.APIVersion.EqMajorMinor(api.FullAPIVersion1) {
|
||||
return nil, nil, xerrors.Errorf("Remote API version didn't match (expected %s, remote %s)", api.FullAPIVersion1, v.APIVersion)
|
||||
}
|
||||
return &v1API, finalCloser, nil
|
||||
}
|
||||
|
||||
type GetStorageMinerOptions struct {
|
||||
PreferHttp bool
|
||||
}
|
||||
|
@ -24,6 +24,7 @@ type APIInfo struct {
|
||||
|
||||
func ParseApiInfo(s string) APIInfo {
|
||||
var tok []byte
|
||||
|
||||
if infoWithToken.Match([]byte(s)) {
|
||||
sp := strings.SplitN(s, ":", 2)
|
||||
tok = []byte(sp[0])
|
||||
@ -36,6 +37,24 @@ func ParseApiInfo(s string) APIInfo {
|
||||
}
|
||||
}
|
||||
|
||||
func ParseApiInfoMulti(s string) []APIInfo {
|
||||
var apiInfos []APIInfo
|
||||
|
||||
allAddrs := strings.SplitN(s, ",", -1)
|
||||
|
||||
for _, addr := range allAddrs {
|
||||
if infoWithToken.Match([]byte(addr)) {
|
||||
sp := strings.SplitN(addr, ":", 2)
|
||||
apiInfos = append(apiInfos, APIInfo{
|
||||
Addr: sp[1],
|
||||
Token: []byte(sp[0]),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return apiInfos
|
||||
}
|
||||
|
||||
func (a APIInfo) DialArgs(version string) (string, error) {
|
||||
ma, err := multiaddr.NewMultiaddr(a.Addr)
|
||||
if err == nil {
|
||||
|
@ -164,6 +164,9 @@
|
||||
* [PaychVoucherCreate](#PaychVoucherCreate)
|
||||
* [PaychVoucherList](#PaychVoucherList)
|
||||
* [PaychVoucherSubmit](#PaychVoucherSubmit)
|
||||
* [Raft](#Raft)
|
||||
* [RaftLeader](#RaftLeader)
|
||||
* [RaftState](#RaftState)
|
||||
* [Start](#Start)
|
||||
* [StartTime](#StartTime)
|
||||
* [State](#State)
|
||||
@ -5055,6 +5058,33 @@ Response:
|
||||
}
|
||||
```
|
||||
|
||||
## Raft
|
||||
|
||||
|
||||
### RaftLeader
|
||||
|
||||
|
||||
Perms: read
|
||||
|
||||
Inputs: `null`
|
||||
|
||||
Response: `"12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"`
|
||||
|
||||
### RaftState
|
||||
|
||||
|
||||
Perms: read
|
||||
|
||||
Inputs: `null`
|
||||
|
||||
Response:
|
||||
```json
|
||||
{
|
||||
"NonceMap": {},
|
||||
"MsgUuids": {}
|
||||
}
|
||||
```
|
||||
|
||||
## Start
|
||||
|
||||
|
||||
|
@ -202,3 +202,65 @@
|
||||
#HotStoreFullGCFrequency = 20
|
||||
|
||||
|
||||
[Cluster]
|
||||
# EXPERIMENTAL. config to enabled node cluster with raft consensus
|
||||
#
|
||||
# type: bool
|
||||
# env var: LOTUS_CLUSTER_CLUSTERMODEENABLED
|
||||
#ClusterModeEnabled = false
|
||||
|
||||
# A folder to store Raft's data.
|
||||
#
|
||||
# type: string
|
||||
# env var: LOTUS_CLUSTER_DATAFOLDER
|
||||
#DataFolder = ""
|
||||
|
||||
# InitPeersetMultiAddr provides the list of initial cluster peers for new Raft
|
||||
# peers (with no prior state). It is ignored when Raft was already
|
||||
# initialized or when starting in staging mode.
|
||||
#
|
||||
# type: []string
|
||||
# env var: LOTUS_CLUSTER_INITPEERSETMULTIADDR
|
||||
#InitPeersetMultiAddr = []
|
||||
|
||||
# LeaderTimeout specifies how long to wait for a leader before
|
||||
# failing an operation.
|
||||
#
|
||||
# type: Duration
|
||||
# env var: LOTUS_CLUSTER_WAITFORLEADERTIMEOUT
|
||||
#WaitForLeaderTimeout = "15s"
|
||||
|
||||
# NetworkTimeout specifies how long before a Raft network
|
||||
# operation is timed out
|
||||
#
|
||||
# type: Duration
|
||||
# env var: LOTUS_CLUSTER_NETWORKTIMEOUT
|
||||
#NetworkTimeout = "1m40s"
|
||||
|
||||
# CommitRetries specifies how many times we retry a failed commit until
|
||||
# we give up.
|
||||
#
|
||||
# type: int
|
||||
# env var: LOTUS_CLUSTER_COMMITRETRIES
|
||||
#CommitRetries = 1
|
||||
|
||||
# How long to wait between retries
|
||||
#
|
||||
# type: Duration
|
||||
# env var: LOTUS_CLUSTER_COMMITRETRYDELAY
|
||||
#CommitRetryDelay = "200ms"
|
||||
|
||||
# BackupsRotate specifies the maximum number of Raft's DataFolder
|
||||
# copies that we keep as backups (renaming) after cleanup.
|
||||
#
|
||||
# type: int
|
||||
# env var: LOTUS_CLUSTER_BACKUPSROTATE
|
||||
#BackupsRotate = 6
|
||||
|
||||
# Tracing enables propagation of contexts across binary boundaries.
|
||||
#
|
||||
# type: bool
|
||||
# env var: LOTUS_CLUSTER_TRACING
|
||||
#Tracing = false
|
||||
|
||||
|
||||
|
13
go.mod
13
go.mod
@ -68,6 +68,8 @@ require (
|
||||
github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e
|
||||
github.com/hashicorp/go-multierror v1.1.1
|
||||
github.com/hashicorp/golang-lru v0.5.4
|
||||
github.com/hashicorp/raft v1.1.1
|
||||
github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea
|
||||
github.com/icza/backscanner v0.0.0-20210726202459-ac2ffc679f94
|
||||
github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab
|
||||
github.com/ipfs/bbloom v0.0.4
|
||||
@ -110,9 +112,12 @@ require (
|
||||
github.com/koalacxr/quantile v0.0.1
|
||||
github.com/libp2p/go-buffer-pool v0.1.0
|
||||
github.com/libp2p/go-libp2p v0.22.0
|
||||
github.com/libp2p/go-libp2p-consensus v0.0.1
|
||||
github.com/libp2p/go-libp2p-gorpc v0.4.0
|
||||
github.com/libp2p/go-libp2p-kad-dht v0.18.0
|
||||
github.com/libp2p/go-libp2p-peerstore v0.8.0
|
||||
github.com/libp2p/go-libp2p-pubsub v0.8.0
|
||||
github.com/libp2p/go-libp2p-raft v0.1.8
|
||||
github.com/libp2p/go-libp2p-record v0.2.0
|
||||
github.com/libp2p/go-libp2p-routing-helpers v0.2.3
|
||||
github.com/libp2p/go-maddr-filter v0.1.0
|
||||
@ -147,6 +152,7 @@ require (
|
||||
go.uber.org/fx v1.15.0
|
||||
go.uber.org/multierr v1.8.0
|
||||
go.uber.org/zap v1.22.0
|
||||
golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5
|
||||
golang.org/x/net v0.0.0-20220812174116-3211cb980234
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab
|
||||
@ -165,9 +171,11 @@ require (
|
||||
github.com/Stebalien/go-bitfield v0.0.1 // indirect
|
||||
github.com/akavel/rsrc v0.8.0 // indirect
|
||||
github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a // indirect
|
||||
github.com/armon/go-metrics v0.3.9 // indirect
|
||||
github.com/benbjohnson/clock v1.3.0 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bep/debounce v1.2.0 // indirect
|
||||
github.com/boltdb/bolt v1.3.1 // indirect
|
||||
github.com/cespare/xxhash v1.1.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/cheekybits/genny v1.0.0 // indirect
|
||||
@ -217,6 +225,9 @@ require (
|
||||
github.com/google/gopacket v1.1.19 // indirect
|
||||
github.com/hannahhoward/cbor-gen-for v0.0.0-20200817222906-ea96cece81f1 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/hashicorp/go-hclog v0.16.2 // indirect
|
||||
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
|
||||
github.com/hashicorp/go-msgpack v0.5.5 // indirect
|
||||
github.com/huin/goupnp v1.0.3 // indirect
|
||||
github.com/iancoleman/orderedmap v0.1.0 // indirect
|
||||
github.com/ipfs/go-bitfield v1.0.0 // indirect
|
||||
@ -304,6 +315,7 @@ require (
|
||||
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect
|
||||
github.com/spaolacci/murmur3 v1.1.0 // indirect
|
||||
github.com/uber/jaeger-client-go v2.25.0+incompatible // indirect
|
||||
github.com/ugorji/go/codec v1.2.6 // indirect
|
||||
github.com/valyala/bytebufferpool v1.0.0 // indirect
|
||||
github.com/valyala/fasttemplate v1.0.1 // indirect
|
||||
github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 // indirect
|
||||
@ -320,7 +332,6 @@ require (
|
||||
go.uber.org/dig v1.12.0 // indirect
|
||||
go4.org v0.0.0-20200411211856-f5505b9728dd // indirect
|
||||
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect
|
||||
golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5 // indirect
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
|
||||
golang.org/x/text v0.3.7 // indirect
|
||||
|
55
go.sum
55
go.sum
@ -49,6 +49,8 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
|
||||
github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I=
|
||||
github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
|
||||
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
|
||||
github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM=
|
||||
github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
|
||||
github.com/GeertJohan/go.incremental v1.0.0 h1:7AH+pY1XUgQE4Y1HcXYaMqAI0m9yrFqo/jt0CW30vsg=
|
||||
@ -99,6 +101,9 @@ github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb
|
||||
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
||||
github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg=
|
||||
github.com/armon/go-metrics v0.3.9 h1:O2sNqxBdvq8Eq5xmzljcYzAORli6RWCvEym4cJf9m18=
|
||||
github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
|
||||
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
|
||||
@ -120,6 +125,8 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
|
||||
github.com/bep/debounce v1.2.0 h1:wXds8Kq8qRfwAOpAxHrJDbCXgC5aHSzgQb/0gKsHQqo=
|
||||
github.com/bep/debounce v1.2.0/go.mod h1:H8yggRPQKLUhUoqrJC1bO2xNya7vanpDl7xR3ISbCJ0=
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
|
||||
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
|
||||
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
|
||||
github.com/briandowns/spinner v1.11.1/go.mod h1:QOuQk7x+EaDASo80FEXwlwiA+j/PPIcX3FScO+3/ZPQ=
|
||||
github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8=
|
||||
@ -164,6 +171,8 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn
|
||||
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
|
||||
github.com/cilium/ebpf v0.4.0 h1:QlHdikaxALkqWasW8hAC1mfR0jdmvbfaBdBPFmRSglA=
|
||||
github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
|
||||
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
|
||||
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
|
||||
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
@ -380,6 +389,7 @@ github.com/filecoin-project/storetheindex v0.4.17/go.mod h1:y2dL8C5D3PXi183hdxgG
|
||||
github.com/filecoin-project/test-vectors/schema v0.0.5 h1:w3zHQhzM4pYxJDl21avXjOKBLF8egrvwUwjpT8TquDg=
|
||||
github.com/filecoin-project/test-vectors/schema v0.0.5/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E=
|
||||
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
|
||||
github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ=
|
||||
github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ=
|
||||
github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
|
||||
github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
|
||||
@ -593,17 +603,27 @@ github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyN
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
|
||||
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
|
||||
github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs=
|
||||
github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
|
||||
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
|
||||
github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=
|
||||
github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
|
||||
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
|
||||
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
|
||||
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
|
||||
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
|
||||
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
|
||||
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
|
||||
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
|
||||
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
|
||||
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
|
||||
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
|
||||
@ -615,6 +635,10 @@ github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T
|
||||
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
|
||||
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
|
||||
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
|
||||
github.com/hashicorp/raft v1.1.1 h1:HJr7UE1x/JrJSc9Oy6aDBHtNHUUBHjcQjTgvUVihoZs=
|
||||
github.com/hashicorp/raft v1.1.1/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8=
|
||||
github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea h1:xykPFhrBAS2J0VBzVa5e80b5ZtYuNQtgXjN40qBZlD4=
|
||||
github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk=
|
||||
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
|
||||
github.com/hodgesds/perf-utils v0.0.8/go.mod h1:F6TfvsbtrF88i++hou29dTXlI2sfsJv+gRZDtmTJkAs=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
@ -979,7 +1003,9 @@ github.com/libp2p/go-libp2p v0.6.1/go.mod h1:CTFnWXogryAHjXAKEbOf1OWY+VeAP3lDMZk
|
||||
github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xSU1ivxn0k=
|
||||
github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw=
|
||||
github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o=
|
||||
github.com/libp2p/go-libp2p v0.13.0/go.mod h1:pM0beYdACRfHO1WcJlp65WXyG2A6NqYM+t2DTVAJxMo=
|
||||
github.com/libp2p/go-libp2p v0.14.3/go.mod h1:d12V4PdKbpL0T1/gsUNN8DfgMuRPDX8bS2QxCZlwRH0=
|
||||
github.com/libp2p/go-libp2p v0.14.4/go.mod h1:EIRU0Of4J5S8rkockZM7eJp2S0UrCyi55m2kJVru3rM=
|
||||
github.com/libp2p/go-libp2p v0.16.0/go.mod h1:ump42BsirwAWxKzsCiFnTtN1Yc+DuPu76fyMX364/O4=
|
||||
github.com/libp2p/go-libp2p v0.17.0/go.mod h1:Fkin50rsGdv5mm5BshBUtPRZknt9esfmYXBOYcwOTgw=
|
||||
github.com/libp2p/go-libp2p v0.22.0 h1:2Tce0kHOp5zASFKJbNzRElvh0iZwdtG5uZheNW8chIw=
|
||||
@ -993,6 +1019,7 @@ github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/
|
||||
github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQdNbfzE1C718tcViI=
|
||||
github.com/libp2p/go-libp2p-autonat v0.2.1/go.mod h1:MWtAhV5Ko1l6QBsHQNSuM6b1sRkXrpk0/LqCr+vCVxI=
|
||||
github.com/libp2p/go-libp2p-autonat v0.2.2/go.mod h1:HsM62HkqZmHR2k1xgX34WuWDzk/nBwNHoeyyT4IWV6A=
|
||||
github.com/libp2p/go-libp2p-autonat v0.4.0/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk=
|
||||
github.com/libp2p/go-libp2p-autonat v0.4.2/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk=
|
||||
github.com/libp2p/go-libp2p-autonat v0.6.0/go.mod h1:bFC6kY8jwzNNWoqc8iGE57vsfwyJ/lP4O4DOV1e0B2o=
|
||||
github.com/libp2p/go-libp2p-autonat v0.7.0/go.mod h1:uPvPn6J7cN+LCfFwW5tpOYvAz5NvPTc4iBamTV/WDMg=
|
||||
@ -1010,6 +1037,8 @@ github.com/libp2p/go-libp2p-connmgr v0.2.4/go.mod h1:YV0b/RIm8NGPnnNWM7hG9Q38OeQ
|
||||
github.com/libp2p/go-libp2p-connmgr v0.3.0/go.mod h1:RVoyPjJm0J9Vd1m6qUN2Tn7kJm4rL1Ml20pFsFgPGik=
|
||||
github.com/libp2p/go-libp2p-connmgr v0.4.0 h1:q/KZUS1iMDIQckMZarMYwhQisJqiFPHAVC1c4DR3hDE=
|
||||
github.com/libp2p/go-libp2p-connmgr v0.4.0/go.mod h1:exFQQm19PFAx+QuJmBPw4MM58QejzPJRFFFYnNmgi2w=
|
||||
github.com/libp2p/go-libp2p-consensus v0.0.1 h1:jcVbHRZLwTXU9iT/mPi+Lx4/OrIzq3bU1TbZNhYFCV8=
|
||||
github.com/libp2p/go-libp2p-consensus v0.0.1/go.mod h1:+9Wrfhc5QOqWB0gXI0m6ARlkHfdJpcFXmRU0WoHz4Mo=
|
||||
github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco=
|
||||
github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco=
|
||||
github.com/libp2p/go-libp2p-core v0.0.3/go.mod h1:j+YQMNz9WNSkNezXOsahp9kwZBKBvxLpKD316QWSJXE=
|
||||
@ -1052,6 +1081,9 @@ github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfx
|
||||
github.com/libp2p/go-libp2p-discovery v0.3.0/go.mod h1:o03drFnz9BVAZdzC/QUQ+NeQOu38Fu7LJGEOK2gQltw=
|
||||
github.com/libp2p/go-libp2p-discovery v0.5.0/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug=
|
||||
github.com/libp2p/go-libp2p-discovery v0.6.0/go.mod h1:/u1voHt0tKIe5oIA1RHBKQLVCWPna2dXmPNHc2zR9S8=
|
||||
github.com/libp2p/go-libp2p-gorpc v0.4.0 h1:kxHg5C3IuXeOq5FHPGbMHwQzKDlTVeB/NDr0ndc8J/g=
|
||||
github.com/libp2p/go-libp2p-gorpc v0.4.0/go.mod h1:jux2Mb6BfUE1n58KbVCmWtqvpiZo0DDaKobKInf4s5o=
|
||||
github.com/libp2p/go-libp2p-gostream v0.3.1/go.mod h1:1V3b+u4Zhaq407UUY9JLCpboaeufAeVQbnvAt12LRsI=
|
||||
github.com/libp2p/go-libp2p-gostream v0.4.0 h1:heduMMEB78yBqeEQv+P7Fn5X926MHC2jDIC7/7yLpYA=
|
||||
github.com/libp2p/go-libp2p-gostream v0.4.0/go.mod h1:21DVGBcCQwRfEXZpCnZ2kG24QiEkBpEQvG53gYXE4u0=
|
||||
github.com/libp2p/go-libp2p-host v0.0.1/go.mod h1:qWd+H1yuU0m5CwzAkvbSjqKairayEHdR5MMl7Cwa7Go=
|
||||
@ -1082,6 +1114,7 @@ github.com/libp2p/go-libp2p-net v0.0.1/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8
|
||||
github.com/libp2p/go-libp2p-net v0.0.2/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8EgNU9DrCcR8c=
|
||||
github.com/libp2p/go-libp2p-netutil v0.0.1/go.mod h1:GdusFvujWZI9Vt0X5BKqwWWmZFxecf9Gt03cKxm2f/Q=
|
||||
github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU=
|
||||
github.com/libp2p/go-libp2p-noise v0.1.1/go.mod h1:QDFLdKX7nluB7DEnlVPbz7xlLHdwHFA9HiohJRr3vwM=
|
||||
github.com/libp2p/go-libp2p-noise v0.2.0/go.mod h1:IEbYhBBzGyvdLBoxxULL/SGbJARhUeqlO8lVSREYu2Q=
|
||||
github.com/libp2p/go-libp2p-noise v0.3.0/go.mod h1:JNjHbociDJKHD64KTkzGnzqJ0FEV5gHJa6AB00kbCNQ=
|
||||
github.com/libp2p/go-libp2p-noise v0.5.0 h1:gwJZ/3iH3MRnBrLIyr/YLdCOnmqfJMptlsFFUIc3j0Y=
|
||||
@ -1112,6 +1145,8 @@ github.com/libp2p/go-libp2p-quic-transport v0.10.0/go.mod h1:RfJbZ8IqXIhxBRm5hqU
|
||||
github.com/libp2p/go-libp2p-quic-transport v0.13.0/go.mod h1:39/ZWJ1TW/jx1iFkKzzUg00W6tDJh73FC0xYudjr7Hc=
|
||||
github.com/libp2p/go-libp2p-quic-transport v0.15.0/go.mod h1:wv4uGwjcqe8Mhjj7N/Ic0aKjA+/10UnMlSzLO0yRpYQ=
|
||||
github.com/libp2p/go-libp2p-quic-transport v0.15.2/go.mod h1:wv4uGwjcqe8Mhjj7N/Ic0aKjA+/10UnMlSzLO0yRpYQ=
|
||||
github.com/libp2p/go-libp2p-raft v0.1.8 h1:Fq0aWHbbhi6WJXf+yaOQeMzV+9UgkbHIIGyaJbH3vpo=
|
||||
github.com/libp2p/go-libp2p-raft v0.1.8/go.mod h1:+YDisn3uszb7vxshLgKoDdRGs79WSbHRgrOdrYqDPk4=
|
||||
github.com/libp2p/go-libp2p-record v0.0.1/go.mod h1:grzqg263Rug/sRex85QrDOLntdFAymLDLm7lxMgU79Q=
|
||||
github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q=
|
||||
github.com/libp2p/go-libp2p-record v0.1.2/go.mod h1:pal0eNcT5nqZaTV7UGhqeGqxFgGdsU/9W//C8dqjQDk=
|
||||
@ -1131,6 +1166,7 @@ github.com/libp2p/go-libp2p-swarm v0.2.2/go.mod h1:fvmtQ0T1nErXym1/aa1uJEyN7JzaT
|
||||
github.com/libp2p/go-libp2p-swarm v0.2.3/go.mod h1:P2VO/EpxRyDxtChXz/VPVXyTnszHvokHKRhfkEgFKNM=
|
||||
github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM=
|
||||
github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk=
|
||||
github.com/libp2p/go-libp2p-swarm v0.4.0/go.mod h1:XVFcO52VoLoo0eitSxNQWYq4D6sydGOweTOAjJNraCw=
|
||||
github.com/libp2p/go-libp2p-swarm v0.5.0/go.mod h1:sU9i6BoHE0Ve5SKz3y9WfKrh8dUat6JknzUehFx8xW4=
|
||||
github.com/libp2p/go-libp2p-swarm v0.8.0/go.mod h1:sOMp6dPuqco0r0GHTzfVheVBh6UEL0L1lXUZ5ot2Fvc=
|
||||
github.com/libp2p/go-libp2p-swarm v0.9.0/go.mod h1:2f8d8uxTJmpeqHF/1ujjdXZp+98nNIbujVOMEZxCbZ8=
|
||||
@ -1158,6 +1194,7 @@ github.com/libp2p/go-libp2p-transport-upgrader v0.0.4/go.mod h1:RGq+tupk+oj7PzL2
|
||||
github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA=
|
||||
github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns=
|
||||
github.com/libp2p/go-libp2p-transport-upgrader v0.3.0/go.mod h1:i+SKzbRnvXdVbU3D1dwydnTmKRPXiAR/fyvi1dXuL4o=
|
||||
github.com/libp2p/go-libp2p-transport-upgrader v0.4.0/go.mod h1:J4ko0ObtZSmgn5BX5AmegP+dK3CSnU2lMCKsSq/EY0s=
|
||||
github.com/libp2p/go-libp2p-transport-upgrader v0.4.2/go.mod h1:NR8ne1VwfreD5VIWIU62Agt/J18ekORFU/j1i2y8zvk=
|
||||
github.com/libp2p/go-libp2p-transport-upgrader v0.4.3/go.mod h1:bpkldbOWXMrXhpZbSV1mQxTrefOg2Fi+k1ClDSA4ppw=
|
||||
github.com/libp2p/go-libp2p-transport-upgrader v0.5.0/go.mod h1:Rc+XODlB3yce7dvFV4q/RmyJGsFcCZRkeZMu/Zdg0mo=
|
||||
@ -1172,6 +1209,7 @@ github.com/libp2p/go-libp2p-yamux v0.2.7/go.mod h1:X28ENrBMU/nm4I3Nx4sZ4dgjZ6VhL
|
||||
github.com/libp2p/go-libp2p-yamux v0.2.8/go.mod h1:/t6tDqeuZf0INZMTgd0WxIRbtK2EzI2h7HbFm9eAKI4=
|
||||
github.com/libp2p/go-libp2p-yamux v0.4.0/go.mod h1:+DWDjtFMzoAwYLVkNZftoucn7PelNoy5nm3tZ3/Zw30=
|
||||
github.com/libp2p/go-libp2p-yamux v0.5.0/go.mod h1:AyR8k5EzyM2QN9Bbdg6X1SkVVuqLwTGf0L4DFq9g6po=
|
||||
github.com/libp2p/go-libp2p-yamux v0.5.1/go.mod h1:dowuvDu8CRWmr0iqySMiSxK+W0iL5cMVO9S94Y6gkv4=
|
||||
github.com/libp2p/go-libp2p-yamux v0.5.4/go.mod h1:tfrXbyaTqqSU654GTvK3ocnSZL3BuHoeTSqhcel1wsE=
|
||||
github.com/libp2p/go-libp2p-yamux v0.6.0/go.mod h1:MRhd6mAYnFRnSISp4M8i0ClV/j+mWHo2mYLifWGw33k=
|
||||
github.com/libp2p/go-libp2p-yamux v0.7.0/go.mod h1:fMyA0CsPfHkIuBU0wjRGrCjTBFiXTXxG0k5M4ETv+08=
|
||||
@ -1233,7 +1271,9 @@ github.com/libp2p/go-tcp-transport v0.0.4/go.mod h1:+E8HvC8ezEVOxIo3V5vCK9l1y/19
|
||||
github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc=
|
||||
github.com/libp2p/go-tcp-transport v0.1.1/go.mod h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY=
|
||||
github.com/libp2p/go-tcp-transport v0.2.0/go.mod h1:vX2U0CnWimU4h0SGSEsg++AzvBcroCGYw28kh94oLe0=
|
||||
github.com/libp2p/go-tcp-transport v0.2.1/go.mod h1:zskiJ70MEfWz2MKxvFB/Pv+tPIB1PpPUrHIWQ8aFw7M=
|
||||
github.com/libp2p/go-tcp-transport v0.2.3/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyPFvl1S/igQ5QD1SU=
|
||||
github.com/libp2p/go-tcp-transport v0.2.4/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyPFvl1S/igQ5QD1SU=
|
||||
github.com/libp2p/go-tcp-transport v0.4.0/go.mod h1:0y52Rwrn4076xdJYu/51/qJIdxz+EWDAOG2S45sV3VI=
|
||||
github.com/libp2p/go-testutil v0.0.1/go.mod h1:iAcJc/DKJQanJ5ws2V+u5ywdL2n12X1WbbEG+Jjy69I=
|
||||
github.com/libp2p/go-testutil v0.1.0/go.mod h1:81b2n5HypcVyrCg/MJx4Wgfp/VHojytjVe/gLzZ2Ehc=
|
||||
@ -1252,6 +1292,7 @@ github.com/libp2p/go-yamux v1.3.5/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZ
|
||||
github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE=
|
||||
github.com/libp2p/go-yamux v1.4.0/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE=
|
||||
github.com/libp2p/go-yamux v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE=
|
||||
github.com/libp2p/go-yamux/v2 v2.0.0/go.mod h1:NVWira5+sVUIU6tu1JWvaRn1dRnG+cawOJiflsAM+7U=
|
||||
github.com/libp2p/go-yamux/v2 v2.2.0/go.mod h1:3So6P6TV6r75R9jiBpiIKgU/66lOarCZjqROGxzPpPQ=
|
||||
github.com/libp2p/go-yamux/v2 v2.3.0/go.mod h1:iTU+lOIn/2h0AgKcL49clNTwfEw+WSfDYrXe05EyKIs=
|
||||
github.com/libp2p/go-yamux/v3 v3.1.2 h1:lNEy28MBk1HavUAlzKgShp+F6mn/ea1nDYWftZhFW9Q=
|
||||
@ -1306,6 +1347,7 @@ github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx
|
||||
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
|
||||
@ -1331,6 +1373,7 @@ github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00v
|
||||
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
|
||||
github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
|
||||
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
|
||||
github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
|
||||
github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA=
|
||||
@ -1429,6 +1472,7 @@ github.com/multiformats/go-multistream v0.0.1/go.mod h1:fJTiDfXJVmItycydCnNx4+wS
|
||||
github.com/multiformats/go-multistream v0.0.4/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg=
|
||||
github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg=
|
||||
github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38=
|
||||
github.com/multiformats/go-multistream v0.2.0/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k=
|
||||
github.com/multiformats/go-multistream v0.2.1/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k=
|
||||
github.com/multiformats/go-multistream v0.2.2/go.mod h1:UIcnm7Zuo8HKG+HkWgfQsGL+/MIEhyTqbODbIUwSXKs=
|
||||
github.com/multiformats/go-multistream v0.3.3 h1:d5PZpjwRgVlbwfdTDjife7XszfZd8KYWfROYFlGcR8o=
|
||||
@ -1502,6 +1546,8 @@ github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnh
|
||||
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
|
||||
github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
|
||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
|
||||
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
|
||||
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
|
||||
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
|
||||
@ -1531,6 +1577,7 @@ github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4
|
||||
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
|
||||
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
|
||||
github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU=
|
||||
@ -1697,6 +1744,7 @@ github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0
|
||||
github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
|
||||
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
|
||||
github.com/uber/jaeger-client-go v2.15.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
|
||||
github.com/uber/jaeger-client-go v2.23.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
|
||||
github.com/uber/jaeger-client-go v2.25.0+incompatible h1:IxcNZ7WRY1Y3G4poYlx24szfsn/3LvK9QHCq9oQw8+U=
|
||||
@ -1704,7 +1752,12 @@ github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMW
|
||||
github.com/uber/jaeger-lib v1.5.1-0.20181102163054-1fc5c315e03c/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
|
||||
github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/GfSYVCjK7dyaw=
|
||||
github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
|
||||
github.com/ugorji/go v1.1.13/go.mod h1:jxau1n+/wyTGLQoCkjok9r5zFa/FxT6eI5HiHKQszjc=
|
||||
github.com/ugorji/go v1.2.6/go.mod h1:anCg0y61KIhDlPZmnH+so+RQbysYVyDko0IMgJv0Nn0=
|
||||
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
|
||||
github.com/ugorji/go/codec v1.1.13/go.mod h1:oNVt3Dq+FO91WNQ/9JnHKQP2QJxTzoN7wCBFCq1OeuU=
|
||||
github.com/ugorji/go/codec v1.2.6 h1:7kbGefxLoDBuYXOms4yD7223OpNMMPNPZxXk5TvFcyQ=
|
||||
github.com/ugorji/go/codec v1.2.6/go.mod h1:V6TCNZ4PHqoHGFZuSG1W8nrCzzdgA2DozYxWFFpvxTw=
|
||||
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
@ -1897,6 +1950,7 @@ golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPh
|
||||
golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
@ -2071,6 +2125,7 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190524122548-abf6ff778158/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190524152521-dbbf3f1254d4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190526052359-791d8a0f4d09/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
@ -297,7 +297,7 @@ func startNodes(
|
||||
l, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err)
|
||||
|
||||
srv, _ := kit.CreateRPCServer(t, handler, l)
|
||||
srv, _, _ := kit.CreateRPCServer(t, handler, l)
|
||||
|
||||
// Create a gateway client API that connects to the gateway server
|
||||
var gapi api.Gateway
|
||||
|
@ -13,6 +13,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/ipfs/go-datastore"
|
||||
"github.com/ipfs/go-datastore/namespace"
|
||||
libp2pcrypto "github.com/libp2p/go-libp2p/core/crypto"
|
||||
@ -175,6 +176,16 @@ func (n *Ensemble) Mocknet() mocknet.Mocknet {
|
||||
return n.mn
|
||||
}
|
||||
|
||||
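// NewPrivKey generates a fresh libp2p Ed25519 private key and returns it together with the derived peer ID.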
func (n *Ensemble) NewPrivKey() (libp2pcrypto.PrivKey, peer.ID) {
|
||||
privkey, _, err := libp2pcrypto.GenerateEd25519Key(rand.Reader)
|
||||
require.NoError(n.t, err)
|
||||
|
||||
peerId, err := peer.IDFromPrivateKey(privkey)
|
||||
require.NoError(n.t, err)
|
||||
|
||||
return privkey, peerId
|
||||
}
|
||||
|
||||
// FullNode enrolls a new full node.
|
||||
func (n *Ensemble) FullNode(full *TestFullNode, opts ...NodeOpt) *Ensemble {
|
||||
options := DefaultNodeOpts
|
||||
@ -200,13 +211,14 @@ func (n *Ensemble) FullNode(full *TestFullNode, opts ...NodeOpt) *Ensemble {
|
||||
}
|
||||
|
||||
*full = TestFullNode{t: n.t, options: options, DefaultKey: key}
|
||||
|
||||
n.inactive.fullnodes = append(n.inactive.fullnodes, full)
|
||||
return n
|
||||
}
|
||||
|
||||
// Miner enrolls a new miner, using the provided full node for chain
|
||||
// interactions.
|
||||
func (n *Ensemble) Miner(minerNode *TestMiner, full *TestFullNode, opts ...NodeOpt) *Ensemble {
|
||||
func (n *Ensemble) MinerEnroll(minerNode *TestMiner, full *TestFullNode, opts ...NodeOpt) *Ensemble {
|
||||
require.NotNil(n.t, full, "full node required when instantiating miner")
|
||||
|
||||
options := DefaultNodeOpts
|
||||
@ -291,8 +303,16 @@ func (n *Ensemble) Miner(minerNode *TestMiner, full *TestFullNode, opts ...NodeO
|
||||
minerNode.Libp2p.PeerID = peerId
|
||||
minerNode.Libp2p.PrivKey = privkey
|
||||
|
||||
n.inactive.miners = append(n.inactive.miners, minerNode)
|
||||
return n
|
||||
}
|
||||
|
||||
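// AddInactiveMiner registers an already-enrolled miner with the ensemble so that the next Start() call brings it up.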
func (n *Ensemble) AddInactiveMiner(m *TestMiner) {
|
||||
n.inactive.miners = append(n.inactive.miners, m)
|
||||
}
|
||||
|
||||
func (n *Ensemble) Miner(minerNode *TestMiner, full *TestFullNode, opts ...NodeOpt) *Ensemble {
|
||||
n.MinerEnroll(minerNode, full, opts...)
|
||||
n.AddInactiveMiner(minerNode)
|
||||
return n
|
||||
}
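Splitting the old Miner helper into MinerEnroll and AddInactiveMiner lets a test enroll a miner against a node (for instance a merged cluster node) and only later decide when to schedule it. A minimal sketch of that two-phase flow, condensed from the raft itest added in this change; it assumes the usual itest imports (testing, time and the kit package) and the names are illustrative only:

	func exampleTwoPhaseMinerStart(t *testing.T) {
		var (
			node0, node1, node2 kit.TestFullNode
			miner               kit.TestMiner
		)

		ens := kit.NewEnsemble(t).
			FullNode(&node0, kit.ThroughRPC()).
			FullNode(&node1, kit.ThroughRPC()).
			FullNode(&node2, kit.ThroughRPC())

		// Enroll the miner against a proxy over all three nodes,
		// without scheduling it yet.
		wrapped := kit.MergeFullNodes([]*kit.TestFullNode{&node0, &node1, &node2})
		ens.MinerEnroll(&miner, wrapped, kit.WithAllSubsystems(), kit.ThroughRPC())

		// Bring the full nodes up first, then add the miner and start again.
		ens.Start()
		ens.InterconnectAll()
		ens.AddInactiveMiner(&miner)
		ens.Start()
		ens.InterconnectAll().BeginMining(time.Second)
	}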
|
||||
|
||||
@ -358,6 +378,21 @@ func (n *Ensemble) Start() *Ensemble {
|
||||
lr, err := r.Lock(repo.FullNode)
|
||||
require.NoError(n.t, err)
|
||||
|
||||
ks, err := lr.KeyStore()
|
||||
require.NoError(n.t, err)
|
||||
|
||||
if full.Pkey != nil {
|
||||
pk, err := libp2pcrypto.MarshalPrivateKey(full.Pkey.PrivKey)
|
||||
require.NoError(n.t, err)
|
||||
|
||||
err = ks.Put("libp2p-host", types.KeyInfo{
|
||||
Type: "libp2p-host",
|
||||
PrivateKey: pk,
|
||||
})
|
||||
require.NoError(n.t, err)
|
||||
|
||||
}
|
||||
|
||||
c, err := lr.Config()
|
||||
require.NoError(n.t, err)
|
||||
|
||||
@ -416,6 +451,7 @@ func (n *Ensemble) Start() *Ensemble {
|
||||
|
||||
// Construct the full node.
|
||||
stop, err := node.New(ctx, opts...)
|
||||
full.Stop = stop
|
||||
|
||||
require.NoError(n.t, err)
|
||||
|
||||
@ -425,15 +461,31 @@ func (n *Ensemble) Start() *Ensemble {
|
||||
err = full.WalletSetDefault(context.Background(), addr)
|
||||
require.NoError(n.t, err)
|
||||
|
||||
var rpcShutdownOnce sync.Once
|
||||
var stopOnce sync.Once
|
||||
var stopErr error
|
||||
|
||||
stopFunc := stop
|
||||
stop = func(ctx context.Context) error {
|
||||
stopOnce.Do(func() {
|
||||
stopErr = stopFunc(ctx)
|
||||
})
|
||||
return stopErr
|
||||
}
|
||||
|
||||
// Are we hitting this node through its RPC?
|
||||
if full.options.rpc {
|
||||
withRPC := fullRpc(n.t, full)
|
||||
withRPC, rpcCloser := fullRpc(n.t, full)
|
||||
n.inactive.fullnodes[i] = withRPC
|
||||
full.Stop = func(ctx2 context.Context) error {
|
||||
rpcShutdownOnce.Do(rpcCloser)
|
||||
return stop(ctx)
|
||||
}
|
||||
n.t.Cleanup(func() { rpcShutdownOnce.Do(rpcCloser) })
|
||||
}
|
||||
|
||||
n.t.Cleanup(func() {
|
||||
_ = stop(context.Background())
|
||||
|
||||
})
|
||||
|
||||
n.active.fullnodes = append(n.active.fullnodes, full)
|
||||
@ -477,7 +529,9 @@ func (n *Ensemble) Start() *Ensemble {
|
||||
Method: power.Methods.CreateMiner,
|
||||
Params: params,
|
||||
}
|
||||
signed, err := m.FullNode.FullNode.MpoolPushMessage(ctx, createStorageMinerMsg, nil)
|
||||
signed, err := m.FullNode.FullNode.MpoolPushMessage(ctx, createStorageMinerMsg, &api.MessageSendSpec{
|
||||
MsgUuid: uuid.New(),
|
||||
})
|
||||
require.NoError(n.t, err)
|
||||
|
||||
mw, err := m.FullNode.FullNode.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
|
||||
@ -501,7 +555,9 @@ func (n *Ensemble) Start() *Ensemble {
|
||||
Value: types.NewInt(0),
|
||||
}
|
||||
|
||||
signed, err2 := m.FullNode.FullNode.MpoolPushMessage(ctx, msg, nil)
|
||||
signed, err2 := m.FullNode.FullNode.MpoolPushMessage(ctx, msg, &api.MessageSendSpec{
|
||||
MsgUuid: uuid.New(),
|
||||
})
|
||||
require.NoError(n.t, err2)
|
||||
|
||||
mw, err2 := m.FullNode.FullNode.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
|
||||
@ -611,7 +667,9 @@ func (n *Ensemble) Start() *Ensemble {
|
||||
Value: types.NewInt(0),
|
||||
}
|
||||
|
||||
_, err2 := m.FullNode.MpoolPushMessage(ctx, msg, nil)
|
||||
_, err2 := m.FullNode.MpoolPushMessage(ctx, msg, &api.MessageSendSpec{
|
||||
MsgUuid: uuid.New(),
|
||||
})
|
||||
require.NoError(n.t, err2)
|
||||
}
|
||||
|
||||
@ -620,6 +678,13 @@ func (n *Ensemble) Start() *Ensemble {
|
||||
disallowRemoteFinalize := m.options.disallowRemoteFinalize
|
||||
|
||||
var mineBlock = make(chan lotusminer.MineReq)
|
||||
|
||||
copy := *m.FullNode
|
||||
copy.FullNode = modules.MakeUuidWrapper(copy.FullNode)
|
||||
m.FullNode = &copy

|
||||
|
||||
//m.FullNode.FullNode = modules.MakeUuidWrapper(fn.FullNode)
|
||||
|
||||
opts := []node.Option{
|
||||
node.StorageMiner(&m.StorageMiner, cfg.Subsystems),
|
||||
node.Base(),
|
||||
@ -627,7 +692,9 @@ func (n *Ensemble) Start() *Ensemble {
|
||||
node.Test(),
|
||||
|
||||
node.If(m.options.disableLibp2p, node.MockHost(n.mn)),
|
||||
node.Override(new(v1api.RawFullNodeAPI), m.FullNode.FullNode),
|
||||
//node.Override(new(v1api.RawFullNodeAPI), func() api.FullNode { return modules.MakeUuidWrapper(m.FullNode) }),
|
||||
//node.Override(new(v1api.RawFullNodeAPI), modules.MakeUuidWrapper),
|
||||
node.Override(new(v1api.RawFullNodeAPI), m.FullNode),
|
||||
node.Override(new(*lotusminer.Miner), lotusminer.NewTestMiner(mineBlock, m.ActorAddr)),
|
||||
|
||||
// disable resource filtering so that local worker gets assigned tasks
|
||||
@ -814,9 +881,9 @@ func (n *Ensemble) Start() *Ensemble {
|
||||
wait.Unlock()
|
||||
})
|
||||
wait.Lock()
|
||||
n.bootstrapped = true
|
||||
}
|
||||
|
||||
n.bootstrapped = true
|
||||
return n
|
||||
}
|
||||
|
||||
|
@ -6,6 +6,8 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
libp2pcrypto "github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
@ -16,8 +18,15 @@ import (
|
||||
"github.com/filecoin-project/lotus/api/v1api"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/chain/wallet/key"
|
||||
cliutil "github.com/filecoin-project/lotus/cli/util"
|
||||
"github.com/filecoin-project/lotus/node"
|
||||
)
|
||||
|
||||
type Libp2p struct {
|
||||
PeerID peer.ID
|
||||
PrivKey libp2pcrypto.PrivKey
|
||||
}
|
||||
|
||||
// TestFullNode represents a full node enrolled in an Ensemble.
|
||||
type TestFullNode struct {
|
||||
v1api.FullNode
|
||||
@ -30,9 +39,33 @@ type TestFullNode struct {
|
||||
ListenURL string
|
||||
DefaultKey *key.Key
|
||||
|
||||
Pkey *Libp2p
|
||||
|
||||
Stop node.StopFunc
|
||||
|
||||
options nodeOpts
|
||||
}
|
||||
|
||||
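// MergeFullNodes wraps a set of full nodes behind a single TestFullNode whose FullNode API is a FullNodeProxy over all of them.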
func MergeFullNodes(fullNodes []*TestFullNode) *TestFullNode {
|
||||
var wrappedFullNode TestFullNode
|
||||
var fns api.FullNodeStruct
|
||||
wrappedFullNode.FullNode = &fns
|
||||
|
||||
cliutil.FullNodeProxy(fullNodes, &fns)
|
||||
|
||||
wrappedFullNode.t = fullNodes[0].t
|
||||
wrappedFullNode.ListenAddr = fullNodes[0].ListenAddr
|
||||
wrappedFullNode.DefaultKey = fullNodes[0].DefaultKey
|
||||
wrappedFullNode.Stop = fullNodes[0].Stop
|
||||
wrappedFullNode.options = fullNodes[0].options
|
||||
|
||||
return &wrappedFullNode
|
||||
}
|
||||
|
||||
func (f TestFullNode) Shutdown(ctx context.Context) error {
|
||||
return f.Stop(ctx)
|
||||
}
|
||||
|
||||
func (f *TestFullNode) ClientImportCARFile(ctx context.Context, rseed int, size int) (res *api.ImportRes, carv1FilePath string, origFilePath string) {
|
||||
carv1FilePath, origFilePath = CreateRandomCARv1(f.t, rseed, size)
|
||||
res, err := f.ClientImport(ctx, api.FileRef{Path: carv1FilePath, IsCAR: true})
|
||||
@ -87,6 +120,10 @@ func (f *TestFullNode) WaitForSectorActive(ctx context.Context, t *testing.T, sn
|
||||
}
|
||||
}
|
||||
|
||||
func (f *TestFullNode) AssignPrivKey(pkey *Libp2p) {
|
||||
f.Pkey = pkey
|
||||
}
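With the new Pkey field, a test can pin a full node to a known libp2p identity before the ensemble starts; Ensemble.Start() then persists that key into the node's keystore under the "libp2p-host" type. A minimal sketch using the NewPrivKey helper added above (the miner/genesis setup a real test also needs is omitted):

	ens := kit.NewEnsemble(t)
	privkey, peerID := ens.NewPrivKey()

	var node kit.TestFullNode
	ens.FullNode(&node, kit.ThroughRPC())
	node.AssignPrivKey(&kit.Libp2p{PeerID: peerID, PrivKey: privkey})

	// ... enroll a miner, then ens.Start() brings the node up with this identity.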
|
||||
|
||||
// ChainPredicate encapsulates a chain condition.
|
||||
type ChainPredicate func(set *types.TipSet) bool
|
||||
|
||||
|
@ -7,7 +7,6 @@ import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
manet "github.com/multiformats/go-multiaddr/net"
|
||||
@ -18,63 +17,49 @@ import (
|
||||
"github.com/filecoin-project/lotus/node"
|
||||
)
|
||||
|
||||
func CreateRPCServer(t *testing.T, handler http.Handler, listener net.Listener) (*httptest.Server, multiaddr.Multiaddr) {
|
||||
type Closer func()
|
||||
|
||||
func CreateRPCServer(t *testing.T, handler http.Handler, listener net.Listener) (*httptest.Server, multiaddr.Multiaddr, Closer) {
|
||||
testServ := &httptest.Server{
|
||||
Listener: listener,
|
||||
Config: &http.Server{Handler: handler},
|
||||
}
|
||||
testServ.Start()
|
||||
|
||||
t.Cleanup(func() {
|
||||
waitUpTo(testServ.Close, time.Second, "Gave up waiting for RPC server to close after 1s")
|
||||
})
|
||||
t.Cleanup(testServ.CloseClientConnections)
|
||||
|
||||
addr := testServ.Listener.Addr()
|
||||
maddr, err := manet.FromNetAddr(addr)
|
||||
require.NoError(t, err)
|
||||
return testServ, maddr
|
||||
}
|
||||
|
||||
func waitUpTo(fn func(), waitTime time.Duration, errMsg string) {
|
||||
ch := make(chan struct{})
|
||||
go func() {
|
||||
fn()
|
||||
close(ch)
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-ch:
|
||||
case <-time.After(waitTime):
|
||||
fmt.Println(errMsg)
|
||||
return
|
||||
closer := func() {
|
||||
testServ.CloseClientConnections()
|
||||
testServ.Close()
|
||||
}
|
||||
|
||||
return testServ, maddr, closer
|
||||
}
|
||||
|
||||
func fullRpc(t *testing.T, f *TestFullNode) *TestFullNode {
|
||||
func fullRpc(t *testing.T, f *TestFullNode) (*TestFullNode, Closer) {
|
||||
handler, err := node.FullNodeHandler(f.FullNode, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
l, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err)
|
||||
|
||||
srv, maddr := CreateRPCServer(t, handler, l)
|
||||
srv, maddr, rpcCloser := CreateRPCServer(t, handler, l)
|
||||
fmt.Printf("FULLNODE RPC ENV FOR CLI DEBUGGING `export FULLNODE_API_INFO=%s`\n", "ws://"+srv.Listener.Addr().String())
|
||||
sendItestdNotif("FULLNODE_API_INFO", t.Name(), "ws://"+srv.Listener.Addr().String())
|
||||
|
||||
cl, stop, err := client.NewFullNodeRPCV1(context.Background(), "ws://"+srv.Listener.Addr().String()+"/rpc/v1", nil)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(stop)
|
||||
f.ListenAddr, f.ListenURL, f.FullNode = maddr, srv.URL, cl
|
||||
|
||||
return f
|
||||
return f, func() { stop(); rpcCloser() }
|
||||
}
|
||||
|
||||
func minerRpc(t *testing.T, m *TestMiner) *TestMiner {
|
||||
handler, err := node.MinerHandler(m.StorageMiner, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
srv, maddr := CreateRPCServer(t, handler, m.RemoteListener)
|
||||
srv, maddr, _ := CreateRPCServer(t, handler, m.RemoteListener)
|
||||
|
||||
fmt.Printf("creating RPC server for %s at %s\n", m.ActorAddr, srv.Listener.Addr().String())
|
||||
fmt.Printf("SP RPC ENV FOR CLI DEBUGGING `export MINER_API_INFO=%s`\n", "ws://"+srv.Listener.Addr().String())
|
||||
@ -92,7 +77,7 @@ func minerRpc(t *testing.T, m *TestMiner) *TestMiner {
|
||||
func workerRpc(t *testing.T, m *TestWorker) *TestWorker {
|
||||
handler := sealworker.WorkerHandler(m.MinerNode.AuthVerify, m.FetchHandler, m.Worker, false)
|
||||
|
||||
srv, maddr := CreateRPCServer(t, handler, m.RemoteListener)
|
||||
srv, maddr, _ := CreateRPCServer(t, handler, m.RemoteListener)
|
||||
|
||||
fmt.Println("creating RPC server for a worker at: ", srv.Listener.Addr().String())
|
||||
url := "ws://" + srv.Listener.Addr().String() + "/rpc/v0"
|
||||
|
54
itests/mpool_push_with_uuid_test.go
Normal file
@ -0,0 +1,54 @@
|
||||
package itests
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/big"
|
||||
"github.com/filecoin-project/go-state-types/exitcode"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/itests/kit"
|
||||
"github.com/filecoin-project/lotus/node/config"
|
||||
)
|
||||
|
||||
func TestMpoolPushWithoutUuidWithMaxFee(t *testing.T) {
|
||||
//stm: @TOKEN_WALLET_SIGN_001, @CHAIN_MEMPOOL_PUSH_001
|
||||
ctx := context.Background()
|
||||
|
||||
kit.QuietMiningLogs()
|
||||
|
||||
client15, _, ens := kit.EnsembleMinimal(t, kit.MockProofs())
|
||||
ens.InterconnectAll().BeginMining(10 * time.Millisecond)
|
||||
|
||||
bal, err := client15.WalletBalance(ctx, client15.DefaultKey.Address)
|
||||
require.NoError(t, err)
|
||||
|
||||
// send self half of account balance
|
||||
msgHalfBal := &types.Message{
|
||||
From: client15.DefaultKey.Address,
|
||||
To: client15.DefaultKey.Address,
|
||||
Value: big.Div(bal, big.NewInt(2)),
|
||||
}
|
||||
smHalfBal, err := client15.MpoolPushMessage(ctx, msgHalfBal, &api.MessageSendSpec{MaxFee: abi.TokenAmount(config.DefaultDefaultMaxFee)})
|
||||
require.NoError(t, err)
|
||||
mLookup, err := client15.StateWaitMsg(ctx, smHalfBal.Cid(), 3, api.LookbackNoLimit, true)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode)
|
||||
|
||||
msgQuarterBal := &types.Message{
|
||||
From: client15.DefaultKey.Address,
|
||||
To: client15.DefaultKey.Address,
|
||||
Value: big.Div(bal, big.NewInt(4)),
|
||||
}
|
||||
smcid, err := client15.MpoolPushMessage(ctx, msgQuarterBal, &api.MessageSendSpec{MaxFee: abi.TokenAmount(config.DefaultDefaultMaxFee)})
|
||||
require.NoError(t, err)
|
||||
mLookup, err = client15.StateWaitMsg(ctx, smcid.Cid(), 3, api.LookbackNoLimit, true)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode)
|
||||
}
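The raft itests added below attach an explicit UUID to every push via MessageSendSpec.MsgUuid. For contrast with the test above, a minimal sketch of the same kind of send with a caller-chosen UUID, continuing from the client15 node and bal balance defined in the test (it additionally assumes the github.com/google/uuid import):

	msgEighthBal := &types.Message{
		From:  client15.DefaultKey.Address,
		To:    client15.DefaultKey.Address,
		Value: big.Div(bal, big.NewInt(8)),
	}
	sm, err := client15.MpoolPushMessage(ctx, msgEighthBal, &api.MessageSendSpec{MsgUuid: uuid.New()})
	require.NoError(t, err)
	lookup, err := client15.StateWaitMsg(ctx, sm.Cid(), 3, api.LookbackNoLimit, true)
	require.NoError(t, err)
	require.Equal(t, exitcode.Ok, lookup.Receipt.ExitCode)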
|
577
itests/raft_messagesigner_test.go
Normal file
@ -0,0 +1,577 @@
|
||||
package itests
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
gorpc "github.com/libp2p/go-libp2p-gorpc"
|
||||
libp2pcrypto "github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/big"
|
||||
"github.com/filecoin-project/go-state-types/exitcode"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/chain/messagesigner"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/itests/kit"
|
||||
consensus "github.com/filecoin-project/lotus/lib/consensus/raft"
|
||||
"github.com/filecoin-project/lotus/node"
|
||||
"github.com/filecoin-project/lotus/node/config"
|
||||
"github.com/filecoin-project/lotus/node/impl"
|
||||
"github.com/filecoin-project/lotus/node/modules"
|
||||
)
|
||||
|
||||
func generatePrivKey() (*kit.Libp2p, error) {
|
||||
privkey, _, err := libp2pcrypto.GenerateEd25519Key(rand.Reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
peerId, err := peer.IDFromPrivateKey(privkey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &kit.Libp2p{PeerID: peerId, PrivKey: privkey}, nil
|
||||
}
|
||||
|
||||
func getRaftState(ctx context.Context, t *testing.T, node *kit.TestFullNode) *api.RaftStateData {
|
||||
raftState, err := node.RaftState(ctx)
|
||||
require.NoError(t, err)
|
||||
return raftState
|
||||
}
|
||||
|
||||
func setup(ctx context.Context, t *testing.T, node0 *kit.TestFullNode, node1 *kit.TestFullNode, node2 *kit.TestFullNode, miner *kit.TestMiner) *kit.Ensemble {
|
||||
|
||||
blockTime := 1 * time.Second
|
||||
|
||||
pkey0, _ := generatePrivKey()
|
||||
pkey1, _ := generatePrivKey()
|
||||
pkey2, _ := generatePrivKey()
|
||||
|
||||
pkeys := []*kit.Libp2p{pkey0, pkey1, pkey2}
|
||||
initPeerSet := []string{}
|
||||
for _, pkey := range pkeys {
|
||||
initPeerSet = append(initPeerSet, "/p2p/"+pkey.PeerID.String())
|
||||
}
|
||||
|
||||
//initPeerSet := []peer.ID{pkey0.PeerID, pkey1.PeerID, pkey2.PeerID}
|
||||
|
||||
raftOps := kit.ConstructorOpts(
|
||||
node.Override(new(*gorpc.Client), modules.NewRPCClient),
|
||||
node.Override(new(*consensus.ClusterRaftConfig), func() *consensus.ClusterRaftConfig {
|
||||
cfg := consensus.DefaultClusterRaftConfig()
|
||||
cfg.InitPeerset = initPeerSet
|
||||
return cfg
|
||||
}),
|
||||
node.Override(new(*consensus.Consensus), consensus.NewConsensusWithRPCClient(false)),
|
||||
node.Override(new(*messagesigner.MessageSignerConsensus), messagesigner.NewMessageSignerConsensus),
|
||||
node.Override(new(messagesigner.MsgSigner), func(ms *messagesigner.MessageSignerConsensus) *messagesigner.MessageSignerConsensus { return ms }),
|
||||
node.Override(new(*modules.RPCHandler), modules.NewRPCHandler),
|
||||
node.Override(node.GoRPCServer, modules.NewRPCServer),
|
||||
)
|
||||
//raftOps := kit.ConstructorOpts()
|
||||
kit.ThroughRPC()
|
||||
|
||||
ens := kit.NewEnsemble(t).FullNode(node0, raftOps, kit.ThroughRPC()).FullNode(node1, raftOps, kit.ThroughRPC()).FullNode(node2, raftOps, kit.ThroughRPC())
|
||||
node0.AssignPrivKey(pkey0)
|
||||
node1.AssignPrivKey(pkey1)
|
||||
node2.AssignPrivKey(pkey2)
|
||||
|
||||
nodes := []*kit.TestFullNode{node0, node1, node2}
|
||||
wrappedFullNode := kit.MergeFullNodes(nodes)
|
||||
|
||||
ens.MinerEnroll(miner, wrappedFullNode, kit.WithAllSubsystems(), kit.ThroughRPC())
|
||||
ens.Start()
|
||||
|
||||
// Import miner wallet to all nodes
|
||||
addr0, err := node0.WalletImport(ctx, &miner.OwnerKey.KeyInfo)
|
||||
require.NoError(t, err)
|
||||
addr1, err := node1.WalletImport(ctx, &miner.OwnerKey.KeyInfo)
|
||||
require.NoError(t, err)
|
||||
addr2, err := node2.WalletImport(ctx, &miner.OwnerKey.KeyInfo)
|
||||
require.NoError(t, err)
|
||||
|
||||
fmt.Println(addr0, addr1, addr2)
|
||||
|
||||
ens.InterconnectAll()
|
||||
|
||||
ens.AddInactiveMiner(miner)
|
||||
ens.Start()
|
||||
|
||||
ens.InterconnectAll().BeginMining(blockTime)
|
||||
|
||||
return ens
|
||||
}
|
||||
|
||||
func TestRaftState(t *testing.T) {
|
||||
|
||||
kit.QuietMiningLogs()
|
||||
ctx := context.Background()
|
||||
|
||||
var (
|
||||
node0 kit.TestFullNode
|
||||
node1 kit.TestFullNode
|
||||
node2 kit.TestFullNode
|
||||
miner kit.TestMiner
|
||||
)
|
||||
|
||||
setup(ctx, t, &node0, &node1, &node2, &miner)
|
||||
|
||||
fmt.Println(node0.WalletList(context.Background()))
|
||||
fmt.Println(node1.WalletList(context.Background()))
|
||||
fmt.Println(node2.WalletList(context.Background()))
|
||||
|
||||
bal, err := node0.WalletBalance(ctx, node0.DefaultKey.Address)
|
||||
require.NoError(t, err)
|
||||
|
||||
msgHalfBal := &types.Message{
|
||||
From: miner.OwnerKey.Address,
|
||||
To: node0.DefaultKey.Address,
|
||||
Value: big.Div(bal, big.NewInt(2)),
|
||||
}
|
||||
|
||||
mu := uuid.New()
|
||||
smHalfBal, err := node0.MpoolPushMessage(ctx, msgHalfBal, &api.MessageSendSpec{
|
||||
MsgUuid: mu,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
mLookup, err := node0.StateWaitMsg(ctx, smHalfBal.Cid(), 3, api.LookbackNoLimit, true)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode)
|
||||
|
||||
rstate0 := getRaftState(ctx, t, &node0)
|
||||
rstate1 := getRaftState(ctx, t, &node1)
|
||||
rstate2 := getRaftState(ctx, t, &node2)
|
||||
|
||||
require.EqualValues(t, rstate0, rstate1)
|
||||
require.EqualValues(t, rstate0, rstate2)
|
||||
}
|
||||
|
||||
func TestRaftStateLeaderDisconnects(t *testing.T) {
|
||||
|
||||
kit.QuietMiningLogs()
|
||||
ctx := context.Background()
|
||||
|
||||
var (
|
||||
node0 kit.TestFullNode
|
||||
node1 kit.TestFullNode
|
||||
node2 kit.TestFullNode
|
||||
miner kit.TestMiner
|
||||
)
|
||||
|
||||
nodes := []*kit.TestFullNode{&node0, &node1, &node2}
|
||||
|
||||
setup(ctx, t, &node0, &node1, &node2, &miner)
|
||||
|
||||
peerToNode := make(map[peer.ID]*kit.TestFullNode)
|
||||
for _, n := range nodes {
|
||||
peerToNode[n.Pkey.PeerID] = n
|
||||
}
|
||||
|
||||
bal, err := node0.WalletBalance(ctx, node0.DefaultKey.Address)
|
||||
require.NoError(t, err)
|
||||
|
||||
msgHalfBal := &types.Message{
|
||||
From: miner.OwnerKey.Address,
|
||||
To: node0.DefaultKey.Address,
|
||||
Value: big.Div(bal, big.NewInt(2)),
|
||||
}
|
||||
mu := uuid.New()
|
||||
smHalfBal, err := node0.MpoolPushMessage(ctx, msgHalfBal, &api.MessageSendSpec{
|
||||
MsgUuid: mu,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
mLookup, err := node0.StateWaitMsg(ctx, smHalfBal.Cid(), 3, api.LookbackNoLimit, true)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode)
|
||||
|
||||
rstate0 := getRaftState(ctx, t, &node0)
|
||||
rstate1 := getRaftState(ctx, t, &node1)
|
||||
rstate2 := getRaftState(ctx, t, &node2)
|
||||
|
||||
require.True(t, reflect.DeepEqual(rstate0, rstate1))
|
||||
require.True(t, reflect.DeepEqual(rstate0, rstate2))
|
||||
|
||||
leader, err := node1.RaftLeader(ctx)
|
||||
require.NoError(t, err)
|
||||
leaderNode := peerToNode[leader]
|
||||
|
||||
err = leaderNode.Stop(ctx)
|
||||
require.NoError(t, err)
|
||||
oldLeaderNode := leaderNode
|
||||
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
newLeader := leader
|
||||
for _, n := range nodes {
|
||||
if n != leaderNode {
|
||||
newLeader, err = n.RaftLeader(ctx)
|
||||
require.NoError(t, err)
|
||||
require.NotEqual(t, newLeader, leader)
|
||||
}
|
||||
}
|
||||
|
||||
require.NotEqual(t, newLeader, leader)
|
||||
leaderNode = peerToNode[newLeader]
|
||||
|
||||
msg2 := &types.Message{
|
||||
From: miner.OwnerKey.Address,
|
||||
To: leaderNode.DefaultKey.Address,
|
||||
Value: big.NewInt(100000),
|
||||
}
|
||||
mu2 := uuid.New()
|
||||
signedMsg2, err := leaderNode.MpoolPushMessage(ctx, msg2, &api.MessageSendSpec{
|
||||
MsgUuid: mu2,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
mLookup, err = leaderNode.StateWaitMsg(ctx, signedMsg2.Cid(), 3, api.LookbackNoLimit, true)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode)
|
||||
|
||||
rstate := getRaftState(ctx, t, leaderNode)
|
||||
|
||||
for _, n := range nodes {
|
||||
if n != oldLeaderNode {
|
||||
rs := getRaftState(ctx, t, n)
|
||||
require.True(t, reflect.DeepEqual(rs, rstate))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRaftStateMiner(t *testing.T) {
|
||||
|
||||
kit.QuietMiningLogs()
|
||||
ctx := context.Background()
|
||||
|
||||
var (
|
||||
node0 kit.TestFullNode
|
||||
node1 kit.TestFullNode
|
||||
node2 kit.TestFullNode
|
||||
miner kit.TestMiner
|
||||
)
|
||||
|
||||
setup(ctx, t, &node0, &node1, &node2, &miner)
|
||||
|
||||
fmt.Println(node0.WalletList(context.Background()))
|
||||
fmt.Println(node1.WalletList(context.Background()))
|
||||
fmt.Println(node2.WalletList(context.Background()))
|
||||
|
||||
bal, err := node0.WalletBalance(ctx, node0.DefaultKey.Address)
|
||||
require.NoError(t, err)
|
||||
|
||||
msgHalfBal := &types.Message{
|
||||
From: miner.OwnerKey.Address,
|
||||
To: node0.DefaultKey.Address,
|
||||
Value: big.Div(bal, big.NewInt(2)),
|
||||
}
|
||||
mu := uuid.New()
|
||||
smHalfBal, err := miner.FullNode.MpoolPushMessage(ctx, msgHalfBal, &api.MessageSendSpec{
|
||||
MsgUuid: mu,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
mLookup, err := node0.StateWaitMsg(ctx, smHalfBal.Cid(), 3, api.LookbackNoLimit, true)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode)
|
||||
|
||||
rstate0 := getRaftState(ctx, t, &node0)
|
||||
rstate1 := getRaftState(ctx, t, &node1)
|
||||
rstate2 := getRaftState(ctx, t, &node2)
|
||||
|
||||
require.EqualValues(t, rstate0, rstate1)
|
||||
require.EqualValues(t, rstate0, rstate2)
|
||||
}
|
||||
|
||||
func TestRaftStateLeaderDisconnectsMiner(t *testing.T) {
|
||||
|
||||
kit.QuietMiningLogs()
|
||||
ctx := context.Background()
|
||||
|
||||
var (
|
||||
node0 kit.TestFullNode
|
||||
node1 kit.TestFullNode
|
||||
node2 kit.TestFullNode
|
||||
miner kit.TestMiner
|
||||
)
|
||||
|
||||
nodes := []*kit.TestFullNode{&node0, &node1, &node2}
|
||||
|
||||
setup(ctx, t, &node0, &node1, &node2, &miner)
|
||||
|
||||
peerToNode := make(map[peer.ID]*kit.TestFullNode)
|
||||
for _, n := range nodes {
|
||||
peerToNode[n.Pkey.PeerID] = n
|
||||
}
|
||||
|
||||
leader, err := node0.RaftLeader(ctx)
|
||||
require.NoError(t, err)
|
||||
leaderNode := peerToNode[leader]
|
||||
|
||||
// Take leader node down
|
||||
err = leaderNode.Stop(ctx)
|
||||
require.NoError(t, err)
|
||||
oldLeaderNode := leaderNode
|
||||
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
newLeader := leader
|
||||
for _, n := range nodes {
|
||||
if n != leaderNode {
|
||||
newLeader, err = n.RaftLeader(ctx)
|
||||
require.NoError(t, err)
|
||||
require.NotEqual(t, newLeader, leader)
|
||||
}
|
||||
}
|
||||
|
||||
require.NotEqual(t, newLeader, leader)
|
||||
leaderNode = peerToNode[newLeader]
|
||||
|
||||
msg2 := &types.Message{
|
||||
From: miner.OwnerKey.Address,
|
||||
To: node0.DefaultKey.Address,
|
||||
Value: big.NewInt(100000),
|
||||
}
|
||||
mu2 := uuid.New()
|
||||
|
||||
signedMsg2, err := miner.FullNode.MpoolPushMessage(ctx, msg2, &api.MessageSendSpec{
|
||||
MaxFee: abi.TokenAmount(config.DefaultDefaultMaxFee),
|
||||
MsgUuid: mu2,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
mLookup, err := leaderNode.StateWaitMsg(ctx, signedMsg2.Cid(), 3, api.LookbackNoLimit, true)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode)
|
||||
|
||||
rstate := getRaftState(ctx, t, leaderNode)
|
||||
|
||||
for _, n := range nodes {
|
||||
if n != oldLeaderNode {
|
||||
rs := getRaftState(ctx, t, n)
|
||||
require.True(t, reflect.DeepEqual(rs, rstate))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Miner sends message on leader
|
||||
// Leader disconnects
|
||||
// Call StateWaitMsg on new leader
|
||||
func TestLeaderDisconnectsCheckMsgStateOnNewLeader(t *testing.T) {
|
||||
|
||||
kit.QuietMiningLogs()
|
||||
ctx := context.Background()
|
||||
|
||||
var (
|
||||
node0 kit.TestFullNode
|
||||
node1 kit.TestFullNode
|
||||
node2 kit.TestFullNode
|
||||
miner kit.TestMiner
|
||||
)
|
||||
|
||||
nodes := []*kit.TestFullNode{&node0, &node1, &node2}
|
||||
|
||||
setup(ctx, t, &node0, &node1, &node2, &miner)
|
||||
|
||||
peerToNode := make(map[peer.ID]*kit.TestFullNode)
|
||||
for _, n := range nodes {
|
||||
peerToNode[n.Pkey.PeerID] = n
|
||||
}
|
||||
|
||||
bal, err := node0.WalletBalance(ctx, node0.DefaultKey.Address)
|
||||
require.NoError(t, err)
|
||||
|
||||
msgHalfBal := &types.Message{
|
||||
From: miner.OwnerKey.Address,
|
||||
To: node0.DefaultKey.Address,
|
||||
Value: big.Div(bal, big.NewInt(2)),
|
||||
}
|
||||
mu := uuid.New()
|
||||
smHalfBal, err := miner.FullNode.MpoolPushMessage(ctx, msgHalfBal, &api.MessageSendSpec{
|
||||
MsgUuid: mu,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
leader, err := node0.RaftLeader(ctx)
|
||||
require.NoError(t, err)
|
||||
leaderNode := peerToNode[leader]
|
||||
|
||||
// Take leader node down
|
||||
err = leaderNode.Stop(ctx)
|
||||
require.NoError(t, err)
|
||||
oldLeaderNode := leaderNode
|
||||
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
// Check if all active nodes update their leader
|
||||
newLeader := leader
|
||||
for _, n := range nodes {
|
||||
if n != leaderNode {
|
||||
newLeader, err = n.RaftLeader(ctx)
|
||||
require.NoError(t, err)
|
||||
require.NotEqual(t, newLeader, leader)
|
||||
}
|
||||
}
|
||||
|
||||
require.NotEqual(t, newLeader, leader)
|
||||
leaderNode = peerToNode[newLeader]
|
||||
|
||||
mLookup, err := leaderNode.StateWaitMsg(ctx, smHalfBal.Cid(), 3, api.LookbackNoLimit, true)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode)
|
||||
|
||||
rstate := getRaftState(ctx, t, leaderNode)
|
||||
|
||||
// Check if Raft state is consistent on all active nodes
|
||||
for _, n := range nodes {
|
||||
if n != oldLeaderNode {
|
||||
rs := getRaftState(ctx, t, n)
|
||||
require.True(t, reflect.DeepEqual(rs, rstate))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestChainStoreSync(t *testing.T) {
|
||||
|
||||
kit.QuietMiningLogs()
|
||||
ctx := context.Background()
|
||||
|
||||
var (
|
||||
node0 kit.TestFullNode
|
||||
node1 kit.TestFullNode
|
||||
node2 kit.TestFullNode
|
||||
miner kit.TestMiner
|
||||
)
|
||||
|
||||
nodes := []*kit.TestFullNode{&node0, &node1, &node2}
|
||||
|
||||
setup(ctx, t, &node0, &node1, &node2, &miner)
|
||||
|
||||
peerToNode := make(map[peer.ID]*kit.TestFullNode)
|
||||
for _, n := range nodes {
|
||||
peerToNode[n.Pkey.PeerID] = n
|
||||
}
|
||||
|
||||
bal, err := node0.WalletBalance(ctx, node0.DefaultKey.Address)
|
||||
require.NoError(t, err)
|
||||
|
||||
leader, err := node0.RaftLeader(ctx)
|
||||
require.NoError(t, err)
|
||||
leaderNode := peerToNode[leader]
|
||||
|
||||
msgHalfBal := &types.Message{
|
||||
From: miner.OwnerKey.Address,
|
||||
To: node0.DefaultKey.Address,
|
||||
Value: big.Div(bal, big.NewInt(2)),
|
||||
}
|
||||
mu := uuid.New()
|
||||
smHalfBal, err := miner.FullNode.MpoolPushMessage(ctx, msgHalfBal, &api.MessageSendSpec{
|
||||
MsgUuid: mu,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, n := range nodes {
|
||||
fmt.Println(n != leaderNode)
|
||||
if n != leaderNode {
|
||||
mLookup, err := n.StateWaitMsg(ctx, smHalfBal.Cid(), 3, api.LookbackNoLimit, true)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode)
|
||||
//break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGoRPCAuth(t *testing.T) {
|
||||
|
||||
blockTime := 1 * time.Second
|
||||
|
||||
kit.QuietMiningLogs()
|
||||
ctx := context.Background()
|
||||
|
||||
var (
|
||||
node0 kit.TestFullNode
|
||||
node1 kit.TestFullNode
|
||||
node2 kit.TestFullNode
|
||||
node3 kit.TestFullNode
|
||||
miner kit.TestMiner
|
||||
)
|
||||
|
||||
pkey0, _ := generatePrivKey()
|
||||
pkey1, _ := generatePrivKey()
|
||||
pkey2, _ := generatePrivKey()
|
||||
|
||||
pkeys := []*kit.Libp2p{pkey0, pkey1, pkey2}
|
||||
initPeerSet := []string{}
|
||||
for _, pkey := range pkeys {
|
||||
initPeerSet = append(initPeerSet, "/p2p/"+pkey.PeerID.String())
|
||||
}
|
||||
|
||||
raftOps := kit.ConstructorOpts(
|
||||
node.Override(new(*gorpc.Client), modules.NewRPCClient),
|
||||
node.Override(new(*consensus.ClusterRaftConfig), func() *consensus.ClusterRaftConfig {
|
||||
cfg := consensus.DefaultClusterRaftConfig()
|
||||
cfg.InitPeerset = initPeerSet
|
||||
return cfg
|
||||
}),
|
||||
node.Override(new(*consensus.Consensus), consensus.NewConsensusWithRPCClient(false)),
|
||||
node.Override(new(*messagesigner.MessageSignerConsensus), messagesigner.NewMessageSignerConsensus),
|
||||
node.Override(new(messagesigner.MsgSigner), func(ms *messagesigner.MessageSignerConsensus) *messagesigner.MessageSignerConsensus { return ms }),
|
||||
node.Override(new(*modules.RPCHandler), modules.NewRPCHandler),
|
||||
node.Override(node.GoRPCServer, modules.NewRPCServer),
|
||||
)
|
||||
//raftOps := kit.ConstructorOpts()
|
||||
kit.ThroughRPC()
|
||||
|
||||
ens := kit.NewEnsemble(t).FullNode(&node0, raftOps, kit.ThroughRPC()).FullNode(&node1, raftOps, kit.ThroughRPC()).FullNode(&node2, raftOps, kit.ThroughRPC()).FullNode(&node3, raftOps)
|
||||
node0.AssignPrivKey(pkey0)
|
||||
node1.AssignPrivKey(pkey1)
|
||||
node2.AssignPrivKey(pkey2)
|
||||
|
||||
nodes := []*kit.TestFullNode{&node0, &node1, &node2}
|
||||
wrappedFullNode := kit.MergeFullNodes(nodes)
|
||||
|
||||
ens.MinerEnroll(&miner, wrappedFullNode, kit.WithAllSubsystems(), kit.ThroughRPC())
|
||||
ens.Start()
|
||||
|
||||
// Import miner wallet to all nodes
|
||||
addr0, err := node0.WalletImport(ctx, &miner.OwnerKey.KeyInfo)
|
||||
require.NoError(t, err)
|
||||
addr1, err := node1.WalletImport(ctx, &miner.OwnerKey.KeyInfo)
|
||||
require.NoError(t, err)
|
||||
addr2, err := node2.WalletImport(ctx, &miner.OwnerKey.KeyInfo)
|
||||
require.NoError(t, err)
|
||||
|
||||
fmt.Println(addr0, addr1, addr2)
|
||||
|
||||
ens.InterconnectAll()
|
||||
|
||||
ens.AddInactiveMiner(&miner)
|
||||
ens.Start()
|
||||
|
||||
ens.InterconnectAll().BeginMining(blockTime)
|
||||
|
||||
leader, err := node0.RaftLeader(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
client := node3.FullNode.(*impl.FullNodeAPI).RaftAPI.MessageSigner.Consensus.RpcClient
|
||||
method := "MpoolPushMessage"
|
||||
|
||||
msg := &types.Message{
|
||||
From: miner.OwnerKey.Address,
|
||||
To: node0.DefaultKey.Address,
|
||||
Value: big.NewInt(100000),
|
||||
}
|
||||
msgWhole := &api.MpoolMessageWhole{Msg: msg}
|
||||
var ret types.SignedMessage
|
||||
|
||||
err = client.CallContext(ctx, leader, "Consensus", method, msgWhole, &ret)
|
||||
require.True(t, gorpc.IsAuthorizationError(err))
|
||||
|
||||
}
|
135
lib/consensus/raft/config.go
Normal file
@ -0,0 +1,135 @@
|
||||
package consensus
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
hraft "github.com/hashicorp/raft"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/lotus/node/config"
|
||||
"github.com/filecoin-project/lotus/node/repo"
|
||||
)
|
||||
|
||||
// Configuration defaults
|
||||
var (
|
||||
DefaultDataSubFolder = "raft-cluster"
|
||||
DefaultWaitForLeaderTimeout = 15 * time.Second
|
||||
DefaultCommitRetries = 1
|
||||
DefaultNetworkTimeout = 100 * time.Second
|
||||
DefaultCommitRetryDelay = 200 * time.Millisecond
|
||||
DefaultBackupsRotate = 6
|
||||
)
|
||||
|
||||
// ClusterRaftConfig allows configuring the Raft consensus component for the node cluster.
|
||||
type ClusterRaftConfig struct {
|
||||
// config to enable running the node in a cluster with raft consensus
|
||||
ClusterModeEnabled bool
|
||||
// A folder to store Raft's data.
|
||||
DataFolder string
|
||||
// InitPeerset provides the list of initial cluster peers for new Raft
|
||||
// peers (with no prior state). It is ignored when Raft was already
|
||||
// initialized or when starting in staging mode.
|
||||
InitPeerset []string
|
||||
// WaitForLeaderTimeout specifies how long to wait for a leader before
|
||||
// failing an operation.
|
||||
WaitForLeaderTimeout time.Duration
|
||||
// NetworkTimeout specifies how long before a Raft network
|
||||
// operation is timed out
|
||||
NetworkTimeout time.Duration
|
||||
// CommitRetries specifies how many times we retry a failed commit until
|
||||
// we give up.
|
||||
CommitRetries int
|
||||
// How long to wait between retries
|
||||
CommitRetryDelay time.Duration
|
||||
// BackupsRotate specifies the maximum number of Raft's DataFolder
|
||||
// copies that we keep as backups (renaming) after cleanup.
|
||||
BackupsRotate int
|
||||
// A Hashicorp Raft's configuration object.
|
||||
RaftConfig *hraft.Config
|
||||
|
||||
// Tracing enables propagation of contexts across binary boundaries.
|
||||
Tracing bool
|
||||
}
|
||||
|
||||
func DefaultClusterRaftConfig() *ClusterRaftConfig {
|
||||
var cfg ClusterRaftConfig
|
||||
cfg.DataFolder = "" // empty so it gets omitted
|
||||
cfg.InitPeerset = []string{}
|
||||
cfg.WaitForLeaderTimeout = DefaultWaitForLeaderTimeout
|
||||
cfg.NetworkTimeout = DefaultNetworkTimeout
|
||||
cfg.CommitRetries = DefaultCommitRetries
|
||||
cfg.CommitRetryDelay = DefaultCommitRetryDelay
|
||||
cfg.BackupsRotate = DefaultBackupsRotate
|
||||
cfg.RaftConfig = hraft.DefaultConfig()
|
||||
|
||||
// These options are imposed over any Default Raft Config.
|
||||
cfg.RaftConfig.ShutdownOnRemove = false
|
||||
cfg.RaftConfig.LocalID = "will_be_set_automatically"
|
||||
|
||||
// Set up logging
|
||||
cfg.RaftConfig.LogOutput = ioutil.Discard
|
||||
return &cfg
|
||||
}
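A minimal sketch of how the itests in this change customise these defaults, seeding the initial peerset with the cluster's /p2p peer IDs and validating the result (pkey0..pkey2 stand for pre-generated libp2p keys, as in the tests):

	cfg := consensus.DefaultClusterRaftConfig()
	cfg.InitPeerset = []string{
		"/p2p/" + pkey0.PeerID.String(),
		"/p2p/" + pkey1.PeerID.String(),
		"/p2p/" + pkey2.PeerID.String(),
	}
	// ValidateConfig (below) rejects a nil RaftConfig and non-positive
	// timeouts/retries before the raft wrapper is built.
	err := consensus.ValidateConfig(cfg)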
|
||||
|
||||
func NewClusterRaftConfig(userRaftConfig *config.UserRaftConfig) *ClusterRaftConfig {
|
||||
var cfg ClusterRaftConfig
|
||||
cfg.DataFolder = userRaftConfig.DataFolder
|
||||
cfg.InitPeerset = userRaftConfig.InitPeersetMultiAddr
|
||||
cfg.WaitForLeaderTimeout = time.Duration(userRaftConfig.WaitForLeaderTimeout)
|
||||
cfg.NetworkTimeout = time.Duration(userRaftConfig.NetworkTimeout)
|
||||
cfg.CommitRetries = userRaftConfig.CommitRetries
|
||||
cfg.CommitRetryDelay = time.Duration(userRaftConfig.CommitRetryDelay)
|
||||
cfg.BackupsRotate = userRaftConfig.BackupsRotate
|
||||
|
||||
// Keep this as the default hraft config for now
|
||||
cfg.RaftConfig = hraft.DefaultConfig()
|
||||
|
||||
// These options are imposed over any Default Raft Config.
|
||||
cfg.RaftConfig.ShutdownOnRemove = false
|
||||
cfg.RaftConfig.LocalID = "will_be_set_automatically"
|
||||
|
||||
// Set up logging
|
||||
cfg.RaftConfig.LogOutput = ioutil.Discard
|
||||
|
||||
return &cfg
|
||||
|
||||
}
|
||||
|
||||
// ValidateConfig checks that this configuration has working values,
|
||||
// at least in appearance.
|
||||
func ValidateConfig(cfg *ClusterRaftConfig) error {
|
||||
if cfg.RaftConfig == nil {
|
||||
return xerrors.Errorf("no hashicorp/raft.Config")
|
||||
}
|
||||
if cfg.WaitForLeaderTimeout <= 0 {
|
||||
return xerrors.Errorf("wait_for_leader_timeout <= 0")
|
||||
}
|
||||
|
||||
if cfg.NetworkTimeout <= 0 {
|
||||
return xerrors.Errorf("network_timeout <= 0")
|
||||
}
|
||||
|
||||
if cfg.CommitRetries < 0 {
|
||||
return xerrors.Errorf("commit_retries is invalid")
|
||||
}
|
||||
|
||||
if cfg.CommitRetryDelay <= 0 {
|
||||
return xerrors.Errorf("commit_retry_delay is invalid")
|
||||
}
|
||||
|
||||
if cfg.BackupsRotate <= 0 {
|
||||
return xerrors.Errorf("backups_rotate should be larger than 0")
|
||||
}
|
||||
|
||||
return hraft.ValidateConfig(cfg.RaftConfig)
|
||||
}
|
||||
|
||||
// GetDataFolder returns the Raft data folder that we are using.
|
||||
func (cfg *ClusterRaftConfig) GetDataFolder(repo repo.LockedRepo) string {
|
||||
if cfg.DataFolder == "" {
|
||||
return filepath.Join(repo.Path(), DefaultDataSubFolder)
|
||||
}
|
||||
return filepath.Join(repo.Path(), cfg.DataFolder)
|
||||
}
|
506
lib/consensus/raft/consensus.go
Normal file
@ -0,0 +1,506 @@
|
||||
// Package consensus implements a Raft consensus component for Lotus cluster nodes, using
|
||||
// go-libp2p-raft.
|
||||
package consensus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"golang.org/x/exp/slices"
|
||||
|
||||
addr "github.com/filecoin-project/go-address"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/chain/messagepool"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/lib/addrutil"
|
||||
"github.com/filecoin-project/lotus/node/repo"
|
||||
|
||||
//ds "github.com/ipfs/go-datastore"
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
consensus "github.com/libp2p/go-libp2p-consensus"
|
||||
rpc "github.com/libp2p/go-libp2p-gorpc"
|
||||
libp2praft "github.com/libp2p/go-libp2p-raft"
|
||||
host "github.com/libp2p/go-libp2p/core/host"
|
||||
peer "github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
var logger = logging.Logger("raft")
|
||||
|
||||
type RaftState struct {
|
||||
NonceMap api.NonceMapType
|
||||
MsgUuids api.MsgUuidMapType
|
||||
|
||||
// TODO: add comment explaining why this is needed
|
||||
// We need a reference to the messagepool in the raft state in order to
|
||||
// sync messages that have been sent by the leader node
|
||||
// Miner calls StateWaitMsg after MpoolPushMessage to check if the message has
|
||||
// landed on chain. This check requires the message be stored in the local chainstore
|
||||
// If a leader node goes down after sending a message to the chain and is replaced by
|
||||
// another node, the other node needs to have this message in its chainstore for the
|
||||
// above check to succeed.
|
||||
|
||||
// This is because the miner only stores signed CIDs, but the message received in a
|
||||
// block will be unsigned (for BLS). Hence, the process relies on the node to store the
|
||||
// signed message which holds a copy of the unsigned message to properly perform all the
|
||||
// needed checks
|
||||
Mpool *messagepool.MessagePool
|
||||
}
|
||||
|
||||
func newRaftState(mpool *messagepool.MessagePool) *RaftState {
|
||||
return &RaftState{
|
||||
NonceMap: make(map[addr.Address]uint64),
|
||||
MsgUuids: make(map[uuid.UUID]*types.SignedMessage),
|
||||
Mpool: mpool,
|
||||
}
|
||||
}
|
||||
|
||||
type ConsensusOp struct {
|
||||
Nonce uint64 `codec:"nonce,omitempty"`
|
||||
Uuid uuid.UUID `codec:"uuid,omitempty"`
|
||||
Addr addr.Address `codec:"addr,omitempty"`
|
||||
SignedMsg *types.SignedMessage `codec:"signedMsg,omitempty"`
|
||||
}
|
||||
|
||||
func (c ConsensusOp) ApplyTo(state consensus.State) (consensus.State, error) {
|
||||
s := state.(*RaftState)
|
||||
s.NonceMap[c.Addr] = c.Nonce
|
||||
if c.SignedMsg != nil {
|
||||
|
||||
// Deep copy to tmp
|
||||
var buffer bytes.Buffer
|
||||
err := c.SignedMsg.MarshalCBOR(&buffer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tmp, err := types.DecodeSignedMessage(buffer.Bytes())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s.MsgUuids[c.Uuid] = tmp
|
||||
|
||||
_, err = s.Mpool.Push(context.TODO(), tmp, false)
|
||||
// Since this is only meant to keep messages in sync, ignore any error which
|
||||
// shows the message already exists in the mpool
|
||||
if err != nil && !api.ErrorIsIn(err, []error{messagepool.ErrExistingNonce}) {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
var _ consensus.Op = &ConsensusOp{}
|
||||
|
||||
// Consensus handles the work of keeping a shared state between
|
||||
// the peers of a Lotus Cluster, as well as modifying that state and
|
||||
// applying any updates in a thread-safe manner.
|
||||
type Consensus struct {
|
||||
ctx context.Context
|
||||
cancel func()
|
||||
config *ClusterRaftConfig
|
||||
|
||||
host host.Host
|
||||
|
||||
consensus consensus.OpLogConsensus
|
||||
actor consensus.Actor
|
||||
raft *raftWrapper
|
||||
state *RaftState
|
||||
|
||||
RpcClient *rpc.Client
|
||||
rpcReady chan struct{}
|
||||
readyCh chan struct{}
|
||||
|
||||
peerSet []peer.ID
|
||||
repo repo.LockedRepo
|
||||
}
|
||||
|
||||
// NewConsensus builds a new ClusterConsensus component using Raft.
|
||||
//
|
||||
// Raft saves state snapshots regularly and persists log data in a bolt
|
||||
// datastore. Therefore, unless memory usage is a concern, it is recommended
|
||||
// to use an in-memory go-datastore as store parameter.
|
||||
//
|
||||
// The staging parameter controls if the Raft peer should start in
|
||||
// staging mode (used when joining a new Raft peerset with other peers).
|
||||
func NewConsensus(host host.Host, cfg *ClusterRaftConfig, mpool *messagepool.MessagePool, repo repo.LockedRepo, staging bool) (*Consensus, error) {
|
||||
err := ValidateConfig(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
logger.Debug("starting Consensus and waiting for a leader...")
|
||||
state := newRaftState(mpool)
|
||||
|
||||
consensus := libp2praft.NewOpLog(state, &ConsensusOp{})
|
||||
|
||||
raft, err := newRaftWrapper(host, cfg, consensus.FSM(), repo, staging)
|
||||
if err != nil {
|
||||
logger.Error("error creating raft: ", err)
|
||||
cancel()
|
||||
return nil, err
|
||||
}
|
||||
actor := libp2praft.NewActor(raft.raft)
|
||||
consensus.SetActor(actor)
|
||||
|
||||
peers := []peer.ID{}
|
||||
addrInfos, err := addrutil.ParseAddresses(ctx, cfg.InitPeerset)
if err != nil {
	cancel()
	return nil, err
}
|
||||
for _, addrInfo := range addrInfos {
|
||||
peers = append(peers, addrInfo.ID)
|
||||
|
||||
// Add peer to address book
|
||||
host.Peerstore().AddAddrs(addrInfo.ID, addrInfo.Addrs, time.Hour*100)
|
||||
}
|
||||
|
||||
cc := &Consensus{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
config: cfg,
|
||||
host: host,
|
||||
consensus: consensus,
|
||||
actor: actor,
|
||||
state: state,
|
||||
raft: raft,
|
||||
peerSet: peers,
|
||||
rpcReady: make(chan struct{}, 1),
|
||||
readyCh: make(chan struct{}, 1),
|
||||
repo: repo,
|
||||
}
|
||||
|
||||
go cc.finishBootstrap()
|
||||
return cc, nil
|
||||
|
||||
}
|
||||
|
||||
// TODO: Merge with NewConsensus and remove the rpcReady chan
func NewConsensusWithRPCClient(staging bool) func(host host.Host,
	cfg *ClusterRaftConfig,
	rpcClient *rpc.Client,
	mpool *messagepool.MessagePool,
	repo repo.LockedRepo,
) (*Consensus, error) {

	return func(host host.Host, cfg *ClusterRaftConfig, rpcClient *rpc.Client, mpool *messagepool.MessagePool, repo repo.LockedRepo) (*Consensus, error) {
		cc, err := NewConsensus(host, cfg, mpool, repo, staging)
		if err != nil {
			return nil, err
		}
		cc.RpcClient = rpcClient
		cc.rpcReady <- struct{}{}
		return cc, nil
	}
}

// WaitForSync waits for a leader and for the state to be up to date, then returns.
func (cc *Consensus) WaitForSync(ctx context.Context) error {

	leaderCtx, cancel := context.WithTimeout(ctx, cc.config.WaitForLeaderTimeout)
	defer cancel()

	// 1 - wait for leader
	// 2 - wait until we are a Voter
	// 3 - wait until last index is applied

	// From raft docs:

	// once a staging server receives enough log entries to be sufficiently
	// caught up to the leader's log, the leader will invoke a membership
	// change to change the Staging server to a Voter

	// Thus, waiting to be a Voter is a guarantee that we have a reasonable
	// up to date state. Otherwise, we might return too early (see
	// https://github.com/ipfs-cluster/ipfs-cluster/issues/378)

	_, err := cc.raft.WaitForLeader(leaderCtx)
	if err != nil {
		return errors.New("error waiting for leader: " + err.Error())
	}

	err = cc.raft.WaitForVoter(ctx)
	if err != nil {
		return errors.New("error waiting to become a Voter: " + err.Error())
	}

	err = cc.raft.WaitForUpdates(ctx)
	if err != nil {
		return errors.New("error waiting for consensus updates: " + err.Error())
	}
	return nil
}

// waits until there is a consensus leader and syncs the state
// to the tracker. If errors happen, this will return and never
// signal the component as Ready.
func (cc *Consensus) finishBootstrap() {
	// wait until we have RPC to perform any actions.
	select {
	case <-cc.ctx.Done():
		return
	case <-cc.rpcReady:
	}

	// Sometimes bootstrap is a no-Op. It only applies when
	// no state exists and staging=false.
	_, err := cc.raft.Bootstrap()
	if err != nil {
		return
	}

	logger.Debugf("Bootstrap finished")
	err = cc.WaitForSync(cc.ctx)
	if err != nil {
		return
	}
	logger.Debug("Raft state is now up to date")
	logger.Debug("consensus ready")
	cc.readyCh <- struct{}{}
}

// Shutdown stops the component so it will not process any
// more updates. The underlying consensus is permanently
// shutdown, along with the libp2p transport.
func (cc *Consensus) Shutdown(ctx context.Context) error {

	logger.Info("stopping Consensus component")

	// Raft Shutdown
	err := cc.raft.Shutdown(ctx)
	if err != nil {
		logger.Error(err)
	}

	cc.cancel()
	close(cc.rpcReady)
	return nil
}

// Ready returns a channel which is signaled when the Consensus
// algorithm has finished bootstrapping and is ready to use
func (cc *Consensus) Ready(ctx context.Context) <-chan struct{} {
	return cc.readyCh
}

// IsTrustedPeer returns true if the given peer is part of the configured
// Raft peerset. Only trusted peers may call the cluster RPC endpoints.
func (cc *Consensus) IsTrustedPeer(ctx context.Context, p peer.ID) bool {
	return slices.Contains(cc.peerSet, p)
}

// Trust is a no-Op.
func (cc *Consensus) Trust(ctx context.Context, pid peer.ID) error { return nil }

// Distrust is a no-Op.
func (cc *Consensus) Distrust(ctx context.Context, pid peer.ID) error { return nil }

// RedirectToLeader returns true if the operation was redirected to the leader.
// Note that if the leader just disappeared, the rpc call will
// fail because we haven't heard that it's gone.
func (cc *Consensus) RedirectToLeader(method string, arg interface{}, ret interface{}) (bool, error) {
	ctx := cc.ctx

	var finalErr error

	// Retry redirects
	for i := 0; i <= cc.config.CommitRetries; i++ {
		logger.Debugf("redirect try %d", i)
		leader, err := cc.Leader(ctx)

		// No leader, wait for one
		if err != nil {
			logger.Warn("there seems to be no leader. Waiting for one")
			rctx, cancel := context.WithTimeout(ctx, cc.config.WaitForLeaderTimeout)
			defer cancel()
			pidstr, err := cc.raft.WaitForLeader(rctx)

			// means we timed out waiting for a leader
			// we don't retry in this case
			if err != nil {
				return false, fmt.Errorf("timed out waiting for leader: %s", err)
			}
			leader, err = peer.Decode(pidstr)
			if err != nil {
				return false, err
			}
		}

		logger.Infof("leader: %s, curr host: %s, peerSet: %s", leader, cc.host.ID(), cc.peerSet)

		// We are the leader. Do not redirect
		if leader == cc.host.ID() {
			return false, nil
		}

		logger.Debugf("redirecting %s to leader: %s", method, leader.Pretty())
		finalErr = cc.RpcClient.CallContext(
			ctx,
			leader,
			"Consensus",
			method,
			arg,
			ret,
		)
		if finalErr != nil {
			logger.Errorf("retrying to redirect request to leader: %s", finalErr)
			time.Sleep(2 * cc.config.RaftConfig.HeartbeatTimeout)
			continue
		}
		break
	}

	// We tried to redirect, but something happened
	return true, finalErr
}

// Commit submits a cc.consensus commit. It retries upon failures.
func (cc *Consensus) Commit(ctx context.Context, op *ConsensusOp) error {

	var finalErr error
	for i := 0; i <= cc.config.CommitRetries; i++ {
		logger.Debugf("attempt #%d: committing %+v", i, op)

		// this means we are retrying
		if finalErr != nil {
			logger.Errorf("retrying upon failed commit (retry %d): %s ",
				i, finalErr)
		}

		// Being here means we are the LEADER. We can commit.
		// now commit the changes to our state
		_, finalErr = cc.consensus.CommitOp(op)
		if finalErr == nil {
			// committed successfully; do not re-submit the op
			break
		}
		time.Sleep(cc.config.CommitRetryDelay)
	}
	return finalErr
}

// AddPeer adds a new peer to participate in this consensus. It will
// forward the operation to the leader if this is not it.
func (cc *Consensus) AddPeer(ctx context.Context, pid peer.ID) error {
	var finalErr error
	for i := 0; i <= cc.config.CommitRetries; i++ {
		logger.Debugf("attempt #%d: AddPeer %s", i, pid.Pretty())
		if finalErr != nil {
			logger.Errorf("retrying to add peer. Attempt #%d failed: %s", i, finalErr)
		}
		ok, err := cc.RedirectToLeader("AddPeer", pid, struct{}{})
		if err != nil || ok {
			return err
		}
		// Being here means we are the leader and can commit
		finalErr = cc.raft.AddPeer(ctx, pid)
		if finalErr != nil {
			time.Sleep(cc.config.CommitRetryDelay)
			continue
		}
		logger.Infof("peer added to Raft: %s", pid.Pretty())
		break
	}
	return finalErr
}

// RmPeer removes a peer from this consensus. It will
// forward the operation to the leader if this is not it.
func (cc *Consensus) RmPeer(ctx context.Context, pid peer.ID) error {
	var finalErr error
	for i := 0; i <= cc.config.CommitRetries; i++ {
		logger.Debugf("attempt #%d: RmPeer %s", i, pid.Pretty())
		if finalErr != nil {
			logger.Errorf("retrying to remove peer. Attempt #%d failed: %s", i, finalErr)
		}
		ok, err := cc.RedirectToLeader("RmPeer", pid, struct{}{})
		if err != nil || ok {
			return err
		}
		// Being here means we are the leader and can commit
		finalErr = cc.raft.RemovePeer(ctx, peer.Encode(pid))
		if finalErr != nil {
			time.Sleep(cc.config.CommitRetryDelay)
			continue
		}
		logger.Infof("peer removed from Raft: %s", pid.Pretty())
		break
	}
	return finalErr
}

// State retrieves the current consensus RaftState. It may error if no RaftState has
// been agreed upon or the state is not consistent. The returned RaftState is the
// last agreed-upon RaftState known by this node. No writes are allowed, as all
// writes to the shared state should happen through the Consensus component
// methods.
func (cc *Consensus) State(ctx context.Context) (*RaftState, error) {
	st, err := cc.consensus.GetLogHead()
	if err == libp2praft.ErrNoState {
		return newRaftState(nil), nil
	}

	if err != nil {
		return nil, err
	}
	state, ok := st.(*RaftState)
	if !ok {
		return nil, errors.New("wrong state type")
	}
	return state, nil
}

// Leader returns the peerID of the Leader of the
// cluster. It returns an error when there is no leader.
func (cc *Consensus) Leader(ctx context.Context) (peer.ID, error) {
	// Note the hard-dependency on raft here...
	raftactor := cc.actor.(*libp2praft.Actor)
	return raftactor.Leader()
}

// Clean removes the Raft persisted state.
func (cc *Consensus) Clean(ctx context.Context) error {
	//return CleanupRaft(cc.config)
	return nil
}

//Rollback replaces the current agreed-upon
//state with the state provided. Only the consensus leader
//can perform this operation.
//func (cc *Consensus) Rollback(state RaftState) error {
//	// This is unused. It *might* be used for upgrades.
//	// There is rather untested magic in libp2p-raft's FSM()
//	// to make this possible.
//	return cc.consensus.Rollback(state)
//}

// Peers returns the current list of peers in the consensus.
// The list will be sorted alphabetically.
func (cc *Consensus) Peers(ctx context.Context) ([]peer.ID, error) {

	peers := []peer.ID{}
	raftPeers, err := cc.raft.Peers(ctx)
	if err != nil {
		return nil, fmt.Errorf("cannot retrieve list of peers: %s", err)
	}

	sort.Strings(raftPeers)

	for _, p := range raftPeers {
		id, err := peer.Decode(p)
		if err != nil {
			panic("could not decode peer")
		}
		peers = append(peers, id)
	}
	return peers, nil
}

// IsLeader returns true if the current node is the consensus leader.
func (cc *Consensus) IsLeader(ctx context.Context) bool {
	leader, _ := cc.Leader(ctx)
	return leader == cc.host.ID()
}
lib/consensus/raft/interfaces.go (new file, 41 lines)
@ -0,0 +1,41 @@
package consensus

import (
	"context"

	consensus "github.com/libp2p/go-libp2p-consensus"
	"github.com/libp2p/go-libp2p/core/peer"
)

type ConsensusAPI interface {
	// Returns a channel to signal that the consensus layer is ready
	// allowing the main component to wait for it during start.
	Ready(context.Context) <-chan struct{}

	AddPeer(context.Context, peer.ID) error
	RmPeer(context.Context, peer.ID) error
	State(context.Context) (consensus.State, error)
	// Provide a node which is responsible to perform
	// specific tasks which must only run in 1 cluster peer.
	Leader(context.Context) (peer.ID, error)
	// Only returns when the consensus state has all log
	// updates applied to it.
	WaitForSync(context.Context) error
	// Clean removes all consensus data.
	Clean(context.Context) error
	// Peers returns the peerset participating in the Consensus.
	Peers(context.Context) ([]peer.ID, error)
	// IsTrustedPeer returns true if the given peer is "trusted".
	// This will grant access to more rpc endpoints than a
	// non-trusted one. This should be fast as it will be
	// called repeatedly for every remote RPC request.
	IsTrustedPeer(context.Context, peer.ID) bool
	// Trust marks a peer as "trusted".
	Trust(context.Context, peer.ID) error
	// Distrust removes a peer from the "trusted" set.
	Distrust(context.Context, peer.ID) error
	// Returns true if current node is the cluster leader
	IsLeader(ctx context.Context) bool

	Shutdown(context.Context) error
}
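// Illustrative sketch (not part of this diff): a typical startup sequence for
// code holding a ConsensusAPI value. The variable cns and the surrounding
// control flow are assumptions for illustration only.
//
//	select {
//	case <-cns.Ready(ctx):
//	case <-ctx.Done():
//		return ctx.Err()
//	}
//	leader, err := cns.Leader(ctx)
//	if err != nil {
//		return err // no leader elected yet
//	}
//	if cns.IsLeader(ctx) {
//		// this node will service MpoolPushMessage directly
//	}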
lib/consensus/raft/raft.go (new file, 592 lines)
@ -0,0 +1,592 @@
package consensus
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
hraft "github.com/hashicorp/raft"
|
||||
raftboltdb "github.com/hashicorp/raft-boltdb"
|
||||
"github.com/ipfs/go-log/v2"
|
||||
p2praft "github.com/libp2p/go-libp2p-raft"
|
||||
host "github.com/libp2p/go-libp2p/core/host"
|
||||
peer "github.com/libp2p/go-libp2p/core/peer"
|
||||
"go.uber.org/multierr"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/filecoin-project/lotus/lib/addrutil"
|
||||
"github.com/filecoin-project/lotus/node/repo"
|
||||
)
|
||||
|
||||
var raftLogger = log.Logger("raft-cluster")
|
||||
|
||||
// errWaitingForSelf is returned when we are waiting for ourselves to depart
// the peer set, which won't happen
var errWaitingForSelf = errors.New("waiting for ourselves to depart")
|
||||
|
||||
// RaftMaxSnapshots indicates how many snapshots to keep in the consensus data
|
||||
// folder.
|
||||
// TODO: Maybe include this in Config. Not sure how useful it is to touch
|
||||
// this anyways.
|
||||
var RaftMaxSnapshots = 5
|
||||
|
||||
// RaftLogCacheSize is the maximum number of logs to cache in-memory.
|
||||
// This is used to reduce disk I/O for the recently committed entries.
|
||||
var RaftLogCacheSize = 512
|
||||
|
||||
// How long we wait for updates during shutdown before snapshotting
|
||||
var waitForUpdatesShutdownTimeout = 5 * time.Second
|
||||
var waitForUpdatesInterval = 400 * time.Millisecond
|
||||
|
||||
// How many times to retry snapshotting when shutting down
|
||||
var maxShutdownSnapshotRetries = 5
|
||||
|
||||
// raftWrapper wraps the hraft.Raft object and related things like the
|
||||
// different stores used or the hraft.Configuration.
|
||||
// Its methods provide functionality for working with Raft.
|
||||
type raftWrapper struct {
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
raft *hraft.Raft
|
||||
config *ClusterRaftConfig
|
||||
host host.Host
|
||||
serverConfig hraft.Configuration
|
||||
transport *hraft.NetworkTransport
|
||||
snapshotStore hraft.SnapshotStore
|
||||
logStore hraft.LogStore
|
||||
stableStore hraft.StableStore
|
||||
boltdb *raftboltdb.BoltStore
|
||||
repo repo.LockedRepo
|
||||
staging bool
|
||||
}
|
||||
|
||||
// newRaftWrapper creates a Raft instance and initializes
|
||||
// everything leaving it ready to use. Note, that Bootstrap() should be called
|
||||
// to make sure the raft instance is usable.
|
||||
func newRaftWrapper(
|
||||
host host.Host,
|
||||
cfg *ClusterRaftConfig,
|
||||
fsm hraft.FSM,
|
||||
repo repo.LockedRepo,
|
||||
staging bool,
|
||||
) (*raftWrapper, error) {
|
||||
|
||||
raftW := &raftWrapper{}
|
||||
raftW.config = cfg
|
||||
raftW.host = host
|
||||
raftW.staging = staging
|
||||
raftW.repo = repo
|
||||
// Set correct LocalID
|
||||
cfg.RaftConfig.LocalID = hraft.ServerID(peer.Encode(host.ID()))
|
||||
|
||||
df := cfg.GetDataFolder(repo)
|
||||
err := makeDataFolder(df)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = raftW.makeServerConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = raftW.makeTransport()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = raftW.makeStores()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
raftLogger.Debug("creating Raft")
|
||||
raftW.raft, err = hraft.NewRaft(
|
||||
cfg.RaftConfig,
|
||||
fsm,
|
||||
raftW.logStore,
|
||||
raftW.stableStore,
|
||||
raftW.snapshotStore,
|
||||
raftW.transport,
|
||||
)
|
||||
if err != nil {
|
||||
raftLogger.Error("initializing raft: ", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
raftW.ctx, raftW.cancel = context.WithCancel(context.Background())
|
||||
|
||||
return raftW, nil
|
||||
}
|
||||
|
||||
// makeDataFolder creates the folder that is meant to store Raft data. Ensures
|
||||
// we always set 0700 mode.
|
||||
func makeDataFolder(folder string) error {
|
||||
return os.MkdirAll(folder, 0700)
|
||||
}
|
||||
|
||||
func (rw *raftWrapper) makeTransport() (err error) {
|
||||
raftLogger.Debug("creating libp2p Raft transport")
|
||||
rw.transport, err = p2praft.NewLibp2pTransport(
|
||||
rw.host,
|
||||
rw.config.NetworkTimeout,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
func (rw *raftWrapper) makeStores() error {
|
||||
raftLogger.Debug("creating BoltDB store")
|
||||
df := rw.config.GetDataFolder(rw.repo)
|
||||
store, err := raftboltdb.NewBoltStore(filepath.Join(df, "raft.db"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// wraps the store in a LogCache to improve performance.
|
||||
// See consul/agent/consul/server.go
|
||||
cacheStore, err := hraft.NewLogCache(RaftLogCacheSize, store)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
raftLogger.Debug("creating raft snapshot store")
|
||||
snapstore, err := hraft.NewFileSnapshotStoreWithLogger(
|
||||
df,
|
||||
RaftMaxSnapshots,
|
||||
zap.NewStdLog(log.Logger("raft-snapshot").SugaredLogger.Desugar()),
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rw.logStore = cacheStore
|
||||
rw.stableStore = store
|
||||
rw.snapshotStore = snapstore
|
||||
rw.boltdb = store
|
||||
return nil
|
||||
}
|
||||
|
||||
// Bootstrap calls BootstrapCluster on the Raft instance with a valid
// Configuration (generated from InitPeerset) when Raft has no state
// and we are not setting up a staging peer. It returns whether Raft
// was bootstrapped (true) and an error.
|
||||
func (rw *raftWrapper) Bootstrap() (bool, error) {
|
||||
logger.Debug("checking for existing raft states")
|
||||
hasState, err := hraft.HasExistingState(
|
||||
rw.logStore,
|
||||
rw.stableStore,
|
||||
rw.snapshotStore,
|
||||
)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if hasState {
|
||||
logger.Debug("raft cluster is already initialized")
|
||||
|
||||
// Inform the user that we are working with a pre-existing peerset
|
||||
logger.Info("existing Raft state found! raft.InitPeerset will be ignored")
|
||||
cf := rw.raft.GetConfiguration()
|
||||
if err := cf.Error(); err != nil {
|
||||
logger.Debug(err)
|
||||
return false, err
|
||||
}
|
||||
currentCfg := cf.Configuration()
|
||||
srvs := ""
|
||||
for _, s := range currentCfg.Servers {
|
||||
srvs += fmt.Sprintf(" %s\n", s.ID)
|
||||
}
|
||||
|
||||
logger.Debugf("Current Raft Peerset:\n%s\n", srvs)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if rw.staging {
|
||||
logger.Debug("staging servers do not need initialization")
|
||||
logger.Info("peer is ready to join a cluster")
|
||||
return false, nil
|
||||
}
|
||||
|
||||
voters := ""
|
||||
for _, s := range rw.serverConfig.Servers {
|
||||
voters += fmt.Sprintf(" %s\n", s.ID)
|
||||
}
|
||||
|
||||
logger.Infof("initializing raft cluster with the following voters:\n%s\n", voters)
|
||||
|
||||
future := rw.raft.BootstrapCluster(rw.serverConfig)
|
||||
if err := future.Error(); err != nil {
|
||||
logger.Error("bootstrapping cluster: ", err)
|
||||
return true, err
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// create Raft servers configuration. The result is used
|
||||
// by Bootstrap() when it proceeds to Bootstrap.
|
||||
func (rw *raftWrapper) makeServerConfig() error {
|
||||
peers := []peer.ID{}
|
||||
addrInfos, err := addrutil.ParseAddresses(context.Background(), rw.config.InitPeerset)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, addrInfo := range addrInfos {
|
||||
peers = append(peers, addrInfo.ID)
|
||||
}
|
||||
rw.serverConfig = makeServerConf(append(peers, rw.host.ID()))
|
||||
return nil
|
||||
}
|
||||
|
||||
// creates a server configuration with all peers as Voters.
|
||||
func makeServerConf(peers []peer.ID) hraft.Configuration {
|
||||
sm := make(map[string]struct{})
|
||||
|
||||
servers := make([]hraft.Server, 0)
|
||||
|
||||
// Servers are peers + self. We avoid duplicate entries below
|
||||
for _, pid := range peers {
|
||||
p := peer.Encode(pid)
|
||||
_, ok := sm[p]
|
||||
if !ok { // avoid dups
|
||||
sm[p] = struct{}{}
|
||||
servers = append(servers, hraft.Server{
|
||||
Suffrage: hraft.Voter,
|
||||
ID: hraft.ServerID(p),
|
||||
Address: hraft.ServerAddress(p),
|
||||
})
|
||||
}
|
||||
}
|
||||
return hraft.Configuration{Servers: servers}
|
||||
}
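// Illustrative sketch (not part of this diff): for a peerset {A, B, self},
// makeServerConf yields one Voter entry per unique peer ID, with the encoded
// peer ID doubling as the raft server address (the libp2p transport resolves
// it). peerA and peerB are placeholders.
//
//	conf := makeServerConf([]peer.ID{peerA, peerB, rw.host.ID()})
//	// conf.Servers ~= []hraft.Server{
//	//	{Suffrage: hraft.Voter, ID: "12D3KooW...A", Address: "12D3KooW...A"},
//	//	{Suffrage: hraft.Voter, ID: "12D3KooW...B", Address: "12D3KooW...B"},
//	//	{Suffrage: hraft.Voter, ID: "12D3KooW...self", Address: "12D3KooW...self"},
//	// }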
|
||||
|
||||
// WaitForLeader holds until Raft says we have a leader.
|
||||
// Returns if ctx is canceled.
|
||||
func (rw *raftWrapper) WaitForLeader(ctx context.Context) (string, error) {
|
||||
ticker := time.NewTicker(time.Second / 2)
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
if l := rw.raft.Leader(); l != "" {
|
||||
logger.Debug("waitForleaderTimer")
|
||||
logger.Infof("Current Raft Leader: %s", l)
|
||||
ticker.Stop()
|
||||
return string(l), nil
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return "", ctx.Err()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (rw *raftWrapper) WaitForVoter(ctx context.Context) error {
|
||||
logger.Debug("waiting until we are promoted to a voter")
|
||||
|
||||
pid := hraft.ServerID(peer.Encode(rw.host.ID()))
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
default:
|
||||
logger.Debugf("%s: get configuration", pid)
|
||||
configFuture := rw.raft.GetConfiguration()
|
||||
if err := configFuture.Error(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if isVoter(pid, configFuture.Configuration()) {
|
||||
return nil
|
||||
}
|
||||
logger.Debugf("%s: not voter yet", pid)
|
||||
|
||||
time.Sleep(waitForUpdatesInterval)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func isVoter(srvID hraft.ServerID, cfg hraft.Configuration) bool {
|
||||
for _, server := range cfg.Servers {
|
||||
if server.ID == srvID && server.Suffrage == hraft.Voter {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// WaitForUpdates holds until Raft has synced to the last index in the log
|
||||
func (rw *raftWrapper) WaitForUpdates(ctx context.Context) error {
|
||||
|
||||
logger.Debug("Raft state is catching up to the latest known version. Please wait...")
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
default:
|
||||
lai := rw.raft.AppliedIndex()
|
||||
li := rw.raft.LastIndex()
|
||||
logger.Debugf("current Raft index: %d/%d",
|
||||
lai, li)
|
||||
if lai == li {
|
||||
return nil
|
||||
}
|
||||
time.Sleep(waitForUpdatesInterval)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (rw *raftWrapper) WaitForPeer(ctx context.Context, pid string, depart bool) error {
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
default:
|
||||
peers, err := rw.Peers(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(peers) == 1 && pid == peers[0] && depart {
|
||||
return errWaitingForSelf
|
||||
}
|
||||
|
||||
found := find(peers, pid)
|
||||
|
||||
// departing
|
||||
if depart && !found {
|
||||
return nil
|
||||
}
|
||||
|
||||
// joining
|
||||
if !depart && found {
|
||||
return nil
|
||||
}
|
||||
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Snapshot tells Raft to take a snapshot.
|
||||
func (rw *raftWrapper) Snapshot() error {
|
||||
future := rw.raft.Snapshot()
|
||||
err := future.Error()
|
||||
if err != nil && err.Error() != hraft.ErrNothingNewToSnapshot.Error() {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// snapshotOnShutdown attempts to take a snapshot before a shutdown.
|
||||
// Snapshotting might fail if the raft applied index is not the last index.
|
||||
// This waits for the updates and tries to take a snapshot when the
|
||||
// applied index is up to date.
|
||||
// It will retry if the snapshot still fails, in case more updates have arrived.
|
||||
// If waiting for updates times-out, it will not try anymore, since something
|
||||
// is wrong. This is a best-effort solution as there is no way to tell Raft
|
||||
// to stop processing entries because we want to take a snapshot before
|
||||
// shutting down.
|
||||
func (rw *raftWrapper) snapshotOnShutdown() error {
|
||||
var err error
|
||||
for i := 0; i < maxShutdownSnapshotRetries; i++ {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), waitForUpdatesShutdownTimeout)
|
||||
err = rw.WaitForUpdates(ctx)
|
||||
cancel()
|
||||
if err != nil {
|
||||
logger.Warn("timed out waiting for state updates before shutdown. Snapshotting may fail")
|
||||
return rw.Snapshot()
|
||||
}
|
||||
|
||||
err = rw.Snapshot()
|
||||
if err == nil {
|
||||
return nil // things worked
|
||||
}
|
||||
|
||||
// There was an error
|
||||
err = errors.New("could not snapshot raft: " + err.Error())
|
||||
logger.Warnf("retrying to snapshot (%d/%d)...", i+1, maxShutdownSnapshotRetries)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Shutdown shuts down Raft and closes the BoltDB.
|
||||
func (rw *raftWrapper) Shutdown(ctx context.Context) error {
|
||||
|
||||
rw.cancel()
|
||||
|
||||
var finalErr error
|
||||
|
||||
err := rw.snapshotOnShutdown()
|
||||
if err != nil {
|
||||
finalErr = multierr.Append(finalErr, err)
|
||||
}
|
||||
|
||||
future := rw.raft.Shutdown()
|
||||
err = future.Error()
|
||||
if err != nil {
|
||||
finalErr = multierr.Append(finalErr, err)
|
||||
}
|
||||
|
||||
err = rw.boltdb.Close() // important!
|
||||
if err != nil {
|
||||
finalErr = multierr.Append(finalErr, err)
|
||||
}
|
||||
|
||||
return finalErr
|
||||
}
|
||||
|
||||
// AddPeer adds a peer to Raft
|
||||
func (rw *raftWrapper) AddPeer(ctx context.Context, peerId peer.ID) error {
|
||||
|
||||
// Check that we don't have it to not waste
|
||||
// log entries if so.
|
||||
peers, err := rw.Peers(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if find(peers, peerId.String()) {
|
||||
logger.Infof("%s is already a raft peerStr", peerId.String())
|
||||
return nil
|
||||
}
|
||||
|
||||
err = rw.host.Connect(ctx, peer.AddrInfo{ID: peerId})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
future := rw.raft.AddVoter(
|
||||
hraft.ServerID(peerId.String()),
|
||||
hraft.ServerAddress(peerId.String()),
|
||||
0,
|
||||
0,
|
||||
) // TODO: Extra cfg value?
|
||||
err = future.Error()
|
||||
if err != nil {
|
||||
logger.Error("raft cannot add peer: ", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// RemovePeer removes a peer from Raft
|
||||
func (rw *raftWrapper) RemovePeer(ctx context.Context, peer string) error {
|
||||
// Check that we have it to not waste
|
||||
// log entries if we don't.
|
||||
peers, err := rw.Peers(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !find(peers, peer) {
|
||||
logger.Infof("%s is not among raft peers", peer)
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(peers) == 1 && peers[0] == peer {
|
||||
return errors.New("cannot remove ourselves from a 1-peer cluster")
|
||||
}
|
||||
|
||||
rmFuture := rw.raft.RemoveServer(
|
||||
hraft.ServerID(peer),
|
||||
0,
|
||||
0,
|
||||
)
|
||||
err = rmFuture.Error()
|
||||
if err != nil {
|
||||
logger.Error("raft cannot remove peer: ", err)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Leader returns Raft's leader. It may be an empty string if
|
||||
// there is no leader or it is unknown.
|
||||
func (rw *raftWrapper) Leader(ctx context.Context) string {
|
||||
return string(rw.raft.Leader())
|
||||
}
|
||||
|
||||
func (rw *raftWrapper) Peers(ctx context.Context) ([]string, error) {
|
||||
ids := make([]string, 0)
|
||||
|
||||
configFuture := rw.raft.GetConfiguration()
|
||||
if err := configFuture.Error(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, server := range configFuture.Configuration().Servers {
|
||||
ids = append(ids, string(server.ID))
|
||||
}
|
||||
|
||||
return ids, nil
|
||||
}
|
||||
|
||||
// CleanupRaft moves the current data folder to a backup location
|
||||
//func CleanupRaft(cfg *Config) error {
|
||||
// dataFolder := cfg.GetDataFolder()
|
||||
// keep := cfg.BackupsRotate
|
||||
//
|
||||
// meta, _, err := latestSnapshot(dataFolder)
|
||||
// if meta == nil && err == nil {
|
||||
// // no snapshots at all. Avoid creating backups
|
||||
// // from empty state folders.
|
||||
// logger.Infof("cleaning empty Raft data folder (%s)", dataFolder)
|
||||
// os.RemoveAll(dataFolder)
|
||||
// return nil
|
||||
// }
|
||||
//
|
||||
// logger.Infof("cleaning and backing up Raft data folder (%s)", dataFolder)
|
||||
// dbh := newDataBackupHelper(dataFolder, keep)
|
||||
// err = dbh.makeBackup()
|
||||
// if err != nil {
|
||||
// logger.Warn(err)
|
||||
// logger.Warn("the state could not be cleaned properly")
|
||||
// logger.Warn("manual intervention may be needed before starting cluster again")
|
||||
// }
|
||||
// return nil
|
||||
//}
|
||||
|
||||
// only call when Raft is shutdown
|
||||
func (rw *raftWrapper) Clean() error {
|
||||
//return CleanupRaft(rw.config)
|
||||
return nil
|
||||
}
|
||||
|
||||
func find(s []string, elem string) bool {
|
||||
for _, selem := range s {
|
||||
if selem == elem {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (rw *raftWrapper) observePeers() {
|
||||
obsCh := make(chan hraft.Observation, 1)
|
||||
defer close(obsCh)
|
||||
|
||||
observer := hraft.NewObserver(obsCh, true, func(o *hraft.Observation) bool {
|
||||
po, ok := o.Data.(hraft.PeerObservation)
|
||||
return ok && po.Removed
|
||||
})
|
||||
|
||||
rw.raft.RegisterObserver(observer)
|
||||
defer rw.raft.DeregisterObserver(observer)
|
||||
|
||||
for {
|
||||
select {
|
||||
case obs := <-obsCh:
|
||||
pObs := obs.Data.(hraft.PeerObservation)
|
||||
logger.Info("raft peer departed. Removing from peerstore: ", pObs.Peer.ID)
|
||||
pID, err := peer.Decode(string(pObs.Peer.ID))
|
||||
if err != nil {
|
||||
logger.Error(err)
|
||||
continue
|
||||
}
|
||||
rw.host.Peerstore().ClearAddrs(pID)
|
||||
case <-rw.ctx.Done():
|
||||
logger.Debug("stopped observing raft peers")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
@ -1,6 +1,7 @@
|
||||
package retry
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
@ -10,7 +11,7 @@ import (
|
||||
|
||||
var log = logging.Logger("retry")
|
||||
|
||||
func Retry[T any](attempts int, initialBackoff time.Duration, errorTypes []error, f func() (T, error)) (result T, err error) {
|
||||
func Retry[T any](ctx context.Context, attempts int, initialBackoff time.Duration, errorTypes []error, f func() (T, error)) (result T, err error) {
|
||||
for i := 0; i < attempts; i++ {
|
||||
if i > 0 {
|
||||
log.Info("Retrying after error:", err)
|
||||
@ -21,6 +22,9 @@ func Retry[T any](attempts int, initialBackoff time.Duration, errorTypes []error
|
||||
if err == nil || !api.ErrorIsIn(err, errorTypes) {
|
||||
return result, err
|
||||
}
|
||||
if ctx.Err() != nil {
|
||||
return result, ctx.Err()
|
||||
}
|
||||
}
|
||||
log.Errorf("Failed after %d attempts, last error: %s", attempts, err)
|
||||
return result, err
|
||||
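// Illustrative sketch (not part of this diff): with the added context
// parameter, callers of retry.Retry can abort pending retries when their
// context is cancelled. The error list and backoff below are assumptions for
// illustration only.
//
//	errorsToRetry := []error{&jsonrpc.RPCConnectionError{}}
//	smsg, err := retry.Retry(ctx, 5, time.Second, errorsToRetry, func() (*types.SignedMessage, error) {
//		return node.MpoolPushMessage(ctx, msg, spec)
//	})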
|
@ -30,6 +30,7 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/gen/slashfilter"
|
||||
lrand "github.com/filecoin-project/lotus/chain/rand"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
cliutil "github.com/filecoin-project/lotus/cli/util"
|
||||
"github.com/filecoin-project/lotus/journal"
|
||||
)
|
||||
|
||||
@ -208,6 +209,8 @@ func (m *Miner) mine(ctx context.Context) {
|
||||
var lastBase MiningBase
|
||||
minerLoop:
|
||||
for {
|
||||
ctx := cliutil.OnSingleNode(ctx)
|
||||
|
||||
select {
|
||||
case <-m.stop:
|
||||
stopping := m.stopping
|
||||
|
@ -121,6 +121,7 @@ const (
|
||||
SettlePaymentChannelsKey
|
||||
RunPeerTaggerKey
|
||||
SetupFallbackBlockstoresKey
|
||||
GoRPCServer
|
||||
|
||||
SetApiEndpointKey
|
||||
|
||||
|
@ -3,6 +3,7 @@ package node
|
||||
import (
|
||||
"os"
|
||||
|
||||
gorpc "github.com/libp2p/go-libp2p-gorpc"
|
||||
"go.uber.org/fx"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
@ -28,6 +29,7 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/wallet"
|
||||
ledgerwallet "github.com/filecoin-project/lotus/chain/wallet/ledger"
|
||||
"github.com/filecoin-project/lotus/chain/wallet/remotewallet"
|
||||
raftcns "github.com/filecoin-project/lotus/lib/consensus/raft"
|
||||
"github.com/filecoin-project/lotus/lib/peermgr"
|
||||
"github.com/filecoin-project/lotus/markets/retrievaladapter"
|
||||
"github.com/filecoin-project/lotus/markets/storageadapter"
|
||||
@ -106,6 +108,7 @@ var ChainNode = Options(
|
||||
|
||||
// Service: Wallet
|
||||
Override(new(*messagesigner.MessageSigner), messagesigner.NewMessageSigner),
|
||||
Override(new(messagesigner.MsgSigner), func(ms *messagesigner.MessageSigner) *messagesigner.MessageSigner { return ms }),
|
||||
Override(new(*wallet.LocalWallet), wallet.NewWallet),
|
||||
Override(new(wallet.Default), From(new(*wallet.LocalWallet))),
|
||||
Override(new(api.Wallet), From(new(wallet.MultiWallet))),
|
||||
@ -142,7 +145,7 @@ var ChainNode = Options(
|
||||
// Lite node API
|
||||
ApplyIf(isLiteNode,
|
||||
Override(new(messagepool.Provider), messagepool.NewProviderLite),
|
||||
Override(new(messagesigner.MpoolNonceAPI), From(new(modules.MpoolNonceAPI))),
|
||||
Override(new(messagepool.MpoolNonceAPI), From(new(modules.MpoolNonceAPI))),
|
||||
Override(new(full.ChainModuleAPI), From(new(api.Gateway))),
|
||||
Override(new(full.GasModuleAPI), From(new(api.Gateway))),
|
||||
Override(new(full.MpoolModuleAPI), From(new(api.Gateway))),
|
||||
@ -153,7 +156,7 @@ var ChainNode = Options(
|
||||
// Full node API / service startup
|
||||
ApplyIf(isFullNode,
|
||||
Override(new(messagepool.Provider), messagepool.NewProvider),
|
||||
Override(new(messagesigner.MpoolNonceAPI), From(new(*messagepool.MessagePool))),
|
||||
Override(new(messagepool.MpoolNonceAPI), From(new(*messagepool.MessagePool))),
|
||||
Override(new(full.ChainModuleAPI), From(new(full.ChainModule))),
|
||||
Override(new(full.GasModuleAPI), From(new(full.GasModule))),
|
||||
Override(new(full.MpoolModuleAPI), From(new(full.MpoolModule))),
|
||||
@ -238,6 +241,16 @@ func ConfigFullNode(c interface{}) Option {
|
||||
Unset(new(*wallet.LocalWallet)),
|
||||
Override(new(wallet.Default), wallet.NilDefault),
|
||||
),
|
||||
// Chain node cluster enabled
|
||||
If(cfg.Cluster.ClusterModeEnabled,
|
||||
Override(new(*gorpc.Client), modules.NewRPCClient),
|
||||
Override(new(*raftcns.ClusterRaftConfig), raftcns.NewClusterRaftConfig(&cfg.Cluster)),
|
||||
Override(new(*raftcns.Consensus), raftcns.NewConsensusWithRPCClient(false)),
|
||||
Override(new(*messagesigner.MessageSignerConsensus), messagesigner.NewMessageSignerConsensus),
|
||||
Override(new(messagesigner.MsgSigner), From(new(*messagesigner.MessageSignerConsensus))),
|
||||
Override(new(*modules.RPCHandler), modules.NewRPCHandler),
|
||||
Override(GoRPCServer, modules.NewRPCServer),
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
|
@ -97,6 +97,7 @@ func DefaultFullNode() *FullNode {
|
||||
HotStoreFullGCFrequency: 20,
|
||||
},
|
||||
},
|
||||
Cluster: *DefaultUserRaftConfig(),
|
||||
}
|
||||
}
|
||||
|
||||
@ -287,3 +288,25 @@ const (
|
||||
// worker. The scheduler may assign any task to this worker.
|
||||
ResourceFilteringDisabled = ResourceFilteringStrategy("disabled")
|
||||
)
|
||||
|
||||
var (
|
||||
DefaultDataSubFolder = "raft"
|
||||
DefaultWaitForLeaderTimeout = 15 * time.Second
|
||||
DefaultCommitRetries = 1
|
||||
DefaultNetworkTimeout = 100 * time.Second
|
||||
DefaultCommitRetryDelay = 200 * time.Millisecond
|
||||
DefaultBackupsRotate = 6
|
||||
)
|
||||
|
||||
func DefaultUserRaftConfig() *UserRaftConfig {
|
||||
var cfg UserRaftConfig
|
||||
cfg.DataFolder = "" // empty so it gets omitted
|
||||
cfg.InitPeersetMultiAddr = []string{}
|
||||
cfg.WaitForLeaderTimeout = Duration(DefaultWaitForLeaderTimeout)
|
||||
cfg.NetworkTimeout = Duration(DefaultNetworkTimeout)
|
||||
cfg.CommitRetries = DefaultCommitRetries
|
||||
cfg.CommitRetryDelay = Duration(DefaultCommitRetryDelay)
|
||||
cfg.BackupsRotate = DefaultBackupsRotate
|
||||
|
||||
return &cfg
|
||||
}
|
||||
|
@ -372,6 +372,12 @@ see https://lotus.filecoin.io/storage-providers/advanced-configurations/market/#
|
||||
Name: "Chainstore",
|
||||
Type: "Chainstore",
|
||||
|
||||
Comment: ``,
|
||||
},
|
||||
{
|
||||
Name: "Cluster",
|
||||
Type: "UserRaftConfig",
|
||||
|
||||
Comment: ``,
|
||||
},
|
||||
},
|
||||
@ -1204,6 +1210,68 @@ Default is 20 (about once a week).`,
|
||||
Comment: ``,
|
||||
},
|
||||
},
|
||||
"UserRaftConfig": []DocField{
|
||||
{
|
||||
Name: "ClusterModeEnabled",
|
||||
Type: "bool",
|
||||
|
||||
Comment: `EXPERIMENTAL. Config to enable node cluster with raft consensus`,
|
||||
},
|
||||
{
|
||||
Name: "DataFolder",
|
||||
Type: "string",
|
||||
|
||||
Comment: `A folder to store Raft's data.`,
|
||||
},
|
||||
{
|
||||
Name: "InitPeersetMultiAddr",
|
||||
Type: "[]string",
|
||||
|
||||
Comment: `InitPeersetMultiAddr provides the list of initial cluster peers for new Raft
|
||||
peers (with no prior state). It is ignored when Raft was already
|
||||
initialized or when starting in staging mode.`,
|
||||
},
|
||||
{
|
||||
Name: "WaitForLeaderTimeout",
|
||||
Type: "Duration",
|
||||
|
||||
Comment: `LeaderTimeout specifies how long to wait for a leader before
|
||||
failing an operation.`,
|
||||
},
|
||||
{
|
||||
Name: "NetworkTimeout",
|
||||
Type: "Duration",
|
||||
|
||||
Comment: `NetworkTimeout specifies how long before a Raft network
|
||||
operation is timed out`,
|
||||
},
|
||||
{
|
||||
Name: "CommitRetries",
|
||||
Type: "int",
|
||||
|
||||
Comment: `CommitRetries specifies how many times we retry a failed commit until
|
||||
we give up.`,
|
||||
},
|
||||
{
|
||||
Name: "CommitRetryDelay",
|
||||
Type: "Duration",
|
||||
|
||||
Comment: `How long to wait between retries`,
|
||||
},
|
||||
{
|
||||
Name: "BackupsRotate",
|
||||
Type: "int",
|
||||
|
||||
Comment: `BackupsRotate specifies the maximum number of Raft's DataFolder
|
||||
copies that we keep as backups (renaming) after cleanup.`,
|
||||
},
|
||||
{
|
||||
Name: "Tracing",
|
||||
Type: "bool",
|
||||
|
||||
Comment: `Tracing enables propagation of contexts across binary boundaries.`,
|
||||
},
|
||||
},
|
||||
"Wallet": []DocField{
|
||||
{
|
||||
Name: "RemoteBackend",
|
||||
|
@ -26,6 +26,7 @@ type FullNode struct {
|
||||
Wallet Wallet
|
||||
Fees FeeConfig
|
||||
Chainstore Chainstore
|
||||
Cluster UserRaftConfig
|
||||
}
|
||||
|
||||
// // Common
|
||||
@ -600,3 +601,30 @@ type Wallet struct {
|
||||
type FeeConfig struct {
|
||||
DefaultMaxFee types.FIL
|
||||
}
|
||||
|
||||
type UserRaftConfig struct {
|
||||
// EXPERIMENTAL. Config to enable node cluster with raft consensus
|
||||
ClusterModeEnabled bool
|
||||
// A folder to store Raft's data.
|
||||
DataFolder string
|
||||
// InitPeersetMultiAddr provides the list of initial cluster peers for new Raft
|
||||
// peers (with no prior state). It is ignored when Raft was already
|
||||
// initialized or when starting in staging mode.
|
||||
InitPeersetMultiAddr []string
|
||||
// LeaderTimeout specifies how long to wait for a leader before
|
||||
// failing an operation.
|
||||
WaitForLeaderTimeout Duration
|
||||
// NetworkTimeout specifies how long before a Raft network
|
||||
// operation is timed out
|
||||
NetworkTimeout Duration
|
||||
// CommitRetries specifies how many times we retry a failed commit until
|
||||
// we give up.
|
||||
CommitRetries int
|
||||
// How long to wait between retries
|
||||
CommitRetryDelay Duration
|
||||
// BackupsRotate specifies the maximum number of Raft's DataFolder
|
||||
// copies that we keep as backups (renaming) after cleanup.
|
||||
BackupsRotate int
|
||||
// Tracing enables propagation of contexts across binary boundaries.
|
||||
Tracing bool
|
||||
}
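// Illustrative sketch (not part of this diff): enabling cluster mode
// programmatically starts from the defaults and overrides the fields a
// deployment cares about. The package qualifier and the multiaddrs below are
// assumptions for illustration only.
//
//	cfg := config.DefaultUserRaftConfig()
//	cfg.ClusterModeEnabled = true
//	cfg.InitPeersetMultiAddr = []string{
//		"/ip4/10.0.0.1/tcp/1347/p2p/12D3KooW...",
//		"/ip4/10.0.0.2/tcp/1347/p2p/12D3KooW...",
//	}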
|
||||
|
@ -34,6 +34,7 @@ type FullNodeAPI struct {
|
||||
full.MsigAPI
|
||||
full.WalletAPI
|
||||
full.SyncAPI
|
||||
full.RaftAPI
|
||||
|
||||
DS dtypes.MetadataDS
|
||||
NetworkName dtypes.NetworkName
|
||||
@ -117,4 +118,12 @@ func (n *FullNodeAPI) NodeStatus(ctx context.Context, inclChainStatus bool) (sta
|
||||
return status, nil
|
||||
}
|
||||
|
||||
func (n *FullNodeAPI) RaftState(ctx context.Context) (*api.RaftStateData, error) {
|
||||
return n.RaftAPI.GetRaftState(ctx)
|
||||
}
|
||||
|
||||
func (n *FullNodeAPI) RaftLeader(ctx context.Context) (peer.ID, error) {
|
||||
return n.RaftAPI.Leader(ctx)
|
||||
}
|
||||
|
||||
var _ api.FullNode = &FullNodeAPI{}
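// Illustrative sketch (not part of this diff): the two new FullNode API
// methods defined above can be queried from any code holding an api.FullNode
// handle (the variable node is an assumption).
//
//	st, err := node.RaftState(ctx)    // per-address nonces and uuid -> signed message map
//	leader, err := node.RaftLeader(ctx)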
|
||||
|
@ -44,7 +44,9 @@ type MpoolAPI struct {
|
||||
WalletAPI
|
||||
GasAPI
|
||||
|
||||
MessageSigner *messagesigner.MessageSigner
|
||||
RaftAPI
|
||||
|
||||
MessageSigner messagesigner.MsgSigner
|
||||
|
||||
PushLocks *dtypes.MpoolLocker
|
||||
}
|
||||
@ -131,7 +133,7 @@ func (a *MpoolAPI) MpoolClear(ctx context.Context, local bool) error {
|
||||
}
|
||||
|
||||
func (m *MpoolModule) MpoolPush(ctx context.Context, smsg *types.SignedMessage) (cid.Cid, error) {
|
||||
return m.Mpool.Push(ctx, smsg)
|
||||
return m.Mpool.Push(ctx, smsg, true)
|
||||
}
|
||||
|
||||
func (a *MpoolAPI) MpoolPushUntrusted(ctx context.Context, smsg *types.SignedMessage) (cid.Cid, error) {
|
||||
@ -143,8 +145,29 @@ func (a *MpoolAPI) MpoolPushMessage(ctx context.Context, msg *types.Message, spe
|
||||
msg = &cp
|
||||
inMsg := *msg
|
||||
|
||||
// Check if this uuid has already been processed. Ignore if uuid is not populated
|
||||
if (spec != nil) && (spec.MsgUuid != uuid.UUID{}) {
|
||||
// Redirect to leader if current node is not leader. A single non raft based node is always the leader
|
||||
if !a.RaftAPI.IsLeader(ctx) {
|
||||
var signedMsg types.SignedMessage
|
||||
redirected, err := a.RaftAPI.RedirectToLeader(ctx, "MpoolPushMessage", api.MpoolMessageWhole{Msg: msg, Spec: spec}, &signedMsg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// It's possible that the current node became the leader between the check and the redirect
|
||||
// In that case, continue with rest of execution and only return signedMsg if something was redirected
|
||||
if redirected {
|
||||
return &signedMsg, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Generate spec and uuid if not available in the message
|
||||
if spec == nil {
|
||||
spec = &api.MessageSendSpec{
|
||||
MsgUuid: uuid.New(),
|
||||
}
|
||||
} else if (spec.MsgUuid == uuid.UUID{}) {
|
||||
spec.MsgUuid = uuid.New()
|
||||
} else {
|
||||
// Check if this uuid has already been processed. Ignore if uuid is not populated
|
||||
signedMessage, err := a.MessageSigner.GetSignedMessage(ctx, spec.MsgUuid)
|
||||
if err == nil {
|
||||
log.Warnf("Message already processed. cid=%s", signedMessage.Cid())
|
||||
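// Illustrative summary (not part of this diff) of the message flow when
// cluster mode is enabled, based on the code above and the raft consensus
// component added in this PR:
//
//	1. A follower receiving MpoolPushMessage forwards the message and spec
//	   (including MsgUuid) to the leader via RedirectToLeader.
//	2. The leader assigns the nonce, signs the message and commits a
//	   ConsensusOp to the raft log.
//	3. Every node replays the op in ApplyTo, recording nonce and uuid and
//	   pushing the signed message into its local mpool.
//	4. A repeated MsgUuid is answered from the uuid -> signed message map
//	   instead of being signed again.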
@ -196,7 +219,7 @@ func (a *MpoolAPI) MpoolPushMessage(ctx context.Context, msg *types.Message, spe
|
||||
}
|
||||
|
||||
// Sign and push the message
|
||||
signedMsg, err := a.MessageSigner.SignMessage(ctx, msg, func(smsg *types.SignedMessage) error {
|
||||
signedMsg, err := a.MessageSigner.SignMessage(ctx, msg, spec, func(smsg *types.SignedMessage) error {
|
||||
if _, err := a.MpoolModuleAPI.MpoolPush(ctx, smsg); err != nil {
|
||||
return xerrors.Errorf("mpool push: failed to push message: %w", err)
|
||||
}
|
||||
@ -207,11 +230,9 @@ func (a *MpoolAPI) MpoolPushMessage(ctx context.Context, msg *types.Message, spe
|
||||
}
|
||||
|
||||
// Store uuid->signed message in datastore
|
||||
if (spec != nil) && (spec.MsgUuid != uuid.UUID{}) {
|
||||
err = a.MessageSigner.StoreSignedMessage(ctx, spec.MsgUuid, signedMsg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = a.MessageSigner.StoreSignedMessage(ctx, spec.MsgUuid, signedMsg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return signedMsg, nil
|
||||
@ -220,7 +241,7 @@ func (a *MpoolAPI) MpoolPushMessage(ctx context.Context, msg *types.Message, spe
|
||||
func (a *MpoolAPI) MpoolBatchPush(ctx context.Context, smsgs []*types.SignedMessage) ([]cid.Cid, error) {
|
||||
var messageCids []cid.Cid
|
||||
for _, smsg := range smsgs {
|
||||
smsgCid, err := a.Mpool.Push(ctx, smsg)
|
||||
smsgCid, err := a.Mpool.Push(ctx, smsg, true)
|
||||
if err != nil {
|
||||
return messageCids, err
|
||||
}
|
||||
|
node/impl/full/raft.go (new file, 50 lines)
@ -0,0 +1,50 @@
|
||||
package full
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"go.uber.org/fx"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/chain/messagesigner"
|
||||
)
|
||||
|
||||
type RaftAPI struct {
|
||||
fx.In
|
||||
|
||||
MessageSigner *messagesigner.MessageSignerConsensus `optional:"true"`
|
||||
}
|
||||
|
||||
func (r *RaftAPI) GetRaftState(ctx context.Context) (*api.RaftStateData, error) {
|
||||
if r.MessageSigner == nil {
|
||||
return nil, xerrors.Errorf("raft consensus not enabled. Please check your configuration")
|
||||
}
|
||||
raftState, err := r.MessageSigner.GetRaftState(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &api.RaftStateData{NonceMap: raftState.NonceMap, MsgUuids: raftState.MsgUuids}, nil
|
||||
}
|
||||
|
||||
func (r *RaftAPI) Leader(ctx context.Context) (peer.ID, error) {
|
||||
if r.MessageSigner == nil {
|
||||
return "", xerrors.Errorf("raft consensus not enabled. Please check your configuration")
|
||||
}
|
||||
return r.MessageSigner.Leader(ctx)
|
||||
}
|
||||
|
||||
func (r *RaftAPI) IsLeader(ctx context.Context) bool {
|
||||
if r.MessageSigner == nil {
|
||||
return true
|
||||
}
|
||||
return r.MessageSigner.IsLeader(ctx)
|
||||
}
|
||||
|
||||
func (r *RaftAPI) RedirectToLeader(ctx context.Context, method string, arg interface{}, ret interface{}) (bool, error) {
|
||||
if r.MessageSigner == nil {
|
||||
return false, xerrors.Errorf("raft consensus not enabled. Please check your configuration")
|
||||
}
|
||||
return r.MessageSigner.RedirectToLeader(ctx, method, arg, ret)
|
||||
}
|
@ -2,6 +2,7 @@ package full
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
@ -56,7 +57,7 @@ func (a *SyncAPI) SyncSubmitBlock(ctx context.Context, blk *types.BlockMsg) erro
|
||||
return xerrors.Errorf("loading parent block: %w", err)
|
||||
}
|
||||
|
||||
if a.SlashFilter != nil {
|
||||
if a.SlashFilter != nil && os.Getenv("LOTUS_NO_SLASHFILTER") != "_yes_i_know_i_can_and_probably_will_lose_all_my_fil_and_power_" {
|
||||
if err := a.SlashFilter.MinedBlock(ctx, blk.Header, parent.Height); err != nil {
|
||||
log.Errorf("<!!> SLASH FILTER ERROR: %s", err)
|
||||
return xerrors.Errorf("<!!> SLASH FILTER ERROR: %w", err)
|
||||
|
@ -9,7 +9,7 @@ import (
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/messagesigner"
|
||||
"github.com/filecoin-project/lotus/chain/messagepool"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/node/impl/full"
|
||||
)
|
||||
@ -104,4 +104,4 @@ func (a *MpoolNonceAPI) GetActor(ctx context.Context, addr address.Address, tsk
|
||||
return act, nil
|
||||
}
|
||||
|
||||
var _ messagesigner.MpoolNonceAPI = (*MpoolNonceAPI)(nil)
|
||||
var _ messagepool.MpoolNonceAPI = (*MpoolNonceAPI)(nil)
|
||||
|
node/modules/rpc.go (new file, 55 lines)
@ -0,0 +1,55 @@
|
||||
package modules
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
rpc "github.com/libp2p/go-libp2p-gorpc"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
consensus "github.com/filecoin-project/lotus/lib/consensus/raft"
|
||||
"github.com/filecoin-project/lotus/node/impl/full"
|
||||
)
|
||||
|
||||
type RPCHandler struct {
|
||||
mpoolAPI full.MpoolAPI
|
||||
cons *consensus.Consensus
|
||||
}
|
||||
|
||||
func NewRPCHandler(mpoolAPI full.MpoolAPI, cons *consensus.Consensus) *RPCHandler {
|
||||
return &RPCHandler{mpoolAPI, cons}
|
||||
}
|
||||
|
||||
func (h *RPCHandler) MpoolPushMessage(ctx context.Context, msgWhole *api.MpoolMessageWhole, ret *types.SignedMessage) error {
|
||||
signedMsg, err := h.mpoolAPI.MpoolPushMessage(ctx, msgWhole.Msg, msgWhole.Spec)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*ret = *signedMsg
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *RPCHandler) AddPeer(ctx context.Context, pid peer.ID, ret *struct{}) error {
|
||||
return h.cons.AddPeer(ctx, pid)
|
||||
}
|
||||
|
||||
// Add other consensus RPC calls here
|
||||
|
||||
func NewRPCClient(host host.Host) *rpc.Client {
|
||||
protocolID := protocol.ID("/rpc/lotus-chain/v0")
|
||||
return rpc.NewClient(host, protocolID)
|
||||
}
|
||||
|
||||
func NewRPCServer(ctx context.Context, host host.Host, rpcHandler *RPCHandler) error {
|
||||
|
||||
authF := func(pid peer.ID, svc, method string) bool {
|
||||
return rpcHandler.cons.IsTrustedPeer(ctx, pid)
|
||||
}
|
||||
|
||||
protocolID := protocol.ID("/rpc/lotus-chain/v0")
|
||||
rpcServer := rpc.NewServer(host, protocolID, rpc.WithAuthorizeFunc(authF))
|
||||
return rpcServer.RegisterName("Consensus", rpcHandler)
|
||||
}
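// Illustrative sketch (not part of this diff): outside of the fx wiring in the
// node builder, the client and server halves above could be joined like this.
// The variables h, mpoolAPI and cons are assumptions for illustration only.
//
//	rpcClient := modules.NewRPCClient(h)
//	handler := modules.NewRPCHandler(mpoolAPI, cons)
//	if err := modules.NewRPCServer(ctx, h, handler); err != nil {
//		return err
//	}
//	// A follower can then call the leader:
//	// rpcClient.CallContext(ctx, leaderID, "Consensus", "MpoolPushMessage", msgWhole, &signedMsg)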
|
@ -37,7 +37,6 @@ import (
|
||||
storageimpl "github.com/filecoin-project/go-fil-markets/storagemarket/impl"
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket/impl/storedask"
|
||||
smnet "github.com/filecoin-project/go-fil-markets/storagemarket/network"
|
||||
"github.com/filecoin-project/go-jsonrpc"
|
||||
"github.com/filecoin-project/go-jsonrpc/auth"
|
||||
"github.com/filecoin-project/go-paramfetch"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
@ -56,7 +55,6 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/gen/slashfilter"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/journal"
|
||||
"github.com/filecoin-project/lotus/lib/retry"
|
||||
"github.com/filecoin-project/lotus/markets"
|
||||
"github.com/filecoin-project/lotus/markets/dagstore"
|
||||
"github.com/filecoin-project/lotus/markets/idxprov"
|
||||
@ -89,12 +87,7 @@ func (a *UuidWrapper) MpoolPushMessage(ctx context.Context, msg *types.Message,
|
||||
spec = new(api.MessageSendSpec)
|
||||
}
|
||||
spec.MsgUuid = uuid.New()
|
||||
errorsToRetry := []error{&jsonrpc.RPCConnectionError{}}
|
||||
initialBackoff, err := time.ParseDuration("1s")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return retry.Retry(5, initialBackoff, errorsToRetry, func() (*types.SignedMessage, error) { return a.FullNode.MpoolPushMessage(ctx, msg, spec) })
|
||||
return a.FullNode.MpoolPushMessage(ctx, msg, spec)
|
||||
}
|
||||
|
||||
func MakeUuidWrapper(a v1api.RawFullNodeAPI) v1api.FullNode {