Merge pull request #11468 from filecoin-project/misc/drop-raft-experiment
misc: Drop the raft-cluster experiment
commit 0c7037b56f
@@ -879,12 +879,6 @@ workflows:
         - build
       suite: itest-pending_deal_allocation
       target: "./itests/pending_deal_allocation_test.go"
-  - test:
-      name: test-itest-raft_messagesigner
-      requires:
-        - build
-      suite: itest-raft_messagesigner
-      target: "./itests/raft_messagesigner_test.go"
   - test:
       name: test-itest-remove_verifreg_datacap
       requires:
@@ -879,9 +879,6 @@ type FullNode interface {
 	// LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that
 	// the path specified when calling CreateBackup is within the base path
 	CreateBackup(ctx context.Context, fpath string) error //perm:admin
-
-	RaftState(ctx context.Context) (*RaftStateData, error) //perm:read
-	RaftLeader(ctx context.Context) (peer.ID, error) //perm:read
 }
 
 // reverse interface to the client, called after EthSubscribe
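For context, the two methods dropped from the FullNode interface above were plain read-permission API calls. Below is a minimal sketch of how a Go client consumed them before this change; how the FullNode handle is obtained is left out, and the printed summary is purely illustrative.

```go
package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/lotus/api"
)

// printRaftInfo is a hypothetical helper: it only compiles against a lotus
// build from before this PR, while api.FullNode still exposed RaftLeader and
// RaftState.
func printRaftInfo(ctx context.Context, node api.FullNode) error {
	leader, err := node.RaftLeader(ctx) // peer.ID of the current cluster leader
	if err != nil {
		return err
	}
	state, err := node.RaftState(ctx) // *api.RaftStateData holding NonceMap and MsgUuids
	if err != nil {
		return err
	}
	fmt.Printf("leader=%s tracked-addrs=%d tracked-msgs=%d\n",
		leader, len(state.NonceMap), len(state.MsgUuids))
	return nil
}
```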
@@ -355,10 +355,6 @@ func init() {
 	addExample(map[string]bitfield.BitField{
 		"": bitfield.NewFromSet([]uint64{5, 6, 7, 10}),
 	})
-	addExample(&api.RaftStateData{
-		NonceMap: make(map[address.Address]uint64),
-		MsgUuids: make(map[uuid.UUID]*types.SignedMessage),
-	})
 
 	addExample(http.Header{
 		"Authorization": []string{"Bearer ey.."},
@@ -2919,36 +2919,6 @@ func (mr *MockFullNodeMockRecorder) PaychVoucherSubmit(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherSubmit", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherSubmit), arg0, arg1, arg2, arg3, arg4)
 }
-
-// RaftLeader mocks base method.
-func (m *MockFullNode) RaftLeader(arg0 context.Context) (peer.ID, error) {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "RaftLeader", arg0)
-	ret0, _ := ret[0].(peer.ID)
-	ret1, _ := ret[1].(error)
-	return ret0, ret1
-}
-
-// RaftLeader indicates an expected call of RaftLeader.
-func (mr *MockFullNodeMockRecorder) RaftLeader(arg0 interface{}) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RaftLeader", reflect.TypeOf((*MockFullNode)(nil).RaftLeader), arg0)
-}
-
-// RaftState mocks base method.
-func (m *MockFullNode) RaftState(arg0 context.Context) (*api.RaftStateData, error) {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "RaftState", arg0)
-	ret0, _ := ret[0].(*api.RaftStateData)
-	ret1, _ := ret[1].(error)
-	return ret0, ret1
-}
-
-// RaftState indicates an expected call of RaftState.
-func (mr *MockFullNodeMockRecorder) RaftState(arg0 interface{}) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RaftState", reflect.TypeOf((*MockFullNode)(nil).RaftState), arg0)
-}
 
 // Session mocks base method.
 func (m *MockFullNode) Session(arg0 context.Context) (uuid.UUID, error) {
 	m.ctrl.T.Helper()
@@ -453,10 +453,6 @@ type FullNodeMethods struct {
 
 	PaychVoucherSubmit func(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (cid.Cid, error) `perm:"sign"`
 
-	RaftLeader func(p0 context.Context) (peer.ID, error) `perm:"read"`
-
-	RaftState func(p0 context.Context) (*RaftStateData, error) `perm:"read"`
-
 	StateAccountKey func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) `perm:"read"`
 
 	StateActorCodeCIDs func(p0 context.Context, p1 abinetwork.Version) (map[string]cid.Cid, error) `perm:"read"`
@@ -3236,28 +3232,6 @@ func (s *FullNodeStub) PaychVoucherSubmit(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (cid.Cid, error) {
 	return *new(cid.Cid), ErrNotSupported
 }
-
-func (s *FullNodeStruct) RaftLeader(p0 context.Context) (peer.ID, error) {
-	if s.Internal.RaftLeader == nil {
-		return *new(peer.ID), ErrNotSupported
-	}
-	return s.Internal.RaftLeader(p0)
-}
-
-func (s *FullNodeStub) RaftLeader(p0 context.Context) (peer.ID, error) {
-	return *new(peer.ID), ErrNotSupported
-}
-
-func (s *FullNodeStruct) RaftState(p0 context.Context) (*RaftStateData, error) {
-	if s.Internal.RaftState == nil {
-		return nil, ErrNotSupported
-	}
-	return s.Internal.RaftState(p0)
-}
-
-func (s *FullNodeStub) RaftState(p0 context.Context) (*RaftStateData, error) {
-	return nil, ErrNotSupported
-}
 
 func (s *FullNodeStruct) StateAccountKey(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) {
 	if s.Internal.StateAccountKey == nil {
 		return *new(address.Address), ErrNotSupported
api/types.go (63 lines changed)
@@ -69,11 +69,6 @@ type MessageSendSpec struct {
 	MaximizeFeeCap bool
 }
-
-type MpoolMessageWhole struct {
-	Msg  *types.Message
-	Spec *MessageSendSpec
-}
 
 // GraphSyncDataTransfer provides diagnostics on a data transfer happening over graphsync
 type GraphSyncDataTransfer struct {
 	// GraphSync request id for this transfer
@@ -351,64 +346,6 @@ type ForkUpgradeParams struct {
 	UpgradeWatermelonHeight abi.ChainEpoch
 }
-
-type NonceMapType map[address.Address]uint64
-type MsgUuidMapType map[uuid.UUID]*types.SignedMessage
-
-type RaftStateData struct {
-	NonceMap NonceMapType
-	MsgUuids MsgUuidMapType
-}
-
-func (n *NonceMapType) MarshalJSON() ([]byte, error) {
-	marshalled := make(map[string]uint64)
-	for a, n := range *n {
-		marshalled[a.String()] = n
-	}
-	return json.Marshal(marshalled)
-}
-
-func (n *NonceMapType) UnmarshalJSON(b []byte) error {
-	unmarshalled := make(map[string]uint64)
-	err := json.Unmarshal(b, &unmarshalled)
-	if err != nil {
-		return err
-	}
-	*n = make(map[address.Address]uint64)
-	for saddr, nonce := range unmarshalled {
-		a, err := address.NewFromString(saddr)
-		if err != nil {
-			return err
-		}
-		(*n)[a] = nonce
-	}
-	return nil
-}
-
-func (m *MsgUuidMapType) MarshalJSON() ([]byte, error) {
-	marshalled := make(map[string]*types.SignedMessage)
-	for u, msg := range *m {
-		marshalled[u.String()] = msg
-	}
-	return json.Marshal(marshalled)
-}
-
-func (m *MsgUuidMapType) UnmarshalJSON(b []byte) error {
-	unmarshalled := make(map[string]*types.SignedMessage)
-	err := json.Unmarshal(b, &unmarshalled)
-	if err != nil {
-		return err
-	}
-	*m = make(map[uuid.UUID]*types.SignedMessage)
-	for suid, msg := range unmarshalled {
-		u, err := uuid.Parse(suid)
-		if err != nil {
-			return err
-		}
-		(*m)[u] = msg
-	}
-	return nil
-}
 
 // ChainExportConfig holds configuration for chain ranged exports.
 type ChainExportConfig struct {
 	WriteBufferSize int
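The custom (un)marshallers removed above existed because address.Address and uuid.UUID cannot be used directly as JSON object keys, so the maps were converted to and from string-keyed maps. A minimal round-trip sketch against the pre-removal types; the address value is only an example.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/api"
)

func main() {
	addr, err := address.NewFromString("f01000") // example ID address
	if err != nil {
		panic(err)
	}
	state := api.RaftStateData{
		NonceMap: api.NonceMapType{addr: 42},
		MsgUuids: api.MsgUuidMapType{},
	}
	// The pointer-receiver MarshalJSON on NonceMapType renders the map keys as
	// address strings, so the struct survives a JSON round trip.
	b, err := json.Marshal(&state)
	if err != nil {
		panic(err)
	}
	var decoded api.RaftStateData
	if err := json.Unmarshal(b, &decoded); err != nil {
		panic(err)
	}
	fmt.Println(string(b), decoded.NonceMap[addr])
}
```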
Binary file not shown.
Binary file not shown.
@@ -1,98 +0,0 @@
-package messagesigner
-
-import (
-	"context"
-
-	"github.com/google/uuid"
-	"github.com/ipfs/go-datastore"
-	"github.com/ipfs/go-datastore/namespace"
-	"github.com/libp2p/go-libp2p/core/peer"
-	"golang.org/x/xerrors"
-
-	"github.com/filecoin-project/lotus/api"
-	"github.com/filecoin-project/lotus/chain/messagepool"
-	"github.com/filecoin-project/lotus/chain/types"
-	consensus "github.com/filecoin-project/lotus/lib/consensus/raft"
-	"github.com/filecoin-project/lotus/node/modules/dtypes"
-)
-
-type MessageSignerConsensus struct {
-	MsgSigner
-	Consensus *consensus.Consensus
-}
-
-func NewMessageSignerConsensus(
-	wallet api.Wallet,
-	mpool messagepool.MpoolNonceAPI,
-	ds dtypes.MetadataDS,
-	consensus *consensus.Consensus) *MessageSignerConsensus {
-
-	ds = namespace.Wrap(ds, datastore.NewKey("/message-signer-consensus/"))
-	return &MessageSignerConsensus{
-		MsgSigner: &MessageSigner{
-			wallet: wallet,
-			mpool:  mpool,
-			ds:     ds,
-		},
-		Consensus: consensus,
-	}
-}
-
-func (ms *MessageSignerConsensus) IsLeader(ctx context.Context) bool {
-	return ms.Consensus.IsLeader(ctx)
-}
-
-func (ms *MessageSignerConsensus) RedirectToLeader(ctx context.Context, method string, arg interface{}, ret interface{}) (bool, error) {
-	ok, err := ms.Consensus.RedirectToLeader(method, arg, ret.(*types.SignedMessage))
-	if err != nil {
-		return ok, err
-	}
-	return ok, nil
-}
-
-func (ms *MessageSignerConsensus) SignMessage(
-	ctx context.Context,
-	msg *types.Message,
-	spec *api.MessageSendSpec,
-	cb func(*types.SignedMessage) error) (*types.SignedMessage, error) {
-
-	signedMsg, err := ms.MsgSigner.SignMessage(ctx, msg, spec, cb)
-	if err != nil {
-		return nil, err
-	}
-
-	op := &consensus.ConsensusOp{
-		Nonce:     signedMsg.Message.Nonce,
-		Uuid:      spec.MsgUuid,
-		Addr:      signedMsg.Message.From,
-		SignedMsg: signedMsg,
-	}
-	err = ms.Consensus.Commit(ctx, op)
-	if err != nil {
-		return nil, err
-	}
-
-	return signedMsg, nil
-}
-
-func (ms *MessageSignerConsensus) GetSignedMessage(ctx context.Context, uuid uuid.UUID) (*types.SignedMessage, error) {
-	cstate, err := ms.Consensus.State(ctx)
-	if err != nil {
-		return nil, err
-	}
-
-	//cstate := state.(Consensus.RaftState)
-	msg, ok := cstate.MsgUuids[uuid]
-	if !ok {
-		return nil, xerrors.Errorf("Msg with Uuid %s not available", uuid)
-	}
-	return msg, nil
-}
-
-func (ms *MessageSignerConsensus) GetRaftState(ctx context.Context) (*consensus.RaftState, error) {
-	return ms.Consensus.State(ctx)
-}
-
-func (ms *MessageSignerConsensus) Leader(ctx context.Context) (peer.ID, error) {
-	return ms.Consensus.Leader(ctx)
-}
@@ -211,9 +211,6 @@
   * [PaychVoucherCreate](#PaychVoucherCreate)
   * [PaychVoucherList](#PaychVoucherList)
   * [PaychVoucherSubmit](#PaychVoucherSubmit)
-* [Raft](#Raft)
-  * [RaftLeader](#RaftLeader)
-  * [RaftState](#RaftState)
 * [Start](#Start)
   * [StartTime](#StartTime)
   * [State](#State)
@@ -6182,33 +6179,6 @@ Response:
 }
 ```
-
-## Raft
-
-
-### RaftLeader
-
-
-Perms: read
-
-Inputs: `null`
-
-Response: `"12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"`
-
-### RaftState
-
-
-Perms: read
-
-Inputs: `null`
-
-Response:
-```json
-{
-  "NonceMap": {},
-  "MsgUuids": {}
-}
-```
 
 ## Start
 
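The RaftLeader and RaftState entries removed from the method docs above were ordinary read-permission JSON-RPC methods. Roughly how one of them was reachable before this change, as a sketch: the listen address, the /rpc/v1 path, and the token environment variable are assumptions about the node being queried, not part of this PR.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
	"strings"
)

func main() {
	body := `{"jsonrpc":"2.0","method":"Filecoin.RaftState","params":[],"id":1}`
	req, err := http.NewRequest(http.MethodPost, "http://127.0.0.1:1234/rpc/v1", strings.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	// Token handling is illustrative; a read-permission token was sufficient.
	req.Header.Set("Authorization", "Bearer "+os.Getenv("FULLNODE_API_TOKEN"))
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	b, _ := io.ReadAll(resp.Body)
	fmt.Println(string(b)) // e.g. {"jsonrpc":"2.0","result":{"NonceMap":{},"MsgUuids":{}},"id":1}
}
```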
@@ -260,68 +260,6 @@
 #HotstoreMaxSpaceSafetyBuffer = 50000000000
 
 
-[Cluster]
-  # EXPERIMENTAL. config to enabled node cluster with raft consensus
-  #
-  # type: bool
-  # env var: LOTUS_CLUSTER_CLUSTERMODEENABLED
-  #ClusterModeEnabled = false
-
-  # A folder to store Raft's data.
-  #
-  # type: string
-  # env var: LOTUS_CLUSTER_DATAFOLDER
-  #DataFolder = ""
-
-  # InitPeersetMultiAddr provides the list of initial cluster peers for new Raft
-  # peers (with no prior state). It is ignored when Raft was already
-  # initialized or when starting in staging mode.
-  #
-  # type: []string
-  # env var: LOTUS_CLUSTER_INITPEERSETMULTIADDR
-  #InitPeersetMultiAddr = []
-
-  # LeaderTimeout specifies how long to wait for a leader before
-  # failing an operation.
-  #
-  # type: Duration
-  # env var: LOTUS_CLUSTER_WAITFORLEADERTIMEOUT
-  #WaitForLeaderTimeout = "15s"
-
-  # NetworkTimeout specifies how long before a Raft network
-  # operation is timed out
-  #
-  # type: Duration
-  # env var: LOTUS_CLUSTER_NETWORKTIMEOUT
-  #NetworkTimeout = "1m40s"
-
-  # CommitRetries specifies how many times we retry a failed commit until
-  # we give up.
-  #
-  # type: int
-  # env var: LOTUS_CLUSTER_COMMITRETRIES
-  #CommitRetries = 1
-
-  # How long to wait between retries
-  #
-  # type: Duration
-  # env var: LOTUS_CLUSTER_COMMITRETRYDELAY
-  #CommitRetryDelay = "200ms"
-
-  # BackupsRotate specifies the maximum number of Raft's DataFolder
-  # copies that we keep as backups (renaming) after cleanup.
-  #
-  # type: int
-  # env var: LOTUS_CLUSTER_BACKUPSROTATE
-  #BackupsRotate = 6
-
-  # Tracing enables propagation of contexts across binary boundaries.
-  #
-  # type: bool
-  # env var: LOTUS_CLUSTER_TRACING
-  #Tracing = false
-
 [Fevm]
 # EnableEthRPC enables eth_ rpc, and enables storing a mapping of eth transaction hashes to filecoin message Cids.
 # This will also enable the RealTimeFilterAPI and HistoricFilterAPI by default, but they can be disabled by config options above.
go.mod (13 lines changed)
@@ -71,12 +71,9 @@ require (
 	github.com/gregdhill/go-openrpc v0.0.0-20220114144539-ae6f44720487
 	github.com/hako/durafmt v0.0.0-20200710122514-c0fb7b4da026
 	github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e
-	github.com/hashicorp/go-hclog v1.3.0
 	github.com/hashicorp/go-multierror v1.1.1
 	github.com/hashicorp/golang-lru/arc/v2 v2.0.5
 	github.com/hashicorp/golang-lru/v2 v2.0.5
-	github.com/hashicorp/raft v1.3.10
-	github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea
 	github.com/icza/backscanner v0.0.0-20210726202459-ac2ffc679f94
 	github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab
 	github.com/ipfs/bbloom v0.0.4
@@ -111,11 +108,8 @@ require (
 	github.com/koalacxr/quantile v0.0.1
 	github.com/libp2p/go-buffer-pool v0.1.0
 	github.com/libp2p/go-libp2p v0.31.1
-	github.com/libp2p/go-libp2p-consensus v0.0.1
-	github.com/libp2p/go-libp2p-gorpc v0.5.0
 	github.com/libp2p/go-libp2p-kad-dht v0.24.0
 	github.com/libp2p/go-libp2p-pubsub v0.9.3
-	github.com/libp2p/go-libp2p-raft v0.4.0
 	github.com/libp2p/go-libp2p-record v0.2.0
 	github.com/libp2p/go-libp2p-routing-helpers v0.7.0
 	github.com/libp2p/go-maddr-filter v0.1.0
@@ -161,7 +155,6 @@ require (
 	go.uber.org/multierr v1.11.0
 	go.uber.org/zap v1.25.0
 	golang.org/x/crypto v0.12.0
-	golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63
 	golang.org/x/net v0.14.0
 	golang.org/x/sync v0.3.0
 	golang.org/x/sys v0.11.0
@@ -180,11 +173,9 @@ require (
 	github.com/StackExchange/wmi v1.2.1 // indirect
 	github.com/akavel/rsrc v0.8.0 // indirect
 	github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect
-	github.com/armon/go-metrics v0.3.9 // indirect
 	github.com/benbjohnson/clock v1.3.5 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/bep/debounce v1.2.1 // indirect
-	github.com/boltdb/bolt v1.3.1 // indirect
 	github.com/cespare/xxhash v1.1.0 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/cilium/ebpf v0.9.1 // indirect
@@ -231,8 +222,6 @@ require (
 	github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b // indirect
 	github.com/hannahhoward/cbor-gen-for v0.0.0-20230214144701-5d17c9d5243c // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
-	github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
-	github.com/hashicorp/go-msgpack v0.5.5 // indirect
 	github.com/hashicorp/golang-lru v0.6.0 // indirect
 	github.com/huin/goupnp v1.2.0 // indirect
 	github.com/iancoleman/orderedmap v0.1.0 // indirect
@@ -315,7 +304,6 @@ require (
 	github.com/spaolacci/murmur3 v1.1.0 // indirect
 	github.com/tidwall/gjson v1.14.4 // indirect
 	github.com/twmb/murmur3 v1.1.6 // indirect
-	github.com/ugorji/go/codec v1.2.11 // indirect
 	github.com/valyala/bytebufferpool v1.0.0 // indirect
 	github.com/valyala/fasttemplate v1.0.1 // indirect
 	github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 // indirect
@@ -331,6 +319,7 @@ require (
 	go.opentelemetry.io/otel/trace v1.16.0 // indirect
 	go.uber.org/dig v1.17.0 // indirect
 	go4.org v0.0.0-20230225012048-214862532bf5 // indirect
+	golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 // indirect
 	golang.org/x/mod v0.12.0 // indirect
 	golang.org/x/text v0.12.0 // indirect
 	gonum.org/v1/gonum v0.13.0 // indirect
go.sum (40 lines changed)
@@ -48,8 +48,6 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
 github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
 github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
-github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
 github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
 github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ=
 github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
@@ -99,9 +97,6 @@ github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb
 github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
 github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
-github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg=
-github.com/armon/go-metrics v0.3.9 h1:O2sNqxBdvq8Eq5xmzljcYzAORli6RWCvEym4cJf9m18=
-github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
 github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
 github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
 github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
@@ -118,8 +113,6 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
 github.com/bep/debounce v1.2.1 h1:v67fRdBA9UQu2NhLFXrSg0Brw7CexQekrBwDMM8bzeY=
 github.com/bep/debounce v1.2.1/go.mod h1:H8yggRPQKLUhUoqrJC1bO2xNya7vanpDl7xR3ISbCJ0=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
-github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
 github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
 github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8=
 github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI=
@@ -164,8 +157,6 @@ github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38
 github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
 github.com/cilium/ebpf v0.9.1 h1:64sn2K3UKw8NbP/blsixRpF3nXuyhz/VjRlRzvlBRu4=
 github.com/cilium/ebpf v0.9.1/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY=
-github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
-github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
 github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
@@ -592,26 +583,16 @@ github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyN
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
 github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
 github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
-github.com/hashicorp/go-hclog v1.3.0 h1:G0ACM8Z2WilWgPv3Vdzwm3V0BQu/kSmrkVtpe1fy9do=
-github.com/hashicorp/go-hclog v1.3.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
 github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
-github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
 github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
-github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=
-github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
 github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
 github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
 github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
-github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
 github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
 github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
 github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
 github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
 github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
 github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
 github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
@@ -628,10 +609,6 @@ github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T
 github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
 github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
 github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
-github.com/hashicorp/raft v1.3.10 h1:LR5QZX1VQd0DFWZfeCwWawyeKfpS/Tm1yjnJIY5X4Tw=
-github.com/hashicorp/raft v1.3.10/go.mod h1:J8naEwc6XaaCfts7+28whSeRvCqTd6e20BlCU3LtEO4=
-github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea h1:xykPFhrBAS2J0VBzVa5e80b5ZtYuNQtgXjN40qBZlD4=
-github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk=
 github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
@@ -902,7 +879,6 @@ github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlT
 github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
 github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
@@ -1001,8 +977,6 @@ github.com/libp2p/go-libp2p-circuit v0.1.0/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFk
 github.com/libp2p/go-libp2p-circuit v0.1.4/go.mod h1:CY67BrEjKNDhdTk8UgBX1Y/H5c3xkAcs3gnksxY7osU=
 github.com/libp2p/go-libp2p-circuit v0.2.1/go.mod h1:BXPwYDN5A8z4OEY9sOfr2DUQMLQvKt/6oku45YUmjIo=
 github.com/libp2p/go-libp2p-circuit v0.4.0/go.mod h1:t/ktoFIUzM6uLQ+o1G6NuBl2ANhBKN9Bc8jRIk31MoA=
-github.com/libp2p/go-libp2p-consensus v0.0.1 h1:jcVbHRZLwTXU9iT/mPi+Lx4/OrIzq3bU1TbZNhYFCV8=
-github.com/libp2p/go-libp2p-consensus v0.0.1/go.mod h1:+9Wrfhc5QOqWB0gXI0m6ARlkHfdJpcFXmRU0WoHz4Mo=
 github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco=
 github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco=
 github.com/libp2p/go-libp2p-core v0.0.3/go.mod h1:j+YQMNz9WNSkNezXOsahp9kwZBKBvxLpKD316QWSJXE=
@@ -1031,8 +1005,6 @@ github.com/libp2p/go-libp2p-discovery v0.1.0/go.mod h1:4F/x+aldVHjHDHuX85x1zWoFT
 github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfxg97AEdo4GYBt6BadWg=
 github.com/libp2p/go-libp2p-discovery v0.3.0/go.mod h1:o03drFnz9BVAZdzC/QUQ+NeQOu38Fu7LJGEOK2gQltw=
 github.com/libp2p/go-libp2p-discovery v0.5.0/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug=
-github.com/libp2p/go-libp2p-gorpc v0.5.0 h1:mmxxAPdP3JzpYH4KcDf4csXnqtd1HazLPfdyB2MBRb8=
-github.com/libp2p/go-libp2p-gorpc v0.5.0/go.mod h1:GpHuvY3m0YFkd0+inOGo4HDtc4up9OS/mBPXvEpNuRY=
 github.com/libp2p/go-libp2p-gostream v0.6.0 h1:QfAiWeQRce6pqnYfmIVWJFXNdDyfiR/qkCnjyaZUPYU=
 github.com/libp2p/go-libp2p-gostream v0.6.0/go.mod h1:Nywu0gYZwfj7Jc91PQvbGU8dIpqbQQkjWgDuOrFaRdA=
 github.com/libp2p/go-libp2p-kad-dht v0.24.0 h1:nZnFDQEFU4N8GzclnR+IGxIgR7k4PPCDk/GK9A28onk=
@@ -1063,8 +1035,6 @@ github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYc
 github.com/libp2p/go-libp2p-pubsub v0.9.3 h1:ihcz9oIBMaCK9kcx+yHWm3mLAFBMAUsM4ux42aikDxo=
 github.com/libp2p/go-libp2p-pubsub v0.9.3/go.mod h1:RYA7aM9jIic5VV47WXu4GkcRxRhrdElWf8xtyli+Dzc=
 github.com/libp2p/go-libp2p-quic-transport v0.10.0/go.mod h1:RfJbZ8IqXIhxBRm5hqUEJqjiiY8xmEuq3HUDS993MkA=
-github.com/libp2p/go-libp2p-raft v0.4.0 h1:2atEs7/FWH35bRiLh8gTejGh5NA9u4eG7BXjpf/B+Z4=
-github.com/libp2p/go-libp2p-raft v0.4.0/go.mod h1:qJCYtFBTbip2wngLxFeAb9o52XmAPi2vSIQ4hV7IpSA=
 github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q=
 github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0=
 github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk=
@@ -1196,7 +1166,6 @@ github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcncea
 github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
 github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
 github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
 github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
 github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
 github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
@@ -1389,8 +1358,6 @@ github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnh
 github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
 github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
 github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
-github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
 github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
 github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
 github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
@@ -1421,7 +1388,6 @@ github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4
 github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
 github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
-github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
 github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU=
 github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
@@ -1443,7 +1409,6 @@ github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7q
 github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
-github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
 github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
@@ -1608,14 +1573,11 @@ github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhso
 github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
 github.com/triplewz/poseidon v0.0.0-20230828015038-79d8165c88ed h1:C8H2ql+vCBhEi7d3vMBBbdCAKv9s/thfPyLEuSvFpMU=
 github.com/triplewz/poseidon v0.0.0-20230828015038-79d8165c88ed/go.mod h1:QYG1d0B4YZD7TgF6qZndTTu4rxUGFCCZAQRDanDj+9c=
-github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
 github.com/twmb/murmur3 v1.1.6 h1:mqrRot1BRxm+Yct+vavLMou2/iJt0tNVTTC0QoIjaZg=
 github.com/twmb/murmur3 v1.1.6/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ=
 github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o=
 github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg=
 github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
-github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
-github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
 github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
 github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
 github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
@@ -2038,11 +2000,9 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -713,8 +713,6 @@ func (n *Ensemble) Start() *Ensemble {
 			copy.FullNode = modules.MakeUuidWrapper(copy.FullNode)
 			m.FullNode = &copy
-
-			//m.FullNode.FullNode = modules.MakeUuidWrapper(fn.FullNode)
 
 			opts := []node.Option{
 				node.StorageMiner(&m.StorageMiner, cfg.Subsystems),
 				node.Base(),
@@ -722,8 +720,6 @@ func (n *Ensemble) Start() *Ensemble {
 				node.Test(),
 
 				node.If(m.options.disableLibp2p, node.MockHost(n.mn)),
-				//node.Override(new(v1api.RawFullNodeAPI), func() api.FullNode { return modules.MakeUuidWrapper(m.FullNode) }),
-				//node.Override(new(v1api.RawFullNodeAPI), modules.MakeUuidWrapper),
 				node.Override(new(v1api.RawFullNodeAPI), m.FullNode),
 				node.Override(new(*lotusminer.Miner), lotusminer.NewTestMiner(mineBlock, m.ActorAddr)),
 
@ -1,577 +0,0 @@
package itests

import (
    "context"
    "crypto/rand"
    "fmt"
    "reflect"
    "testing"
    "time"

    "github.com/google/uuid"
    gorpc "github.com/libp2p/go-libp2p-gorpc"
    libp2pcrypto "github.com/libp2p/go-libp2p/core/crypto"
    "github.com/libp2p/go-libp2p/core/peer"
    "github.com/stretchr/testify/require"

    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/go-state-types/big"
    "github.com/filecoin-project/go-state-types/exitcode"

    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/chain/messagesigner"
    "github.com/filecoin-project/lotus/chain/types"
    "github.com/filecoin-project/lotus/itests/kit"
    consensus "github.com/filecoin-project/lotus/lib/consensus/raft"
    "github.com/filecoin-project/lotus/node"
    "github.com/filecoin-project/lotus/node/config"
    "github.com/filecoin-project/lotus/node/impl"
    "github.com/filecoin-project/lotus/node/modules"
)

func generatePrivKey() (*kit.Libp2p, error) {
    privkey, _, err := libp2pcrypto.GenerateEd25519Key(rand.Reader)
    if err != nil {
        return nil, err
    }

    peerId, err := peer.IDFromPrivateKey(privkey)
    if err != nil {
        return nil, err
    }

    return &kit.Libp2p{PeerID: peerId, PrivKey: privkey}, nil
}

func getRaftState(ctx context.Context, t *testing.T, node *kit.TestFullNode) *api.RaftStateData {
    raftState, err := node.RaftState(ctx)
    require.NoError(t, err)
    return raftState
}

func setup(ctx context.Context, t *testing.T, node0 *kit.TestFullNode, node1 *kit.TestFullNode, node2 *kit.TestFullNode, miner *kit.TestMiner) *kit.Ensemble {

    blockTime := 1 * time.Second

    pkey0, _ := generatePrivKey()
    pkey1, _ := generatePrivKey()
    pkey2, _ := generatePrivKey()

    pkeys := []*kit.Libp2p{pkey0, pkey1, pkey2}
    initPeerSet := []string{}
    for _, pkey := range pkeys {
        initPeerSet = append(initPeerSet, "/p2p/"+pkey.PeerID.String())
    }

    //initPeerSet := []peer.ID{pkey0.PeerID, pkey1.PeerID, pkey2.PeerID}

    raftOps := kit.ConstructorOpts(
        node.Override(new(*gorpc.Client), modules.NewRPCClient),
        node.Override(new(*consensus.ClusterRaftConfig), func() *consensus.ClusterRaftConfig {
            cfg := consensus.DefaultClusterRaftConfig()
            cfg.InitPeerset = initPeerSet
            return cfg
        }),
        node.Override(new(*consensus.Consensus), consensus.NewConsensusWithRPCClient(false)),
        node.Override(new(*messagesigner.MessageSignerConsensus), messagesigner.NewMessageSignerConsensus),
        node.Override(new(messagesigner.MsgSigner), func(ms *messagesigner.MessageSignerConsensus) *messagesigner.MessageSignerConsensus { return ms }),
        node.Override(new(*modules.RPCHandler), modules.NewRPCHandler),
        node.Override(node.GoRPCServer, modules.NewRPCServer),
    )
    //raftOps := kit.ConstructorOpts()

    ens := kit.NewEnsemble(t).FullNode(node0, raftOps, kit.ThroughRPC()).FullNode(node1, raftOps, kit.ThroughRPC()).FullNode(node2, raftOps, kit.ThroughRPC())
    node0.AssignPrivKey(pkey0)
    node1.AssignPrivKey(pkey1)
    node2.AssignPrivKey(pkey2)

    nodes := []*kit.TestFullNode{node0, node1, node2}
    wrappedFullNode := kit.MergeFullNodes(nodes)

    ens.MinerEnroll(miner, wrappedFullNode, kit.WithAllSubsystems(), kit.ThroughRPC())
    ens.Start()

    // Import miner wallet to all nodes
    addr0, err := node0.WalletImport(ctx, &miner.OwnerKey.KeyInfo)
    require.NoError(t, err)
    addr1, err := node1.WalletImport(ctx, &miner.OwnerKey.KeyInfo)
    require.NoError(t, err)
    addr2, err := node2.WalletImport(ctx, &miner.OwnerKey.KeyInfo)
    require.NoError(t, err)

    fmt.Println(addr0, addr1, addr2)

    ens.InterconnectAll()

    ens.AddInactiveMiner(miner)
    ens.Start()

    ens.InterconnectAll().BeginMining(blockTime)

    return ens
}

func TestRaftState(t *testing.T) {

    kit.QuietMiningLogs()
    ctx := context.Background()

    var (
        node0 kit.TestFullNode
        node1 kit.TestFullNode
        node2 kit.TestFullNode
        miner kit.TestMiner
    )

    setup(ctx, t, &node0, &node1, &node2, &miner)

    fmt.Println(node0.WalletList(context.Background()))
    fmt.Println(node1.WalletList(context.Background()))
    fmt.Println(node2.WalletList(context.Background()))

    bal, err := node0.WalletBalance(ctx, node0.DefaultKey.Address)
    require.NoError(t, err)

    msgHalfBal := &types.Message{
        From:  miner.OwnerKey.Address,
        To:    node0.DefaultKey.Address,
        Value: big.Div(bal, big.NewInt(2)),
    }

    mu := uuid.New()
    smHalfBal, err := node0.MpoolPushMessage(ctx, msgHalfBal, &api.MessageSendSpec{
        MsgUuid: mu,
    })
    require.NoError(t, err)
    mLookup, err := node0.StateWaitMsg(ctx, smHalfBal.Cid(), 3, api.LookbackNoLimit, true)
    require.NoError(t, err)
    require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode)

    rstate0 := getRaftState(ctx, t, &node0)
    rstate1 := getRaftState(ctx, t, &node1)
    rstate2 := getRaftState(ctx, t, &node2)

    require.EqualValues(t, rstate0, rstate1)
    require.EqualValues(t, rstate0, rstate2)
}

func TestRaftStateLeaderDisconnects(t *testing.T) {

    kit.QuietMiningLogs()
    ctx := context.Background()

    var (
        node0 kit.TestFullNode
        node1 kit.TestFullNode
        node2 kit.TestFullNode
        miner kit.TestMiner
    )

    nodes := []*kit.TestFullNode{&node0, &node1, &node2}

    setup(ctx, t, &node0, &node1, &node2, &miner)

    peerToNode := make(map[peer.ID]*kit.TestFullNode)
    for _, n := range nodes {
        peerToNode[n.Pkey.PeerID] = n
    }

    bal, err := node0.WalletBalance(ctx, node0.DefaultKey.Address)
    require.NoError(t, err)

    msgHalfBal := &types.Message{
        From:  miner.OwnerKey.Address,
        To:    node0.DefaultKey.Address,
        Value: big.Div(bal, big.NewInt(2)),
    }
    mu := uuid.New()
    smHalfBal, err := node0.MpoolPushMessage(ctx, msgHalfBal, &api.MessageSendSpec{
        MsgUuid: mu,
    })
    require.NoError(t, err)
    mLookup, err := node0.StateWaitMsg(ctx, smHalfBal.Cid(), 3, api.LookbackNoLimit, true)
    require.NoError(t, err)
    require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode)

    rstate0 := getRaftState(ctx, t, &node0)
    rstate1 := getRaftState(ctx, t, &node1)
    rstate2 := getRaftState(ctx, t, &node2)

    require.True(t, reflect.DeepEqual(rstate0, rstate1))
    require.True(t, reflect.DeepEqual(rstate0, rstate2))

    leader, err := node1.RaftLeader(ctx)
    require.NoError(t, err)
    leaderNode := peerToNode[leader]

    err = leaderNode.Stop(ctx)
    require.NoError(t, err)
    oldLeaderNode := leaderNode

    time.Sleep(5 * time.Second)

    newLeader := leader
    for _, n := range nodes {
        if n != leaderNode {
            newLeader, err = n.RaftLeader(ctx)
            require.NoError(t, err)
            require.NotEqual(t, newLeader, leader)
        }
    }

    require.NotEqual(t, newLeader, leader)
    leaderNode = peerToNode[newLeader]

    msg2 := &types.Message{
        From:  miner.OwnerKey.Address,
        To:    leaderNode.DefaultKey.Address,
        Value: big.NewInt(100000),
    }
    mu2 := uuid.New()
    signedMsg2, err := leaderNode.MpoolPushMessage(ctx, msg2, &api.MessageSendSpec{
        MsgUuid: mu2,
    })
    require.NoError(t, err)
    mLookup, err = leaderNode.StateWaitMsg(ctx, signedMsg2.Cid(), 3, api.LookbackNoLimit, true)
    require.NoError(t, err)
    require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode)

    rstate := getRaftState(ctx, t, leaderNode)

    for _, n := range nodes {
        if n != oldLeaderNode {
            rs := getRaftState(ctx, t, n)
            require.True(t, reflect.DeepEqual(rs, rstate))
        }
    }
}

func TestRaftStateMiner(t *testing.T) {

    kit.QuietMiningLogs()
    ctx := context.Background()

    var (
        node0 kit.TestFullNode
        node1 kit.TestFullNode
        node2 kit.TestFullNode
        miner kit.TestMiner
    )

    setup(ctx, t, &node0, &node1, &node2, &miner)

    fmt.Println(node0.WalletList(context.Background()))
    fmt.Println(node1.WalletList(context.Background()))
    fmt.Println(node2.WalletList(context.Background()))

    bal, err := node0.WalletBalance(ctx, node0.DefaultKey.Address)
    require.NoError(t, err)

    msgHalfBal := &types.Message{
        From:  miner.OwnerKey.Address,
        To:    node0.DefaultKey.Address,
        Value: big.Div(bal, big.NewInt(2)),
    }
    mu := uuid.New()
    smHalfBal, err := miner.FullNode.MpoolPushMessage(ctx, msgHalfBal, &api.MessageSendSpec{
        MsgUuid: mu,
    })
    require.NoError(t, err)
    mLookup, err := node0.StateWaitMsg(ctx, smHalfBal.Cid(), 3, api.LookbackNoLimit, true)
    require.NoError(t, err)
    require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode)

    rstate0 := getRaftState(ctx, t, &node0)
    rstate1 := getRaftState(ctx, t, &node1)
    rstate2 := getRaftState(ctx, t, &node2)

    require.EqualValues(t, rstate0, rstate1)
    require.EqualValues(t, rstate0, rstate2)
}

func TestRaftStateLeaderDisconnectsMiner(t *testing.T) {

    kit.QuietMiningLogs()
    ctx := context.Background()

    var (
        node0 kit.TestFullNode
        node1 kit.TestFullNode
        node2 kit.TestFullNode
        miner kit.TestMiner
    )

    nodes := []*kit.TestFullNode{&node0, &node1, &node2}

    setup(ctx, t, &node0, &node1, &node2, &miner)

    peerToNode := make(map[peer.ID]*kit.TestFullNode)
    for _, n := range nodes {
        peerToNode[n.Pkey.PeerID] = n
    }

    leader, err := node0.RaftLeader(ctx)
    require.NoError(t, err)
    leaderNode := peerToNode[leader]

    // Take leader node down
    err = leaderNode.Stop(ctx)
    require.NoError(t, err)
    oldLeaderNode := leaderNode

    time.Sleep(5 * time.Second)

    newLeader := leader
    for _, n := range nodes {
        if n != leaderNode {
            newLeader, err = n.RaftLeader(ctx)
            require.NoError(t, err)
            require.NotEqual(t, newLeader, leader)
        }
    }

    require.NotEqual(t, newLeader, leader)
    leaderNode = peerToNode[newLeader]

    msg2 := &types.Message{
        From:  miner.OwnerKey.Address,
        To:    node0.DefaultKey.Address,
        Value: big.NewInt(100000),
    }
    mu2 := uuid.New()

    signedMsg2, err := miner.FullNode.MpoolPushMessage(ctx, msg2, &api.MessageSendSpec{
        MaxFee:  abi.TokenAmount(config.DefaultDefaultMaxFee),
        MsgUuid: mu2,
    })
    require.NoError(t, err)

    mLookup, err := leaderNode.StateWaitMsg(ctx, signedMsg2.Cid(), 3, api.LookbackNoLimit, true)
    require.NoError(t, err)
    require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode)

    rstate := getRaftState(ctx, t, leaderNode)

    for _, n := range nodes {
        if n != oldLeaderNode {
            rs := getRaftState(ctx, t, n)
            require.True(t, reflect.DeepEqual(rs, rstate))
        }
    }
}

// Miner sends message on leader
// Leader disconnects
// Call StateWaitMsg on new leader
func TestLeaderDisconnectsCheckMsgStateOnNewLeader(t *testing.T) {

    kit.QuietMiningLogs()
    ctx := context.Background()

    var (
        node0 kit.TestFullNode
        node1 kit.TestFullNode
        node2 kit.TestFullNode
        miner kit.TestMiner
    )

    nodes := []*kit.TestFullNode{&node0, &node1, &node2}

    setup(ctx, t, &node0, &node1, &node2, &miner)

    peerToNode := make(map[peer.ID]*kit.TestFullNode)
    for _, n := range nodes {
        peerToNode[n.Pkey.PeerID] = n
    }

    bal, err := node0.WalletBalance(ctx, node0.DefaultKey.Address)
    require.NoError(t, err)

    msgHalfBal := &types.Message{
        From:  miner.OwnerKey.Address,
        To:    node0.DefaultKey.Address,
        Value: big.Div(bal, big.NewInt(2)),
    }
    mu := uuid.New()
    smHalfBal, err := miner.FullNode.MpoolPushMessage(ctx, msgHalfBal, &api.MessageSendSpec{
        MsgUuid: mu,
    })
    require.NoError(t, err)

    leader, err := node0.RaftLeader(ctx)
    require.NoError(t, err)
    leaderNode := peerToNode[leader]

    // Take leader node down
    err = leaderNode.Stop(ctx)
    require.NoError(t, err)
    oldLeaderNode := leaderNode

    time.Sleep(5 * time.Second)

    // Check if all active nodes update their leader
    newLeader := leader
    for _, n := range nodes {
        if n != leaderNode {
            newLeader, err = n.RaftLeader(ctx)
            require.NoError(t, err)
            require.NotEqual(t, newLeader, leader)
        }
    }

    require.NotEqual(t, newLeader, leader)
    leaderNode = peerToNode[newLeader]

    mLookup, err := leaderNode.StateWaitMsg(ctx, smHalfBal.Cid(), 3, api.LookbackNoLimit, true)
    require.NoError(t, err)
    require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode)

    rstate := getRaftState(ctx, t, leaderNode)

    // Check if Raft state is consistent on all active nodes
    for _, n := range nodes {
        if n != oldLeaderNode {
            rs := getRaftState(ctx, t, n)
            require.True(t, reflect.DeepEqual(rs, rstate))
        }
    }
}

func TestChainStoreSync(t *testing.T) {

    kit.QuietMiningLogs()
    ctx := context.Background()

    var (
        node0 kit.TestFullNode
        node1 kit.TestFullNode
        node2 kit.TestFullNode
        miner kit.TestMiner
    )

    nodes := []*kit.TestFullNode{&node0, &node1, &node2}

    setup(ctx, t, &node0, &node1, &node2, &miner)

    peerToNode := make(map[peer.ID]*kit.TestFullNode)
    for _, n := range nodes {
        peerToNode[n.Pkey.PeerID] = n
    }

    bal, err := node0.WalletBalance(ctx, node0.DefaultKey.Address)
    require.NoError(t, err)

    leader, err := node0.RaftLeader(ctx)
    require.NoError(t, err)
    leaderNode := peerToNode[leader]

    msgHalfBal := &types.Message{
        From:  miner.OwnerKey.Address,
        To:    node0.DefaultKey.Address,
        Value: big.Div(bal, big.NewInt(2)),
    }
    mu := uuid.New()
    smHalfBal, err := miner.FullNode.MpoolPushMessage(ctx, msgHalfBal, &api.MessageSendSpec{
        MsgUuid: mu,
    })
    require.NoError(t, err)

    for _, n := range nodes {
        fmt.Println(n != leaderNode)
        if n != leaderNode {
            mLookup, err := n.StateWaitMsg(ctx, smHalfBal.Cid(), 3, api.LookbackNoLimit, true)
            require.NoError(t, err)
            require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode)
            //break
        }
    }
}

func TestGoRPCAuth(t *testing.T) {
    // TODO Fix Raft, then enable this test. https://github.com/filecoin-project/lotus/issues/9888
    t.SkipNow()

    blockTime := 1 * time.Second

    kit.QuietMiningLogs()
    ctx := context.Background()

    var (
        node0 kit.TestFullNode
        node1 kit.TestFullNode
        node2 kit.TestFullNode
        node3 kit.TestFullNode
        miner kit.TestMiner
    )

    pkey0, _ := generatePrivKey()
    pkey1, _ := generatePrivKey()
    pkey2, _ := generatePrivKey()

    pkeys := []*kit.Libp2p{pkey0, pkey1, pkey2}
    initPeerSet := []string{}
    for _, pkey := range pkeys {
        initPeerSet = append(initPeerSet, "/p2p/"+pkey.PeerID.String())
    }

    raftOps := kit.ConstructorOpts(
        node.Override(new(*gorpc.Client), modules.NewRPCClient),
        node.Override(new(*consensus.ClusterRaftConfig), func() *consensus.ClusterRaftConfig {
            cfg := consensus.DefaultClusterRaftConfig()
            cfg.InitPeerset = initPeerSet
            return cfg
        }),
        node.Override(new(*consensus.Consensus), consensus.NewConsensusWithRPCClient(false)),
        node.Override(new(*messagesigner.MessageSignerConsensus), messagesigner.NewMessageSignerConsensus),
        node.Override(new(messagesigner.MsgSigner), func(ms *messagesigner.MessageSignerConsensus) *messagesigner.MessageSignerConsensus { return ms }),
        node.Override(new(*modules.RPCHandler), modules.NewRPCHandler),
        node.Override(node.GoRPCServer, modules.NewRPCServer),
    )
    //raftOps := kit.ConstructorOpts()

    ens := kit.NewEnsemble(t).FullNode(&node0, raftOps, kit.ThroughRPC()).FullNode(&node1, raftOps, kit.ThroughRPC()).FullNode(&node2, raftOps, kit.ThroughRPC()).FullNode(&node3, raftOps)
    node0.AssignPrivKey(pkey0)
    node1.AssignPrivKey(pkey1)
    node2.AssignPrivKey(pkey2)

    nodes := []*kit.TestFullNode{&node0, &node1, &node2}
    wrappedFullNode := kit.MergeFullNodes(nodes)

    ens.MinerEnroll(&miner, wrappedFullNode, kit.WithAllSubsystems(), kit.ThroughRPC())
    ens.Start()

    // Import miner wallet to all nodes
    addr0, err := node0.WalletImport(ctx, &miner.OwnerKey.KeyInfo)
    require.NoError(t, err)
    addr1, err := node1.WalletImport(ctx, &miner.OwnerKey.KeyInfo)
    require.NoError(t, err)
    addr2, err := node2.WalletImport(ctx, &miner.OwnerKey.KeyInfo)
    require.NoError(t, err)

    fmt.Println(addr0, addr1, addr2)

    ens.InterconnectAll()

    ens.AddInactiveMiner(&miner)
    ens.Start()

    ens.InterconnectAll().BeginMining(blockTime)

    leader, err := node0.RaftLeader(ctx)
    require.NoError(t, err)

    client := node3.FullNode.(*impl.FullNodeAPI).RaftAPI.MessageSigner.Consensus.RpcClient
    method := "MpoolPushMessage"

    msg := &types.Message{
        From:  miner.OwnerKey.Address,
        To:    node0.DefaultKey.Address,
        Value: big.NewInt(100000),
    }
    msgWhole := &api.MpoolMessageWhole{Msg: msg}
    var ret types.SignedMessage

    err = client.CallContext(ctx, leader, "Consensus", method, msgWhole, &ret)
    require.True(t, gorpc.IsAuthorizationError(err))

}
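All of the tests above push messages with an explicit MsgUuid in the MessageSendSpec; that UUID is the key under which the replicated raft state tracks the resulting signed message (see the MsgUuids map in the consensus state later in this diff). A minimal, hypothetical helper sketching that call shape, reusing only identifiers already imported by the test file (the helper name pushWithUuid is not part of the original code):

// pushWithUuid mirrors the call shape used by the raft tests: the caller picks
// the UUID up front so a retried push of the same message can be recognised by
// the cluster rather than signed twice.
func pushWithUuid(ctx context.Context, t *testing.T, node *kit.TestFullNode, msg *types.Message) *types.SignedMessage {
    mu := uuid.New()
    sm, err := node.MpoolPushMessage(ctx, msg, &api.MessageSendSpec{MsgUuid: mu})
    require.NoError(t, err)
    return sm
}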
@ -1,135 +0,0 @@
package consensus

import (
    "io"
    "path/filepath"
    "time"

    hraft "github.com/hashicorp/raft"
    "golang.org/x/xerrors"

    "github.com/filecoin-project/lotus/node/config"
    "github.com/filecoin-project/lotus/node/repo"
)

// Configuration defaults
var (
    DefaultDataSubFolder        = "raft-cluster"
    DefaultWaitForLeaderTimeout = 15 * time.Second
    DefaultCommitRetries        = 1
    DefaultNetworkTimeout       = 100 * time.Second
    DefaultCommitRetryDelay     = 200 * time.Millisecond
    DefaultBackupsRotate        = 6
)

// ClusterRaftConfig allows to configure the Raft Consensus component for the node cluster.
type ClusterRaftConfig struct {
    // config to enabled node cluster with raft consensus
    ClusterModeEnabled bool
    // A folder to store Raft's data.
    DataFolder string
    // InitPeerset provides the list of initial cluster peers for new Raft
    // peers (with no prior state). It is ignored when Raft was already
    // initialized or when starting in staging mode.
    InitPeerset []string
    // LeaderTimeout specifies how long to wait for a leader before
    // failing an operation.
    WaitForLeaderTimeout time.Duration
    // NetworkTimeout specifies how long before a Raft network
    // operation is timed out
    NetworkTimeout time.Duration
    // CommitRetries specifies how many times we retry a failed commit until
    // we give up.
    CommitRetries int
    // How long to wait between retries
    CommitRetryDelay time.Duration
    // BackupsRotate specifies the maximum number of Raft's DataFolder
    // copies that we keep as backups (renaming) after cleanup.
    BackupsRotate int
    // A Hashicorp Raft's configuration object.
    RaftConfig *hraft.Config

    // Tracing enables propagation of contexts across binary boundaries.
    Tracing bool
}

func DefaultClusterRaftConfig() *ClusterRaftConfig {
    var cfg ClusterRaftConfig
    cfg.DataFolder = "" // empty so it gets omitted
    cfg.InitPeerset = []string{}
    cfg.WaitForLeaderTimeout = DefaultWaitForLeaderTimeout
    cfg.NetworkTimeout = DefaultNetworkTimeout
    cfg.CommitRetries = DefaultCommitRetries
    cfg.CommitRetryDelay = DefaultCommitRetryDelay
    cfg.BackupsRotate = DefaultBackupsRotate
    cfg.RaftConfig = hraft.DefaultConfig()

    // These options are imposed over any Default Raft Config.
    cfg.RaftConfig.ShutdownOnRemove = false
    cfg.RaftConfig.LocalID = "will_be_set_automatically"

    // Set up logging
    cfg.RaftConfig.LogOutput = io.Discard
    return &cfg
}

func NewClusterRaftConfig(userRaftConfig *config.UserRaftConfig) *ClusterRaftConfig {
    var cfg ClusterRaftConfig
    cfg.DataFolder = userRaftConfig.DataFolder
    cfg.InitPeerset = userRaftConfig.InitPeersetMultiAddr
    cfg.WaitForLeaderTimeout = time.Duration(userRaftConfig.WaitForLeaderTimeout)
    cfg.NetworkTimeout = time.Duration(userRaftConfig.NetworkTimeout)
    cfg.CommitRetries = userRaftConfig.CommitRetries
    cfg.CommitRetryDelay = time.Duration(userRaftConfig.CommitRetryDelay)
    cfg.BackupsRotate = userRaftConfig.BackupsRotate

    // Keep this to be default hraft config for now
    cfg.RaftConfig = hraft.DefaultConfig()

    // These options are imposed over any Default Raft Config.
    cfg.RaftConfig.ShutdownOnRemove = false
    cfg.RaftConfig.LocalID = "will_be_set_automatically"

    // Set up logging
    cfg.RaftConfig.LogOutput = io.Discard

    return &cfg

}

// Validate checks that this configuration has working values,
// at least in appearance.
func ValidateConfig(cfg *ClusterRaftConfig) error {
    if cfg.RaftConfig == nil {
        return xerrors.Errorf("no hashicorp/raft.Config")
    }
    if cfg.WaitForLeaderTimeout <= 0 {
        return xerrors.Errorf("wait_for_leader_timeout <= 0")
    }

    if cfg.NetworkTimeout <= 0 {
        return xerrors.Errorf("network_timeout <= 0")
    }

    if cfg.CommitRetries < 0 {
        return xerrors.Errorf("commit_retries is invalid")
    }

    if cfg.CommitRetryDelay <= 0 {
        return xerrors.Errorf("commit_retry_delay is invalid")
    }

    if cfg.BackupsRotate <= 0 {
        return xerrors.Errorf("backups_rotate should be larger than 0")
    }

    return hraft.ValidateConfig(cfg.RaftConfig)
}

// GetDataFolder returns the Raft data folder that we are using.
func (cfg *ClusterRaftConfig) GetDataFolder(repo repo.LockedRepo) string {
    if cfg.DataFolder == "" {
        return filepath.Join(repo.Path(), DefaultDataSubFolder)
    }
    return filepath.Join(repo.Path(), cfg.DataFolder)
}
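For orientation, here is a minimal sketch (not part of the PR) of how this now-removed config package could be exercised on its own: start from DefaultClusterRaftConfig, override the initial peerset, and run ValidateConfig. The import alias matches the one used in the test file above; the peer multiaddr string is an assumption for illustration only.

package main

import (
    "fmt"
    "time"

    consensus "github.com/filecoin-project/lotus/lib/consensus/raft"
)

func main() {
    // Start from the package defaults and override only what differs per cluster.
    cfg := consensus.DefaultClusterRaftConfig()
    cfg.InitPeerset = []string{"/p2p/12D3KooWExamplePeerID"} // hypothetical peer multiaddr
    cfg.WaitForLeaderTimeout = 30 * time.Second

    // ValidateConfig rejects a nil RaftConfig and non-positive timeouts/retries.
    if err := consensus.ValidateConfig(cfg); err != nil {
        fmt.Println("invalid raft cluster config:", err)
        return
    }
    fmt.Println("raft cluster config ok")
}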
@ -1,512 +0,0 @@
// Package raft implements a Consensus component for IPFS Cluster which uses
// Raft (go-libp2p-raft).
package consensus

import (
    "bytes"
    "context"
    "errors"
    "fmt"
    "sort"
    "time"

    "github.com/google/uuid"
    "golang.org/x/exp/slices"

    addr "github.com/filecoin-project/go-address"

    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/chain/messagepool"
    "github.com/filecoin-project/lotus/chain/types"
    "github.com/filecoin-project/lotus/lib/addrutil"
    "github.com/filecoin-project/lotus/node/repo"

    //ds "github.com/ipfs/go-datastore"
    logging "github.com/ipfs/go-log/v2"
    consensus "github.com/libp2p/go-libp2p-consensus"
    rpc "github.com/libp2p/go-libp2p-gorpc"
    libp2praft "github.com/libp2p/go-libp2p-raft"
    host "github.com/libp2p/go-libp2p/core/host"
    peer "github.com/libp2p/go-libp2p/core/peer"
)

var logger = logging.Logger("raft")

type RaftState struct {
    NonceMap api.NonceMapType
    MsgUuids api.MsgUuidMapType

    // TODO: add comment explaining why this is needed
    // We need a reference to the messagepool in the raft state in order to
    // sync messages that have been sent by the leader node
    // Miner calls StateWaitMsg after MpoolPushMessage to check if the message has
    // landed on chain. This check requires the message be stored in the local chainstore
    // If a leadernode goes down after sending a message to the chain and is replaced by
    // another node, the other node needs to have this message in its chainstore for the
    // above check to succeed.

    // This is because the miner only stores signed CIDs but the message received from in a
    // block will be unsigned (for BLS). Hence, the process relies on the node to store the
    // signed message which holds a copy of the unsigned message to properly perform all the
    // needed checks
    Mpool *messagepool.MessagePool
}

func newRaftState(mpool *messagepool.MessagePool) *RaftState {
    return &RaftState{
        NonceMap: make(map[addr.Address]uint64),
        MsgUuids: make(map[uuid.UUID]*types.SignedMessage),
        Mpool:    mpool,
    }
}

type ConsensusOp struct {
    Nonce     uint64               `codec:"nonce,omitempty"`
    Uuid      uuid.UUID            `codec:"uuid,omitempty"`
    Addr      addr.Address         `codec:"addr,omitempty"`
    SignedMsg *types.SignedMessage `codec:"signedMsg,omitempty"`
}

func (c ConsensusOp) ApplyTo(state consensus.State) (consensus.State, error) {
    s := state.(*RaftState)
    s.NonceMap[c.Addr] = c.Nonce
    if c.SignedMsg != nil {

        // Deep copy to tmp
        var buffer bytes.Buffer
        err := c.SignedMsg.MarshalCBOR(&buffer)
        if err != nil {
            return nil, err
        }
        tmp, err := types.DecodeSignedMessage(buffer.Bytes())
        if err != nil {
            return nil, err
        }
        s.MsgUuids[c.Uuid] = tmp

        _, err = s.Mpool.Push(context.TODO(), tmp, false)
        // Since this is only meant to keep messages in sync, ignore any error which
        // shows the message already exists in the mpool
        if err != nil && !api.ErrorIsIn(err, []error{messagepool.ErrExistingNonce}) {
            return nil, err
        }
    }

    return s, nil
}

var _ consensus.Op = &ConsensusOp{}

// Consensus handles the work of keeping a shared-state between
// the peers of a Lotus Cluster, as well as modifying that state and
// applying any updates in a thread-safe manner.
type Consensus struct {
    ctx    context.Context
    cancel func()
    config *ClusterRaftConfig

    host host.Host

    consensus consensus.OpLogConsensus
    actor     consensus.Actor
    raft      *raftWrapper
    state     *RaftState

    RpcClient *rpc.Client
    rpcReady  chan struct{}
    readyCh   chan struct{}

    peerSet []peer.ID
    repo    repo.LockedRepo
}

// NewConsensus builds a new ClusterConsensus component using Raft.
//
// Raft saves state snapshots regularly and persists log data in a bolt
// datastore. Therefore, unless memory usage is a concern, it is recommended
// to use an in-memory go-datastore as store parameter.
//
// The staging parameter controls if the Raft peer should start in
// staging mode (used when joining a new Raft peerset with other peers).
func NewConsensus(host host.Host, cfg *ClusterRaftConfig, mpool *messagepool.MessagePool, repo repo.LockedRepo, staging bool) (*Consensus, error) {
    err := ValidateConfig(cfg)
    if err != nil {
        return nil, err
    }

    ctx, cancel := context.WithCancel(context.Background())

    logger.Debug("starting Consensus and waiting for a leader...")
    state := newRaftState(mpool)

    consensus := libp2praft.NewOpLog(state, &ConsensusOp{})

    raft, err := newRaftWrapper(host, cfg, consensus.FSM(), repo, staging)
    if err != nil {
        logger.Error("error creating raft: ", err)
        cancel()
        return nil, err
    }
    actor := libp2praft.NewActor(raft.raft)
    consensus.SetActor(actor)

    peers := []peer.ID{}
    addrInfos, err := addrutil.ParseAddresses(ctx, cfg.InitPeerset)
    if err != nil {
        logger.Error("error parsing addresses: ", err)
        cancel()
        return nil, err
    }

    for _, addrInfo := range addrInfos {
        peers = append(peers, addrInfo.ID)

        // Add peer to address book
        host.Peerstore().AddAddrs(addrInfo.ID, addrInfo.Addrs, time.Hour*100)
    }

    cc := &Consensus{
        ctx:       ctx,
        cancel:    cancel,
        config:    cfg,
        host:      host,
        consensus: consensus,
        actor:     actor,
        state:     state,
        raft:      raft,
        peerSet:   peers,
        rpcReady:  make(chan struct{}, 1),
        readyCh:   make(chan struct{}, 1),
        repo:      repo,
    }

    go cc.finishBootstrap()
    return cc, nil

}

// TODO: Merge with NewConsensus and remove the rpcReady chan
func NewConsensusWithRPCClient(staging bool) func(host host.Host,
    cfg *ClusterRaftConfig,
    rpcClient *rpc.Client,
    mpool *messagepool.MessagePool,
    repo repo.LockedRepo,
) (*Consensus, error) {

    return func(host host.Host, cfg *ClusterRaftConfig, rpcClient *rpc.Client, mpool *messagepool.MessagePool, repo repo.LockedRepo) (*Consensus, error) {
        cc, err := NewConsensus(host, cfg, mpool, repo, staging)
        if err != nil {
            return nil, err
        }
        cc.RpcClient = rpcClient
        cc.rpcReady <- struct{}{}
        return cc, nil
    }
}

// WaitForSync waits for a leader and for the state to be up to date, then returns.
func (cc *Consensus) WaitForSync(ctx context.Context) error {

    leaderCtx, cancel := context.WithTimeout(ctx, cc.config.WaitForLeaderTimeout)
    defer cancel()

    // 1 - wait for leader
    // 2 - wait until we are a Voter
    // 3 - wait until last index is applied

    // From raft docs:

    // once a staging server receives enough log entries to be sufficiently
    // caught up to the leader's log, the leader will invoke a membership
    // change to change the Staging server to a Voter

    // Thus, waiting to be a Voter is a guarantee that we have a reasonable
    // up to date state. Otherwise, we might return too early (see
    // https://github.com/ipfs-cluster/ipfs-cluster/issues/378)

    _, err := cc.raft.WaitForLeader(leaderCtx)
    if err != nil {
        return errors.New("error waiting for leader: " + err.Error())
    }

    err = cc.raft.WaitForVoter(ctx)
    if err != nil {
        return errors.New("error waiting to become a Voter: " + err.Error())
    }

    err = cc.raft.WaitForUpdates(ctx)
    if err != nil {
        return errors.New("error waiting for consensus updates: " + err.Error())
    }
    return nil
}

// waits until there is a consensus leader and syncs the state
// to the tracker. If errors happen, this will return and never
// signal the component as Ready.
func (cc *Consensus) finishBootstrap() {
    // wait until we have RPC to perform any actions.
    select {
    case <-cc.ctx.Done():
        return
    case <-cc.rpcReady:
    }

    // Sometimes bootstrap is a no-Op. It only applies when
    // no state exists and staging=false.
    _, err := cc.raft.Bootstrap()
    if err != nil {
        return
    }

    logger.Debugf("Bootstrap finished")
    err = cc.WaitForSync(cc.ctx)
    if err != nil {
        return
    }
    logger.Debug("Raft state is now up to date")
    logger.Debug("consensus ready")
    cc.readyCh <- struct{}{}
}

// Shutdown stops the component so it will not process any
// more updates. The underlying consensus is permanently
// shutdown, along with the libp2p transport.
func (cc *Consensus) Shutdown(ctx context.Context) error {

    logger.Info("stopping Consensus component")

    // Raft Shutdown
    err := cc.raft.Shutdown(ctx)
    if err != nil {
        logger.Error(err)
    }

    cc.cancel()
    close(cc.rpcReady)
    return nil
}

// Ready returns a channel which is signaled when the Consensus
// algorithm has finished bootstrapping and is ready to use
func (cc *Consensus) Ready(ctx context.Context) <-chan struct{} {
    return cc.readyCh
}

// IsTrustedPeer returns true. In Raft we trust all peers.
func (cc *Consensus) IsTrustedPeer(ctx context.Context, p peer.ID) bool {
    return slices.Contains(cc.peerSet, p)
}

// Trust is a no-Op.
func (cc *Consensus) Trust(ctx context.Context, pid peer.ID) error { return nil }

// Distrust is a no-Op.
func (cc *Consensus) Distrust(ctx context.Context, pid peer.ID) error { return nil }

// returns true if the operation was redirected to the leader
// note that if the leader just dissappeared, the rpc call will
// fail because we haven't heard that it's gone.
func (cc *Consensus) RedirectToLeader(method string, arg interface{}, ret interface{}) (bool, error) {
    ctx := cc.ctx

    var finalErr error

    // Retry redirects
    for i := 0; i <= cc.config.CommitRetries; i++ {
        logger.Debugf("redirect try %d", i)
        leader, err := cc.Leader(ctx)

        // No leader, wait for one
        if err != nil {
            logger.Warn("there seems to be no leader. Waiting for one")
            rctx, cancel := context.WithTimeout(ctx, cc.config.WaitForLeaderTimeout)
            defer cancel()
            pidstr, err := cc.raft.WaitForLeader(rctx)

            // means we timed out waiting for a leader
            // we don't retry in this case
            if err != nil {
                return false, fmt.Errorf("timed out waiting for leader: %s", err)
            }
            leader, err = peer.Decode(pidstr)
            if err != nil {
                return false, err
            }
        }

        logger.Infof("leader: %s, curr host: %s, peerSet: %s", leader, cc.host.ID(), cc.peerSet)

        // We are the leader. Do not redirect
        if leader == cc.host.ID() {
            return false, nil
        }

        logger.Debugf("redirecting %s to leader: %s", method, leader)
        finalErr = cc.RpcClient.CallContext(
            ctx,
            leader,
            "Consensus",
            method,
            arg,
            ret,
        )
        if finalErr != nil {
            logger.Errorf("retrying to redirect request to leader: %s", finalErr)
            time.Sleep(2 * cc.config.RaftConfig.HeartbeatTimeout)
            continue
        }
        break
    }

    // We tried to redirect, but something happened
    return true, finalErr
}

// commit submits a cc.consensus commit. It retries upon failures.
func (cc *Consensus) Commit(ctx context.Context, op *ConsensusOp) error {

    var finalErr error
    for i := 0; i <= cc.config.CommitRetries; i++ {
        logger.Debugf("attempt #%d: committing %+v", i, op)

        // this means we are retrying
        if finalErr != nil {
            logger.Errorf("retrying upon failed commit (retry %d): %s ",
                i, finalErr)
        }

        // Being here means we are the LEADER. We can commit.
        // now commit the changes to our state
        _, finalErr = cc.consensus.CommitOp(op)
        if finalErr != nil {
            goto RETRY
        }

    RETRY:
        time.Sleep(cc.config.CommitRetryDelay)
    }
    return finalErr
}

// AddPeer adds a new peer to participate in this consensus. It will
// forward the operation to the leader if this is not it.
func (cc *Consensus) AddPeer(ctx context.Context, pid peer.ID) error {
    var finalErr error
    for i := 0; i <= cc.config.CommitRetries; i++ {
        logger.Debugf("attempt #%d: AddPeer %s", i, pid)
        if finalErr != nil {
            logger.Errorf("retrying to add peer. Attempt #%d failed: %s", i, finalErr)
        }
        ok, err := cc.RedirectToLeader("AddPeer", pid, struct{}{})
        if err != nil || ok {
            return err
        }
        // Being here means we are the leader and can commit
        finalErr = cc.raft.AddPeer(ctx, pid)
        if finalErr != nil {
            time.Sleep(cc.config.CommitRetryDelay)
            continue
        }
        logger.Infof("peer added to Raft: %s", pid)
        break
    }
    return finalErr
}

// RmPeer removes a peer from this consensus. It will
// forward the operation to the leader if this is not it.
func (cc *Consensus) RmPeer(ctx context.Context, pid peer.ID) error {
    var finalErr error
    for i := 0; i <= cc.config.CommitRetries; i++ {
        logger.Debugf("attempt #%d: RmPeer %s", i, pid)
        if finalErr != nil {
            logger.Errorf("retrying to remove peer. Attempt #%d failed: %s", i, finalErr)
        }
        ok, err := cc.RedirectToLeader("RmPeer", pid, struct{}{})
        if err != nil || ok {
            return err
        }
        // Being here means we are the leader and can commit
        finalErr = cc.raft.RemovePeer(ctx, pid.String())
        if finalErr != nil {
            time.Sleep(cc.config.CommitRetryDelay)
            continue
        }
        logger.Infof("peer removed from Raft: %s", pid)
        break
    }
    return finalErr
}

// RaftState retrieves the current consensus RaftState. It may error if no RaftState has
// been agreed upon or the state is not consistent. The returned RaftState is the
// last agreed-upon RaftState known by this node. No writes are allowed, as all
// writes to the shared state should happen through the Consensus component
// methods.
func (cc *Consensus) State(ctx context.Context) (*RaftState, error) {
    st, err := cc.consensus.GetLogHead()
    if err == libp2praft.ErrNoState {
        return newRaftState(nil), nil
    }

    if err != nil {
        return nil, err
    }
    state, ok := st.(*RaftState)
    if !ok {
        return nil, errors.New("wrong state type")
    }
    return state, nil
}

// Leader returns the peerID of the Leader of the
// cluster. It returns an error when there is no leader.
func (cc *Consensus) Leader(ctx context.Context) (peer.ID, error) {
    // Note the hard-dependency on raft here...
    raftactor := cc.actor.(*libp2praft.Actor)
    return raftactor.Leader()
}

// Clean removes the Raft persisted state.
func (cc *Consensus) Clean(ctx context.Context) error {
    //return CleanupRaft(cc.config)
    return nil
}

//Rollback replaces the current agreed-upon
//state with the state provided. Only the consensus leader
//can perform this operation.
//func (cc *Consensus) Rollback(state RaftState) error {
//	// This is unused. It *might* be used for upgrades.
//	// There is rather untested magic in libp2p-raft's FSM()
//	// to make this possible.
//	return cc.consensus.Rollback(state)
//}

// Peers return the current list of peers in the consensus.
// The list will be sorted alphabetically.
func (cc *Consensus) Peers(ctx context.Context) ([]peer.ID, error) {

    peers := []peer.ID{}
    raftPeers, err := cc.raft.Peers(ctx)
    if err != nil {
        return nil, fmt.Errorf("cannot retrieve list of peers: %s", err)
    }

    sort.Strings(raftPeers)

    for _, p := range raftPeers {
        id, err := peer.Decode(p)
        if err != nil {
            panic("could not decode peer")
        }
        peers = append(peers, id)
    }
    return peers, nil
}

func (cc *Consensus) IsLeader(ctx context.Context) bool {
    leader, _ := cc.Leader(ctx)
    return leader == cc.host.ID()
}
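The leader-gating pattern above (commit locally only when this host is the raft leader, otherwise forward the call over go-libp2p-gorpc) is how the removed package kept MpoolPushMessage consistent across the cluster. A minimal illustrative sketch of a caller combining RedirectToLeader and Commit, written as if it lived in this package; the helper name commitOrRedirect and its argument shapes are hypothetical, not part of the original file:

// commitOrRedirect forwards an operation to the current leader when this node
// is a follower, and commits the op to the replicated raft log when it is the
// leader. This mirrors the flow used by RedirectToLeader's callers above.
func commitOrRedirect(ctx context.Context, cc *Consensus, method string, arg, ret interface{}, op *ConsensusOp) error {
    redirected, err := cc.RedirectToLeader(method, arg, ret)
    if err != nil {
        return err
    }
    if redirected {
        // The leader handled the request on our behalf.
        return nil
    }
    // We are the leader: apply the state change to the replicated log.
    return cc.Commit(ctx, op)
}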
@ -1,41 +0,0 @@
|
|||||||
package consensus
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
|
|
||||||
consensus "github.com/libp2p/go-libp2p-consensus"
|
|
||||||
"github.com/libp2p/go-libp2p/core/peer"
|
|
||||||
)
|
|
||||||
|
|
||||||
type ConsensusAPI interface {
|
|
||||||
// Returns a channel to signal that the consensus layer is ready
|
|
||||||
// allowing the main component to wait for it during start.
|
|
||||||
Ready(context.Context) <-chan struct{}
|
|
||||||
|
|
||||||
AddPeer(context.Context, peer.ID) error
|
|
||||||
RmPeer(context.Context, peer.ID) error
|
|
||||||
State(context.Context) (consensus.State, error)
|
|
||||||
// Provide a node which is responsible to perform
|
|
||||||
// specific tasks which must only run in 1 cluster peer.
|
|
||||||
Leader(context.Context) (peer.ID, error)
|
|
||||||
// Only returns when the consensus state has all log
|
|
||||||
// updates applied to it.
|
|
||||||
WaitForSync(context.Context) error
|
|
||||||
// Clean removes all consensus data.
|
|
||||||
Clean(context.Context) error
|
|
||||||
// Peers returns the peerset participating in the Consensus.
|
|
||||||
Peers(context.Context) ([]peer.ID, error)
|
|
||||||
// IsTrustedPeer returns true if the given peer is "trusted".
|
|
||||||
// This will grant access to more rpc endpoints and a
|
|
||||||
// non-trusted one. This should be fast as it will be
|
|
||||||
// called repeatedly for every remote RPC request.
|
|
||||||
IsTrustedPeer(context.Context, peer.ID) bool
|
|
||||||
// Trust marks a peer as "trusted".
|
|
||||||
Trust(context.Context, peer.ID) error
|
|
||||||
// Distrust removes a peer from the "trusted" set.
|
|
||||||
Distrust(context.Context, peer.ID) error
|
|
||||||
// Returns true if current node is the cluster leader
|
|
||||||
IsLeader(ctx context.Context) bool
|
|
||||||
|
|
||||||
Shutdown(context.Context) error
|
|
||||||
}
|
|
@ -1,563 +0,0 @@
|
|||||||
package consensus
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/hashicorp/go-hclog"
|
|
||||||
hraft "github.com/hashicorp/raft"
|
|
||||||
raftboltdb "github.com/hashicorp/raft-boltdb"
|
|
||||||
"github.com/ipfs/go-log/v2"
|
|
||||||
p2praft "github.com/libp2p/go-libp2p-raft"
|
|
||||||
host "github.com/libp2p/go-libp2p/core/host"
|
|
||||||
peer "github.com/libp2p/go-libp2p/core/peer"
|
|
||||||
"go.uber.org/multierr"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
|
|
||||||
"github.com/filecoin-project/lotus/lib/addrutil"
|
|
||||||
"github.com/filecoin-project/lotus/node/repo"
|
|
||||||
)
|
|
||||||
|
|
||||||
var raftLogger = log.Logger("raft-cluster")
|
|
||||||
|
|
||||||
// ErrWaitingForSelf is returned when we are waiting for ourselves to depart
|
|
||||||
// the peer set, which won't happen
|
|
||||||
var errWaitingForSelf = errors.New("waiting for ourselves to depart")
|
|
||||||
|
|
||||||
// RaftMaxSnapshots indicates how many snapshots to keep in the consensus data
|
|
||||||
// folder.
|
|
||||||
// TODO: Maybe include this in Config. Not sure how useful it is to touch
|
|
||||||
// this anyways.
|
|
||||||
var RaftMaxSnapshots = 5
|
|
||||||
|
|
||||||
// RaftLogCacheSize is the maximum number of logs to cache in-memory.
|
|
||||||
// This is used to reduce disk I/O for the recently committed entries.
|
|
||||||
var RaftLogCacheSize = 512
|
|
||||||
|
|
||||||
// How long we wait for updates during shutdown before snapshotting
|
|
||||||
var waitForUpdatesShutdownTimeout = 5 * time.Second
|
|
||||||
var waitForUpdatesInterval = 400 * time.Millisecond
|
|
||||||
|
|
||||||
// How many times to retry snapshotting when shutting down
|
|
||||||
var maxShutdownSnapshotRetries = 5
|
|
||||||
|
|
||||||
// raftWrapper wraps the hraft.Raft object and related things like the
|
|
||||||
// different stores used or the hraft.Configuration.
|
|
||||||
// Its methods provide functionality for working with Raft.
|
|
||||||
type raftWrapper struct {
|
|
||||||
ctx context.Context
|
|
||||||
cancel context.CancelFunc
|
|
||||||
raft *hraft.Raft
|
|
||||||
config *ClusterRaftConfig
|
|
||||||
host host.Host
|
|
||||||
serverConfig hraft.Configuration
|
|
||||||
transport *hraft.NetworkTransport
|
|
||||||
snapshotStore hraft.SnapshotStore
|
|
||||||
logStore hraft.LogStore
|
|
||||||
stableStore hraft.StableStore
|
|
||||||
boltdb *raftboltdb.BoltStore
|
|
||||||
repo repo.LockedRepo
|
|
||||||
staging bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// newRaftWrapper creates a Raft instance and initializes
|
|
||||||
// everything leaving it ready to use. Note, that Bootstrap() should be called
|
|
||||||
// to make sure the raft instance is usable.
|
|
||||||
func newRaftWrapper(
|
|
||||||
host host.Host,
|
|
||||||
cfg *ClusterRaftConfig,
|
|
||||||
fsm hraft.FSM,
|
|
||||||
repo repo.LockedRepo,
|
|
||||||
staging bool,
|
|
||||||
) (*raftWrapper, error) {
|
|
||||||
|
|
||||||
raftW := &raftWrapper{}
|
|
||||||
raftW.config = cfg
|
|
||||||
raftW.host = host
|
|
||||||
raftW.staging = staging
|
|
||||||
raftW.repo = repo
|
|
||||||
// Set correct LocalID
|
|
||||||
cfg.RaftConfig.LocalID = hraft.ServerID(host.ID().String())
|
|
||||||
|
|
||||||
df := cfg.GetDataFolder(repo)
|
|
||||||
err := makeDataFolder(df)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
err = raftW.makeServerConfig()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
err = raftW.makeTransport()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
err = raftW.makeStores()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
raftLogger.Debug("creating Raft")
|
|
||||||
raftW.raft, err = hraft.NewRaft(
|
|
||||||
cfg.RaftConfig,
|
|
||||||
fsm,
|
|
||||||
raftW.logStore,
|
|
||||||
raftW.stableStore,
|
|
||||||
raftW.snapshotStore,
|
|
||||||
raftW.transport,
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
raftLogger.Error("initializing raft: ", err)
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
raftW.ctx, raftW.cancel = context.WithCancel(context.Background())
|
|
||||||
|
|
||||||
return raftW, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// makeDataFolder creates the folder that is meant to store Raft data. Ensures
|
|
||||||
// we always set 0700 mode.
|
|
||||||
func makeDataFolder(folder string) error {
|
|
||||||
return os.MkdirAll(folder, 0700)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rw *raftWrapper) makeTransport() (err error) {
|
|
||||||
raftLogger.Debug("creating libp2p Raft transport")
|
|
||||||
rw.transport, err = p2praft.NewLibp2pTransport(
|
|
||||||
rw.host,
|
|
||||||
rw.config.NetworkTimeout,
|
|
||||||
)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rw *raftWrapper) makeStores() error {
|
|
||||||
raftLogger.Debug("creating BoltDB store")
|
|
||||||
df := rw.config.GetDataFolder(rw.repo)
|
|
||||||
store, err := raftboltdb.NewBoltStore(filepath.Join(df, "raft.db"))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// wraps the store in a LogCache to improve performance.
|
|
||||||
// See consul/agent/consul/server.go
|
|
||||||
cacheStore, err := hraft.NewLogCache(RaftLogCacheSize, store)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
raftLogger.Debug("creating raft snapshot store")
|
|
||||||
snapstore, err := hraft.NewFileSnapshotStoreWithLogger(
|
|
||||||
df,
|
|
||||||
RaftMaxSnapshots,
|
|
||||||
hclog.FromStandardLogger(zap.NewStdLog(log.Logger("raft-snapshot").SugaredLogger.Desugar()), hclog.DefaultOptions),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
rw.logStore = cacheStore
|
|
||||||
rw.stableStore = store
|
|
||||||
rw.snapshotStore = snapstore
|
|
||||||
rw.boltdb = store
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Bootstrap calls BootstrapCluster on the Raft instance with a valid
|
|
||||||
// Configuration (generated from InitPeerset) when Raft has no state
|
|
||||||
// and we are not setting up a staging peer. It returns if Raft
|
|
||||||
// was boostrapped (true) and an error.
|
|
||||||
func (rw *raftWrapper) Bootstrap() (bool, error) {
|
|
||||||
logger.Debug("checking for existing raft states")
|
|
||||||
hasState, err := hraft.HasExistingState(
|
|
||||||
rw.logStore,
|
|
||||||
rw.stableStore,
|
|
||||||
rw.snapshotStore,
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if hasState {
|
|
||||||
logger.Debug("raft cluster is already initialized")
|
|
||||||
|
|
||||||
// Inform the user that we are working with a pre-existing peerset
|
|
||||||
logger.Info("existing Raft state found! raft.InitPeerset will be ignored")
|
|
||||||
cf := rw.raft.GetConfiguration()
|
|
||||||
if err := cf.Error(); err != nil {
|
|
||||||
logger.Debug(err)
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
currentCfg := cf.Configuration()
|
|
||||||
srvs := ""
|
|
||||||
for _, s := range currentCfg.Servers {
|
|
||||||
srvs += fmt.Sprintf(" %s\n", s.ID)
|
|
||||||
}
|
|
||||||
|
|
||||||
logger.Debugf("Current Raft Peerset:\n%s\n", srvs)
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if rw.staging {
|
|
||||||
logger.Debug("staging servers do not need initialization")
|
|
||||||
logger.Info("peer is ready to join a cluster")
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
voters := ""
|
|
||||||
for _, s := range rw.serverConfig.Servers {
|
|
||||||
voters += fmt.Sprintf(" %s\n", s.ID)
|
|
||||||
}
|
|
||||||
|
|
||||||
logger.Infof("initializing raft cluster with the following voters:\n%s\n", voters)
|
|
||||||
|
|
||||||
future := rw.raft.BootstrapCluster(rw.serverConfig)
|
|
||||||
if err := future.Error(); err != nil {
|
|
||||||
logger.Error("bootstrapping cluster: ", err)
|
|
||||||
return true, err
|
|
||||||
}
|
|
||||||
return true, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// create Raft servers configuration. The result is used
|
|
||||||
// by Bootstrap() when it proceeds to Bootstrap.
|
|
||||||
func (rw *raftWrapper) makeServerConfig() error {
|
|
||||||
peers := []peer.ID{}
|
|
||||||
addrInfos, err := addrutil.ParseAddresses(context.Background(), rw.config.InitPeerset)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for _, addrInfo := range addrInfos {
|
|
||||||
peers = append(peers, addrInfo.ID)
|
|
||||||
}
|
|
||||||
rw.serverConfig = makeServerConf(append(peers, rw.host.ID()))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// creates a server configuration with all peers as Voters.
|
|
||||||
func makeServerConf(peers []peer.ID) hraft.Configuration {
|
|
||||||
sm := make(map[string]struct{})
|
|
||||||
|
|
||||||
servers := make([]hraft.Server, 0)
|
|
||||||
|
|
||||||
// Servers are peers + self. We avoid duplicate entries below
|
|
||||||
for _, pid := range peers {
|
|
||||||
p := pid.String()
|
|
||||||
_, ok := sm[p]
|
|
||||||
if !ok { // avoid dups
|
|
||||||
sm[p] = struct{}{}
|
|
||||||
servers = append(servers, hraft.Server{
|
|
||||||
Suffrage: hraft.Voter,
|
|
||||||
ID: hraft.ServerID(p),
|
|
||||||
Address: hraft.ServerAddress(p),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return hraft.Configuration{Servers: servers}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WaitForLeader holds until Raft says we have a leader.
|
|
||||||
// Returns if ctx is canceled.
|
|
||||||
func (rw *raftWrapper) WaitForLeader(ctx context.Context) (string, error) {
|
|
||||||
ticker := time.NewTicker(time.Second / 2)
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-ticker.C:
|
|
||||||
if l := rw.raft.Leader(); l != "" {
|
|
||||||
logger.Debug("waitForleaderTimer")
|
|
||||||
logger.Infof("Current Raft Leader: %s", l)
|
|
||||||
ticker.Stop()
|
|
||||||
return string(l), nil
|
|
||||||
}
|
|
||||||
case <-ctx.Done():
|
|
||||||
return "", ctx.Err()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}

func (rw *raftWrapper) WaitForVoter(ctx context.Context) error {
	logger.Debug("waiting until we are promoted to a voter")

	pid := hraft.ServerID(rw.host.ID().String())
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
			logger.Debugf("%s: get configuration", pid)
			configFuture := rw.raft.GetConfiguration()
			if err := configFuture.Error(); err != nil {
				return err
			}

			if isVoter(pid, configFuture.Configuration()) {
				return nil
			}
			logger.Debugf("%s: not voter yet", pid)

			time.Sleep(waitForUpdatesInterval)
		}
	}
}

func isVoter(srvID hraft.ServerID, cfg hraft.Configuration) bool {
	for _, server := range cfg.Servers {
		if server.ID == srvID && server.Suffrage == hraft.Voter {
			return true
		}
	}
	return false
}

// WaitForUpdates holds until Raft has synced to the last index in the log
func (rw *raftWrapper) WaitForUpdates(ctx context.Context) error {

	logger.Debug("Raft state is catching up to the latest known version. Please wait...")
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
			lai := rw.raft.AppliedIndex()
			li := rw.raft.LastIndex()
			logger.Debugf("current Raft index: %d/%d",
				lai, li)
			if lai == li {
				return nil
			}
			time.Sleep(waitForUpdatesInterval)
		}
	}
}

func (rw *raftWrapper) WaitForPeer(ctx context.Context, pid string, depart bool) error {

	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
			peers, err := rw.Peers(ctx)
			if err != nil {
				return err
			}

			if len(peers) == 1 && pid == peers[0] && depart {
				return errWaitingForSelf
			}

			found := find(peers, pid)

			// departing
			if depart && !found {
				return nil
			}

			// joining
			if !depart && found {
				return nil
			}

			time.Sleep(50 * time.Millisecond)
		}
	}
}

// Snapshot tells Raft to take a snapshot.
func (rw *raftWrapper) Snapshot() error {
	future := rw.raft.Snapshot()
	err := future.Error()
	if err != nil && err.Error() != hraft.ErrNothingNewToSnapshot.Error() {
		return err
	}
	return nil
}

// snapshotOnShutdown attempts to take a snapshot before a shutdown.
// Snapshotting might fail if the raft applied index is not the last index.
// This waits for the updates and tries to take a snapshot when the
// applied index is up to date.
// It will retry if the snapshot still fails, in case more updates have arrived.
// If waiting for updates times-out, it will not try anymore, since something
// is wrong. This is a best-effort solution as there is no way to tell Raft
// to stop processing entries because we want to take a snapshot before
// shutting down.
func (rw *raftWrapper) snapshotOnShutdown() error {
	var err error
	for i := 0; i < maxShutdownSnapshotRetries; i++ {
		ctx, cancel := context.WithTimeout(context.Background(), waitForUpdatesShutdownTimeout)
		err = rw.WaitForUpdates(ctx)
		cancel()
		if err != nil {
			logger.Warn("timed out waiting for state updates before shutdown. Snapshotting may fail")
			return rw.Snapshot()
		}

		err = rw.Snapshot()
		if err == nil {
			return nil // things worked
		}

		// There was an error
		err = errors.New("could not snapshot raft: " + err.Error())
		logger.Warnf("retrying to snapshot (%d/%d)...", i+1, maxShutdownSnapshotRetries)
	}
	return err
}

// Shutdown shutdown Raft and closes the BoltDB.
func (rw *raftWrapper) Shutdown(ctx context.Context) error {

	rw.cancel()

	var finalErr error

	err := rw.snapshotOnShutdown()
	if err != nil {
		finalErr = multierr.Append(finalErr, err)
	}

	future := rw.raft.Shutdown()
	err = future.Error()
	if err != nil {
		finalErr = multierr.Append(finalErr, err)
	}

	err = rw.boltdb.Close() // important!
	if err != nil {
		finalErr = multierr.Append(finalErr, err)
	}

	return finalErr
}

// AddPeer adds a peer to Raft
func (rw *raftWrapper) AddPeer(ctx context.Context, peerId peer.ID) error {

	// Check that we don't have it to not waste
	// log entries if so.
	peers, err := rw.Peers(ctx)
	if err != nil {
		return err
	}
	if find(peers, peerId.String()) {
		logger.Infof("%s is already a raft peerStr", peerId.String())
		return nil
	}

	err = rw.host.Connect(ctx, peer.AddrInfo{ID: peerId})
	if err != nil {
		return err
	}

	future := rw.raft.AddVoter(
		hraft.ServerID(peerId.String()),
		hraft.ServerAddress(peerId.String()),
		0,
		0,
	) // TODO: Extra cfg value?
	err = future.Error()
	if err != nil {
		logger.Error("raft cannot add peer: ", err)
	}
	return err
}

// RemovePeer removes a peer from Raft
func (rw *raftWrapper) RemovePeer(ctx context.Context, peer string) error {
	// Check that we have it to not waste
	// log entries if we don't.
	peers, err := rw.Peers(ctx)
	if err != nil {
		return err
	}
	if !find(peers, peer) {
		logger.Infof("%s is not among raft peers", peer)
		return nil
	}

	if len(peers) == 1 && peers[0] == peer {
		return errors.New("cannot remove ourselves from a 1-peer cluster")
	}

	rmFuture := rw.raft.RemoveServer(
		hraft.ServerID(peer),
		0,
		0,
	)
	err = rmFuture.Error()
	if err != nil {
		logger.Error("raft cannot remove peer: ", err)
		return err
	}

	return nil
}

// Leader returns Raft's leader. It may be an empty string if
// there is no leader or it is unknown.
func (rw *raftWrapper) Leader(ctx context.Context) string {
	return string(rw.raft.Leader())
}

func (rw *raftWrapper) Peers(ctx context.Context) ([]string, error) {
	ids := make([]string, 0)

	configFuture := rw.raft.GetConfiguration()
	if err := configFuture.Error(); err != nil {
		return nil, err
	}

	for _, server := range configFuture.Configuration().Servers {
		ids = append(ids, string(server.ID))
	}

	return ids, nil
}

// CleanupRaft moves the current data folder to a backup location
//func CleanupRaft(cfg *Config) error {
//	dataFolder := cfg.GetDataFolder()
//	keep := cfg.BackupsRotate
//
//	meta, _, err := latestSnapshot(dataFolder)
//	if meta == nil && err == nil {
//		// no snapshots at all. Avoid creating backups
//		// from empty state folders.
//		logger.Infof("cleaning empty Raft data folder (%s)", dataFolder)
//		os.RemoveAll(dataFolder)
//		return nil
//	}
//
//	logger.Infof("cleaning and backing up Raft data folder (%s)", dataFolder)
//	dbh := newDataBackupHelper(dataFolder, keep)
//	err = dbh.makeBackup()
//	if err != nil {
//		logger.Warn(err)
//		logger.Warn("the state could not be cleaned properly")
//		logger.Warn("manual intervention may be needed before starting cluster again")
//	}
//	return nil
//}

// only call when Raft is shutdown
func (rw *raftWrapper) Clean() error {
	//return CleanupRaft(rw.config)
	return nil
}

func find(s []string, elem string) bool {
	for _, selem := range s {
		if selem == elem {
			return true
		}
	}
	return false
}
@ -127,7 +127,6 @@ const (
	SettlePaymentChannelsKey
	RunPeerTaggerKey
	SetupFallbackBlockstoresKey
	GoRPCServer

	ConsensusReporterKey
@ -3,7 +3,6 @@ package node
import (
	"os"

	gorpc "github.com/libp2p/go-libp2p-gorpc"
	"go.uber.org/fx"
	"golang.org/x/xerrors"

@ -31,7 +30,6 @@ import (
	"github.com/filecoin-project/lotus/chain/wallet"
	ledgerwallet "github.com/filecoin-project/lotus/chain/wallet/ledger"
	"github.com/filecoin-project/lotus/chain/wallet/remotewallet"
	raftcns "github.com/filecoin-project/lotus/lib/consensus/raft"
	"github.com/filecoin-project/lotus/lib/peermgr"
	"github.com/filecoin-project/lotus/markets/retrievaladapter"
	"github.com/filecoin-project/lotus/markets/storageadapter"

@ -251,17 +249,6 @@ func ConfigFullNode(c interface{}) Option {
			Override(new(wallet.Default), wallet.NilDefault),
		),

		// Chain node cluster enabled
		If(cfg.Cluster.ClusterModeEnabled,
			Override(new(*gorpc.Client), modules.NewRPCClient),
			Override(new(*raftcns.ClusterRaftConfig), raftcns.NewClusterRaftConfig(&cfg.Cluster)),
			Override(new(*raftcns.Consensus), raftcns.NewConsensusWithRPCClient(false)),
			Override(new(*messagesigner.MessageSignerConsensus), messagesigner.NewMessageSignerConsensus),
			Override(new(messagesigner.MsgSigner), From(new(*messagesigner.MessageSignerConsensus))),
			Override(new(*modules.RPCHandler), modules.NewRPCHandler),
			Override(GoRPCServer, modules.NewRPCServer),
		),

		// Actor event filtering support
		Override(new(events.EventAPI), From(new(modules.EventAPI))),

@ -106,7 +106,6 @@ func DefaultFullNode() *FullNode {
				HotstoreMaxSpaceSafetyBuffer: 50_000_000_000,
			},
		},
		Cluster: *DefaultUserRaftConfig(),
		Fevm: FevmConfig{
			EnableEthRPC:                 false,
			EthTxHashMappingLifetimeDays: 0,
@ -327,28 +326,6 @@ const (
	ResourceFilteringDisabled = ResourceFilteringStrategy("disabled")
)

var (
	DefaultDataSubFolder        = "raft"
	DefaultWaitForLeaderTimeout = 15 * time.Second
	DefaultCommitRetries        = 1
	DefaultNetworkTimeout       = 100 * time.Second
	DefaultCommitRetryDelay     = 200 * time.Millisecond
	DefaultBackupsRotate        = 6
)

func DefaultUserRaftConfig() *UserRaftConfig {
	var cfg UserRaftConfig
	cfg.DataFolder = "" // empty so it gets omitted
	cfg.InitPeersetMultiAddr = []string{}
	cfg.WaitForLeaderTimeout = Duration(DefaultWaitForLeaderTimeout)
	cfg.NetworkTimeout = Duration(DefaultNetworkTimeout)
	cfg.CommitRetries = DefaultCommitRetries
	cfg.CommitRetryDelay = Duration(DefaultCommitRetryDelay)
	cfg.BackupsRotate = DefaultBackupsRotate

	return &cfg
}

func DefaultLotusProvider() *LotusProviderConfig {
	return &LotusProviderConfig{
		Subsystems: ProviderSubsystemsConfig{
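For reference, the removed defaults combined into a cluster configuration along these lines. Field names match the UserRaftConfig struct removed later in this diff, the multiaddr is a placeholder, and the sketch is illustrative rather than part of the change.

	cfg := UserRaftConfig{
		ClusterModeEnabled:   true,
		DataFolder:           "", // left empty, as in DefaultUserRaftConfig
		InitPeersetMultiAddr: []string{"/ip4/10.0.0.2/tcp/1347/p2p/<peer-id>"},
		WaitForLeaderTimeout: Duration(DefaultWaitForLeaderTimeout), // 15s
		NetworkTimeout:       Duration(DefaultNetworkTimeout),       // 100s
		CommitRetries:        DefaultCommitRetries,                  // 1
		CommitRetryDelay:     Duration(DefaultCommitRetryDelay),     // 200ms
		BackupsRotate:        DefaultBackupsRotate,                  // 6
	}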
@ -494,12 +494,6 @@ Set to 0 to keep all mappings`,

		Comment: ``,
	},
	{
		Name: "Cluster",
		Type: "UserRaftConfig",

		Comment: ``,
	},
	{
		Name: "Fevm",
		Type: "FevmConfig",
@ -1692,68 +1686,6 @@ HotstoreMaxSpaceTarget - HotstoreMaxSpaceSafetyBuffer`,

		Comment: ``,
	},
},
"UserRaftConfig": {
	{
		Name: "ClusterModeEnabled",
		Type: "bool",

		Comment: `EXPERIMENTAL. config to enabled node cluster with raft consensus`,
	},
	{
		Name: "DataFolder",
		Type: "string",

		Comment: `A folder to store Raft's data.`,
	},
	{
		Name: "InitPeersetMultiAddr",
		Type: "[]string",

		Comment: `InitPeersetMultiAddr provides the list of initial cluster peers for new Raft
peers (with no prior state). It is ignored when Raft was already
initialized or when starting in staging mode.`,
	},
	{
		Name: "WaitForLeaderTimeout",
		Type: "Duration",

		Comment: `LeaderTimeout specifies how long to wait for a leader before
failing an operation.`,
	},
	{
		Name: "NetworkTimeout",
		Type: "Duration",

		Comment: `NetworkTimeout specifies how long before a Raft network
operation is timed out`,
	},
	{
		Name: "CommitRetries",
		Type: "int",

		Comment: `CommitRetries specifies how many times we retry a failed commit until
we give up.`,
	},
	{
		Name: "CommitRetryDelay",
		Type: "Duration",

		Comment: `How long to wait between retries`,
	},
	{
		Name: "BackupsRotate",
		Type: "int",

		Comment: `BackupsRotate specifies the maximum number of Raft's DataFolder
copies that we keep as backups (renaming) after cleanup.`,
	},
	{
		Name: "Tracing",
		Type: "bool",

		Comment: `Tracing enables propagation of contexts across binary boundaries.`,
	},
},
"Wallet": {
	{
		Name: "RemoteBackend",
@ -26,7 +26,6 @@ type FullNode struct {
	Wallet        Wallet
	Fees          FeeConfig
	Chainstore    Chainstore
	Cluster       UserRaftConfig
	Fevm          FevmConfig
	Index         IndexConfig
	FaultReporter FaultReporterConfig
@ -749,33 +748,6 @@ type FeeConfig struct {
	DefaultMaxFee types.FIL
}

type UserRaftConfig struct {
	// EXPERIMENTAL. config to enabled node cluster with raft consensus
	ClusterModeEnabled bool
	// A folder to store Raft's data.
	DataFolder string
	// InitPeersetMultiAddr provides the list of initial cluster peers for new Raft
	// peers (with no prior state). It is ignored when Raft was already
	// initialized or when starting in staging mode.
	InitPeersetMultiAddr []string
	// LeaderTimeout specifies how long to wait for a leader before
	// failing an operation.
	WaitForLeaderTimeout Duration
	// NetworkTimeout specifies how long before a Raft network
	// operation is timed out
	NetworkTimeout Duration
	// CommitRetries specifies how many times we retry a failed commit until
	// we give up.
	CommitRetries int
	// How long to wait between retries
	CommitRetryDelay Duration
	// BackupsRotate specifies the maximum number of Raft's DataFolder
	// copies that we keep as backups (renaming) after cleanup.
	BackupsRotate int
	// Tracing enables propagation of contexts across binary boundaries.
	Tracing bool
}

type FevmConfig struct {
	// EnableEthRPC enables eth_ rpc, and enables storing a mapping of eth transaction hashes to filecoin message Cids.
	// This will also enable the RealTimeFilterAPI and HistoricFilterAPI by default, but they can be disabled by config options above.
|
@ -34,7 +34,6 @@ type FullNodeAPI struct {
|
|||||||
full.MsigAPI
|
full.MsigAPI
|
||||||
full.WalletAPI
|
full.WalletAPI
|
||||||
full.SyncAPI
|
full.SyncAPI
|
||||||
full.RaftAPI
|
|
||||||
full.EthAPI
|
full.EthAPI
|
||||||
|
|
||||||
DS dtypes.MetadataDS
|
DS dtypes.MetadataDS
|
||||||
@ -119,12 +118,4 @@ func (n *FullNodeAPI) NodeStatus(ctx context.Context, inclChainStatus bool) (sta
	return status, nil
}

func (n *FullNodeAPI) RaftState(ctx context.Context) (*api.RaftStateData, error) {
	return n.RaftAPI.GetRaftState(ctx)
}

func (n *FullNodeAPI) RaftLeader(ctx context.Context) (peer.ID, error) {
	return n.RaftAPI.Leader(ctx)
}

var _ api.FullNode = &FullNodeAPI{}
@ -44,8 +44,6 @@ type MpoolAPI struct {
	WalletAPI
	GasAPI

	RaftAPI

	MessageSigner messagesigner.MsgSigner

	PushLocks *dtypes.MpoolLocker
@ -145,20 +143,6 @@ func (a *MpoolAPI) MpoolPushMessage(ctx context.Context, msg *types.Message, spe
	msg = &cp
	inMsg := *msg

	// Redirect to leader if current node is not leader. A single non raft based node is always the leader
	if !a.RaftAPI.IsLeader(ctx) {
		var signedMsg types.SignedMessage
		redirected, err := a.RaftAPI.RedirectToLeader(ctx, "MpoolPushMessage", api.MpoolMessageWhole{Msg: msg, Spec: spec}, &signedMsg)
		if err != nil {
			return nil, err
		}
		// It's possible that the current node became the leader between the check and the redirect
		// In that case, continue with rest of execution and only return signedMsg if something was redirected
		if redirected {
			return &signedMsg, nil
		}
	}

	// Generate spec and uuid if not available in the message
	if spec == nil {
		spec = &api.MessageSendSpec{
@ -1,50 +0,0 @@
package full

import (
	"context"

	"github.com/libp2p/go-libp2p/core/peer"
	"go.uber.org/fx"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/messagesigner"
)

type RaftAPI struct {
	fx.In

	MessageSigner *messagesigner.MessageSignerConsensus `optional:"true"`
}

func (r *RaftAPI) GetRaftState(ctx context.Context) (*api.RaftStateData, error) {
	if r.MessageSigner == nil {
		return nil, xerrors.Errorf("raft consensus not enabled. Please check your configuration")
	}
	raftState, err := r.MessageSigner.GetRaftState(ctx)
	if err != nil {
		return nil, err
	}
	return &api.RaftStateData{NonceMap: raftState.NonceMap, MsgUuids: raftState.MsgUuids}, nil
}

func (r *RaftAPI) Leader(ctx context.Context) (peer.ID, error) {
	if r.MessageSigner == nil {
		return "", xerrors.Errorf("raft consensus not enabled. Please check your configuration")
	}
	return r.MessageSigner.Leader(ctx)
}

func (r *RaftAPI) IsLeader(ctx context.Context) bool {
	if r.MessageSigner == nil {
		return true
	}
	return r.MessageSigner.IsLeader(ctx)
}

func (r *RaftAPI) RedirectToLeader(ctx context.Context, method string, arg interface{}, ret interface{}) (bool, error) {
	if r.MessageSigner == nil {
		return false, xerrors.Errorf("raft consensus not enabled. Please check your configuration")
	}
	return r.MessageSigner.RedirectToLeader(ctx, method, arg, ret)
}
@ -1,55 +0,0 @@
package modules

import (
	"context"

	rpc "github.com/libp2p/go-libp2p-gorpc"
	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/protocol"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
	consensus "github.com/filecoin-project/lotus/lib/consensus/raft"
	"github.com/filecoin-project/lotus/node/impl/full"
)

type RPCHandler struct {
	mpoolAPI full.MpoolAPI
	cons     *consensus.Consensus
}

func NewRPCHandler(mpoolAPI full.MpoolAPI, cons *consensus.Consensus) *RPCHandler {
	return &RPCHandler{mpoolAPI, cons}
}

func (h *RPCHandler) MpoolPushMessage(ctx context.Context, msgWhole *api.MpoolMessageWhole, ret *types.SignedMessage) error {
	signedMsg, err := h.mpoolAPI.MpoolPushMessage(ctx, msgWhole.Msg, msgWhole.Spec)
	if err != nil {
		return err
	}
	*ret = *signedMsg
	return nil
}

func (h *RPCHandler) AddPeer(ctx context.Context, pid peer.ID, ret *struct{}) error {
	return h.cons.AddPeer(ctx, pid)
}

// Add other consensus RPC calls here

func NewRPCClient(host host.Host) *rpc.Client {
	protocolID := protocol.ID("/rpc/lotus-chain/v0")
	return rpc.NewClient(host, protocolID)
}

func NewRPCServer(ctx context.Context, host host.Host, rpcHandler *RPCHandler) error {
	authF := func(pid peer.ID, svc, method string) bool {
		return rpcHandler.cons.IsTrustedPeer(ctx, pid)
	}

	protocolID := protocol.ID("/rpc/lotus-chain/v0")
	rpcServer := rpc.NewServer(host, protocolID, rpc.WithAuthorizeFunc(authF))
	return rpcServer.RegisterName("Consensus", rpcHandler)
}
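For context, a minimal sketch of how a non-leader node invoked the service registered above: go-libp2p-gorpc dials the leader over the /rpc/lotus-chain/v0 protocol and calls the "Consensus" service's MpoolPushMessage method. The host h, the leaderID peer ID, and the message values are placeholders; this is illustrative and not part of the removed file.

	client := rpc.NewClient(h, protocol.ID("/rpc/lotus-chain/v0")) // h is the local libp2p host.Host
	var signed types.SignedMessage
	err := client.CallContext(ctx, leaderID, "Consensus", "MpoolPushMessage",
		&api.MpoolMessageWhole{Msg: msg, Spec: spec}, &signed)
	if err != nil {
		return xerrors.Errorf("forwarding message to raft leader: %w", err)
	}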