WIP: Raft consensus for lotus nodes in a cluster

Shrenuj Bansal 2022-09-08 14:20:05 -04:00
parent 2532300156
commit 8f1b1bb1ff
26 changed files with 2341 additions and 19 deletions

View File

@ -959,6 +959,11 @@ workflows:
suite: itest-paych_cli
target: "./itests/paych_cli_test.go"
- test:
name: test-itest-raft_messagesigner
suite: itest-raft_messagesigner
target: "./itests/raft_messagesigner_test.go"
- test:
name: test-itest-sdr_upgrade
suite: itest-sdr_upgrade

View File

@ -8,6 +8,7 @@ import (
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
consensus "github.com/libp2p/go-libp2p-consensus"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/filecoin-project/go-address"
@ -751,6 +752,9 @@ type FullNode interface {
// LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that
// the path specified when calling CreateBackup is within the base path
CreateBackup(ctx context.Context, fpath string) error //perm:admin
RaftState(ctx context.Context) (consensus.State, error) //perm:read
RaftLeader(ctx context.Context) (peer.ID, error) //perm:read
}
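The two additions to the FullNode interface are plain read-permission queries. A minimal client-side sketch of calling them follows; the `node` handle and the `printRaftInfo` helper are illustrative, only the method signatures come from this diff:

```go
package main

import (
    "context"
    "fmt"

    "github.com/filecoin-project/lotus/api"
)

// printRaftInfo is a minimal sketch: it queries the two new read-permission
// endpoints through an api.FullNode handle obtained elsewhere (for example
// through the usual client plumbing).
func printRaftInfo(ctx context.Context, node api.FullNode) error {
    leader, err := node.RaftLeader(ctx) // peer.ID of the current raft leader
    if err != nil {
        return err
    }
    state, err := node.RaftState(ctx) // opaque consensus.State snapshot
    if err != nil {
        return err
    }
    fmt.Printf("raft leader: %s, state: %+v\n", leader, state)
    return nil
}
```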
type StorageAsk struct {

View File

@ -14,6 +14,7 @@ import (
uuid "github.com/google/uuid"
blocks "github.com/ipfs/go-block-format"
cid "github.com/ipfs/go-cid"
consensus "github.com/libp2p/go-libp2p-consensus"
metrics "github.com/libp2p/go-libp2p/core/metrics"
network0 "github.com/libp2p/go-libp2p/core/network"
peer "github.com/libp2p/go-libp2p/core/peer"
@ -2243,6 +2244,36 @@ func (mr *MockFullNodeMockRecorder) PaychVoucherSubmit(arg0, arg1, arg2, arg3, a
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherSubmit", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherSubmit), arg0, arg1, arg2, arg3, arg4)
}
// RaftLeader mocks base method.
func (m *MockFullNode) RaftLeader(arg0 context.Context) (peer.ID, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RaftLeader", arg0)
ret0, _ := ret[0].(peer.ID)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// RaftLeader indicates an expected call of RaftLeader.
func (mr *MockFullNodeMockRecorder) RaftLeader(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RaftLeader", reflect.TypeOf((*MockFullNode)(nil).RaftLeader), arg0)
}
// RaftState mocks base method.
func (m *MockFullNode) RaftState(arg0 context.Context) (consensus.State, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RaftState", arg0)
ret0, _ := ret[0].(consensus.State)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// RaftState indicates an expected call of RaftState.
func (mr *MockFullNodeMockRecorder) RaftState(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RaftState", reflect.TypeOf((*MockFullNode)(nil).RaftState), arg0)
}
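The regenerated mock makes the new methods stubbable in unit tests. A sketch of the usual gomock flow; the test name, fake peer ID, and assertion are illustrative, and `mocks.NewMockFullNode` is the standard generated constructor:

```go
package example

import (
    "context"
    "testing"

    "github.com/golang/mock/gomock"
    "github.com/libp2p/go-libp2p/core/peer"
    "github.com/stretchr/testify/require"

    mocks "github.com/filecoin-project/lotus/api/mocks"
)

// TestRaftLeaderMock is an illustrative sketch of stubbing the new method.
func TestRaftLeaderMock(t *testing.T) {
    ctrl := gomock.NewController(t)
    defer ctrl.Finish()

    node := mocks.NewMockFullNode(ctrl)
    // Return a fixed (fake) leader id whenever RaftLeader is called.
    node.EXPECT().RaftLeader(gomock.Any()).Return(peer.ID("fake-leader"), nil)

    leader, err := node.RaftLeader(context.Background())
    require.NoError(t, err)
    require.Equal(t, peer.ID("fake-leader"), leader)
}
```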
// Session mocks base method.
func (m *MockFullNode) Session(arg0 context.Context) (uuid.UUID, error) {
m.ctrl.T.Helper()

View File

@ -10,6 +10,7 @@ import (
"github.com/google/uuid"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
consensus "github.com/libp2p/go-libp2p-consensus"
"github.com/libp2p/go-libp2p/core/metrics"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
@ -339,6 +340,10 @@ type FullNodeStruct struct {
PaychVoucherSubmit func(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (cid.Cid, error) `perm:"sign"`
RaftLeader func(p0 context.Context) (peer.ID, error) `perm:"read"`
RaftState func(p0 context.Context) (consensus.State, error) `perm:"read"`
StateAccountKey func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) `perm:"read"`
StateActorCodeCIDs func(p0 context.Context, p1 abinetwork.Version) (map[string]cid.Cid, error) `perm:"read"`
@ -2439,6 +2444,28 @@ func (s *FullNodeStub) PaychVoucherSubmit(p0 context.Context, p1 address.Address
return *new(cid.Cid), ErrNotSupported
}
func (s *FullNodeStruct) RaftLeader(p0 context.Context) (peer.ID, error) {
if s.Internal.RaftLeader == nil {
return *new(peer.ID), ErrNotSupported
}
return s.Internal.RaftLeader(p0)
}
func (s *FullNodeStub) RaftLeader(p0 context.Context) (peer.ID, error) {
return *new(peer.ID), ErrNotSupported
}
func (s *FullNodeStruct) RaftState(p0 context.Context) (consensus.State, error) {
if s.Internal.RaftState == nil {
return *new(consensus.State), ErrNotSupported
}
return s.Internal.RaftState(p0)
}
func (s *FullNodeStub) RaftState(p0 context.Context) (consensus.State, error) {
return *new(consensus.State), ErrNotSupported
}
func (s *FullNodeStruct) StateAccountKey(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) {
if s.Internal.StateAccountKey == nil {
return *new(address.Address), ErrNotSupported

View File

@ -58,6 +58,11 @@ type MessageSendSpec struct {
MsgUuid uuid.UUID
}
type MpoolMessageWhole struct {
Msg *types.Message
Spec *MessageSendSpec
}
// GraphSyncDataTransfer provides diagnostics on a data transfer happening over graphsync
type GraphSyncDataTransfer struct {
// GraphSync request id for this transfer

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -9,6 +9,8 @@ import (
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/namespace"
logging "github.com/ipfs/go-log/v2"
consensus "github.com/libp2p/go-libp2p-consensus"
"github.com/libp2p/go-libp2p/core/peer"
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
@ -29,6 +31,19 @@ type MpoolNonceAPI interface {
GetActor(context.Context, address.Address, types.TipSetKey) (*types.Actor, error)
}
type MsgSigner interface {
SignMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, cb func(*types.SignedMessage) error) (*types.SignedMessage, error)
GetSignedMessage(ctx context.Context, uuid uuid.UUID) (*types.SignedMessage, error)
StoreSignedMessage(ctx context.Context, uuid uuid.UUID, message *types.SignedMessage) error
NextNonce(ctx context.Context, addr address.Address) (uint64, error)
SaveNonce(ctx context.Context, addr address.Address, nonce uint64) error
DstoreKey(addr address.Address) datastore.Key
IsLeader(ctx context.Context) bool
RaftLeader(ctx context.Context) (peer.ID, error)
RedirectToLeader(ctx context.Context, method string, arg interface{}, ret interface{}) (bool, error)
GetRaftState(ctx context.Context) (consensus.State, error)
}
// MessageSigner keeps track of nonces per address, and increments the nonce
// when signing a message
type MessageSigner struct {
@ -38,6 +53,8 @@ type MessageSigner struct {
ds datastore.Batching
}
//var _ full.MsgSigner = &MessageSigner{}
func NewMessageSigner(wallet api.Wallet, mpool MpoolNonceAPI, ds dtypes.MetadataDS) *MessageSigner {
ds = namespace.Wrap(ds, datastore.NewKey("/message-signer/"))
return &MessageSigner{
@ -49,12 +66,12 @@ func NewMessageSigner(wallet api.Wallet, mpool MpoolNonceAPI, ds dtypes.Metadata
// SignMessage increments the nonce for the message From address, and signs
// the message
func (ms *MessageSigner) SignMessage(ctx context.Context, msg *types.Message, cb func(*types.SignedMessage) error) (*types.SignedMessage, error) {
func (ms *MessageSigner) SignMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, cb func(*types.SignedMessage) error) (*types.SignedMessage, error) {
ms.lk.Lock()
defer ms.lk.Unlock()
// Get the next message nonce
nonce, err := ms.nextNonce(ctx, msg.From)
nonce, err := ms.NextNonce(ctx, msg.From)
if err != nil {
return nil, xerrors.Errorf("failed to create nonce: %w", err)
}
@ -72,7 +89,7 @@ func (ms *MessageSigner) SignMessage(ctx context.Context, msg *types.Message, cb
Extra: mb.RawData(),
})
if err != nil {
return nil, xerrors.Errorf("failed to sign message: %w", err)
return nil, xerrors.Errorf("failed to sign message: %w, addr=%s", err, msg.From)
}
// Callback with the signed message
@ -86,7 +103,7 @@ func (ms *MessageSigner) SignMessage(ctx context.Context, msg *types.Message, cb
}
// If the callback executed successfully, write the nonce to the datastore
if err := ms.saveNonce(ctx, msg.From, nonce); err != nil {
if err := ms.SaveNonce(ctx, msg.From, nonce); err != nil {
return nil, xerrors.Errorf("failed to save nonce: %w", err)
}
@ -113,9 +130,9 @@ func (ms *MessageSigner) StoreSignedMessage(ctx context.Context, uuid uuid.UUID,
return ms.ds.Put(ctx, key, serializedMsg)
}
// nextNonce gets the next nonce for the given address.
// NextNonce gets the next nonce for the given address.
// If there is no nonce in the datastore, gets the nonce from the message pool.
func (ms *MessageSigner) nextNonce(ctx context.Context, addr address.Address) (uint64, error) {
func (ms *MessageSigner) NextNonce(ctx context.Context, addr address.Address) (uint64, error) {
// Nonces used to be created by the mempool and we need to support nodes
// that have mempool nonces, so first check the mempool for a nonce for
// this address. Note that the mempool returns the actor state's nonce
@ -126,7 +143,7 @@ func (ms *MessageSigner) nextNonce(ctx context.Context, addr address.Address) (u
}
// Get the next nonce for this address from the datastore
addrNonceKey := ms.dstoreKey(addr)
addrNonceKey := ms.DstoreKey(addr)
dsNonceBytes, err := ms.ds.Get(ctx, addrNonceKey)
switch {
@ -159,14 +176,14 @@ func (ms *MessageSigner) nextNonce(ctx context.Context, addr address.Address) (u
}
}
// saveNonce increments the nonce for this address and writes it to the
// SaveNonce increments the nonce for this address and writes it to the
// datastore
func (ms *MessageSigner) saveNonce(ctx context.Context, addr address.Address, nonce uint64) error {
func (ms *MessageSigner) SaveNonce(ctx context.Context, addr address.Address, nonce uint64) error {
// Increment the nonce
nonce++
// Write the nonce to the datastore
addrNonceKey := ms.dstoreKey(addr)
addrNonceKey := ms.DstoreKey(addr)
buf := bytes.Buffer{}
_, err := buf.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, nonce))
if err != nil {
@ -179,6 +196,22 @@ func (ms *MessageSigner) saveNonce(ctx context.Context, addr address.Address, no
return nil
}
func (ms *MessageSigner) dstoreKey(addr address.Address) datastore.Key {
func (ms *MessageSigner) DstoreKey(addr address.Address) datastore.Key {
return datastore.KeyWithNamespaces([]string{dsKeyActorNonce, addr.String()})
}
func (ms *MessageSigner) IsLeader(ctx context.Context) bool {
return true
}
func (ms *MessageSigner) RedirectToLeader(ctx context.Context, method string, arg interface{}, ret interface{}) (bool, error) {
return false, xerrors.Errorf("Single node shouldn't have any redirects")
}
func (ms *MessageSigner) GetRaftState(ctx context.Context) (consensus.State, error) {
return nil, xerrors.Errorf("This is a non raft consensus message signer")
}
func (ms *MessageSigner) RaftLeader(ctx context.Context) (peer.ID, error) {
return "", xerrors.Errorf("No leaders in non raft message signer")
}
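How IsLeader/RedirectToLeader are meant to be consumed is not part of this file, so the following is a hypothetical caller-side sketch: the plain MessageSigner always reports itself as leader (keeping the old single-node behaviour), while a raft-backed signer forwards followers to the leader. The `pushViaSigner` helper, the RPC method string, and the no-op callback are placeholders, not code from this commit:

```go
package example

import (
    "context"

    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/chain/messagesigner"
    "github.com/filecoin-project/lotus/chain/types"
)

// pushViaSigner sketches the intended split: sign locally when this node is
// the leader (or when running the non-raft MessageSigner), otherwise forward
// the request to the current raft leader.
func pushViaSigner(ctx context.Context, signer messagesigner.MsgSigner,
    msg *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) {

    if signer.IsLeader(ctx) {
        // No-op callback here; the real caller would add the message to the mpool.
        return signer.SignMessage(ctx, msg, spec, func(*types.SignedMessage) error { return nil })
    }

    // "MpoolPushMessage" is a placeholder method name for the leader RPC.
    var smsg types.SignedMessage
    if _, err := signer.RedirectToLeader(ctx, "MpoolPushMessage", msg, &smsg); err != nil {
        return nil, err
    }
    return &smsg, nil
}
```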

View File

@ -0,0 +1,99 @@
package messagesigner
import (
"context"
"github.com/google/uuid"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/namespace"
libp2pconsensus "github.com/libp2p/go-libp2p-consensus"
"github.com/libp2p/go-libp2p/core/peer"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
consensus "github.com/filecoin-project/lotus/lib/consensus/raft"
"github.com/filecoin-project/lotus/node/modules/dtypes"
)
type MessageSignerConsensus struct {
MsgSigner
consensus *consensus.Consensus
}
func NewMessageSignerConsensus(
wallet api.Wallet,
mpool MpoolNonceAPI,
ds dtypes.MetadataDS,
consensus *consensus.Consensus) *MessageSignerConsensus {
ds = namespace.Wrap(ds, datastore.NewKey("/message-signer-consensus/"))
return &MessageSignerConsensus{
MsgSigner: &MessageSigner{
wallet: wallet,
mpool: mpool,
ds: ds,
},
consensus: consensus,
}
}
func (ms *MessageSignerConsensus) IsLeader(ctx context.Context) bool {
return ms.consensus.IsLeader(ctx)
}
func (ms *MessageSignerConsensus) RedirectToLeader(ctx context.Context, method string, arg interface{}, ret interface{}) (bool, error) {
ok, err := ms.consensus.RedirectToLeader(method, arg, ret.(*types.SignedMessage))
if err != nil {
return ok, err
}
return ok, nil
}
func (ms *MessageSignerConsensus) SignMessage(
ctx context.Context,
msg *types.Message,
spec *api.MessageSendSpec,
cb func(*types.SignedMessage) error) (*types.SignedMessage, error) {
signedMsg, err := ms.MsgSigner.SignMessage(ctx, msg, spec, cb)
if err != nil {
return nil, err
}
// We can't have an empty/default uuid as part of the consensus state, so generate a fresh uuid when no spec is provided
u := uuid.New()
if spec != nil {
u = spec.MsgUuid
}
op := &consensus.ConsensusOp{signedMsg.Message.Nonce, u, signedMsg.Message.From, signedMsg}
err = ms.consensus.Commit(ctx, op)
if err != nil {
return nil, err
}
return signedMsg, nil
}
func (ms *MessageSignerConsensus) GetSignedMessage(ctx context.Context, uuid uuid.UUID) (*types.SignedMessage, error) {
state, err := ms.consensus.State(ctx)
if err != nil {
return nil, err
}
cstate := state.(consensus.RaftState)
msg, ok := cstate.MsgUuids[uuid]
if !ok {
return nil, xerrors.Errorf("Msg with Uuid %s not available", uuid)
}
return msg, nil
}
func (ms *MessageSignerConsensus) GetRaftState(ctx context.Context) (libp2pconsensus.State, error) {
return ms.consensus.State(ctx)
}
func (ms *MessageSignerConsensus) RaftLeader(ctx context.Context) (peer.ID, error) {
return ms.consensus.Leader(ctx)
}
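Once a message is committed through raft, any node in the cluster can recover it from the replicated state by the uuid passed in `api.MessageSendSpec.MsgUuid`. A short sketch; the `lookupSigned` helper is illustrative:

```go
package example

import (
    "context"
    "fmt"

    "github.com/google/uuid"

    "github.com/filecoin-project/lotus/chain/messagesigner"
)

// lookupSigned retrieves a message previously committed to the raft state by
// its send uuid. An error means the uuid is unknown or not yet replicated.
func lookupSigned(ctx context.Context, ms *messagesigner.MessageSignerConsensus, u uuid.UUID) {
    smsg, err := ms.GetSignedMessage(ctx, u)
    if err != nil {
        fmt.Println("message not (yet) in raft state:", err)
        return
    }
    fmt.Println("replicated message cid:", smsg.Cid())
}
```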

View File

@ -164,6 +164,9 @@
* [PaychVoucherCreate](#PaychVoucherCreate)
* [PaychVoucherList](#PaychVoucherList)
* [PaychVoucherSubmit](#PaychVoucherSubmit)
* [Raft](#Raft)
* [RaftLeader](#RaftLeader)
* [RaftState](#RaftState)
* [State](#State)
* [StateAccountKey](#StateAccountKey)
* [StateActorCodeCIDs](#StateActorCodeCIDs)
@ -5047,6 +5050,27 @@ Response:
}
```
## Raft
### RaftLeader
Perms: read
Inputs: `null`
Response: `"12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"`
### RaftState
Perms: read
Inputs: `null`
Response: `{}`
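As with other read-permission methods, these can also be reached through the Go JSON-RPC client rather than the in-process API. A sketch assuming the usual `client.NewFullNodeRPCV1` helper; the endpoint and auth token depend on the local node:

```go
package main

import (
    "context"
    "fmt"
    "log"
    "net/http"

    "github.com/filecoin-project/lotus/api/client"
)

func main() {
    ctx := context.Background()

    // Endpoint and auth header depend on the local node configuration.
    addr := "ws://127.0.0.1:1234/rpc/v1"
    headers := http.Header{} // e.g. headers.Add("Authorization", "Bearer <token>")

    node, closer, err := client.NewFullNodeRPCV1(ctx, addr, headers)
    if err != nil {
        log.Fatal(err)
    }
    defer closer()

    leader, err := node.RaftLeader(ctx)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("current raft leader:", leader)
}
```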
## State
The State methods are used to query, inspect, and interact with chain state.
Most methods take a TipSetKey as a parameter. The state looked up is the parent state of the tipset.

go.mod (9 changed lines)
View File

@ -68,6 +68,7 @@ require (
github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e
github.com/hashicorp/go-multierror v1.1.1
github.com/hashicorp/golang-lru v0.5.4
github.com/hashicorp/raft v1.1.1
github.com/icza/backscanner v0.0.0-20210726202459-ac2ffc679f94
github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab
github.com/ipfs/bbloom v0.0.4
@ -110,9 +111,12 @@ require (
github.com/koalacxr/quantile v0.0.1
github.com/libp2p/go-buffer-pool v0.1.0
github.com/libp2p/go-libp2p v0.22.0
github.com/libp2p/go-libp2p-consensus v0.0.1
github.com/libp2p/go-libp2p-gorpc v0.4.0
github.com/libp2p/go-libp2p-kad-dht v0.17.0
github.com/libp2p/go-libp2p-peerstore v0.7.1
github.com/libp2p/go-libp2p-pubsub v0.8.0
github.com/libp2p/go-libp2p-raft v0.1.8
github.com/libp2p/go-libp2p-record v0.1.3
github.com/libp2p/go-libp2p-routing-helpers v0.2.3
github.com/libp2p/go-maddr-filter v0.1.0
@ -165,6 +169,7 @@ require (
github.com/Stebalien/go-bitfield v0.0.1 // indirect
github.com/akavel/rsrc v0.8.0 // indirect
github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a // indirect
github.com/armon/go-metrics v0.3.9 // indirect
github.com/benbjohnson/clock v1.3.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bep/debounce v1.2.0 // indirect
@ -216,6 +221,9 @@ require (
github.com/google/gopacket v1.1.19 // indirect
github.com/hannahhoward/cbor-gen-for v0.0.0-20200817222906-ea96cece81f1 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-hclog v0.16.2 // indirect
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
github.com/hashicorp/go-msgpack v0.5.5 // indirect
github.com/huin/goupnp v1.0.3 // indirect
github.com/iancoleman/orderedmap v0.1.0 // indirect
github.com/ipfs/go-bitfield v1.0.0 // indirect
@ -306,6 +314,7 @@ require (
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/uber/jaeger-client-go v2.25.0+incompatible // indirect
github.com/ugorji/go/codec v1.2.6 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/valyala/fasttemplate v1.0.1 // indirect
github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 // indirect

go.sum (53 changed lines)
View File

@ -49,6 +49,8 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I=
github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM=
github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/GeertJohan/go.incremental v1.0.0 h1:7AH+pY1XUgQE4Y1HcXYaMqAI0m9yrFqo/jt0CW30vsg=
@ -99,6 +101,9 @@ github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg=
github.com/armon/go-metrics v0.3.9 h1:O2sNqxBdvq8Eq5xmzljcYzAORli6RWCvEym4cJf9m18=
github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
@ -120,6 +125,7 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
github.com/bep/debounce v1.2.0 h1:wXds8Kq8qRfwAOpAxHrJDbCXgC5aHSzgQb/0gKsHQqo=
github.com/bep/debounce v1.2.0/go.mod h1:H8yggRPQKLUhUoqrJC1bO2xNya7vanpDl7xR3ISbCJ0=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
github.com/briandowns/spinner v1.11.1/go.mod h1:QOuQk7x+EaDASo80FEXwlwiA+j/PPIcX3FScO+3/ZPQ=
github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8=
@ -164,6 +170,8 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
github.com/cilium/ebpf v0.4.0 h1:QlHdikaxALkqWasW8hAC1mfR0jdmvbfaBdBPFmRSglA=
github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
@ -377,6 +385,7 @@ github.com/filecoin-project/storetheindex v0.4.17/go.mod h1:y2dL8C5D3PXi183hdxgG
github.com/filecoin-project/test-vectors/schema v0.0.5 h1:w3zHQhzM4pYxJDl21avXjOKBLF8egrvwUwjpT8TquDg=
github.com/filecoin-project/test-vectors/schema v0.0.5/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ=
github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ=
github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
@ -590,17 +599,27 @@ github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyN
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs=
github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=
github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
@ -612,6 +631,9 @@ github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/raft v1.1.1 h1:HJr7UE1x/JrJSc9Oy6aDBHtNHUUBHjcQjTgvUVihoZs=
github.com/hashicorp/raft v1.1.1/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8=
github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hodgesds/perf-utils v0.0.8/go.mod h1:F6TfvsbtrF88i++hou29dTXlI2sfsJv+gRZDtmTJkAs=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
@ -977,7 +999,9 @@ github.com/libp2p/go-libp2p v0.6.1/go.mod h1:CTFnWXogryAHjXAKEbOf1OWY+VeAP3lDMZk
github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xSU1ivxn0k=
github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw=
github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o=
github.com/libp2p/go-libp2p v0.13.0/go.mod h1:pM0beYdACRfHO1WcJlp65WXyG2A6NqYM+t2DTVAJxMo=
github.com/libp2p/go-libp2p v0.14.3/go.mod h1:d12V4PdKbpL0T1/gsUNN8DfgMuRPDX8bS2QxCZlwRH0=
github.com/libp2p/go-libp2p v0.14.4/go.mod h1:EIRU0Of4J5S8rkockZM7eJp2S0UrCyi55m2kJVru3rM=
github.com/libp2p/go-libp2p v0.16.0/go.mod h1:ump42BsirwAWxKzsCiFnTtN1Yc+DuPu76fyMX364/O4=
github.com/libp2p/go-libp2p v0.17.0/go.mod h1:Fkin50rsGdv5mm5BshBUtPRZknt9esfmYXBOYcwOTgw=
github.com/libp2p/go-libp2p v0.22.0 h1:2Tce0kHOp5zASFKJbNzRElvh0iZwdtG5uZheNW8chIw=
@ -992,6 +1016,7 @@ github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/
github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQdNbfzE1C718tcViI=
github.com/libp2p/go-libp2p-autonat v0.2.1/go.mod h1:MWtAhV5Ko1l6QBsHQNSuM6b1sRkXrpk0/LqCr+vCVxI=
github.com/libp2p/go-libp2p-autonat v0.2.2/go.mod h1:HsM62HkqZmHR2k1xgX34WuWDzk/nBwNHoeyyT4IWV6A=
github.com/libp2p/go-libp2p-autonat v0.4.0/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk=
github.com/libp2p/go-libp2p-autonat v0.4.2/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk=
github.com/libp2p/go-libp2p-autonat v0.6.0/go.mod h1:bFC6kY8jwzNNWoqc8iGE57vsfwyJ/lP4O4DOV1e0B2o=
github.com/libp2p/go-libp2p-autonat v0.7.0/go.mod h1:uPvPn6J7cN+LCfFwW5tpOYvAz5NvPTc4iBamTV/WDMg=
@ -1009,6 +1034,8 @@ github.com/libp2p/go-libp2p-connmgr v0.2.4/go.mod h1:YV0b/RIm8NGPnnNWM7hG9Q38OeQ
github.com/libp2p/go-libp2p-connmgr v0.3.0/go.mod h1:RVoyPjJm0J9Vd1m6qUN2Tn7kJm4rL1Ml20pFsFgPGik=
github.com/libp2p/go-libp2p-connmgr v0.4.0 h1:q/KZUS1iMDIQckMZarMYwhQisJqiFPHAVC1c4DR3hDE=
github.com/libp2p/go-libp2p-connmgr v0.4.0/go.mod h1:exFQQm19PFAx+QuJmBPw4MM58QejzPJRFFFYnNmgi2w=
github.com/libp2p/go-libp2p-consensus v0.0.1 h1:jcVbHRZLwTXU9iT/mPi+Lx4/OrIzq3bU1TbZNhYFCV8=
github.com/libp2p/go-libp2p-consensus v0.0.1/go.mod h1:+9Wrfhc5QOqWB0gXI0m6ARlkHfdJpcFXmRU0WoHz4Mo=
github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco=
github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco=
github.com/libp2p/go-libp2p-core v0.0.3/go.mod h1:j+YQMNz9WNSkNezXOsahp9kwZBKBvxLpKD316QWSJXE=
@ -1051,6 +1078,9 @@ github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfx
github.com/libp2p/go-libp2p-discovery v0.3.0/go.mod h1:o03drFnz9BVAZdzC/QUQ+NeQOu38Fu7LJGEOK2gQltw=
github.com/libp2p/go-libp2p-discovery v0.5.0/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug=
github.com/libp2p/go-libp2p-discovery v0.6.0/go.mod h1:/u1voHt0tKIe5oIA1RHBKQLVCWPna2dXmPNHc2zR9S8=
github.com/libp2p/go-libp2p-gorpc v0.4.0 h1:kxHg5C3IuXeOq5FHPGbMHwQzKDlTVeB/NDr0ndc8J/g=
github.com/libp2p/go-libp2p-gorpc v0.4.0/go.mod h1:jux2Mb6BfUE1n58KbVCmWtqvpiZo0DDaKobKInf4s5o=
github.com/libp2p/go-libp2p-gostream v0.3.1/go.mod h1:1V3b+u4Zhaq407UUY9JLCpboaeufAeVQbnvAt12LRsI=
github.com/libp2p/go-libp2p-gostream v0.4.0 h1:heduMMEB78yBqeEQv+P7Fn5X926MHC2jDIC7/7yLpYA=
github.com/libp2p/go-libp2p-gostream v0.4.0/go.mod h1:21DVGBcCQwRfEXZpCnZ2kG24QiEkBpEQvG53gYXE4u0=
github.com/libp2p/go-libp2p-host v0.0.1/go.mod h1:qWd+H1yuU0m5CwzAkvbSjqKairayEHdR5MMl7Cwa7Go=
@ -1084,6 +1114,7 @@ github.com/libp2p/go-libp2p-netutil v0.0.1/go.mod h1:GdusFvujWZI9Vt0X5BKqwWWmZFx
github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU=
github.com/libp2p/go-libp2p-netutil v0.2.0 h1:DecSQ5nRnE5RfOmlNx+qubPL+rX8NSC6vyPRCiJXzg4=
github.com/libp2p/go-libp2p-netutil v0.2.0/go.mod h1:5ny0YEgWwWLxPwRJn5gGYr5bh9msiDI1CStSesvqYRM=
github.com/libp2p/go-libp2p-noise v0.1.1/go.mod h1:QDFLdKX7nluB7DEnlVPbz7xlLHdwHFA9HiohJRr3vwM=
github.com/libp2p/go-libp2p-noise v0.2.0/go.mod h1:IEbYhBBzGyvdLBoxxULL/SGbJARhUeqlO8lVSREYu2Q=
github.com/libp2p/go-libp2p-noise v0.3.0/go.mod h1:JNjHbociDJKHD64KTkzGnzqJ0FEV5gHJa6AB00kbCNQ=
github.com/libp2p/go-libp2p-noise v0.5.0 h1:gwJZ/3iH3MRnBrLIyr/YLdCOnmqfJMptlsFFUIc3j0Y=
@ -1114,6 +1145,8 @@ github.com/libp2p/go-libp2p-quic-transport v0.10.0/go.mod h1:RfJbZ8IqXIhxBRm5hqU
github.com/libp2p/go-libp2p-quic-transport v0.13.0/go.mod h1:39/ZWJ1TW/jx1iFkKzzUg00W6tDJh73FC0xYudjr7Hc=
github.com/libp2p/go-libp2p-quic-transport v0.15.0/go.mod h1:wv4uGwjcqe8Mhjj7N/Ic0aKjA+/10UnMlSzLO0yRpYQ=
github.com/libp2p/go-libp2p-quic-transport v0.15.2/go.mod h1:wv4uGwjcqe8Mhjj7N/Ic0aKjA+/10UnMlSzLO0yRpYQ=
github.com/libp2p/go-libp2p-raft v0.1.8 h1:Fq0aWHbbhi6WJXf+yaOQeMzV+9UgkbHIIGyaJbH3vpo=
github.com/libp2p/go-libp2p-raft v0.1.8/go.mod h1:+YDisn3uszb7vxshLgKoDdRGs79WSbHRgrOdrYqDPk4=
github.com/libp2p/go-libp2p-record v0.0.1/go.mod h1:grzqg263Rug/sRex85QrDOLntdFAymLDLm7lxMgU79Q=
github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q=
github.com/libp2p/go-libp2p-record v0.1.2/go.mod h1:pal0eNcT5nqZaTV7UGhqeGqxFgGdsU/9W//C8dqjQDk=
@ -1133,6 +1166,7 @@ github.com/libp2p/go-libp2p-swarm v0.2.2/go.mod h1:fvmtQ0T1nErXym1/aa1uJEyN7JzaT
github.com/libp2p/go-libp2p-swarm v0.2.3/go.mod h1:P2VO/EpxRyDxtChXz/VPVXyTnszHvokHKRhfkEgFKNM=
github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM=
github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk=
github.com/libp2p/go-libp2p-swarm v0.4.0/go.mod h1:XVFcO52VoLoo0eitSxNQWYq4D6sydGOweTOAjJNraCw=
github.com/libp2p/go-libp2p-swarm v0.5.0/go.mod h1:sU9i6BoHE0Ve5SKz3y9WfKrh8dUat6JknzUehFx8xW4=
github.com/libp2p/go-libp2p-swarm v0.8.0/go.mod h1:sOMp6dPuqco0r0GHTzfVheVBh6UEL0L1lXUZ5ot2Fvc=
github.com/libp2p/go-libp2p-swarm v0.9.0/go.mod h1:2f8d8uxTJmpeqHF/1ujjdXZp+98nNIbujVOMEZxCbZ8=
@ -1161,6 +1195,7 @@ github.com/libp2p/go-libp2p-transport-upgrader v0.0.4/go.mod h1:RGq+tupk+oj7PzL2
github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA=
github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns=
github.com/libp2p/go-libp2p-transport-upgrader v0.3.0/go.mod h1:i+SKzbRnvXdVbU3D1dwydnTmKRPXiAR/fyvi1dXuL4o=
github.com/libp2p/go-libp2p-transport-upgrader v0.4.0/go.mod h1:J4ko0ObtZSmgn5BX5AmegP+dK3CSnU2lMCKsSq/EY0s=
github.com/libp2p/go-libp2p-transport-upgrader v0.4.2/go.mod h1:NR8ne1VwfreD5VIWIU62Agt/J18ekORFU/j1i2y8zvk=
github.com/libp2p/go-libp2p-transport-upgrader v0.4.3/go.mod h1:bpkldbOWXMrXhpZbSV1mQxTrefOg2Fi+k1ClDSA4ppw=
github.com/libp2p/go-libp2p-transport-upgrader v0.5.0/go.mod h1:Rc+XODlB3yce7dvFV4q/RmyJGsFcCZRkeZMu/Zdg0mo=
@ -1175,6 +1210,7 @@ github.com/libp2p/go-libp2p-yamux v0.2.7/go.mod h1:X28ENrBMU/nm4I3Nx4sZ4dgjZ6VhL
github.com/libp2p/go-libp2p-yamux v0.2.8/go.mod h1:/t6tDqeuZf0INZMTgd0WxIRbtK2EzI2h7HbFm9eAKI4=
github.com/libp2p/go-libp2p-yamux v0.4.0/go.mod h1:+DWDjtFMzoAwYLVkNZftoucn7PelNoy5nm3tZ3/Zw30=
github.com/libp2p/go-libp2p-yamux v0.5.0/go.mod h1:AyR8k5EzyM2QN9Bbdg6X1SkVVuqLwTGf0L4DFq9g6po=
github.com/libp2p/go-libp2p-yamux v0.5.1/go.mod h1:dowuvDu8CRWmr0iqySMiSxK+W0iL5cMVO9S94Y6gkv4=
github.com/libp2p/go-libp2p-yamux v0.5.4/go.mod h1:tfrXbyaTqqSU654GTvK3ocnSZL3BuHoeTSqhcel1wsE=
github.com/libp2p/go-libp2p-yamux v0.6.0/go.mod h1:MRhd6mAYnFRnSISp4M8i0ClV/j+mWHo2mYLifWGw33k=
github.com/libp2p/go-libp2p-yamux v0.7.0/go.mod h1:fMyA0CsPfHkIuBU0wjRGrCjTBFiXTXxG0k5M4ETv+08=
@ -1236,7 +1272,9 @@ github.com/libp2p/go-tcp-transport v0.0.4/go.mod h1:+E8HvC8ezEVOxIo3V5vCK9l1y/19
github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc=
github.com/libp2p/go-tcp-transport v0.1.1/go.mod h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY=
github.com/libp2p/go-tcp-transport v0.2.0/go.mod h1:vX2U0CnWimU4h0SGSEsg++AzvBcroCGYw28kh94oLe0=
github.com/libp2p/go-tcp-transport v0.2.1/go.mod h1:zskiJ70MEfWz2MKxvFB/Pv+tPIB1PpPUrHIWQ8aFw7M=
github.com/libp2p/go-tcp-transport v0.2.3/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyPFvl1S/igQ5QD1SU=
github.com/libp2p/go-tcp-transport v0.2.4/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyPFvl1S/igQ5QD1SU=
github.com/libp2p/go-tcp-transport v0.4.0/go.mod h1:0y52Rwrn4076xdJYu/51/qJIdxz+EWDAOG2S45sV3VI=
github.com/libp2p/go-testutil v0.0.1/go.mod h1:iAcJc/DKJQanJ5ws2V+u5ywdL2n12X1WbbEG+Jjy69I=
github.com/libp2p/go-testutil v0.1.0/go.mod h1:81b2n5HypcVyrCg/MJx4Wgfp/VHojytjVe/gLzZ2Ehc=
@ -1255,6 +1293,7 @@ github.com/libp2p/go-yamux v1.3.5/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZ
github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE=
github.com/libp2p/go-yamux v1.4.0/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE=
github.com/libp2p/go-yamux v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE=
github.com/libp2p/go-yamux/v2 v2.0.0/go.mod h1:NVWira5+sVUIU6tu1JWvaRn1dRnG+cawOJiflsAM+7U=
github.com/libp2p/go-yamux/v2 v2.2.0/go.mod h1:3So6P6TV6r75R9jiBpiIKgU/66lOarCZjqROGxzPpPQ=
github.com/libp2p/go-yamux/v2 v2.3.0/go.mod h1:iTU+lOIn/2h0AgKcL49clNTwfEw+WSfDYrXe05EyKIs=
github.com/libp2p/go-yamux/v3 v3.1.2 h1:lNEy28MBk1HavUAlzKgShp+F6mn/ea1nDYWftZhFW9Q=
@ -1309,6 +1348,7 @@ github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
@ -1334,6 +1374,7 @@ github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00v
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA=
@ -1432,6 +1473,7 @@ github.com/multiformats/go-multistream v0.0.1/go.mod h1:fJTiDfXJVmItycydCnNx4+wS
github.com/multiformats/go-multistream v0.0.4/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg=
github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg=
github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38=
github.com/multiformats/go-multistream v0.2.0/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k=
github.com/multiformats/go-multistream v0.2.1/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k=
github.com/multiformats/go-multistream v0.2.2/go.mod h1:UIcnm7Zuo8HKG+HkWgfQsGL+/MIEhyTqbODbIUwSXKs=
github.com/multiformats/go-multistream v0.3.3 h1:d5PZpjwRgVlbwfdTDjife7XszfZd8KYWfROYFlGcR8o=
@ -1505,6 +1547,8 @@ github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnh
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
@ -1534,6 +1578,7 @@ github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU=
@ -1700,6 +1745,7 @@ github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0
github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/uber/jaeger-client-go v2.15.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
github.com/uber/jaeger-client-go v2.23.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
github.com/uber/jaeger-client-go v2.25.0+incompatible h1:IxcNZ7WRY1Y3G4poYlx24szfsn/3LvK9QHCq9oQw8+U=
@ -1707,7 +1753,12 @@ github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMW
github.com/uber/jaeger-lib v1.5.1-0.20181102163054-1fc5c315e03c/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/GfSYVCjK7dyaw=
github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
github.com/ugorji/go v1.1.13/go.mod h1:jxau1n+/wyTGLQoCkjok9r5zFa/FxT6eI5HiHKQszjc=
github.com/ugorji/go v1.2.6/go.mod h1:anCg0y61KIhDlPZmnH+so+RQbysYVyDko0IMgJv0Nn0=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/ugorji/go/codec v1.1.13/go.mod h1:oNVt3Dq+FO91WNQ/9JnHKQP2QJxTzoN7wCBFCq1OeuU=
github.com/ugorji/go/codec v1.2.6 h1:7kbGefxLoDBuYXOms4yD7223OpNMMPNPZxXk5TvFcyQ=
github.com/ugorji/go/codec v1.2.6/go.mod h1:V6TCNZ4PHqoHGFZuSG1W8nrCzzdgA2DozYxWFFpvxTw=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
@ -1898,6 +1949,7 @@ golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@ -2072,6 +2124,7 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190524122548-abf6ff778158/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190524152521-dbbf3f1254d4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190526052359-791d8a0f4d09/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

View File

@ -13,6 +13,7 @@ import (
"testing"
"time"
"github.com/google/uuid"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/namespace"
libp2pcrypto "github.com/libp2p/go-libp2p/core/crypto"
@ -176,6 +177,16 @@ func (n *Ensemble) Mocknet() mocknet.Mocknet {
return n.mn
}
func (n *Ensemble) NewPrivKey() (libp2pcrypto.PrivKey, peer.ID) {
privkey, _, err := libp2pcrypto.GenerateEd25519Key(rand.Reader)
require.NoError(n.t, err)
peerId, err := peer.IDFromPrivateKey(privkey)
require.NoError(n.t, err)
return privkey, peerId
}
// FullNode enrolls a new full node.
func (n *Ensemble) FullNode(full *TestFullNode, opts ...NodeOpt) *Ensemble {
options := DefaultNodeOpts
@ -201,13 +212,14 @@ func (n *Ensemble) FullNode(full *TestFullNode, opts ...NodeOpt) *Ensemble {
}
*full = TestFullNode{t: n.t, options: options, DefaultKey: key}
n.inactive.fullnodes = append(n.inactive.fullnodes, full)
return n
}
// Miner enrolls a new miner, using the provided full node for chain
// interactions.
func (n *Ensemble) Miner(minerNode *TestMiner, full *TestFullNode, opts ...NodeOpt) *Ensemble {
func (n *Ensemble) MinerEnroll(minerNode *TestMiner, full *TestFullNode, opts ...NodeOpt) *Ensemble {
require.NotNil(n.t, full, "full node required when instantiating miner")
options := DefaultNodeOpts
@ -292,11 +304,21 @@ func (n *Ensemble) Miner(minerNode *TestMiner, full *TestFullNode, opts ...NodeO
minerNode.Libp2p.PeerID = peerId
minerNode.Libp2p.PrivKey = privkey
n.inactive.miners = append(n.inactive.miners, minerNode)
//n.inactive.miners = append(n.inactive.miners, minerNode)
return n
}
func (n *Ensemble) AddInactiveMiner(m *TestMiner) {
n.inactive.miners = append(n.inactive.miners, m)
}
func (n *Ensemble) Miner(minerNode *TestMiner, full *TestFullNode, opts ...NodeOpt) *Ensemble {
n.MinerEnroll(minerNode, full, opts...)
n.AddInactiveMiner(minerNode)
return n
}
// Worker enrolls a new worker, using the provided full node for chain
// interactions.
func (n *Ensemble) Worker(minerNode *TestMiner, worker *TestWorker, opts ...NodeOpt) *Ensemble {
@ -359,6 +381,21 @@ func (n *Ensemble) Start() *Ensemble {
lr, err := r.Lock(repo.FullNode)
require.NoError(n.t, err)
ks, err := lr.KeyStore()
require.NoError(n.t, err)
if full.Pkey != nil {
pk, err := libp2pcrypto.MarshalPrivateKey(full.Pkey.PrivKey)
require.NoError(n.t, err)
err = ks.Put("libp2p-host", types.KeyInfo{
Type: "libp2p-host",
PrivateKey: pk,
})
require.NoError(n.t, err)
}
c, err := lr.Config()
require.NoError(n.t, err)
@ -417,6 +454,7 @@ func (n *Ensemble) Start() *Ensemble {
// Construct the full node.
stop, err := node.New(ctx, opts...)
full.Stop = stop
require.NoError(n.t, err)
@ -478,7 +516,9 @@ func (n *Ensemble) Start() *Ensemble {
Method: power.Methods.CreateMiner,
Params: params,
}
signed, err := m.FullNode.FullNode.MpoolPushMessage(ctx, createStorageMinerMsg, nil)
signed, err := m.FullNode.FullNode.MpoolPushMessage(ctx, createStorageMinerMsg, &api.MessageSendSpec{
MsgUuid: uuid.New(),
})
require.NoError(n.t, err)
mw, err := m.FullNode.FullNode.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
@ -502,7 +542,9 @@ func (n *Ensemble) Start() *Ensemble {
Value: types.NewInt(0),
}
signed, err2 := m.FullNode.FullNode.MpoolPushMessage(ctx, msg, nil)
signed, err2 := m.FullNode.FullNode.MpoolPushMessage(ctx, msg, &api.MessageSendSpec{
MsgUuid: uuid.New(),
})
require.NoError(n.t, err2)
mw, err2 := m.FullNode.FullNode.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
@ -612,7 +654,9 @@ func (n *Ensemble) Start() *Ensemble {
Value: types.NewInt(0),
}
_, err2 := m.FullNode.MpoolPushMessage(ctx, msg, nil)
_, err2 := m.FullNode.MpoolPushMessage(ctx, msg, &api.MessageSendSpec{
MsgUuid: uuid.New(),
})
require.NoError(n.t, err2)
}
@ -814,9 +858,9 @@ func (n *Ensemble) Start() *Ensemble {
wait.Unlock()
})
wait.Lock()
n.bootstrapped = true
}
n.bootstrapped = true
return n
}
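Taken together, the new helpers split enrollment from activation: `MinerEnroll` registers a miner without queueing it, `AddInactiveMiner` queues it later, and `Miner` remains the one-shot wrapper. A condensed, illustrative sketch of that flow, mirroring the raft itest further down; the test name and block time are arbitrary:

```go
package itests

import (
    "testing"
    "time"

    "github.com/filecoin-project/lotus/itests/kit"
)

// TestTwoPhaseMinerEnrollment is an illustrative sketch of the two-phase
// enrollment added in this commit: enroll the miner first, start the full
// node, then queue and start the miner separately.
func TestTwoPhaseMinerEnrollment(t *testing.T) {
    var (
        full  kit.TestFullNode
        miner kit.TestMiner
    )

    ens := kit.NewEnsemble(t).FullNode(&full)

    // Give the node a pre-generated libp2p identity so that, for example, a
    // raft peerset can be computed before the ensemble is started.
    privkey, peerID := ens.NewPrivKey()
    full.AssignPrivKey(&kit.Libp2p{PeerID: peerID, PrivKey: privkey})

    ens.MinerEnroll(&miner, &full, kit.WithAllSubsystems()) // enroll only
    ens.Start()                                             // bring up the full node
    ens.AddInactiveMiner(&miner)                            // now queue the miner
    ens.Start()                                             // second Start activates it
    ens.InterconnectAll().BeginMining(5 * time.Millisecond)
}
```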

View File

@ -6,6 +6,8 @@ import (
"testing"
"time"
libp2pcrypto "github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/multiformats/go-multiaddr"
"github.com/stretchr/testify/require"
@ -16,8 +18,14 @@ import (
"github.com/filecoin-project/lotus/api/v1api"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/wallet/key"
"github.com/filecoin-project/lotus/node"
)
type Libp2p struct {
PeerID peer.ID
PrivKey libp2pcrypto.PrivKey
}
// TestFullNode represents a full node enrolled in an Ensemble.
type TestFullNode struct {
v1api.FullNode
@ -29,9 +37,21 @@ type TestFullNode struct {
ListenAddr multiaddr.Multiaddr
DefaultKey *key.Key
//Libp2p struct {
// PeerID peer.ID
// PrivKey libp2pcrypto.PrivKey
//}
Pkey *Libp2p
Stop node.StopFunc
options nodeOpts
}
func (f TestFullNode) Shutdown(ctx context.Context) error {
return f.Stop(ctx)
}
func (f *TestFullNode) ClientImportCARFile(ctx context.Context, rseed int, size int) (res *api.ImportRes, carv1FilePath string, origFilePath string) {
carv1FilePath, origFilePath = CreateRandomCARv1(f.t, rseed, size)
res, err := f.ClientImport(ctx, api.FileRef{Path: carv1FilePath, IsCAR: true})
@ -86,6 +106,10 @@ func (f *TestFullNode) WaitForSectorActive(ctx context.Context, t *testing.T, sn
}
}
func (t *TestFullNode) AssignPrivKey(pkey *Libp2p) {
t.Pkey = pkey
}
// ChainPredicate encapsulates a chain condition.
type ChainPredicate func(set *types.TipSet) bool

View File

@ -0,0 +1,236 @@
package itests
import (
"context"
"crypto/rand"
"fmt"
"reflect"
"testing"
"time"
"github.com/google/uuid"
gorpc "github.com/libp2p/go-libp2p-gorpc"
libp2pcrypto "github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/messagesigner"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/itests/kit"
consensus "github.com/filecoin-project/lotus/lib/consensus/raft"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/modules"
)
func generatePrivKey() (*kit.Libp2p, error) {
privkey, _, err := libp2pcrypto.GenerateEd25519Key(rand.Reader)
if err != nil {
return nil, err
}
peerId, err := peer.IDFromPrivateKey(privkey)
if err != nil {
return nil, err
}
return &kit.Libp2p{peerId, privkey}, nil
}
func getRaftState(t *testing.T, ctx context.Context, node *kit.TestFullNode) consensus.RaftState {
raftState, err := node.RaftState(ctx)
require.NoError(t, err)
rstate := raftState.(consensus.RaftState)
return rstate
}
func setup(t *testing.T, ctx context.Context, node0 *kit.TestFullNode, node1 *kit.TestFullNode, node2 *kit.TestFullNode, miner *kit.TestMiner) *kit.Ensemble {
blockTime := 5 * time.Millisecond
pkey0, _ := generatePrivKey()
pkey1, _ := generatePrivKey()
pkey2, _ := generatePrivKey()
initPeerSet := []peer.ID{pkey0.PeerID, pkey1.PeerID, pkey2.PeerID}
raftOps := kit.ConstructorOpts(
node.Override(new(*gorpc.Client), modules.NewRPCClient),
node.Override(new(*consensus.Config), func() *consensus.Config {
cfg := consensus.NewDefaultConfig()
cfg.InitPeerset = initPeerSet
return cfg
}),
node.Override(new(*consensus.Consensus), consensus.NewConsensusWithRPCClient(false)),
node.Override(new(*messagesigner.MessageSignerConsensus), messagesigner.NewMessageSignerConsensus),
node.Override(new(messagesigner.MsgSigner), func(ms *messagesigner.MessageSignerConsensus) *messagesigner.MessageSignerConsensus { return ms }),
node.Override(new(*modules.RPCHandler), modules.NewRPCHandler),
node.Override(node.RPCServer, modules.NewRPCServer),
)
//raftOps := kit.ConstructorOpts()
ens := kit.NewEnsemble(t).FullNode(node0, raftOps).FullNode(node1, raftOps).FullNode(node2, raftOps)
node0.AssignPrivKey(pkey0)
node1.AssignPrivKey(pkey1)
node2.AssignPrivKey(pkey2)
ens.MinerEnroll(miner, node0, kit.WithAllSubsystems())
ens.Start()
// Import miner wallet to all nodes
addr0, err := node0.WalletImport(ctx, &miner.OwnerKey.KeyInfo)
require.NoError(t, err)
addr1, err := node1.WalletImport(ctx, &miner.OwnerKey.KeyInfo)
require.NoError(t, err)
addr2, err := node2.WalletImport(ctx, &miner.OwnerKey.KeyInfo)
require.NoError(t, err)
fmt.Println(addr0, addr1, addr2)
ens.InterconnectAll()
ens.AddInactiveMiner(miner)
ens.Start()
ens.InterconnectAll().BeginMining(blockTime)
return ens
}
func TestRaftState(t *testing.T) {
kit.QuietMiningLogs()
ctx := context.Background()
var (
node0 kit.TestFullNode
node1 kit.TestFullNode
node2 kit.TestFullNode
miner kit.TestMiner
)
setup(t, ctx, &node0, &node1, &node2, &miner)
fmt.Println(node0.WalletList(context.Background()))
fmt.Println(node1.WalletList(context.Background()))
fmt.Println(node2.WalletList(context.Background()))
bal, err := node0.WalletBalance(ctx, node0.DefaultKey.Address)
require.NoError(t, err)
msgHalfBal := &types.Message{
From: miner.OwnerKey.Address,
To: node0.DefaultKey.Address,
Value: big.Div(bal, big.NewInt(2)),
}
mu := uuid.New()
smHalfBal, err := node0.MpoolPushMessage(ctx, msgHalfBal, &api.MessageSendSpec{
MsgUuid: mu,
})
require.NoError(t, err)
mLookup, err := node0.StateWaitMsg(ctx, smHalfBal.Cid(), 3, api.LookbackNoLimit, true)
require.NoError(t, err)
require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode)
rstate0 := getRaftState(t, ctx, &node0)
rstate1 := getRaftState(t, ctx, &node1)
rstate2 := getRaftState(t, ctx, &node2)
require.True(t, reflect.DeepEqual(rstate0, rstate1))
require.True(t, reflect.DeepEqual(rstate0, rstate2))
}
func TestRaftStateLeaderDisconnects(t *testing.T) {
kit.QuietMiningLogs()
ctx := context.Background()
var (
node0 kit.TestFullNode
node1 kit.TestFullNode
node2 kit.TestFullNode
miner kit.TestMiner
)
nodes := []*kit.TestFullNode{&node0, &node1, &node2}
setup(t, ctx, &node0, &node1, &node2, &miner)
peerToNode := make(map[peer.ID]*kit.TestFullNode)
for _, n := range nodes {
peerToNode[n.Pkey.PeerID] = n
}
bal, err := node0.WalletBalance(ctx, node0.DefaultKey.Address)
require.NoError(t, err)
msgHalfBal := &types.Message{
From: miner.OwnerKey.Address,
To: node0.DefaultKey.Address,
Value: big.Div(bal, big.NewInt(2)),
}
mu := uuid.New()
smHalfBal, err := node0.MpoolPushMessage(ctx, msgHalfBal, &api.MessageSendSpec{
MsgUuid: mu,
})
require.NoError(t, err)
mLookup, err := node0.StateWaitMsg(ctx, smHalfBal.Cid(), 3, api.LookbackNoLimit, true)
require.NoError(t, err)
require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode)
rstate0 := getRaftState(t, ctx, &node0)
rstate1 := getRaftState(t, ctx, &node1)
rstate2 := getRaftState(t, ctx, &node2)
require.True(t, reflect.DeepEqual(rstate0, rstate1))
require.True(t, reflect.DeepEqual(rstate0, rstate2))
leader, err := node1.RaftLeader(ctx)
require.NoError(t, err)
leaderNode := peerToNode[leader]
leaderNode.Stop(ctx)
oldLeaderNode := leaderNode
time.Sleep(5 * time.Second)
newLeader := leader
for _, n := range nodes {
if n != leaderNode {
newLeader, err = n.RaftLeader(ctx)
require.NoError(t, err)
require.NotEqual(t, newLeader, leader)
}
}
require.NotEqual(t, newLeader, leader)
leaderNode = peerToNode[newLeader]
msg2 := &types.Message{
From: miner.OwnerKey.Address,
To: leaderNode.DefaultKey.Address,
Value: big.NewInt(100000),
}
mu2 := uuid.New()
signedMsg2, err := leaderNode.MpoolPushMessage(ctx, msg2, &api.MessageSendSpec{
MsgUuid: mu2,
})
require.NoError(t, err)
mLookup, err = leaderNode.StateWaitMsg(ctx, signedMsg2.Cid(), 3, api.LookbackNoLimit, true)
require.NoError(t, err)
require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode)
rstate := getRaftState(t, ctx, leaderNode)
for _, n := range nodes {
if n != oldLeaderNode {
rs := getRaftState(t, ctx, n)
require.True(t, reflect.DeepEqual(rs, rstate))
}
}
}

View File

@ -0,0 +1,319 @@
package consensus
import (
"io/ioutil"
"time"
hraft "github.com/hashicorp/raft"
"github.com/libp2p/go-libp2p/core/peer"
)
// ConfigKey is the default configuration key for holding this component's
// configuration section.
var configKey = "raft"
var envConfigKey = "cluster_raft"
// Configuration defaults
var (
DefaultDataSubFolder = "raft"
DefaultWaitForLeaderTimeout = 15 * time.Second
DefaultCommitRetries = 1
DefaultNetworkTimeout = 100 * time.Second
DefaultCommitRetryDelay = 200 * time.Millisecond
DefaultBackupsRotate = 6
DefaultDatastoreNamespace = "/r" // from "/raft"
)
// Config allows configuring the Raft Consensus component for ipfs-cluster.
// The component's configuration section is represented by ConfigJSON.
// Config implements the ComponentConfig interface.
type Config struct {
//config.Saver
//
//// will shutdown libp2p host on shutdown. Useful for testing
hostShutdown bool
// A folder to store Raft's data.
DataFolder string
// InitPeerset provides the list of initial cluster peers for new Raft
// peers (with no prior state). It is ignored when Raft was already
// initialized or when starting in staging mode.
InitPeerset []peer.ID
// WaitForLeaderTimeout specifies how long to wait for a leader before
// failing an operation.
WaitForLeaderTimeout time.Duration
// NetworkTimeout specifies how long before a Raft network
// operation is timed out
NetworkTimeout time.Duration
// CommitRetries specifies how many times we retry a failed commit until
// we give up.
CommitRetries int
// How long to wait between retries
CommitRetryDelay time.Duration
// BackupsRotate specifies the maximum number of Raft's DataFolder
// copies that we keep as backups (renaming) after cleanup.
BackupsRotate int
// Namespace to use when writing keys to the datastore
DatastoreNamespace string
// A Hashicorp Raft's configuration object.
RaftConfig *hraft.Config
// Tracing enables propagation of contexts across binary boundaries.
Tracing bool
}
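The raft itest above calls `consensus.NewDefaultConfig()`, whose body is not part of this hunk. A plausible sketch only, assuming it simply applies the defaults declared at the top of this file on top of hashicorp/raft's `DefaultConfig()`:

```go
package consensus

import (
    hraft "github.com/hashicorp/raft"
    "github.com/libp2p/go-libp2p/core/peer"
)

// NewDefaultConfig (plausible sketch, not shown in this hunk): a Config
// populated with the package defaults and hashicorp/raft's own defaults.
func NewDefaultConfig() *Config {
    return &Config{
        InitPeerset:          []peer.ID{},
        WaitForLeaderTimeout: DefaultWaitForLeaderTimeout,
        NetworkTimeout:       DefaultNetworkTimeout,
        CommitRetries:        DefaultCommitRetries,
        CommitRetryDelay:     DefaultCommitRetryDelay,
        BackupsRotate:        DefaultBackupsRotate,
        DatastoreNamespace:   DefaultDatastoreNamespace,
        RaftConfig:           hraft.DefaultConfig(),
    }
}
```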
// ConfigJSON represents a human-friendly Config
// object which can be saved to JSON. Most configuration keys are converted
// into simple types like strings, and key names aim to be self-explanatory
// for the user.
// Check https://godoc.org/github.com/hashicorp/raft#Config for an extended
// description of all Raft-specific keys.
//type jsonConfig struct {
// // Storage folder for snapshots, log store etc. Used by
// // the Raft.
// DataFolder string `json:"data_folder,omitempty"`
//
// // InitPeerset provides the list of initial cluster peers for new Raft
// // peers (with no prior state). It is ignored when Raft was already
// // initialized or when starting in staging mode.
// InitPeerset []string `json:"init_peerset"`
//
// // How long to wait for a leader before failing
// WaitForLeaderTimeout string `json:"wait_for_leader_timeout"`
//
// // How long to wait before timing out network operations
// NetworkTimeout string `json:"network_timeout"`
//
// // How many retries to make upon a failed commit
// CommitRetries int `json:"commit_retries"`
//
// // How long to wait between commit retries
// CommitRetryDelay string `json:"commit_retry_delay"`
//
// // BackupsRotate specifies the maximum number of Raft's DataFolder
// // copies that we keep as backups (renaming) after cleanup.
// BackupsRotate int `json:"backups_rotate"`
//
// DatastoreNamespace string `json:"datastore_namespace,omitempty"`
//
// // HeartbeatTimeout specifies the time in follower state without
// // a leader before we attempt an election.
// HeartbeatTimeout string `json:"heartbeat_timeout,omitempty"`
//
// // ElectionTimeout specifies the time in candidate state without
// // a leader before we attempt an election.
// ElectionTimeout string `json:"election_timeout,omitempty"`
//
// // CommitTimeout controls the time without an Apply() operation
// // before we heartbeat to ensure a timely commit.
// CommitTimeout string `json:"commit_timeout,omitempty"`
//
// // MaxAppendEntries controls the maximum number of append entries
// // to send at once.
// MaxAppendEntries int `json:"max_append_entries,omitempty"`
//
// // TrailingLogs controls how many logs we leave after a snapshot.
// TrailingLogs uint64 `json:"trailing_logs,omitempty"`
//
// // SnapshotInterval controls how often we check if we should perform
// // a snapshot.
// SnapshotInterval string `json:"snapshot_interval,omitempty"`
//
// // SnapshotThreshold controls how many outstanding logs there must be
// // before we perform a snapshot.
// SnapshotThreshold uint64 `json:"snapshot_threshold,omitempty"`
//
// // LeaderLeaseTimeout is used to control how long the "lease" lasts
// // for being the leader without being able to contact a quorum
// // of nodes. If we reach this interval without contact, we will
// // step down as leader.
// LeaderLeaseTimeout string `json:"leader_lease_timeout,omitempty"`
//
// // The unique ID for this server across all time. When running with
// // ProtocolVersion < 3, you must set this to be the same as the network
// // address of your transport.
// // LocalID string `json:local_id`
//}
// ConfigKey returns a human-friendly identifier for this Config.
func (cfg *Config) ConfigKey() string {
return configKey
}
//// Validate checks that this configuration has working values,
//// at least in appearance.
//func (cfg *Config) Validate() error {
// if cfg.RaftConfig == nil {
// return errors.New("no hashicorp/raft.Config")
// }
// if cfg.WaitForLeaderTimeout <= 0 {
// return errors.New("wait_for_leader_timeout <= 0")
// }
//
// if cfg.NetworkTimeout <= 0 {
// return errors.New("network_timeout <= 0")
// }
//
// if cfg.CommitRetries < 0 {
// return errors.New("commit_retries is invalid")
// }
//
// if cfg.CommitRetryDelay <= 0 {
// return errors.New("commit_retry_delay is invalid")
// }
//
// if cfg.BackupsRotate <= 0 {
// return errors.New("backups_rotate should be larger than 0")
// }
//
// return hraft.ValidateConfig(cfg.RaftConfig)
//}
// LoadJSON parses a json-encoded configuration (see jsonConfig).
// The Config will have default values for all fields not explicitly set
// in the given json object.
//func (cfg *Config) LoadJSON(raw []byte) error {
// jcfg := &jsonConfig{}
// err := json.Unmarshal(raw, jcfg)
// if err != nil {
// logger.Error("Error unmarshaling raft config")
// return err
// }
//
// cfg.Default()
//
// return cfg.applyJSONConfig(jcfg)
//}
//
//func (cfg *Config) applyJSONConfig(jcfg *jsonConfig) error {
// parseDuration := func(txt string) time.Duration {
// d, _ := time.ParseDuration(txt)
// if txt != "" && d == 0 {
// logger.Warnf("%s is not a valid duration. Default will be used", txt)
// }
// return d
// }
//
// // Parse durations. We ignore errors as 0 will take Default values.
// waitForLeaderTimeout := parseDuration(jcfg.WaitForLeaderTimeout)
// networkTimeout := parseDuration(jcfg.NetworkTimeout)
// commitRetryDelay := parseDuration(jcfg.CommitRetryDelay)
// heartbeatTimeout := parseDuration(jcfg.HeartbeatTimeout)
// electionTimeout := parseDuration(jcfg.ElectionTimeout)
// commitTimeout := parseDuration(jcfg.CommitTimeout)
// snapshotInterval := parseDuration(jcfg.SnapshotInterval)
// leaderLeaseTimeout := parseDuration(jcfg.LeaderLeaseTimeout)
//
// // Set all values in config. For some, take defaults if they are 0.
// // Set values from jcfg if they are not 0 values
//
// // Own values
// config.SetIfNotDefault(jcfg.DataFolder, &cfg.DataFolder)
// config.SetIfNotDefault(waitForLeaderTimeout, &cfg.WaitForLeaderTimeout)
// config.SetIfNotDefault(networkTimeout, &cfg.NetworkTimeout)
// cfg.CommitRetries = jcfg.CommitRetries
// config.SetIfNotDefault(commitRetryDelay, &cfg.CommitRetryDelay)
// config.SetIfNotDefault(jcfg.BackupsRotate, &cfg.BackupsRotate)
//
// // Raft values
// config.SetIfNotDefault(heartbeatTimeout, &cfg.RaftConfig.HeartbeatTimeout)
// config.SetIfNotDefault(electionTimeout, &cfg.RaftConfig.ElectionTimeout)
// config.SetIfNotDefault(commitTimeout, &cfg.RaftConfig.CommitTimeout)
// config.SetIfNotDefault(jcfg.MaxAppendEntries, &cfg.RaftConfig.MaxAppendEntries)
// config.SetIfNotDefault(jcfg.TrailingLogs, &cfg.RaftConfig.TrailingLogs)
// config.SetIfNotDefault(snapshotInterval, &cfg.RaftConfig.SnapshotInterval)
// config.SetIfNotDefault(jcfg.SnapshotThreshold, &cfg.RaftConfig.SnapshotThreshold)
// config.SetIfNotDefault(leaderLeaseTimeout, &cfg.RaftConfig.LeaderLeaseTimeout)
//
// cfg.InitPeerset = api.StringsToPeers(jcfg.InitPeerset)
// return cfg.Validate()
//}
//
//// ToJSON returns the pretty JSON representation of a Config.
//func (cfg *Config) ToJSON() ([]byte, error) {
// jcfg := cfg.toJSONConfig()
//
// return config.DefaultJSONMarshal(jcfg)
//}
//
//func (cfg *Config) toJSONConfig() *jsonConfig {
// jcfg := &jsonConfig{
// DataFolder: cfg.DataFolder,
// InitPeerset: api.PeersToStrings(cfg.InitPeerset),
// WaitForLeaderTimeout: cfg.WaitForLeaderTimeout.String(),
// NetworkTimeout: cfg.NetworkTimeout.String(),
// CommitRetries: cfg.CommitRetries,
// CommitRetryDelay: cfg.CommitRetryDelay.String(),
// BackupsRotate: cfg.BackupsRotate,
// HeartbeatTimeout: cfg.RaftConfig.HeartbeatTimeout.String(),
// ElectionTimeout: cfg.RaftConfig.ElectionTimeout.String(),
// CommitTimeout: cfg.RaftConfig.CommitTimeout.String(),
// MaxAppendEntries: cfg.RaftConfig.MaxAppendEntries,
// TrailingLogs: cfg.RaftConfig.TrailingLogs,
// SnapshotInterval: cfg.RaftConfig.SnapshotInterval.String(),
// SnapshotThreshold: cfg.RaftConfig.SnapshotThreshold,
// LeaderLeaseTimeout: cfg.RaftConfig.LeaderLeaseTimeout.String(),
// }
// if cfg.DatastoreNamespace != DefaultDatastoreNamespace {
// jcfg.DatastoreNamespace = cfg.DatastoreNamespace
// // otherwise leave empty so it gets omitted.
// }
// return jcfg
//}
//
// Default initializes this configuration with working defaults.
func (cfg *Config) Default() error {
cfg.DataFolder = "" // empty so it gets omitted
cfg.InitPeerset = []peer.ID{}
cfg.WaitForLeaderTimeout = DefaultWaitForLeaderTimeout
cfg.NetworkTimeout = DefaultNetworkTimeout
cfg.CommitRetries = DefaultCommitRetries
cfg.CommitRetryDelay = DefaultCommitRetryDelay
cfg.BackupsRotate = DefaultBackupsRotate
cfg.DatastoreNamespace = DefaultDatastoreNamespace
cfg.RaftConfig = hraft.DefaultConfig()
// These options are imposed over any Default Raft Config.
cfg.RaftConfig.ShutdownOnRemove = false
cfg.RaftConfig.LocalID = "will_be_set_automatically"
// Set up logging: discard hashicorp/raft's own log output for now.
cfg.RaftConfig.LogOutput = ioutil.Discard
//cfg.RaftConfig.Logger = &hcLogToLogger{}
return nil
}
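// NewDefaultConfig returns a Config populated by Default().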
func NewDefaultConfig() *Config {
var cfg Config
cfg.Default()
return &cfg
}
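// Illustrative usage sketch (not wired anywhere): start from the defaults and
// override a couple of fields. The concrete values are arbitrary examples,
// not recommendations.
func exampleConfig() *Config {
	cfg := NewDefaultConfig()
	cfg.WaitForLeaderTimeout = 30 * time.Second // tolerate slower leader elections
	cfg.CommitRetries = 2                       // retry failed commits twice
	return cfg
}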
//
//// ApplyEnvVars fills in any Config fields found
//// as environment variables.
//func (cfg *Config) ApplyEnvVars() error {
// jcfg := cfg.toJSONConfig()
//
// err := envconfig.Process(envConfigKey, jcfg)
// if err != nil {
// return err
// }
//
// return cfg.applyJSONConfig(jcfg)
//}
//
//// GetDataFolder returns the Raft data folder that we are using.
//func (cfg *Config) GetDataFolder() string {
// if cfg.DataFolder == "" {
// return filepath.Join(cfg.BaseDir, DefaultDataSubFolder)
// }
// return cfg.DataFolder
//}
//
//// ToDisplayJSON returns JSON config as a string.
//func (cfg *Config) ToDisplayJSON() ([]byte, error) {
// return config.DisplayJSON(cfg.toJSONConfig())
//}

View File

@ -0,0 +1,562 @@
// Package consensus implements a Raft-based consensus component for
// lotus nodes in a cluster, adapted from IPFS Cluster (go-libp2p-raft).
package consensus
import (
"context"
"errors"
"fmt"
"sort"
"time"
"github.com/google/uuid"
addr "github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/chain/types"
//ds "github.com/ipfs/go-datastore"
logging "github.com/ipfs/go-log/v2"
consensus "github.com/libp2p/go-libp2p-consensus"
rpc "github.com/libp2p/go-libp2p-gorpc"
libp2praft "github.com/libp2p/go-libp2p-raft"
host "github.com/libp2p/go-libp2p/core/host"
peer "github.com/libp2p/go-libp2p/core/peer"
)
var logger = logging.Logger("raft")
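// RaftState is the shared state replicated through Raft: the latest nonce
// assigned per address and the signed messages indexed by UUID.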
type RaftState struct {
NonceMap map[addr.Address]uint64
MsgUuids map[uuid.UUID]*types.SignedMessage
}
func newRaftState() RaftState {
return RaftState{NonceMap: make(map[addr.Address]uint64),
MsgUuids: make(map[uuid.UUID]*types.SignedMessage)}
}
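// ConsensusOp is a single operation in the replicated log. Applying it
// records the nonce for an address and stores the signed message under its UUID.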
type ConsensusOp struct {
Nonce uint64 `codec:"nonce,omitempty"`
Uuid uuid.UUID `codec:"uuid,omitempty"`
Addr addr.Address `codec:"addr,omitempty"`
SignedMsg *types.SignedMessage `codec:"signedMsg,omitempty"`
}
func (c ConsensusOp) ApplyTo(state consensus.State) (consensus.State, error) {
s := state.(RaftState)
s.NonceMap[c.Addr] = c.Nonce
s.MsgUuids[c.Uuid] = c.SignedMsg
return s, nil
}
var _ consensus.Op = &ConsensusOp{}
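// Illustrative sketch of how an operation flows through the log: the leader
// commits a ConsensusOp and every replica's FSM applies it to its RaftState.
// The values below are placeholders, not a real message.
func applyExample(st RaftState) (RaftState, error) {
	op := ConsensusOp{
		Nonce:     42,
		Uuid:      uuid.New(),
		Addr:      addr.Undef,             // placeholder address
		SignedMsg: &types.SignedMessage{}, // placeholder message
	}
	next, err := op.ApplyTo(st)
	if err != nil {
		return RaftState{}, err
	}
	return next.(RaftState), nil
}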
// Consensus handles the work of keeping a shared-state between
// the peers of an IPFS Cluster, as well as modifying that state and
// applying any updates in a thread-safe manner.
type Consensus struct {
ctx context.Context
cancel func()
config *Config
host host.Host
consensus consensus.OpLogConsensus
actor consensus.Actor
raft *raftWrapper
state RaftState
rpcClient *rpc.Client
rpcReady chan struct{}
readyCh chan struct{}
//shutdownLock sync.RWMutex
//shutdown bool
}
// NewConsensus builds a new ClusterConsensus component using Raft.
//
// Raft saves state snapshots regularly and persists log data. In this
// WIP version the wrapper keeps the log, stable and snapshot stores
// in memory (see makeStores in raft.go); the BoltDB-backed stores are
// commented out.
//
// The staging parameter controls if the Raft peer should start in
// staging mode (used when joining a new Raft peerset with other peers).
func NewConsensus(host host.Host, cfg *Config, staging bool) (*Consensus, error) {
//err := cfg.Validate()
//if err != nil {
// return nil, err
//}
ctx, cancel := context.WithCancel(context.Background())
logger.Debug("starting Consensus and waiting for a leader...")
state := newRaftState()
consensus := libp2praft.NewOpLog(state, &ConsensusOp{})
raft, err := newRaftWrapper(host, cfg, consensus.FSM(), staging)
if err != nil {
logger.Error("error creating raft: ", err)
cancel()
return nil, err
}
actor := libp2praft.NewActor(raft.raft)
consensus.SetActor(actor)
cc := &Consensus{
ctx: ctx,
cancel: cancel,
config: cfg,
host: host,
consensus: consensus,
actor: actor,
state: state,
raft: raft,
rpcReady: make(chan struct{}, 1),
readyCh: make(chan struct{}, 1),
}
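// Bootstrap in the background; Ready() is signalled once finishBootstrap completes.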
go cc.finishBootstrap()
return cc, nil
}
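// NewConsensusWithRPCClient returns a constructor which builds a Consensus
// and immediately wires it to the given RPC client via SetClient.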
func NewConsensusWithRPCClient(staging bool) func(host host.Host,
cfg *Config,
rpcClient *rpc.Client,
) (*Consensus, error) {
return func(host host.Host, cfg *Config, rpcClient *rpc.Client) (*Consensus, error) {
cc, err := NewConsensus(host, cfg, staging)
if err != nil {
return nil, err
}
cc.SetClient(rpcClient)
return cc, nil
}
}
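// Illustrative sketch (assumptions: h is a running libp2p host and client is a
// go-libp2p-gorpc client built elsewhere): construct the component, hand it the
// RPC client and block until bootstrap has finished.
func startConsensusExample(ctx context.Context, h host.Host, client *rpc.Client) (*Consensus, error) {
	cc, err := NewConsensus(h, NewDefaultConfig(), false) // staging=false: bootstrap a fresh peerset
	if err != nil {
		return nil, err
	}
	cc.SetClient(client) // unblocks finishBootstrap
	select {
	case <-cc.Ready(ctx):
		return cc, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}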
// WaitForSync waits for a leader and for the state to be up to date, then returns.
func (cc *Consensus) WaitForSync(ctx context.Context) error {
//ctx, span := trace.StartSpan(ctx, "consensus/WaitForSync")
//defer span.End()
leaderCtx, cancel := context.WithTimeout(
ctx,
cc.config.WaitForLeaderTimeout)
defer cancel()
// 1 - wait for leader
// 2 - wait until we are a Voter
// 3 - wait until last index is applied
// From raft docs:
// once a staging server receives enough log entries to be sufficiently
// caught up to the leader's log, the leader will invoke a membership
// change to change the Staging server to a Voter
// Thus, waiting to be a Voter is a guarantee that we have a reasonable
// up to date state. Otherwise, we might return too early (see
// https://github.com/ipfs-cluster/ipfs-cluster/issues/378)
_, err := cc.raft.WaitForLeader(leaderCtx)
if err != nil {
return errors.New("error waiting for leader: " + err.Error())
}
err = cc.raft.WaitForVoter(ctx)
if err != nil {
return errors.New("error waiting to become a Voter: " + err.Error())
}
err = cc.raft.WaitForUpdates(ctx)
if err != nil {
return errors.New("error waiting for consensus updates: " + err.Error())
}
return nil
}
// finishBootstrap waits until there is a consensus leader and the state
// has been synced. If errors happen, this will return and never
// signal the component as Ready.
func (cc *Consensus) finishBootstrap() {
// wait until we have RPC to perform any actions.
select {
case <-cc.ctx.Done():
return
case <-cc.rpcReady:
}
// Sometimes bootstrap is a no-Op. It only applies when
// no state exists and staging=false.
_, err := cc.raft.Bootstrap()
if err != nil {
return
}
logger.Debugf("Bootstrap finished")
err = cc.WaitForSync(cc.ctx)
if err != nil {
return
}
logger.Debug("Raft state is now up to date")
logger.Debug("consensus ready")
cc.readyCh <- struct{}{}
}
// Shutdown stops the component so it will not process any
// more updates. The underlying consensus is permanently
// shutdown, along with the libp2p transport.
func (cc *Consensus) Shutdown(ctx context.Context) error {
//ctx, span := trace.StartSpan(ctx, "consensus/Shutdown")
//defer span.End()
//cc.shutdownLock.Lock()
//defer cc.shutdownLock.Unlock()
//if cc.shutdown {
// logger.Debug("already shutdown")
// return nil
//}
logger.Info("stopping Consensus component")
// Raft Shutdown
err := cc.raft.Shutdown(ctx)
if err != nil {
logger.Error(err)
}
if cc.config.hostShutdown {
cc.host.Close()
}
//cc.shutdown = true
cc.cancel()
close(cc.rpcReady)
return nil
}
// SetClient makes the component ready to perform RPC requests.
func (cc *Consensus) SetClient(c *rpc.Client) {
cc.rpcClient = c
cc.rpcReady <- struct{}{}
}
// Ready returns a channel which is signaled when the Consensus
// algorithm has finished bootstrapping and is ready to use
func (cc *Consensus) Ready(ctx context.Context) <-chan struct{} {
//_, span := trace.StartSpan(ctx, "consensus/Ready")
//defer span.End()
return cc.readyCh
}
// IsTrustedPeer returns true. In Raft we trust all peers.
func (cc *Consensus) IsTrustedPeer(ctx context.Context, p peer.ID) bool {
return true
}
// Trust is a no-op.
func (cc *Consensus) Trust(ctx context.Context, pid peer.ID) error { return nil }
// Distrust is a no-op.
func (cc *Consensus) Distrust(ctx context.Context, pid peer.ID) error { return nil }
// RedirectToLeader returns true if the operation was redirected to the leader.
// Note that if the leader just disappeared, the RPC call will fail because
// we haven't heard that it's gone.
func (cc *Consensus) RedirectToLeader(method string, arg interface{}, ret interface{}) (bool, error) {
//ctx, span := trace.StartSpan(cc.ctx, "consensus/RedirectToLeader")
//defer span.End()
ctx := cc.ctx
var finalErr error
// Retry redirects
for i := 0; i <= cc.config.CommitRetries; i++ {
logger.Debugf("redirect try %d", i)
leader, err := cc.Leader(ctx)
// No leader, wait for one
if err != nil {
logger.Warn("there seems to be no leader. Waiting for one")
rctx, cancel := context.WithTimeout(
ctx,
cc.config.WaitForLeaderTimeout,
)
defer cancel()
pidstr, err := cc.raft.WaitForLeader(rctx)
// means we timed out waiting for a leader
// we don't retry in this case
if err != nil {
return false, fmt.Errorf("timed out waiting for leader: %s", err)
}
leader, err = peer.Decode(pidstr)
if err != nil {
return false, err
}
}
logger.Infof("leader: %s, curr host: &s", leader, cc.host.ID())
// We are the leader. Do not redirect
if leader == cc.host.ID() {
return false, nil
}
logger.Debugf("redirecting %s to leader: %s", method, leader.Pretty())
finalErr = cc.rpcClient.CallContext(
ctx,
leader,
"Consensus",
method,
arg,
ret,
)
if finalErr != nil {
logger.Errorf("retrying to redirect request to leader: %s", finalErr)
time.Sleep(2 * cc.config.RaftConfig.HeartbeatTimeout)
continue
}
break
}
// We tried to redirect, but something happened
return true, finalErr
}
// Commit submits an operation to the consensus log. It retries upon failures.
func (cc *Consensus) Commit(ctx context.Context, op *ConsensusOp) error {
//ctx, span := trace.StartSpan(ctx, "consensus/commit")
//defer span.End()
//
//if cc.config.Tracing {
// // required to cross the serialized boundary
// Op.SpanCtx = span.SpanContext()
// tagmap := tag.FromContext(ctx)
// if tagmap != nil {
// Op.TagCtx = tag.Encode(tagmap)
// }
//}
var finalErr error
for i := 0; i <= cc.config.CommitRetries; i++ {
logger.Debugf("attempt #%d: committing %+v", i, op)
// this means we are retrying
if finalErr != nil {
logger.Errorf("retrying upon failed commit (retry %d): %s ",
i, finalErr)
}
// try to send it to the leader
// RedirectToLeader has its own retry loop. If this fails
// we're done here.
//ok, err := cc.RedirectToLeader(rpcOp, redirectArg, struct{}{})
//if err != nil || ok {
// return err
//}
// Being here means we are the LEADER. We can commit.
// now commit the changes to our state
//cc.shutdownLock.RLock() // do not shut down while committing
_, finalErr = cc.consensus.CommitOp(op)
//cc.shutdownLock.RUnlock()
if finalErr != nil {
goto RETRY
}
// Commit succeeded; do not retry.
break
RETRY:
time.Sleep(cc.config.CommitRetryDelay)
}
return finalErr
}
// AddPeer adds a new peer to participate in this consensus. It will
// forward the operation to the leader if this is not it.
func (cc *Consensus) AddPeer(ctx context.Context, pid peer.ID) error {
//ctx, span := trace.StartSpan(ctx, "consensus/AddPeer")
//defer span.End()
var finalErr error
for i := 0; i <= cc.config.CommitRetries; i++ {
logger.Debugf("attempt #%d: AddPeer %s", i, pid.Pretty())
if finalErr != nil {
logger.Errorf("retrying to add peer. Attempt #%d failed: %s", i, finalErr)
}
ok, err := cc.RedirectToLeader("AddPeer", pid, struct{}{})
if err != nil || ok {
return err
}
// Being here means we are the leader and can commit
//cc.shutdownLock.RLock() // do not shutdown while committing
//finalErr = cc.raft.AddPeer(ctx, peer.Encode(pid))
finalErr = cc.raft.AddPeer(ctx, pid)
//cc.shutdownLock.RUnlock()
if finalErr != nil {
time.Sleep(cc.config.CommitRetryDelay)
continue
}
logger.Infof("peer added to Raft: %s", pid.Pretty())
break
}
return finalErr
}
// RmPeer removes a peer from this consensus. It will
// forward the operation to the leader if this is not it.
func (cc *Consensus) RmPeer(ctx context.Context, pid peer.ID) error {
//ctx, span := trace.StartSpan(ctx, "consensus/RmPeer")
//defer span.End()
var finalErr error
for i := 0; i <= cc.config.CommitRetries; i++ {
logger.Debugf("attempt #%d: RmPeer %s", i, pid.Pretty())
if finalErr != nil {
logger.Errorf("retrying to remove peer. Attempt #%d failed: %s", i, finalErr)
}
ok, err := cc.RedirectToLeader("RmPeer", pid, struct{}{})
if err != nil || ok {
return err
}
// Being here means we are the leader and can commit
//cc.shutdownLock.RLock() // do not shutdown while committing
finalErr = cc.raft.RemovePeer(ctx, peer.Encode(pid))
//cc.shutdownLock.RUnlock()
if finalErr != nil {
time.Sleep(cc.config.CommitRetryDelay)
continue
}
logger.Infof("peer removed from Raft: %s", pid.Pretty())
break
}
return finalErr
}
// State retrieves the current consensus RaftState. It may error if no RaftState has
// been agreed upon or the state is not consistent. The returned RaftState is the
// last agreed-upon RaftState known by this node. No writes are allowed, as all
// writes to the shared state should happen through the Consensus component
// methods.
func (cc *Consensus) State(ctx context.Context) (consensus.State, error) {
//_, span := trace.StartSpan(ctx, "consensus/RaftState")
//defer span.End()
st, err := cc.consensus.GetLogHead()
if err == libp2praft.ErrNoState {
logger.Debug("no state in consensus log; returning an empty RaftState")
return newRaftState(), nil
}
if err != nil {
return nil, err
}
state, ok := st.(RaftState)
if !ok {
return nil, errors.New("wrong state type")
}
return state, nil
}
// Leader returns the peerID of the Leader of the
// cluster. It returns an error when there is no leader.
func (cc *Consensus) Leader(ctx context.Context) (peer.ID, error) {
//_, span := trace.StartSpan(ctx, "consensus/Leader")
//defer span.End()
// Note the hard-dependency on raft here...
raftactor := cc.actor.(*libp2praft.Actor)
return raftactor.Leader()
}
// Clean removes the Raft persisted state.
func (cc *Consensus) Clean(ctx context.Context) error {
//_, span := trace.StartSpan(ctx, "consensus/Clean")
//defer span.End()
//cc.shutdownLock.RLock()
//defer cc.shutdownLock.RUnlock()
//if !cc.shutdown {
// return errors.New("consensus component is not shutdown")
//}
//return CleanupRaft(cc.config)
return nil
}
//Rollback replaces the current agreed-upon
//state with the state provided. Only the consensus leader
//can perform this operation.
//func (cc *Consensus) Rollback(state RaftState) error {
// // This is unused. It *might* be used for upgrades.
// // There is rather untested magic in libp2p-raft's FSM()
// // to make this possible.
// return cc.consensus.Rollback(state)
//}
// Peers returns the current list of peers in the consensus.
// The list will be sorted alphabetically.
func (cc *Consensus) Peers(ctx context.Context) ([]peer.ID, error) {
//ctx, span := trace.StartSpan(ctx, "consensus/Peers")
//defer span.End()
//cc.shutdownLock.RLock() // prevent shutdown while here
//defer cc.shutdownLock.RUnlock()
//
//if cc.shutdown { // things hang a lot in this case
// return nil, errors.New("consensus is shutdown")
//}
peers := []peer.ID{}
raftPeers, err := cc.raft.Peers(ctx)
if err != nil {
return nil, fmt.Errorf("cannot retrieve list of peers: %s", err)
}
sort.Strings(raftPeers)
for _, p := range raftPeers {
id, err := peer.Decode(p)
if err != nil {
panic("could not decode peer")
}
peers = append(peers, id)
}
return peers, nil
}
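// IsLeader returns true when this node is the current Raft leader.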
func (cc *Consensus) IsLeader(ctx context.Context) bool {
leader, _ := cc.Leader(ctx)
return leader == cc.host.ID()
}
// OfflineState returns a cluster state by reading the Raft data and
// writing it to the given datastore which is then wrapped as a state.RaftState.
// Usually an in-memory datastore suffices. The given datastore should be
// thread-safe.
//func OfflineState(cfg *Config, store ds.Datastore) (state.RaftState, error) {
// r, snapExists, err := LastStateRaw(cfg)
// if err != nil {
// return nil, err
// }
//
// st, err := dsstate.New(context.Background(), store, cfg.DatastoreNamespace, dsstate.DefaultHandle())
// if err != nil {
// return nil, err
// }
// if !snapExists {
// return st, nil
// }
//
// err = st.Unmarshal(r)
// if err != nil {
// return nil, err
// }
// return st, nil
//}

View File

@ -0,0 +1,50 @@
package consensus
import (
"context"
consensus "github.com/libp2p/go-libp2p-consensus"
rpc "github.com/libp2p/go-libp2p-gorpc"
"github.com/libp2p/go-libp2p/core/peer"
)
// Component represents a piece of the cluster. Cluster components
// usually run their own goroutines (an HTTP server, for example). They
// communicate with the main Cluster component and other components
// (both local and remote) using an instance of rpc.Client.
type Component interface {
SetClient(*rpc.Client)
Shutdown(context.Context) error
}
type ConsensusAPI interface {
Component
// Returns a channel to signal that the consensus layer is ready
// allowing the main component to wait for it during start.
Ready(context.Context) <-chan struct{}
AddPeer(context.Context, peer.ID) error
RmPeer(context.Context, peer.ID) error
State(context.Context) (consensus.State, error)
// Leader returns a peer which is responsible for performing
// specific tasks that must only run on one cluster peer.
Leader(context.Context) (peer.ID, error)
// WaitForSync only returns once the consensus state has all log
// updates applied to it.
WaitForSync(context.Context) error
// Clean removes all consensus data.
Clean(context.Context) error
// Peers returns the peerset participating in the Consensus.
Peers(context.Context) ([]peer.ID, error)
// IsTrustedPeer returns true if the given peer is "trusted".
// This will grant access to more rpc endpoints than a
// non-trusted one. This should be fast as it will be
// called repeatedly for every remote RPC request.
IsTrustedPeer(context.Context, peer.ID) bool
// Trust marks a peer as "trusted".
Trust(context.Context, peer.ID) error
// Distrust removes a peer from the "trusted" set.
Distrust(context.Context, peer.ID) error
// IsLeader returns true if the current node is the cluster leader.
IsLeader(ctx context.Context) bool
}
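// Illustrative compile-time check (assuming this file and consensus.go are
// built in the same package): the Raft-backed Consensus type is expected to
// satisfy ConsensusAPI.
var _ ConsensusAPI = (*Consensus)(nil)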

698
lib/consensus/raft/raft.go Normal file
View File

@ -0,0 +1,698 @@
package consensus
import (
"context"
"errors"
"fmt"
"io"
"os"
"time"
hraft "github.com/hashicorp/raft"
p2praft "github.com/libp2p/go-libp2p-raft"
host "github.com/libp2p/go-libp2p/core/host"
peer "github.com/libp2p/go-libp2p/core/peer"
)
// errWaitingForSelf is returned when we are waiting for ourselves to depart
// the peer set, which won't happen.
var errWaitingForSelf = errors.New("waiting for ourselves to depart")
// RaftMaxSnapshots indicates how many snapshots to keep in the consensus data
// folder.
// TODO: Maybe include this in Config. Not sure how useful it is to touch
// this anyways.
var RaftMaxSnapshots = 5
// RaftLogCacheSize is the maximum number of logs to cache in-memory.
// This is used to reduce disk I/O for the recently committed entries.
var RaftLogCacheSize = 512
// How long we wait for updates during shutdown before snapshotting
var waitForUpdatesShutdownTimeout = 5 * time.Second
var waitForUpdatesInterval = 400 * time.Millisecond
// How many times to retry snapshotting when shutting down
var maxShutdownSnapshotRetries = 5
// raftWrapper wraps the hraft.Raft object and related things like the
// different stores used or the hraft.Configuration.
// Its methods provide functionality for working with Raft.
type raftWrapper struct {
ctx context.Context
cancel context.CancelFunc
raft *hraft.Raft
config *Config
host host.Host
serverConfig hraft.Configuration
transport *hraft.NetworkTransport
snapshotStore hraft.SnapshotStore
logStore hraft.LogStore
stableStore hraft.StableStore
//boltdb *raftboltdb.BoltStore
staging bool
}
// newRaftWrapper creates a Raft instance and initializes
// everything, leaving it ready to use. Note that Bootstrap() should be called
// to make sure the raft instance is usable.
func newRaftWrapper(
host host.Host,
cfg *Config,
fsm hraft.FSM,
staging bool,
) (*raftWrapper, error) {
raftW := &raftWrapper{}
raftW.config = cfg
raftW.host = host
raftW.staging = staging
// Set correct LocalID
cfg.RaftConfig.LocalID = hraft.ServerID(peer.Encode(host.ID()))
//df := cfg.GetDataFolder()
//err := makeDataFolder(df)
//if err != nil {
// return nil, err
//}
raftW.makeServerConfig()
err := raftW.makeTransport()
if err != nil {
return nil, err
}
err = raftW.makeStores()
if err != nil {
return nil, err
}
logger.Debug("creating Raft")
raftW.raft, err = hraft.NewRaft(
cfg.RaftConfig,
fsm,
raftW.logStore,
raftW.stableStore,
raftW.snapshotStore,
raftW.transport,
)
if err != nil {
logger.Error("initializing raft: ", err)
return nil, err
}
raftW.ctx, raftW.cancel = context.WithCancel(context.Background())
//go raftW.observePeers()
return raftW, nil
}
// makeDataFolder creates the folder that is meant to store Raft data. Ensures
// we always set 0700 mode.
func makeDataFolder(folder string) error {
return os.MkdirAll(folder, 0700)
}
func (rw *raftWrapper) makeTransport() (err error) {
logger.Debug("creating libp2p Raft transport")
rw.transport, err = p2praft.NewLibp2pTransport(
rw.host,
rw.config.NetworkTimeout,
)
return err
}
func (rw *raftWrapper) makeStores() error {
//logger.Debug("creating BoltDB store")
//df := rw.config.GetDataFolder()
//store, err := raftboltdb.NewBoltStore(filepath.Join(df, "raft.db"))
//if err != nil {
// return err
//}
store := hraft.NewInmemStore()
// wraps the store in a LogCache to improve performance.
// See consul/agent/consul/server.go
cacheStore, err := hraft.NewLogCache(RaftLogCacheSize, store)
if err != nil {
return err
}
//logger.Debug("creating raft snapshot store")
//snapstore, err := hraft.NewFileSnapshotStoreWithLogger(
// df,
// RaftMaxSnapshots,
// raftStdLogger,
//)
snapstore := hraft.NewInmemSnapshotStore()
//if err != nil {
// return err
//}
rw.logStore = cacheStore
rw.stableStore = store
rw.snapshotStore = snapstore
//rw.boltdb = store
return nil
}
// Bootstrap calls BootstrapCluster on the Raft instance with a valid
// Configuration (generated from InitPeerset) when Raft has no state
// and we are not setting up a staging peer. It reports whether Raft
// was bootstrapped (true) and any error.
func (rw *raftWrapper) Bootstrap() (bool, error) {
logger.Debug("checking for existing raft states")
hasState, err := hraft.HasExistingState(
rw.logStore,
rw.stableStore,
rw.snapshotStore,
)
if err != nil {
return false, err
}
if hasState {
logger.Debug("raft cluster is already initialized")
// Inform the user that we are working with a pre-existing peerset
logger.Info("existing Raft state found! raft.InitPeerset will be ignored")
cf := rw.raft.GetConfiguration()
if err := cf.Error(); err != nil {
logger.Debug(err)
return false, err
}
currentCfg := cf.Configuration()
srvs := ""
for _, s := range currentCfg.Servers {
srvs += fmt.Sprintf(" %s\n", s.ID)
}
logger.Debugf("Current Raft Peerset:\n%s\n", srvs)
return false, nil
}
if rw.staging {
logger.Debug("staging servers do not need initialization")
logger.Info("peer is ready to join a cluster")
return false, nil
}
voters := ""
for _, s := range rw.serverConfig.Servers {
voters += fmt.Sprintf(" %s\n", s.ID)
}
logger.Infof("initializing raft cluster with the following voters:\n%s\n", voters)
future := rw.raft.BootstrapCluster(rw.serverConfig)
if err := future.Error(); err != nil {
logger.Error("bootstrapping cluster: ", err)
return true, err
}
return true, nil
}
// makeServerConfig creates the Raft server configuration. The result is used
// by Bootstrap() when it proceeds to bootstrap the cluster.
func (rw *raftWrapper) makeServerConfig() {
rw.serverConfig = makeServerConf(append(rw.config.InitPeerset, rw.host.ID()))
}
// creates a server configuration with all peers as Voters.
func makeServerConf(peers []peer.ID) hraft.Configuration {
sm := make(map[string]struct{})
servers := make([]hraft.Server, 0)
// Servers are peers + self. We avoid duplicate entries below
for _, pid := range peers {
p := peer.Encode(pid)
_, ok := sm[p]
if !ok { // avoid dups
sm[p] = struct{}{}
servers = append(servers, hraft.Server{
Suffrage: hraft.Voter,
ID: hraft.ServerID(p),
Address: hraft.ServerAddress(p),
})
}
}
return hraft.Configuration{Servers: servers}
}
// WaitForLeader holds until Raft says we have a leader.
// Returns if ctx is canceled.
func (rw *raftWrapper) WaitForLeader(ctx context.Context) (string, error) {
//ctx, span := trace.StartSpan(ctx, "consensus/raft/WaitForLeader")
//defer span.End()
ticker := time.NewTicker(time.Second / 2)
for {
select {
case <-ticker.C:
if l := rw.raft.Leader(); l != "" {
logger.Debug("waitForleaderTimer")
logger.Infof("Current Raft Leader: %s", l)
ticker.Stop()
return string(l), nil
}
case <-ctx.Done():
return "", ctx.Err()
}
}
}
func (rw *raftWrapper) WaitForVoter(ctx context.Context) error {
//ctx, span := trace.StartSpan(ctx, "consensus/raft/WaitForVoter")
//defer span.End()
logger.Debug("waiting until we are promoted to a voter")
pid := hraft.ServerID(peer.Encode(rw.host.ID()))
for {
select {
case <-ctx.Done():
return ctx.Err()
default:
logger.Debugf("%s: get configuration", pid)
configFuture := rw.raft.GetConfiguration()
if err := configFuture.Error(); err != nil {
return err
}
if isVoter(pid, configFuture.Configuration()) {
return nil
}
logger.Debugf("%s: not voter yet", pid)
time.Sleep(waitForUpdatesInterval)
}
}
}
func isVoter(srvID hraft.ServerID, cfg hraft.Configuration) bool {
for _, server := range cfg.Servers {
if server.ID == srvID && server.Suffrage == hraft.Voter {
return true
}
}
return false
}
// WaitForUpdates holds until Raft has synced to the last index in the log
func (rw *raftWrapper) WaitForUpdates(ctx context.Context) error {
//ctx, span := trace.StartSpan(ctx, "consensus/raft/WaitForUpdates")
//defer span.End()
logger.Debug("Raft state is catching up to the latest known version. Please wait...")
for {
select {
case <-ctx.Done():
return ctx.Err()
default:
lai := rw.raft.AppliedIndex()
li := rw.raft.LastIndex()
logger.Debugf("current Raft index: %d/%d",
lai, li)
if lai == li {
return nil
}
time.Sleep(waitForUpdatesInterval)
}
}
}
func (rw *raftWrapper) WaitForPeer(ctx context.Context, pid string, depart bool) error {
//ctx, span := trace.StartSpan(ctx, "consensus/raft/WaitForPeer")
//defer span.End()
for {
select {
case <-ctx.Done():
return ctx.Err()
default:
peers, err := rw.Peers(ctx)
if err != nil {
return err
}
if len(peers) == 1 && pid == peers[0] && depart {
return errWaitingForSelf
}
found := find(peers, pid)
// departing
if depart && !found {
return nil
}
// joining
if !depart && found {
return nil
}
time.Sleep(50 * time.Millisecond)
}
}
}
// Snapshot tells Raft to take a snapshot.
func (rw *raftWrapper) Snapshot() error {
future := rw.raft.Snapshot()
err := future.Error()
if err != nil && err.Error() != hraft.ErrNothingNewToSnapshot.Error() {
return err
}
return nil
}
// snapshotOnShutdown attempts to take a snapshot before a shutdown.
// Snapshotting might fail if the raft applied index is not the last index.
// This waits for the updates and tries to take a snapshot when the
// applied index is up to date.
// It will retry if the snapshot still fails, in case more updates have arrived.
// If waiting for updates times out, it will not retry, since something
// is wrong. This is a best-effort solution as there is no way to tell Raft
// to stop processing entries because we want to take a snapshot before
// shutting down.
func (rw *raftWrapper) snapshotOnShutdown() error {
var err error
for i := 0; i < maxShutdownSnapshotRetries; i++ {
ctx, cancel := context.WithTimeout(context.Background(), waitForUpdatesShutdownTimeout)
err = rw.WaitForUpdates(ctx)
cancel()
if err != nil {
logger.Warn("timed out waiting for state updates before shutdown. Snapshotting may fail")
return rw.Snapshot()
}
err = rw.Snapshot()
if err == nil {
return nil // things worked
}
// There was an error
err = errors.New("could not snapshot raft: " + err.Error())
logger.Warnf("retrying to snapshot (%d/%d)...", i+1, maxShutdownSnapshotRetries)
}
return err
}
// Shutdown shuts down Raft and releases the resources it holds.
func (rw *raftWrapper) Shutdown(ctx context.Context) error {
//_, span := trace.StartSpan(ctx, "consensus/raft/Shutdown")
//defer span.End()
errMsgs := ""
rw.cancel()
err := rw.snapshotOnShutdown()
if err != nil {
errMsgs += err.Error() + ".\n"
}
future := rw.raft.Shutdown()
err = future.Error()
if err != nil {
errMsgs += "could not shutdown raft: " + err.Error() + ".\n"
}
//err = rw.boltdb.Close() // important!
//if err != nil {
// errMsgs += "could not close boltdb: " + err.Error()
//}
if errMsgs != "" {
return errors.New(errMsgs)
}
return nil
}
// AddPeer adds a peer to Raft
func (rw *raftWrapper) AddPeer(ctx context.Context, peerId peer.ID) error {
//ctx, span := trace.StartSpan(ctx, "consensus/raft/AddPeer")
//defer span.End()
// Check whether the peer is already present so
// we don't waste log entries.
peers, err := rw.Peers(ctx)
if err != nil {
return err
}
if find(peers, peerId.String()) {
logger.Infof("%s is already a raft peerStr", peerId.String())
return nil
}
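// Connect to the new peer first so the libp2p Raft transport can reach it.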
err = rw.host.Connect(ctx, peer.AddrInfo{ID: peerId})
if err != nil {
return err
}
future := rw.raft.AddVoter(
hraft.ServerID(peerId.String()),
hraft.ServerAddress(peerId.String()),
0,
0,
) // TODO: Extra cfg value?
err = future.Error()
if err != nil {
logger.Error("raft cannot add peer: ", err)
}
return err
}
// RemovePeer removes a peer from Raft
func (rw *raftWrapper) RemovePeer(ctx context.Context, peer string) error {
//ctx, span := trace.StartSpan(ctx, "consensus/RemovePeer")
//defer span.End()
// Check that the peer is present so
// we don't waste log entries if it isn't.
peers, err := rw.Peers(ctx)
if err != nil {
return err
}
if !find(peers, peer) {
logger.Infof("%s is not among raft peers", peer)
return nil
}
if len(peers) == 1 && peers[0] == peer {
return errors.New("cannot remove ourselves from a 1-peer cluster")
}
rmFuture := rw.raft.RemoveServer(
hraft.ServerID(peer),
0,
0,
) // TODO: Extra cfg value?
err = rmFuture.Error()
if err != nil {
logger.Error("raft cannot remove peer: ", err)
return err
}
return nil
}
// Leader returns Raft's leader. It may be an empty string if
// there is no leader or it is unknown.
func (rw *raftWrapper) Leader(ctx context.Context) string {
//_, span := trace.StartSpan(ctx, "consensus/raft/Leader")
//defer span.End()
return string(rw.raft.Leader())
}
func (rw *raftWrapper) Peers(ctx context.Context) ([]string, error) {
//_, span := trace.StartSpan(ctx, "consensus/raft/Peers")
//defer span.End()
ids := make([]string, 0)
configFuture := rw.raft.GetConfiguration()
if err := configFuture.Error(); err != nil {
return nil, err
}
for _, server := range configFuture.Configuration().Servers {
ids = append(ids, string(server.ID))
}
return ids, nil
}
// latestSnapshot looks for the most recent raft snapshot stored at the
// provided basedir. It returns the snapshot's metadata, and a reader
// to the snapshot's bytes
func latestSnapshot(raftDataFolder string) (*hraft.SnapshotMeta, io.ReadCloser, error) {
store, err := hraft.NewFileSnapshotStore(raftDataFolder, RaftMaxSnapshots, nil)
if err != nil {
return nil, nil, err
}
snapMetas, err := store.List()
if err != nil {
return nil, nil, err
}
if len(snapMetas) == 0 { // no error if snapshot isn't found
return nil, nil, nil
}
meta, r, err := store.Open(snapMetas[0].ID)
if err != nil {
return nil, nil, err
}
return meta, r, nil
}
// LastStateRaw returns the bytes of the last snapshot stored, its metadata,
// and a flag indicating whether any snapshot was found.
//func LastStateRaw(cfg *Config) (io.Reader, bool, error) {
// // Read most recent snapshot
// dataFolder := cfg.GetDataFolder()
// if _, err := os.Stat(dataFolder); os.IsNotExist(err) {
// // nothing to read
// return nil, false, nil
// }
//
// meta, r, err := latestSnapshot(dataFolder)
// if err != nil {
// return nil, false, err
// }
// if meta == nil { // no snapshots could be read
// return nil, false, nil
// }
// return r, true, nil
//}
// SnapshotSave saves the provided state to a snapshot in the
// raft data path. Old raft data is backed up and replaced
// by the new snapshot. pids contains the config-specified
// peer ids to include in the snapshot metadata if no snapshot exists
// from which to copy the raft metadata
//func SnapshotSave(cfg *Config, newState state.RaftState, pids []peer.ID) error {
// dataFolder := cfg.GetDataFolder()
// err := makeDataFolder(dataFolder)
// if err != nil {
// return err
// }
// meta, _, err := latestSnapshot(dataFolder)
// if err != nil {
// return err
// }
//
// // make a new raft snapshot
// var raftSnapVersion hraft.SnapshotVersion = 1 // As of hraft v1.0.0 this is always 1
// configIndex := uint64(1)
// var raftIndex uint64
// var raftTerm uint64
// var srvCfg hraft.Configuration
// if meta != nil {
// raftIndex = meta.Index
// raftTerm = meta.Term
// srvCfg = meta.Configuration
// CleanupRaft(cfg)
// } else {
// // Begin the log after the index of a fresh start so that
// // the snapshot's state propagates during bootstrap
// raftIndex = uint64(2)
// raftTerm = uint64(1)
// srvCfg = makeServerConf(pids)
// }
//
// snapshotStore, err := hraft.NewFileSnapshotStoreWithLogger(dataFolder, RaftMaxSnapshots, nil)
// if err != nil {
// return err
// }
// _, dummyTransport := hraft.NewInmemTransport("")
//
// sink, err := snapshotStore.Create(raftSnapVersion, raftIndex, raftTerm, srvCfg, configIndex, dummyTransport)
// if err != nil {
// return err
// }
//
// err = p2praft.EncodeSnapshot(newState, sink)
// if err != nil {
// sink.Cancel()
// return err
// }
// err = sink.Close()
// if err != nil {
// return err
// }
// return nil
//}
// CleanupRaft moves the current data folder to a backup location
//func CleanupRaft(cfg *Config) error {
// dataFolder := cfg.GetDataFolder()
// keep := cfg.BackupsRotate
//
// meta, _, err := latestSnapshot(dataFolder)
// if meta == nil && err == nil {
// // no snapshots at all. Avoid creating backups
// // from empty state folders.
// logger.Infof("cleaning empty Raft data folder (%s)", dataFolder)
// os.RemoveAll(dataFolder)
// return nil
// }
//
// logger.Infof("cleaning and backing up Raft data folder (%s)", dataFolder)
// dbh := newDataBackupHelper(dataFolder, keep)
// err = dbh.makeBackup()
// if err != nil {
// logger.Warn(err)
// logger.Warn("the state could not be cleaned properly")
// logger.Warn("manual intervention may be needed before starting cluster again")
// }
// return nil
//}
// Clean removes Raft's persisted state. Only call this when Raft is shut down.
func (rw *raftWrapper) Clean() error {
//return CleanupRaft(rw.config)
return nil
}
func find(s []string, elem string) bool {
for _, selem := range s {
if selem == elem {
return true
}
}
return false
}
func (rw *raftWrapper) observePeers() {
obsCh := make(chan hraft.Observation, 1)
defer close(obsCh)
observer := hraft.NewObserver(obsCh, true, func(o *hraft.Observation) bool {
po, ok := o.Data.(hraft.PeerObservation)
return ok && po.Removed
})
rw.raft.RegisterObserver(observer)
defer rw.raft.DeregisterObserver(observer)
for {
select {
case obs := <-obsCh:
pObs := obs.Data.(hraft.PeerObservation)
logger.Info("raft peer departed. Removing from peerstore: ", pObs.Peer.ID)
pID, err := peer.Decode(string(pObs.Peer.ID))
if err != nil {
logger.Error(err)
continue
}
rw.host.Peerstore().ClearAddrs(pID)
case <-rw.ctx.Done():
logger.Debug("stopped observing raft peers")
return
}
}
}

View File

@ -119,6 +119,7 @@ const (
SettlePaymentChannelsKey
RunPeerTaggerKey
SetupFallbackBlockstoresKey
RPCServer
SetApiEndpointKey

View File

@ -105,6 +105,7 @@ var ChainNode = Options(
// Service: Wallet
Override(new(*messagesigner.MessageSigner), messagesigner.NewMessageSigner),
Override(new(messagesigner.MsgSigner), func(ms *messagesigner.MessageSigner) *messagesigner.MessageSigner { return ms }),
Override(new(*wallet.LocalWallet), wallet.NewWallet),
Override(new(wallet.Default), From(new(*wallet.LocalWallet))),
Override(new(api.Wallet), From(new(wallet.MultiWallet))),

View File

@ -5,6 +5,7 @@ import (
"time"
logging "github.com/ipfs/go-log/v2"
consensus "github.com/libp2p/go-libp2p-consensus"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/filecoin-project/lotus/api"
@ -117,4 +118,8 @@ func (n *FullNodeAPI) NodeStatus(ctx context.Context, inclChainStatus bool) (sta
return status, nil
}
func (n *FullNodeAPI) RaftState(ctx context.Context) (consensus.State, error) {
return n.MpoolAPI.GetRaftState(ctx)
}
var _ api.FullNode = &FullNodeAPI{}

View File

@ -5,6 +5,8 @@ import (
"encoding/json"
"github.com/ipfs/go-cid"
consensus "github.com/libp2p/go-libp2p-consensus"
"github.com/libp2p/go-libp2p/core/peer"
"go.uber.org/fx"
"golang.org/x/xerrors"
@ -41,9 +43,11 @@ type MpoolAPI struct {
MpoolModuleAPI
WalletAPI
GasAPI
MessageSigner *messagesigner.MessageSigner
MessageSigner messagesigner.MsgSigner
// MessageSigner *messagesigner.MessageSigner
PushLocks *dtypes.MpoolLocker
}
@ -142,6 +146,20 @@ func (a *MpoolAPI) MpoolPushMessage(ctx context.Context, msg *types.Message, spe
msg = &cp
inMsg := *msg
// Redirect to the leader if the current node is not the leader. A single non-raft-based node is always the leader.
if !a.MessageSigner.IsLeader(ctx) {
var signedMsg types.SignedMessage
redirected, err := a.MessageSigner.RedirectToLeader(ctx, "MpoolPushMessage", api.MpoolMessageWhole{Msg: msg, Spec: spec}, &signedMsg)
if err != nil {
return nil, err
}
// It's possible that the current node became the leader between the check and the redirect
// In that case, continue with rest of execution and only return signedMsg if something was redirected
if redirected {
return &signedMsg, nil
}
}
// Check if this uuid has already been processed
if spec != nil {
signedMessage, err := a.MessageSigner.GetSignedMessage(ctx, spec.MsgUuid)
@ -195,7 +213,7 @@ func (a *MpoolAPI) MpoolPushMessage(ctx context.Context, msg *types.Message, spe
}
// Sign and push the message
signedMsg, err := a.MessageSigner.SignMessage(ctx, msg, func(smsg *types.SignedMessage) error {
signedMsg, err := a.MessageSigner.SignMessage(ctx, msg, spec, func(smsg *types.SignedMessage) error {
if _, err := a.MpoolModuleAPI.MpoolPush(ctx, smsg); err != nil {
return xerrors.Errorf("mpool push: failed to push message: %w", err)
}
@ -271,3 +289,11 @@ func (a *MpoolAPI) MpoolGetNonce(ctx context.Context, addr address.Address) (uin
func (a *MpoolAPI) MpoolSub(ctx context.Context) (<-chan api.MpoolUpdate, error) {
return a.Mpool.Updates(ctx)
}
func (a *MpoolAPI) GetRaftState(ctx context.Context) (consensus.State, error) {
return a.MessageSigner.GetRaftState(ctx)
}
func (a *MpoolAPI) RaftLeader(ctx context.Context) (peer.ID, error) {
return a.MessageSigner.RaftLeader(ctx)
}

66
node/modules/rpc.go Normal file
View File

@ -0,0 +1,66 @@
package modules
import (
"context"
rpc "github.com/libp2p/go-libp2p-gorpc"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
consensus "github.com/filecoin-project/lotus/lib/consensus/raft"
"github.com/filecoin-project/lotus/node/impl/full"
)
type RPCHandler struct {
mpoolAPI full.MpoolAPI
cons *consensus.Consensus
}
//type ConsensusRPCAPI struct {
// cons *consensus.Consensus
// rpcHandler *RPCHandler
//}
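// NewRPCHandler wraps the mpool API and the consensus component so they can
// be exposed over go-libp2p-gorpc.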
func NewRPCHandler(mpoolAPI full.MpoolAPI, cons *consensus.Consensus) *RPCHandler {
return &RPCHandler{mpoolAPI, cons}
}
func (h *RPCHandler) MpoolPushMessage(ctx context.Context, msgWhole *api.MpoolMessageWhole, ret *types.SignedMessage) error {
signedMsg, err := h.mpoolAPI.MpoolPushMessage(ctx, msgWhole.Msg, msgWhole.Spec)
if err != nil {
return err
}
*ret = *signedMsg
return nil
}
func (h *RPCHandler) AddPeer(ctx context.Context, pid peer.ID, ret *struct{}) error {
return h.cons.AddPeer(ctx, pid)
}
// Add other consensus RPC calls here
func NewRPCClient(host host.Host) *rpc.Client {
protocolID := protocol.ID("/p2p/rpc/ping")
return rpc.NewClient(host, protocolID)
}
func NewRPCServer(host host.Host, rpcHandler *RPCHandler) error {
protocolID := protocol.ID("/p2p/rpc/ping")
rpcServer := rpc.NewServer(host, protocolID)
return rpcServer.RegisterName("Consensus", rpcHandler)
//return err
}
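// Illustrative sketch (assumptions: serverHost and clientHost are connected
// libp2p hosts and handler was built with NewRPCHandler): register the
// "Consensus" service on one host and push a message to it from the other.
func exampleRemotePush(ctx context.Context, serverHost, clientHost host.Host, handler *RPCHandler, msg *api.MpoolMessageWhole) (*types.SignedMessage, error) {
	if err := NewRPCServer(serverHost, handler); err != nil {
		return nil, err
	}
	client := NewRPCClient(clientHost)
	var ret types.SignedMessage
	if err := client.CallContext(ctx, serverHost.ID(), "Consensus", "MpoolPushMessage", msg, &ret); err != nil {
		return nil, err
	}
	return &ret, nil
}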
// constructors for rpc client and rpc server
// rpc handler
// rpcClient
// Consensus
// MessageSigner
// MpoolAPI
// RPC handler
// RPC server