Add Mpool ref to raft state and rearrange some APIs
parent 570f61438a
commit f89a682d98
@@ -29,7 +29,6 @@ import (
 	lminer "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
 	"github.com/filecoin-project/lotus/chain/actors/builtin/power"
 	"github.com/filecoin-project/lotus/chain/types"
-	consensus2 "github.com/filecoin-project/lotus/lib/consensus/raft"
 	"github.com/filecoin-project/lotus/node/modules/dtypes"
 	"github.com/filecoin-project/lotus/node/repo/imports"
 )
@@ -753,7 +752,7 @@ type FullNode interface {
 	// the path specified when calling CreateBackup is within the base path
 	CreateBackup(ctx context.Context, fpath string) error //perm:admin

-	RaftState(ctx context.Context) (*consensus2.RaftState, error) //perm:read
+	RaftState(ctx context.Context) (*RaftStateData, error) //perm:read
 	RaftLeader(ctx context.Context) (peer.ID, error) //perm:read
 }
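The v1 API now returns the api-level RaftStateData instead of leaking the consensus package's internal type. A minimal client-side sketch of the new call (connection setup is elided; `node` is assumed to be an api.FullNode obtained from the usual RPC client helpers):

	// Read the replicated raft state through the v1 full-node API.
	state, err := node.RaftState(ctx)
	if err != nil {
		return err
	}
	for addr, nonce := range state.NonceMap { // NonceMap is api.NonceMapType
		fmt.Printf("raft nonce for %s: %d\n", addr, nonce)
	}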
@@ -40,7 +40,6 @@ import (
 	"github.com/filecoin-project/lotus/api/v0api"
 	"github.com/filecoin-project/lotus/build"
 	"github.com/filecoin-project/lotus/chain/types"
-	consensus "github.com/filecoin-project/lotus/lib/consensus/raft"
 	"github.com/filecoin-project/lotus/node/modules/dtypes"
 	"github.com/filecoin-project/lotus/node/repo/imports"
 	sealing "github.com/filecoin-project/lotus/storage/pipeline"
@@ -341,7 +340,7 @@ func init() {
 	addExample(map[string]bitfield.BitField{
 		"": bitfield.NewFromSet([]uint64{5, 6, 7, 10}),
 	})
-	addExample(&consensus.RaftState{
+	addExample(&api.RaftStateData{
 		NonceMap: make(map[address.Address]uint64),
 		MsgUuids: make(map[uuid.UUID]*types.SignedMessage),
 	})
@@ -357,6 +356,7 @@ func init() {
 		Headers: nil,
 		},
 	})

 }

 func GetAPIType(name, pkg string) (i interface{}, t reflect.Type, permStruct []reflect.Type) {
@@ -37,7 +37,6 @@ import (
 	miner0 "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
 	types "github.com/filecoin-project/lotus/chain/types"
 	alerting "github.com/filecoin-project/lotus/journal/alerting"
-	consensus "github.com/filecoin-project/lotus/lib/consensus/raft"
 	dtypes "github.com/filecoin-project/lotus/node/modules/dtypes"
 	imports "github.com/filecoin-project/lotus/node/repo/imports"
 )
@@ -2260,10 +2259,10 @@ func (mr *MockFullNodeMockRecorder) RaftLeader(arg0 interface{}) *gomock.Call {
 }

 // RaftState mocks base method.
-func (m *MockFullNode) RaftState(arg0 context.Context) (*consensus.RaftState, error) {
+func (m *MockFullNode) RaftState(arg0 context.Context) (*api.RaftStateData, error) {
 	m.ctrl.T.Helper()
 	ret := m.ctrl.Call(m, "RaftState", arg0)
-	ret0, _ := ret[0].(*consensus.RaftState)
+	ret0, _ := ret[0].(*api.RaftStateData)
 	ret1, _ := ret[1].(error)
 	return ret0, ret1
 }
@@ -36,7 +36,6 @@ import (
 	lminer "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/journal/alerting"
-	consensus2 "github.com/filecoin-project/lotus/lib/consensus/raft"
 	"github.com/filecoin-project/lotus/node/modules/dtypes"
 	"github.com/filecoin-project/lotus/node/repo/imports"
 	"github.com/filecoin-project/lotus/storage/pipeline/sealiface"
@@ -342,7 +341,7 @@ type FullNodeStruct struct {

 	RaftLeader func(p0 context.Context) (peer.ID, error) `perm:"read"`

-	RaftState func(p0 context.Context) (*consensus2.RaftState, error) `perm:"read"`
+	RaftState func(p0 context.Context) (*RaftStateData, error) `perm:"read"`

 	StateAccountKey func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) `perm:"read"`

@@ -2465,14 +2464,14 @@ func (s *FullNodeStub) RaftLeader(p0 context.Context) (peer.ID, error) {
 	return *new(peer.ID), ErrNotSupported
 }

-func (s *FullNodeStruct) RaftState(p0 context.Context) (*consensus2.RaftState, error) {
+func (s *FullNodeStruct) RaftState(p0 context.Context) (*RaftStateData, error) {
 	if s.Internal.RaftState == nil {
 		return nil, ErrNotSupported
 	}
 	return s.Internal.RaftState(p0)
 }

-func (s *FullNodeStub) RaftState(p0 context.Context) (*consensus2.RaftState, error) {
+func (s *FullNodeStub) RaftState(p0 context.Context) (*RaftStateData, error) {
 	return nil, ErrNotSupported
 }
api/types.go (+58)
@@ -337,3 +337,61 @@ type ForkUpgradeParams struct {
 	UpgradeChocolateHeight abi.ChainEpoch
 	UpgradeOhSnapHeight    abi.ChainEpoch
 }
+
+type NonceMapType map[address.Address]uint64
+type MsgUuidMapType map[uuid.UUID]*types.SignedMessage
+
+type RaftStateData struct {
+	NonceMap NonceMapType
+	MsgUuids MsgUuidMapType
+}
+
+func (n *NonceMapType) MarshalJSON() ([]byte, error) {
+	marshalled := make(map[string]uint64)
+	for a, n := range *n {
+		marshalled[a.String()] = n
+	}
+	return json.Marshal(marshalled)
+}
+
+func (n *NonceMapType) UnmarshalJSON(b []byte) error {
+	unmarshalled := make(map[string]uint64)
+	err := json.Unmarshal(b, &unmarshalled)
+	if err != nil {
+		return err
+	}
+	*n = make(map[address.Address]uint64)
+	for saddr, nonce := range unmarshalled {
+		a, err := address.NewFromString(saddr)
+		if err != nil {
+			return err
+		}
+		(*n)[a] = nonce
+	}
+	return nil
+}
+
+func (m *MsgUuidMapType) MarshalJSON() ([]byte, error) {
+	marshalled := make(map[string]*types.SignedMessage)
+	for u, msg := range *m {
+		marshalled[u.String()] = msg
+	}
+	return json.Marshal(marshalled)
+}
+
+func (m *MsgUuidMapType) UnmarshalJSON(b []byte) error {
+	unmarshalled := make(map[string]*types.SignedMessage)
+	err := json.Unmarshal(b, &unmarshalled)
+	if err != nil {
+		return err
+	}
+	*m = make(map[uuid.UUID]*types.SignedMessage)
+	for suid, msg := range unmarshalled {
+		u, err := uuid.Parse(suid)
+		if err != nil {
+			return err
+		}
+		(*m)[u] = msg
+	}
+	return nil
+}
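Because address.Address and uuid.UUID cannot serve directly as JSON object keys, the custom marshalers above key both maps by their string forms on the wire. A small round-trip sketch (the address value is illustrative):

	a, _ := address.NewFromString("f01234")
	in := api.RaftStateData{
		NonceMap: api.NonceMapType{a: 42},
		MsgUuids: api.MsgUuidMapType{},
	}
	b, err := json.Marshal(&in) // marshal via pointer so the pointer-receiver MarshalJSON is used
	if err != nil {
		panic(err)
	}
	var out api.RaftStateData
	if err := json.Unmarshal(b, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.NonceMap[a]) // 42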
Binary files (4) not shown.
@@ -1583,3 +1583,8 @@ func getBaseFeeLowerBound(baseFee, factor types.BigInt) types.BigInt {

 	return baseFeeLowerBound
 }
+
+type MpoolNonceAPI interface {
+	GetNonce(context.Context, address.Address, types.TipSetKey) (uint64, error)
+	GetActor(context.Context, address.Address, types.TipSetKey) (*types.Actor, error)
+}
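Moving MpoolNonceAPI into the messagepool package lets *messagepool.MessagePool satisfy its own nonce interface without an import cycle through messagesigner. A hedged sketch of a stub implementation, e.g. for tests (all names here are illustrative, not part of the commit):

	type fakeNoncer struct{ nonces map[address.Address]uint64 }

	func (f *fakeNoncer) GetNonce(_ context.Context, a address.Address, _ types.TipSetKey) (uint64, error) {
		return f.nonces[a], nil // next expected nonce for the sender
	}

	func (f *fakeNoncer) GetActor(_ context.Context, _ address.Address, _ types.TipSetKey) (*types.Actor, error) {
		return &types.Actor{}, nil // enough for existence/balance checks in tests
	}

	var _ messagepool.MpoolNonceAPI = (*fakeNoncer)(nil)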
@@ -10,7 +10,7 @@ import (

 	"github.com/filecoin-project/go-address"

-	"github.com/filecoin-project/lotus/chain/messagesigner"
+	//"github.com/filecoin-project/lotus/chain/messagesigner"
 	"github.com/filecoin-project/lotus/chain/stmgr"
 	"github.com/filecoin-project/lotus/chain/store"
 	"github.com/filecoin-project/lotus/chain/types"
@@ -39,7 +39,7 @@ type mpoolProvider struct {
 	sm *stmgr.StateManager
 	ps *pubsub.PubSub

-	lite messagesigner.MpoolNonceAPI
+	lite MpoolNonceAPI
 }

 var _ Provider = (*mpoolProvider)(nil)
@@ -48,7 +48,7 @@ func NewProvider(sm *stmgr.StateManager, ps *pubsub.PubSub) Provider {
 	return &mpoolProvider{sm: sm, ps: ps}
 }

-func NewProviderLite(sm *stmgr.StateManager, ps *pubsub.PubSub, noncer messagesigner.MpoolNonceAPI) Provider {
+func NewProviderLite(sm *stmgr.StateManager, ps *pubsub.PubSub, noncer MpoolNonceAPI) Provider {
 	return &mpoolProvider{sm: sm, ps: ps, lite: noncer}
 }
@@ -15,6 +15,7 @@ import (
 	"github.com/filecoin-project/go-address"

 	"github.com/filecoin-project/lotus/api"
+	"github.com/filecoin-project/lotus/chain/messagepool"
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/node/modules/dtypes"
 )
@@ -24,11 +25,6 @@ const dsKeyMsgUUIDSet = "MsgUuidSet"

 var log = logging.Logger("messagesigner")

-type MpoolNonceAPI interface {
-	GetNonce(context.Context, address.Address, types.TipSetKey) (uint64, error)
-	GetActor(context.Context, address.Address, types.TipSetKey) (*types.Actor, error)
-}
-
 type MsgSigner interface {
 	SignMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, cb func(*types.SignedMessage) error) (*types.SignedMessage, error)
 	GetSignedMessage(ctx context.Context, uuid uuid.UUID) (*types.SignedMessage, error)
@@ -47,13 +43,13 @@ type MsgSigner interface {
 type MessageSigner struct {
 	wallet api.Wallet
 	lk     sync.Mutex
-	mpool  MpoolNonceAPI
+	mpool  messagepool.MpoolNonceAPI
 	ds     datastore.Batching
 }

 //var _ full.MsgSigner = &MessageSigner{}

-func NewMessageSigner(wallet api.Wallet, mpool MpoolNonceAPI, ds dtypes.MetadataDS) *MessageSigner {
+func NewMessageSigner(wallet api.Wallet, mpool messagepool.MpoolNonceAPI, ds dtypes.MetadataDS) *MessageSigner {
 	ds = namespace.Wrap(ds, datastore.NewKey("/message-signer/"))
 	return &MessageSigner{
 		wallet: wallet,
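The signer's construction is unchanged apart from the interface's new home: anything implementing messagepool.MpoolNonceAPI still plugs in. A short usage sketch (wallet, mp, ds and msg are assumed to be built elsewhere; the callback body is illustrative):

	// mp can be a *messagepool.MessagePool on a full node, or any other
	// messagepool.MpoolNonceAPI implementation on a lite node.
	signer := messagesigner.NewMessageSigner(wallet, mp, ds)
	sm, err := signer.SignMessage(ctx, msg, &api.MessageSendSpec{}, func(sm *types.SignedMessage) error {
		return nil // e.g. publish or persist the signed message here
	})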
@@ -10,6 +10,7 @@ import (
 	"golang.org/x/xerrors"

 	"github.com/filecoin-project/lotus/api"
+	"github.com/filecoin-project/lotus/chain/messagepool"
 	"github.com/filecoin-project/lotus/chain/types"
 	consensus "github.com/filecoin-project/lotus/lib/consensus/raft"
 	"github.com/filecoin-project/lotus/node/modules/dtypes"
@@ -22,7 +23,7 @@ type MessageSignerConsensus struct {

 func NewMessageSignerConsensus(
 	wallet api.Wallet,
-	mpool MpoolNonceAPI,
+	mpool messagepool.MpoolNonceAPI,
 	ds dtypes.MetadataDS,
 	consensus *consensus.Consensus) *MessageSignerConsensus {
@@ -4,8 +4,6 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"github.com/filecoin-project/lotus/lib/retry"
-	"go.uber.org/atomic"
 	"net/http"
 	"net/url"
 	"os"
@@ -17,6 +15,7 @@ import (

 	"github.com/mitchellh/go-homedir"
 	"github.com/urfave/cli/v2"
+	"go.uber.org/atomic"
 	"golang.org/x/xerrors"

 	"github.com/filecoin-project/go-jsonrpc"
@@ -25,6 +24,7 @@ import (
 	"github.com/filecoin-project/lotus/api/client"
 	"github.com/filecoin-project/lotus/api/v0api"
 	"github.com/filecoin-project/lotus/api/v1api"
+	"github.com/filecoin-project/lotus/lib/retry"
 	"github.com/filecoin-project/lotus/node/repo"
 )
@@ -0,0 +1,302 @@
[API]
  # Binding address for the Lotus API
  #
  # type: string
  # env var: LOTUS_API_LISTENADDRESS
  #ListenAddress = "/ip4/127.0.0.1/tcp/1234/http"

  # type: string
  # env var: LOTUS_API_REMOTELISTENADDRESS
  #RemoteListenAddress = ""

  # type: Duration
  # env var: LOTUS_API_TIMEOUT
  #Timeout = "30s"


[Backup]
  # When set to true disables metadata log (.lotus/kvlog). This can save disk
  # space by reducing metadata redundancy.
  #
  # Note that in case of metadata corruption it might be much harder to recover
  # your node if metadata log is disabled
  #
  # type: bool
  # env var: LOTUS_BACKUP_DISABLEMETADATALOG
  #DisableMetadataLog = true


[Logging]
  [Logging.SubsystemLevels]
    # env var: LOTUS_LOGGING_SUBSYSTEMLEVELS_EXAMPLE-SUBSYSTEM
    #example-subsystem = "INFO"


[Libp2p]
  # Binding address for the libp2p host - 0 means random port.
  # Format: multiaddress; see https://multiformats.io/multiaddr/
  #
  # type: []string
  # env var: LOTUS_LIBP2P_LISTENADDRESSES
  #ListenAddresses = ["/ip4/0.0.0.0/tcp/0", "/ip6/::/tcp/0"]

  # Addresses to explicitly announce to other peers. If not specified,
  # all interface addresses are announced
  # Format: multiaddress
  #
  # type: []string
  # env var: LOTUS_LIBP2P_ANNOUNCEADDRESSES
  #AnnounceAddresses = []

  # Addresses to not announce
  # Format: multiaddress
  #
  # type: []string
  # env var: LOTUS_LIBP2P_NOANNOUNCEADDRESSES
  #NoAnnounceAddresses = []

  # When not disabled (default), lotus asks NAT devices (e.g., routers), to
  # open up an external port and forward it to the port lotus is running on.
  # When this works (i.e., when your router supports NAT port forwarding),
  # it makes the local lotus node accessible from the public internet
  #
  # type: bool
  # env var: LOTUS_LIBP2P_DISABLENATPORTMAP
  #DisableNatPortMap = false

  # ConnMgrLow is the number of connections that the basic connection manager
  # will trim down to.
  #
  # type: uint
  # env var: LOTUS_LIBP2P_CONNMGRLOW
  #ConnMgrLow = 150

  # ConnMgrHigh is the number of connections that, when exceeded, will trigger
  # a connection GC operation. Note: protected/recently formed connections don't
  # count towards this limit.
  #
  # type: uint
  # env var: LOTUS_LIBP2P_CONNMGRHIGH
  #ConnMgrHigh = 180

  # ConnMgrGrace is a time duration that new connections are immune from being
  # closed by the connection manager.
  #
  # type: Duration
  # env var: LOTUS_LIBP2P_CONNMGRGRACE
  #ConnMgrGrace = "20s"


[Pubsub]
  # Run the node in bootstrap-node mode
  #
  # type: bool
  # env var: LOTUS_PUBSUB_BOOTSTRAPPER
  #Bootstrapper = false

  # type: string
  # env var: LOTUS_PUBSUB_REMOTETRACER
  #RemoteTracer = ""


[Client]
  # type: bool
  # env var: LOTUS_CLIENT_USEIPFS
  #UseIpfs = false

  # type: bool
  # env var: LOTUS_CLIENT_IPFSONLINEMODE
  #IpfsOnlineMode = false

  # type: string
  # env var: LOTUS_CLIENT_IPFSMADDR
  #IpfsMAddr = ""

  # type: bool
  # env var: LOTUS_CLIENT_IPFSUSEFORRETRIEVAL
  #IpfsUseForRetrieval = false

  # The maximum number of simultaneous data transfers between the client
  # and storage providers for storage deals
  #
  # type: uint64
  # env var: LOTUS_CLIENT_SIMULTANEOUSTRANSFERSFORSTORAGE
  #SimultaneousTransfersForStorage = 20

  # The maximum number of simultaneous data transfers between the client
  # and storage providers for retrieval deals
  #
  # type: uint64
  # env var: LOTUS_CLIENT_SIMULTANEOUSTRANSFERSFORRETRIEVAL
  #SimultaneousTransfersForRetrieval = 20

  # Require that retrievals perform no on-chain operations. Paid retrievals
  # without existing payment channels with available funds will fail instead
  # of automatically performing on-chain operations.
  #
  # type: bool
  # env var: LOTUS_CLIENT_OFFCHAINRETRIEVAL
  #OffChainRetrieval = false


[Wallet]
  # type: string
  # env var: LOTUS_WALLET_REMOTEBACKEND
  #RemoteBackend = ""

  # type: bool
  # env var: LOTUS_WALLET_ENABLELEDGER
  #EnableLedger = false

  # type: bool
  # env var: LOTUS_WALLET_DISABLELOCAL
  #DisableLocal = false


[Fees]
  # type: types.FIL
  # env var: LOTUS_FEES_DEFAULTMAXFEE
  #DefaultMaxFee = "0.07 FIL"


[Chainstore]
  # type: bool
  # env var: LOTUS_CHAINSTORE_ENABLESPLITSTORE
  #EnableSplitstore = false

  [Chainstore.Splitstore]
    # ColdStoreType specifies the type of the coldstore.
    # It can be "universal" (default) or "discard" for discarding cold blocks.
    #
    # type: string
    # env var: LOTUS_CHAINSTORE_SPLITSTORE_COLDSTORETYPE
    #ColdStoreType = "universal"

    # HotStoreType specifies the type of the hotstore.
    # Only currently supported value is "badger".
    #
    # type: string
    # env var: LOTUS_CHAINSTORE_SPLITSTORE_HOTSTORETYPE
    #HotStoreType = "badger"

    # MarkSetType specifies the type of the markset.
    # It can be "map" for in memory marking or "badger" (default) for on-disk marking.
    #
    # type: string
    # env var: LOTUS_CHAINSTORE_SPLITSTORE_MARKSETTYPE
    #MarkSetType = "badger"

    # HotStoreMessageRetention specifies the retention policy for messages, in finalities beyond
    # the compaction boundary; default is 0.
    #
    # type: uint64
    # env var: LOTUS_CHAINSTORE_SPLITSTORE_HOTSTOREMESSAGERETENTION
    #HotStoreMessageRetention = 0

    # HotStoreFullGCFrequency specifies how often to perform a full (moving) GC on the hotstore.
    # A value of 0 disables, while a value of 1 will do full GC in every compaction.
    # Default is 20 (about once a week).
    #
    # type: uint64
    # env var: LOTUS_CHAINSTORE_SPLITSTORE_HOTSTOREFULLGCFREQUENCY
    #HotStoreFullGCFrequency = 20

    # EnableColdStoreAutoPrune turns on compaction of the cold store, i.e. pruning;
    # where hotstore compaction occurs every finality, pruning happens every 3 finalities.
    # Default is false
    #
    # type: bool
    # env var: LOTUS_CHAINSTORE_SPLITSTORE_ENABLECOLDSTOREAUTOPRUNE
    #EnableColdStoreAutoPrune = false

    # ColdStoreFullGCFrequency specifies how often to perform a full (moving) GC on the coldstore.
    # Only applies if auto prune is enabled. A value of 0 disables, while a value of 1 will do
    # full GC in every prune.
    # Default is 7 (about once a week)
    #
    # type: uint64
    # env var: LOTUS_CHAINSTORE_SPLITSTORE_COLDSTOREFULLGCFREQUENCY
    #ColdStoreFullGCFrequency = 7

    # ColdStoreRetention specifies the retention policy for data reachable from the chain, in
    # finalities beyond the compaction boundary; default is 0, -1 retains everything
    #
    # type: int64
    # env var: LOTUS_CHAINSTORE_SPLITSTORE_COLDSTORERETENTION
    #ColdStoreRetention = 0


[Raft]
  # config to enable node cluster with raft consensus
  #
  # type: bool
  # env var: LOTUS_RAFT_CLUSTERMODEENABLED
  #ClusterModeEnabled = false

  # will shutdown libp2p host on shutdown. Useful for testing
  #
  # type: bool
  # env var: LOTUS_RAFT_HOSTSHUTDOWN
  #HostShutdown = false

  # A folder to store Raft's data.
  #
  # type: string
  # env var: LOTUS_RAFT_DATAFOLDER
  #DataFolder = ""

  # InitPeerset provides the list of initial cluster peers for new Raft
  # peers (with no prior state). It is ignored when Raft was already
  # initialized or when starting in staging mode.
  #
  # type: []peer.ID
  # env var: LOTUS_RAFT_INITPEERSET
  #InitPeerset = []

  # LeaderTimeout specifies how long to wait for a leader before
  # failing an operation.
  #
  # type: Duration
  # env var: LOTUS_RAFT_WAITFORLEADERTIMEOUT
  #WaitForLeaderTimeout = "15s"

  # NetworkTimeout specifies how long before a Raft network
  # operation is timed out
  #
  # type: Duration
  # env var: LOTUS_RAFT_NETWORKTIMEOUT
  #NetworkTimeout = "1m40s"

  # CommitRetries specifies how many times we retry a failed commit until
  # we give up.
  #
  # type: int
  # env var: LOTUS_RAFT_COMMITRETRIES
  #CommitRetries = 1

  # How long to wait between retries
  #
  # type: Duration
  # env var: LOTUS_RAFT_COMMITRETRYDELAY
  #CommitRetryDelay = "200ms"

  # BackupsRotate specifies the maximum number of Raft's DataFolder
  # copies that we keep as backups (renaming) after cleanup.
  #
  # type: int
  # env var: LOTUS_RAFT_BACKUPSROTATE
  #BackupsRotate = 6

  # Namespace to use when writing keys to the datastore
  #
  # type: string
  # env var: LOTUS_RAFT_DATASTORENAMESPACE
  #DatastoreNamespace = "/raft"

  # Tracing enables propagation of contexts across binary boundaries.
  #
  # type: bool
  # env var: LOTUS_RAFT_TRACING
  #Tracing = false
@@ -42,7 +42,7 @@ func generatePrivKey() (*kit.Libp2p, error) {
 	return &kit.Libp2p{PeerID: peerId, PrivKey: privkey}, nil
 }

-func getRaftState(ctx context.Context, t *testing.T, node *kit.TestFullNode) *consensus.RaftState {
+func getRaftState(ctx context.Context, t *testing.T, node *kit.TestFullNode) *api.RaftStateData {
 	raftState, err := node.RaftState(ctx)
 	require.NoError(t, err)
 	//rstate := raftState.(*consensus.RaftState)
@@ -51,7 +51,7 @@ func getRaftState(ctx context.Context, t *testing.T, node *kit.TestFullNode) *consensus.RaftState {

 func setup(ctx context.Context, t *testing.T, node0 *kit.TestFullNode, node1 *kit.TestFullNode, node2 *kit.TestFullNode, miner *kit.TestMiner) *kit.Ensemble {

 	//blockTime := 1000 * time.Millisecond
 	blockTime := 1 * time.Second

 	pkey0, _ := generatePrivKey()
 	pkey1, _ := generatePrivKey()
@@ -61,8 +61,8 @@ func setup(ctx context.Context, t *testing.T, node0 *kit.TestFullNode, node1 *kit.TestFullNode, ...) {

 	raftOps := kit.ConstructorOpts(
 		node.Override(new(*gorpc.Client), modules.NewRPCClient),
-		node.Override(new(*config.ClusterRaftConfig), func() *config.ClusterRaftConfig {
-			cfg := config.DefaultClusterRaftConfig()
+		node.Override(new(*consensus.ClusterRaftConfig), func() *consensus.ClusterRaftConfig {
+			cfg := consensus.DefaultClusterRaftConfig()
 			cfg.InitPeerset = initPeerSet
 			return cfg
 		}),
@@ -304,35 +304,11 @@ func TestRaftStateLeaderDisconnectsMiner(t *testing.T) {
 		peerToNode[n.Pkey.PeerID] = n
 	}

-	//bal, err := node0.WalletBalance(ctx, node0.DefaultKey.Address)
-	//require.NoError(t, err)
-
-	//msgHalfBal := &types.Message{
-	//	From:  miner.OwnerKey.Address,
-	//	To:    node0.DefaultKey.Address,
-	//	Value: big.Div(bal, big.NewInt(2)),
-	//}
-	//mu := uuid.New()
-	//smHalfBal, err := miner.FullNode.MpoolPushMessage(ctx, msgHalfBal, &api.MessageSendSpec{
-	//	MsgUuid: mu,
-	//})
-	//require.NoError(t, err)
-	//mLookup, err := node0.StateWaitMsg(ctx, smHalfBal.Cid(), 3, api.LookbackNoLimit, true)
-	//require.NoError(t, err)
-	//require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode)
-	//
-	//rstate0 := getRaftState(ctx, t, &node0)
-	//rstate1 := getRaftState(ctx, t, &node1)
-	//rstate2 := getRaftState(ctx, t, &node2)
-	//
-	//require.True(t, reflect.DeepEqual(rstate0, rstate1))
-	//require.True(t, reflect.DeepEqual(rstate0, rstate2))
-
 	// Take leader node down
-	leader, err := node1.RaftLeader(ctx)
+	leader, err := node0.RaftLeader(ctx)
 	require.NoError(t, err)
 	leaderNode := peerToNode[leader]

 	// Take leader node down
 	err = leaderNode.Stop(ctx)
 	require.NoError(t, err)
 	oldLeaderNode := leaderNode
@@ -351,11 +327,6 @@ func TestRaftStateLeaderDisconnectsMiner(t *testing.T) {
 	require.NotEqual(t, newLeader, leader)
 	leaderNode = peerToNode[newLeader]

-	fmt.Println("New leader: ", newLeader)
-
-	//err = node0.Stop(ctx)
-	//require.NoError(t, err)
-
 	msg2 := &types.Message{
 		From: miner.OwnerKey.Address,
 		To:   node0.DefaultKey.Address,
@@ -373,8 +344,6 @@ func TestRaftStateLeaderDisconnectsMiner(t *testing.T) {
 	require.NoError(t, err)
 	require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode)

-	fmt.Println("!!!!!!!!!!!!!!!!TEST FINISHED!!!!!!!!!!!!!!!!!!!")
-
 	rstate := getRaftState(ctx, t, leaderNode)

 	for _, n := range nodes {
@@ -420,24 +389,16 @@ func TestLeaderDisconnectsCheckMsgStateOnNewLeader(t *testing.T) {
 	})
 	require.NoError(t, err)

-	//
-	//rstate0 := getRaftState(ctx, t, &node0)
-	//rstate1 := getRaftState(ctx, t, &node1)
-	//rstate2 := getRaftState(ctx, t, &node2)
-	//
-	//require.True(t, reflect.DeepEqual(rstate0, rstate1))
-	//require.True(t, reflect.DeepEqual(rstate0, rstate2))
-
 	// Take leader node down
-	leader, err := node1.RaftLeader(ctx)
+	leader, err := node0.RaftLeader(ctx)
 	require.NoError(t, err)
 	leaderNode := peerToNode[leader]

 	// Take leader node down
 	err = leaderNode.Stop(ctx)
 	require.NoError(t, err)
 	oldLeaderNode := leaderNode

-	//time.Sleep(5 * time.Second)
+	time.Sleep(5 * time.Second)

 	newLeader := leader
 	for _, n := range nodes {
@@ -451,17 +412,10 @@ func TestLeaderDisconnectsCheckMsgStateOnNewLeader(t *testing.T) {
 	require.NotEqual(t, newLeader, leader)
 	leaderNode = peerToNode[newLeader]

-	fmt.Println("New leader: ", newLeader)
-
 	mLookup, err := leaderNode.StateWaitMsg(ctx, smHalfBal.Cid(), 3, api.LookbackNoLimit, true)
 	require.NoError(t, err)
 	require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode)

-	//err = node0.Stop(ctx)
-	//require.NoError(t, err)
-
-	fmt.Println("!!!!!!!!!!!!!!!!TEST FINISHED!!!!!!!!!!!!!!!!!!!")
-
 	rstate := getRaftState(ctx, t, leaderNode)

 	for _, n := range nodes {
@@ -471,3 +425,53 @@ func TestLeaderDisconnectsCheckMsgStateOnNewLeader(t *testing.T) {
 		}
 	}
 }
+
+func TestChainStoreSync(t *testing.T) {
+
+	kit.QuietMiningLogs()
+	ctx := context.Background()
+
+	var (
+		node0 kit.TestFullNode
+		node1 kit.TestFullNode
+		node2 kit.TestFullNode
+		miner kit.TestMiner
+	)
+
+	nodes := []*kit.TestFullNode{&node0, &node1, &node2}
+
+	setup(ctx, t, &node0, &node1, &node2, &miner)
+
+	peerToNode := make(map[peer.ID]*kit.TestFullNode)
+	for _, n := range nodes {
+		peerToNode[n.Pkey.PeerID] = n
+	}
+
+	bal, err := node0.WalletBalance(ctx, node0.DefaultKey.Address)
+	require.NoError(t, err)
+
+	leader, err := node0.RaftLeader(ctx)
+	require.NoError(t, err)
+	leaderNode := peerToNode[leader]
+
+	msgHalfBal := &types.Message{
+		From:  miner.OwnerKey.Address,
+		To:    node0.DefaultKey.Address,
+		Value: big.Div(bal, big.NewInt(2)),
+	}
+	mu := uuid.New()
+	smHalfBal, err := miner.FullNode.MpoolPushMessage(ctx, msgHalfBal, &api.MessageSendSpec{
+		MsgUuid: mu,
+	})
+	require.NoError(t, err)
+
+	for _, n := range nodes {
+		fmt.Println(n != leaderNode)
+		if n != leaderNode {
+			mLookup, err := n.StateWaitMsg(ctx, smHalfBal.Cid(), 3, api.LookbackNoLimit, true)
+			require.NoError(t, err)
+			require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode)
+			//break
+		}
+	}
+}
@@ -1,9 +1,11 @@
 package consensus

 import (
+	"io/ioutil"
+	"time"

 	hraft "github.com/hashicorp/raft"
 	"github.com/libp2p/go-libp2p/core/peer"
 	"golang.org/x/xerrors"

 	"github.com/filecoin-project/lotus/node/config"
@@ -25,45 +27,88 @@ var (
 	DefaultDatastoreNamespace = "/r" // from "/raft"
 )

-// Config allows to configure the Raft Consensus component for ipfs-cluster.
-// The component's configuration section is represented by ConfigJSON.
-// Config implements the ComponentConfig interface.
-//type Config struct {
-//	//config.Saver
-//	//
-//	//// will shutdown libp2p host on shutdown. Useful for testing
-//	hostShutdown bool
-//
-//	// A folder to store Raft's data.
-//	DataFolder string
-//
-//	// InitPeerset provides the list of initial cluster peers for new Raft
-//	// peers (with no prior state). It is ignored when Raft was already
-//	// initialized or when starting in staging mode.
-//	InitPeerset []peer.ID
-//	// LeaderTimeout specifies how long to wait for a leader before
-//	// failing an operation.
-//	WaitForLeaderTimeout time.Duration
-//	// NetworkTimeout specifies how long before a Raft network
-//	// operation is timed out
-//	NetworkTimeout time.Duration
-//	// CommitRetries specifies how many times we retry a failed commit until
-//	// we give up.
-//	CommitRetries int
-//	// How long to wait between retries
-//	CommitRetryDelay time.Duration
-//	// BackupsRotate specifies the maximum number of Raft's DataFolder
-//	// copies that we keep as backups (renaming) after cleanup.
-//	BackupsRotate int
-//	// Namespace to use when writing keys to the datastore
-//	DatastoreNamespace string
-//
-//	// A Hashicorp Raft's configuration object.
-//	RaftConfig *hraft.Config
-//
-//	// Tracing enables propagation of contexts across binary boundaries.
-//	Tracing bool
-//}
+// ClusterRaftConfig allows to configure the Raft Consensus component for the node cluster.
+type ClusterRaftConfig struct {
+	// config to enable node cluster with raft consensus
+	ClusterModeEnabled bool
+	// will shutdown libp2p host on shutdown. Useful for testing
+	HostShutdown bool
+	// A folder to store Raft's data.
+	DataFolder string
+	// InitPeerset provides the list of initial cluster peers for new Raft
+	// peers (with no prior state). It is ignored when Raft was already
+	// initialized or when starting in staging mode.
+	InitPeerset []peer.ID
+	// LeaderTimeout specifies how long to wait for a leader before
+	// failing an operation.
+	WaitForLeaderTimeout time.Duration
+	// NetworkTimeout specifies how long before a Raft network
+	// operation is timed out
+	NetworkTimeout time.Duration
+	// CommitRetries specifies how many times we retry a failed commit until
+	// we give up.
+	CommitRetries int
+	// How long to wait between retries
+	CommitRetryDelay time.Duration
+	// BackupsRotate specifies the maximum number of Raft's DataFolder
+	// copies that we keep as backups (renaming) after cleanup.
+	BackupsRotate int
+	// Namespace to use when writing keys to the datastore
+	DatastoreNamespace string

+	// A Hashicorp Raft's configuration object.
+	RaftConfig *hraft.Config

+	// Tracing enables propagation of contexts across binary boundaries.
+	Tracing bool
+}
+
+func DefaultClusterRaftConfig() *ClusterRaftConfig {
+	var cfg ClusterRaftConfig
+	cfg.DataFolder = "" // empty so it gets omitted
+	cfg.InitPeerset = []peer.ID{}
+	cfg.WaitForLeaderTimeout = DefaultWaitForLeaderTimeout
+	cfg.NetworkTimeout = DefaultNetworkTimeout
+	cfg.CommitRetries = DefaultCommitRetries
+	cfg.CommitRetryDelay = DefaultCommitRetryDelay
+	cfg.BackupsRotate = DefaultBackupsRotate
+	cfg.DatastoreNamespace = DefaultDatastoreNamespace
+	cfg.RaftConfig = hraft.DefaultConfig()
+
+	// These options are imposed over any Default Raft Config.
+	cfg.RaftConfig.ShutdownOnRemove = false
+	cfg.RaftConfig.LocalID = "will_be_set_automatically"
+
+	// Set up logging
+	cfg.RaftConfig.LogOutput = ioutil.Discard
+	//cfg.RaftConfig.Logger = &hcLogToLogger{}
+	return &cfg
+}
+
+func NewClusterRaftConfig(userRaftConfig *config.UserRaftConfig) *ClusterRaftConfig {
+	var cfg ClusterRaftConfig
+	cfg.DataFolder = userRaftConfig.DataFolder
+	cfg.InitPeerset = userRaftConfig.InitPeerset
+	cfg.WaitForLeaderTimeout = time.Duration(userRaftConfig.WaitForLeaderTimeout)
+	cfg.NetworkTimeout = time.Duration(userRaftConfig.NetworkTimeout)
+	cfg.CommitRetries = userRaftConfig.CommitRetries
+	cfg.CommitRetryDelay = time.Duration(userRaftConfig.CommitRetryDelay)
+	cfg.BackupsRotate = userRaftConfig.BackupsRotate
+	cfg.DatastoreNamespace = userRaftConfig.DatastoreNamespace
+
+	// Keep this to be default hraft config for now
+	cfg.RaftConfig = hraft.DefaultConfig()
+
+	// These options are imposed over any Default Raft Config.
+	cfg.RaftConfig.ShutdownOnRemove = false
+	cfg.RaftConfig.LocalID = "will_be_set_automatically"
+
+	// Set up logging
+	cfg.RaftConfig.LogOutput = ioutil.Discard
+	//cfg.RaftConfig.Logger = &hcLogToLogger{}
+	return &cfg
+
+}

 // ConfigJSON represents a human-friendly Config
 // object which can be saved to JSON. Most configuration keys are converted
@@ -145,7 +190,7 @@ var (

 //// Validate checks that this configuration has working values,
 //// at least in appearance.
-func ValidateConfig(cfg *config.ClusterRaftConfig) error {
+func ValidateConfig(cfg *ClusterRaftConfig) error {
 	if cfg.RaftConfig == nil {
 		return xerrors.Errorf("no hashicorp/raft.Config")
 	}
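With the config type now owned by the consensus package, callers can build it from defaults or derive it from the TOML-backed user config. A minimal sketch (the peer IDs and the lotusCfg variable are illustrative):

	cfg := consensus.DefaultClusterRaftConfig()
	cfg.ClusterModeEnabled = true
	cfg.InitPeerset = []peer.ID{p0, p1, p2} // illustrative cluster members
	if err := consensus.ValidateConfig(cfg); err != nil {
		return err
	}
	// or, from the user-facing config the node loads:
	// cfg := consensus.NewClusterRaftConfig(&lotusCfg.Raft)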
@@ -4,7 +4,6 @@ package consensus

 import (
 	"context"
-	"encoding/json"
 	"errors"
 	"fmt"
 	"sort"
@@ -15,8 +14,9 @@ import (

 	addr "github.com/filecoin-project/go-address"

+	"github.com/filecoin-project/lotus/api"
+	"github.com/filecoin-project/lotus/chain/messagepool"
 	"github.com/filecoin-project/lotus/chain/types"
-	"github.com/filecoin-project/lotus/node/config"

 	//ds "github.com/ipfs/go-datastore"
 	logging "github.com/ipfs/go-log/v2"
@@ -29,67 +29,21 @@ import (

 var logger = logging.Logger("raft")

-type NonceMapType map[addr.Address]uint64
-type MsgUuidMapType map[uuid.UUID]*types.SignedMessage
+//type NonceMapType map[addr.Address]uint64
+//type MsgUuidMapType map[uuid.UUID]*types.SignedMessage

 type RaftState struct {
-	NonceMap NonceMapType
-	MsgUuids MsgUuidMapType
+	NonceMap api.NonceMapType
+	MsgUuids api.MsgUuidMapType
+	Mpool    *messagepool.MessagePool
 }

-func newRaftState() *RaftState {
-	return &RaftState{NonceMap: make(map[addr.Address]uint64),
-		MsgUuids: make(map[uuid.UUID]*types.SignedMessage)}
+func newRaftState(mpool *messagepool.MessagePool) *RaftState {
+	return &RaftState{
+		NonceMap: make(map[addr.Address]uint64),
+		MsgUuids: make(map[uuid.UUID]*types.SignedMessage),
+		Mpool:    mpool,
+	}
 }

-func (n *NonceMapType) MarshalJSON() ([]byte, error) {
-	marshalled := make(map[string]uint64)
-	for a, n := range *n {
-		marshalled[a.String()] = n
-	}
-	return json.Marshal(marshalled)
-}
-
-func (n *NonceMapType) UnmarshalJSON(b []byte) error {
-	unmarshalled := make(map[string]uint64)
-	err := json.Unmarshal(b, &unmarshalled)
-	if err != nil {
-		return err
-	}
-	*n = make(map[addr.Address]uint64)
-	for saddr, nonce := range unmarshalled {
-		a, err := addr.NewFromString(saddr)
-		if err != nil {
-			return err
-		}
-		(*n)[a] = nonce
-	}
-	return nil
-}
-
-func (m *MsgUuidMapType) MarshalJSON() ([]byte, error) {
-	marshalled := make(map[string]*types.SignedMessage)
-	for u, msg := range *m {
-		marshalled[u.String()] = msg
-	}
-	return json.Marshal(marshalled)
-}
-
-func (m *MsgUuidMapType) UnmarshalJSON(b []byte) error {
-	unmarshalled := make(map[string]*types.SignedMessage)
-	err := json.Unmarshal(b, &unmarshalled)
-	if err != nil {
-		return err
-	}
-	*m = make(map[uuid.UUID]*types.SignedMessage)
-	for suid, msg := range unmarshalled {
-		u, err := uuid.Parse(suid)
-		if err != nil {
-			return err
-		}
-		(*m)[u] = msg
-	}
-	return nil
-}
-
 type ConsensusOp struct {
@@ -103,6 +57,7 @@ func (c ConsensusOp) ApplyTo(state consensus.State) (consensus.State, error) {
 	s := state.(*RaftState)
 	s.NonceMap[c.Addr] = c.Nonce
 	s.MsgUuids[c.Uuid] = c.SignedMsg
+	s.Mpool.Add(context.TODO(), c.SignedMsg)
 	return s, nil
 }

@@ -114,7 +69,7 @@ var _ consensus.Op = &ConsensusOp{}
 type Consensus struct {
 	ctx    context.Context
 	cancel func()
-	config *config.ClusterRaftConfig
+	config *ClusterRaftConfig

 	host host.Host

@@ -141,7 +96,7 @@ type Consensus struct {
 //
 // The staging parameter controls if the Raft peer should start in
 // staging mode (used when joining a new Raft peerset with other peers).
-func NewConsensus(host host.Host, cfg *config.ClusterRaftConfig, staging bool) (*Consensus, error) {
+func NewConsensus(host host.Host, cfg *ClusterRaftConfig, mpool *messagepool.MessagePool, staging bool) (*Consensus, error) {
 	err := ValidateConfig(cfg)
 	if err != nil {
 		return nil, err
@@ -150,7 +105,7 @@ func NewConsensus(host host.Host, cfg *ClusterRaftConfig, mpool *messagepool.MessagePool, staging bool) (*Consensus, error) {
 	ctx, cancel := context.WithCancel(context.Background())

 	logger.Debug("starting Consensus and waiting for a leader...")
-	state := newRaftState()
+	state := newRaftState(mpool)

 	consensus := libp2praft.NewOpLog(state, &ConsensusOp{})

@@ -183,12 +138,13 @@ func NewConsensus(...) (*Consensus, error) {
 }

 func NewConsensusWithRPCClient(staging bool) func(host host.Host,
-	cfg *config.ClusterRaftConfig,
+	cfg *ClusterRaftConfig,
 	rpcClient *rpc.Client,
+	mpool *messagepool.MessagePool,
 ) (*Consensus, error) {

-	return func(host host.Host, cfg *config.ClusterRaftConfig, rpcClient *rpc.Client) (*Consensus, error) {
-		cc, err := NewConsensus(host, cfg, staging)
+	return func(host host.Host, cfg *ClusterRaftConfig, rpcClient *rpc.Client, mpool *messagepool.MessagePool) (*Consensus, error) {
+		cc, err := NewConsensus(host, cfg, mpool, staging)
 		if err != nil {
 			return nil, err
 		}
@@ -506,7 +462,7 @@ func (cc *Consensus) State(ctx context.Context) (*RaftState, error) {

 	st, err := cc.consensus.GetLogHead()
 	if err == libp2praft.ErrNoState {
-		return newRaftState(), nil
+		return newRaftState(nil), nil
 	}

 	if err != nil {
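Threading the Mpool through newRaftState is what makes the new line in ApplyTo work: every node that applies a committed ConsensusOp also inserts the signed message into its local message pool, so follower mpools track the leader's. A hedged sketch of the committing side (the commitToRaft helper is an assumption for illustration, not this package's verbatim API; the op fields match what ApplyTo reads above):

	op := &consensus.ConsensusOp{
		Nonce:     sm.Message.Nonce,
		Uuid:      msgUuid,
		Addr:      sm.Message.From,
		SignedMsg: sm,
	}
	// Assumed commit helper: replicate the op through the raft log; once
	// committed, ApplyTo runs on every node and calls s.Mpool.Add(...).
	err := commitToRaft(ctx, cc, op)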
@@ -12,8 +12,6 @@ import (
 	p2praft "github.com/libp2p/go-libp2p-raft"
 	host "github.com/libp2p/go-libp2p/core/host"
 	peer "github.com/libp2p/go-libp2p/core/peer"
-
-	"github.com/filecoin-project/lotus/node/config"
 )

 // ErrWaitingForSelf is returned when we are waiting for ourselves to depart
@@ -44,7 +42,7 @@ type raftWrapper struct {
 	ctx          context.Context
 	cancel       context.CancelFunc
 	raft         *hraft.Raft
-	config       *config.ClusterRaftConfig
+	config       *ClusterRaftConfig
 	host         host.Host
 	serverConfig hraft.Configuration
 	transport    *hraft.NetworkTransport
@@ -60,7 +58,7 @@ type raftWrapper struct {
 // to make sure the raft instance is usable.
 func newRaftWrapper(
 	host host.Host,
-	cfg *config.ClusterRaftConfig,
+	cfg *ClusterRaftConfig,
 	fsm hraft.FSM,
 	staging bool,
 ) (*raftWrapper, error) {
@@ -143,7 +143,7 @@ var ChainNode = Options(
 	// Lite node API
 	ApplyIf(isLiteNode,
 		Override(new(messagepool.Provider), messagepool.NewProviderLite),
-		Override(new(messagesigner.MpoolNonceAPI), From(new(modules.MpoolNonceAPI))),
+		Override(new(messagepool.MpoolNonceAPI), From(new(modules.MpoolNonceAPI))),
 		Override(new(full.ChainModuleAPI), From(new(api.Gateway))),
 		Override(new(full.GasModuleAPI), From(new(api.Gateway))),
 		Override(new(full.MpoolModuleAPI), From(new(api.Gateway))),
@@ -154,7 +154,7 @@ var ChainNode = Options(
 	// Full node API / service startup
 	ApplyIf(isFullNode,
 		Override(new(messagepool.Provider), messagepool.NewProvider),
-		Override(new(messagesigner.MpoolNonceAPI), From(new(*messagepool.MessagePool))),
+		Override(new(messagepool.MpoolNonceAPI), From(new(*messagepool.MessagePool))),
 		Override(new(full.ChainModuleAPI), From(new(full.ChainModule))),
 		Override(new(full.GasModuleAPI), From(new(full.GasModule))),
 		Override(new(full.MpoolModuleAPI), From(new(full.MpoolModule))),
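These DI overrides are also the natural seam for swapping the nonce source in tests: anything implementing messagepool.MpoolNonceAPI can be injected in place of the real message pool. A hedged sketch (fakeNoncer is the illustrative stub from earlier, not part of this commit):

	opts := kit.ConstructorOpts(
		node.Override(new(messagepool.MpoolNonceAPI), func() messagepool.MpoolNonceAPI {
			return &fakeNoncer{nonces: map[address.Address]uint64{}}
		}),
	)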
@@ -2,12 +2,10 @@ package config

 import (
 	"encoding"
-	"io/ioutil"
 	"os"
 	"strconv"
 	"time"

-	hraft "github.com/hashicorp/raft"
 	"github.com/ipfs/go-cid"
 	"github.com/libp2p/go-libp2p/core/peer"

@@ -102,7 +100,7 @@ func DefaultFullNode() *FullNode {
 			ColdStoreFullGCFrequency: 7,
 			},
 		},
-		Raft: *DefaultClusterRaftConfig(),
+		Raft: *DefaultUserRaftConfig(),
 	}
 }

@@ -290,8 +288,30 @@ var (
 	DefaultDatastoreNamespace = "/raft"
 )

-func DefaultClusterRaftConfig() *ClusterRaftConfig {
-	var cfg ClusterRaftConfig
+//func DefaultClusterRaftConfig() *ClusterRaftConfig {
+//	var cfg ClusterRaftConfig
+//	cfg.DataFolder = "" // empty so it gets omitted
+//	cfg.InitPeerset = []peer.ID{}
+//	cfg.WaitForLeaderTimeout = Duration(DefaultWaitForLeaderTimeout)
+//	cfg.NetworkTimeout = Duration(DefaultNetworkTimeout)
+//	cfg.CommitRetries = DefaultCommitRetries
+//	cfg.CommitRetryDelay = Duration(DefaultCommitRetryDelay)
+//	cfg.BackupsRotate = DefaultBackupsRotate
+//	cfg.DatastoreNamespace = DefaultDatastoreNamespace
+//	cfg.RaftConfig = hraft.DefaultConfig()
+//
+//	// These options are imposed over any Default Raft Config.
+//	cfg.RaftConfig.ShutdownOnRemove = false
+//	cfg.RaftConfig.LocalID = "will_be_set_automatically"
+//
+//	// Set up logging
+//	cfg.RaftConfig.LogOutput = ioutil.Discard
+//	//cfg.RaftConfig.Logger = &hcLogToLogger{}
+//	return &cfg
+//}
+
+func DefaultUserRaftConfig() *UserRaftConfig {
+	var cfg UserRaftConfig
 	cfg.DataFolder = "" // empty so it gets omitted
 	cfg.InitPeerset = []peer.ID{}
 	cfg.WaitForLeaderTimeout = Duration(DefaultWaitForLeaderTimeout)
@@ -300,14 +320,6 @@ func DefaultUserRaftConfig() *UserRaftConfig {
 	cfg.CommitRetryDelay = Duration(DefaultCommitRetryDelay)
 	cfg.BackupsRotate = DefaultBackupsRotate
 	cfg.DatastoreNamespace = DefaultDatastoreNamespace
-	cfg.RaftConfig = hraft.DefaultConfig()
-
-	// These options are imposed over any Default Raft Config.
-	cfg.RaftConfig.ShutdownOnRemove = false
-	cfg.RaftConfig.LocalID = "will_be_set_automatically"
-
-	// Set up logging
-	cfg.RaftConfig.LogOutput = ioutil.Discard
-	//cfg.RaftConfig.Logger = &hcLogToLogger{}
 	return &cfg
 }
@@ -117,86 +117,6 @@ without existing payment channels with available funds will fail instead
 of automatically performing on-chain operations.`,
 		},
 	},
-	"ClusterRaftConfig": []DocField{
-		{
-			Name: "ClusterModeEnabled",
-			Type: "bool",
-
-			Comment: `config to enable node cluster with raft consensus`,
-		},
-		{
-			Name: "HostShutdown",
-			Type: "bool",
-
-			Comment: `will shutdown libp2p host on shutdown. Useful for testing`,
-		},
-		{
-			Name: "DataFolder",
-			Type: "string",
-
-			Comment: `A folder to store Raft's data.`,
-		},
-		{
-			Name: "InitPeerset",
-			Type: "[]peer.ID",
-
-			Comment: `InitPeerset provides the list of initial cluster peers for new Raft
-peers (with no prior state). It is ignored when Raft was already
-initialized or when starting in staging mode.`,
-		},
-		{
-			Name: "WaitForLeaderTimeout",
-			Type: "Duration",
-
-			Comment: `LeaderTimeout specifies how long to wait for a leader before
-failing an operation.`,
-		},
-		{
-			Name: "NetworkTimeout",
-			Type: "Duration",
-
-			Comment: `NetworkTimeout specifies how long before a Raft network
-operation is timed out`,
-		},
-		{
-			Name: "CommitRetries",
-			Type: "int",
-
-			Comment: `CommitRetries specifies how many times we retry a failed commit until
-we give up.`,
-		},
-		{
-			Name: "CommitRetryDelay",
-			Type: "Duration",
-
-			Comment: `How long to wait between retries`,
-		},
-		{
-			Name: "BackupsRotate",
-			Type: "int",
-
-			Comment: `BackupsRotate specifies the maximum number of Raft's DataFolder
-copies that we keep as backups (renaming) after cleanup.`,
-		},
-		{
-			Name: "DatastoreNamespace",
-			Type: "string",
-
-			Comment: `Namespace to use when writing keys to the datastore`,
-		},
-		{
-			Name: "RaftConfig",
-			Type: "*hraft.Config",
-
-			Comment: `A Hashicorp Raft's configuration object.`,
-		},
-		{
-			Name: "Tracing",
-			Type: "bool",
-
-			Comment: `Tracing enables propagation of contexts across binary boundaries.`,
-		},
-	},
 	"Common": []DocField{
 		{
 			Name: "API",
@@ -456,7 +376,7 @@ see https://lotus.filecoin.io/storage-providers/advanced-configurations/market/#
 		},
 		{
 			Name: "Raft",
-			Type: "ClusterRaftConfig",
+			Type: "UserRaftConfig",

 			Comment: ``,
 		},
@@ -1303,6 +1223,80 @@ finalities beyond the compaction boundary, default is 0, -1 retains everything`,
 			Comment: ``,
 		},
 	},
+	"UserRaftConfig": []DocField{
+		{
+			Name: "ClusterModeEnabled",
+			Type: "bool",
+
+			Comment: `config to enable node cluster with raft consensus`,
+		},
+		{
+			Name: "HostShutdown",
+			Type: "bool",
+
+			Comment: `will shutdown libp2p host on shutdown. Useful for testing`,
+		},
+		{
+			Name: "DataFolder",
+			Type: "string",
+
+			Comment: `A folder to store Raft's data.`,
+		},
+		{
+			Name: "InitPeerset",
+			Type: "[]peer.ID",
+
+			Comment: `InitPeerset provides the list of initial cluster peers for new Raft
+peers (with no prior state). It is ignored when Raft was already
+initialized or when starting in staging mode.`,
+		},
+		{
+			Name: "WaitForLeaderTimeout",
+			Type: "Duration",
+
+			Comment: `LeaderTimeout specifies how long to wait for a leader before
+failing an operation.`,
+		},
+		{
+			Name: "NetworkTimeout",
+			Type: "Duration",
+
+			Comment: `NetworkTimeout specifies how long before a Raft network
+operation is timed out`,
+		},
+		{
+			Name: "CommitRetries",
+			Type: "int",
+
+			Comment: `CommitRetries specifies how many times we retry a failed commit until
+we give up.`,
+		},
+		{
+			Name: "CommitRetryDelay",
+			Type: "Duration",
+
+			Comment: `How long to wait between retries`,
+		},
+		{
+			Name: "BackupsRotate",
+			Type: "int",
+
+			Comment: `BackupsRotate specifies the maximum number of Raft's DataFolder
+copies that we keep as backups (renaming) after cleanup.`,
+		},
+		{
+			Name: "DatastoreNamespace",
+			Type: "string",
+
+			Comment: `Namespace to use when writing keys to the datastore`,
+		},
+		{
+			Name: "Tracing",
+			Type: "bool",
+
+			Comment: `Tracing enables propagation of contexts across binary boundaries.`,
+		},
+	},
 	"Wallet": []DocField{
 		{
 			Name: "RemoteBackend",
@@ -1,7 +1,6 @@
 package config

 import (
-	hraft "github.com/hashicorp/raft"
 	"github.com/ipfs/go-cid"
 	"github.com/libp2p/go-libp2p/core/peer"

@@ -29,7 +28,7 @@ type FullNode struct {
 	Wallet     Wallet
 	Fees       FeeConfig
 	Chainstore Chainstore
-	Raft       ClusterRaftConfig
+	Raft       UserRaftConfig
 }

 // // Common
@@ -612,8 +611,43 @@ type FeeConfig struct {
 	DefaultMaxFee types.FIL
 }

-// ClusterRaftConfig allows to configure the Raft Consensus component for the node cluster.
-type ClusterRaftConfig struct {
+//// ClusterRaftConfig allows to configure the Raft Consensus component for the node cluster.
+//type ClusterRaftConfig struct {
+//	// config to enable node cluster with raft consensus
+//	ClusterModeEnabled bool
+//	// will shutdown libp2p host on shutdown. Useful for testing
+//	HostShutdown bool
+//	// A folder to store Raft's data.
+//	DataFolder string
+//	// InitPeerset provides the list of initial cluster peers for new Raft
+//	// peers (with no prior state). It is ignored when Raft was already
+//	// initialized or when starting in staging mode.
+//	InitPeerset []peer.ID
+//	// LeaderTimeout specifies how long to wait for a leader before
+//	// failing an operation.
+//	WaitForLeaderTimeout Duration
+//	// NetworkTimeout specifies how long before a Raft network
+//	// operation is timed out
+//	NetworkTimeout Duration
+//	// CommitRetries specifies how many times we retry a failed commit until
+//	// we give up.
+//	CommitRetries int
+//	// How long to wait between retries
+//	CommitRetryDelay Duration
+//	// BackupsRotate specifies the maximum number of Raft's DataFolder
+//	// copies that we keep as backups (renaming) after cleanup.
+//	BackupsRotate int
+//	// Namespace to use when writing keys to the datastore
+//	DatastoreNamespace string
+//
+//	// A Hashicorp Raft's configuration object.
+//	RaftConfig *hraft.Config
+//
+//	// Tracing enables propagation of contexts across binary boundaries.
+//	Tracing bool
+//}
+
+type UserRaftConfig struct {
 	// config to enable node cluster with raft consensus
 	ClusterModeEnabled bool
 	// will shutdown libp2p host on shutdown. Useful for testing
@@ -642,7 +676,7 @@ type UserRaftConfig struct {
 	DatastoreNamespace string

 	// A Hashicorp Raft's configuration object.
-	RaftConfig *hraft.Config
+	//RaftConfig *hraft.Config

 	// Tracing enables propagation of contexts across binary boundaries.
 	Tracing bool
@@ -9,7 +9,8 @@ import (

 	"github.com/filecoin-project/lotus/api"
 	"github.com/filecoin-project/lotus/build"
-	consensus2 "github.com/filecoin-project/lotus/lib/consensus/raft"
+
+	//consensus2 "github.com/filecoin-project/lotus/lib/consensus/raft"
 	"github.com/filecoin-project/lotus/node/impl/client"
 	"github.com/filecoin-project/lotus/node/impl/common"
 	"github.com/filecoin-project/lotus/node/impl/full"
@@ -119,7 +120,7 @@ func (n *FullNodeAPI) NodeStatus(ctx context.Context, inclChainStatus bool) (sta
 	return status, nil
 }

-func (n *FullNodeAPI) RaftState(ctx context.Context) (*consensus2.RaftState, error) {
+func (n *FullNodeAPI) RaftState(ctx context.Context) (*api.RaftStateData, error) {
 	return n.RaftAPI.GetRaftState(ctx)
 }
@@ -2,12 +2,13 @@ package full

 import (
 	"context"

 	"github.com/libp2p/go-libp2p/core/peer"
 	"go.uber.org/fx"
 	"golang.org/x/xerrors"

+	"github.com/filecoin-project/lotus/api"
 	"github.com/filecoin-project/lotus/chain/messagesigner"
 	consensus "github.com/filecoin-project/lotus/lib/consensus/raft"
 )

 type RaftAPI struct {
@@ -16,11 +17,15 @@ type RaftAPI struct {
 	MessageSigner *messagesigner.MessageSignerConsensus `optional:"true"`
 }

-func (r *RaftAPI) GetRaftState(ctx context.Context) (*consensus.RaftState, error) {
+func (r *RaftAPI) GetRaftState(ctx context.Context) (*api.RaftStateData, error) {
 	if r.MessageSigner == nil {
 		return nil, xerrors.Errorf("Raft consensus not enabled. Please check your configuration")
 	}
-	return r.MessageSigner.GetRaftState(ctx)
+	raftState, err := r.MessageSigner.GetRaftState(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return &api.RaftStateData{NonceMap: raftState.NonceMap, MsgUuids: raftState.MsgUuids}, nil
 }

 func (r *RaftAPI) Leader(ctx context.Context) (peer.ID, error) {
@@ -9,7 +9,7 @@ import (

 	"github.com/filecoin-project/go-address"

-	"github.com/filecoin-project/lotus/chain/messagesigner"
+	"github.com/filecoin-project/lotus/chain/messagepool"
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/node/impl/full"
 )
@@ -104,4 +104,4 @@ func (a *MpoolNonceAPI) GetActor(ctx context.Context, addr address.Address, tsk
 	return act, nil
 }

-var _ messagesigner.MpoolNonceAPI = (*MpoolNonceAPI)(nil)
+var _ messagepool.MpoolNonceAPI = (*MpoolNonceAPI)(nil)