WIP: rest of the stuff

Shrenuj Bansal 2022-09-13 13:05:48 -04:00
parent 4171be0b98
commit 3441224b2f
14 changed files with 432 additions and 114 deletions

devgen.car (new binary file, contents not shown)

extern/filecoin-ffi (vendored submodule)

@@ -1 +1 @@
-Subproject commit f997fe6c77632c0bc58d0b1fdf53ee7a93f6027c
+Subproject commit c70caaf14f8ff5b15eb5166093d9e57055e571f8


@@ -37,10 +37,6 @@ type TestFullNode struct {
	ListenAddr multiaddr.Multiaddr
	DefaultKey *key.Key

-	//Libp2p struct {
-	//	PeerID  peer.ID
-	//	PrivKey libp2pcrypto.PrivKey
-	//}
	Pkey *Libp2p

	Stop node.StopFunc


@@ -0,0 +1,53 @@
package itests
import (
"context"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/node/config"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/itests/kit"
)
func TestMpoolPushWithoutUuidWithMaxFee(t *testing.T) {
//stm: @TOKEN_WALLET_SIGN_001, @CHAIN_MEMPOOL_PUSH_001
ctx := context.Background()
kit.QuietMiningLogs()
client15, _, ens := kit.EnsembleMinimal(t, kit.MockProofs())
ens.InterconnectAll().BeginMining(10 * time.Millisecond)
bal, err := client15.WalletBalance(ctx, client15.DefaultKey.Address)
require.NoError(t, err)
// send self half of account balance
msgHalfBal := &types.Message{
From: client15.DefaultKey.Address,
To: client15.DefaultKey.Address,
Value: big.Div(bal, big.NewInt(2)),
}
smHalfBal, err := client15.MpoolPushMessage(ctx, msgHalfBal, &api.MessageSendSpec{MaxFee: abi.TokenAmount(config.DefaultDefaultMaxFee)})
require.NoError(t, err)
mLookup, err := client15.StateWaitMsg(ctx, smHalfBal.Cid(), 3, api.LookbackNoLimit, true)
require.NoError(t, err)
require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode)
msgQuarterBal := &types.Message{
From: client15.DefaultKey.Address,
To: client15.DefaultKey.Address,
Value: big.Div(bal, big.NewInt(4)),
}
smcid, err := client15.MpoolPushMessage(ctx, msgQuarterBal, &api.MessageSendSpec{MaxFee: abi.TokenAmount(config.DefaultDefaultMaxFee)})
require.NoError(t, err)
mLookup, err = client15.StateWaitMsg(ctx, smcid.Cid(), 3, api.LookbackNoLimit, true)
require.NoError(t, err)
require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode)
}


@@ -4,6 +4,7 @@ import (
	"context"
	"crypto/rand"
	"fmt"
+	"github.com/filecoin-project/lotus/node/config"
	"reflect"
	"testing"
	"time"
@@ -59,8 +60,8 @@ func setup(ctx context.Context, t *testing.T, node0 *kit.TestFullNode, node1 *ki
	raftOps := kit.ConstructorOpts(
		node.Override(new(*gorpc.Client), modules.NewRPCClient),
-		node.Override(new(*consensus.Config), func() *consensus.Config {
-			cfg := consensus.NewDefaultConfig()
+		node.Override(new(*config.ClusterRaftConfig), func() *config.ClusterRaftConfig {
+			cfg := config.DefaultClusterRaftConfig()
			cfg.InitPeerset = initPeerSet
			return cfg
		}),


@@ -26,7 +26,7 @@ func TestSelfSentTxnV15(t *testing.T) {
	kit.QuietMiningLogs()

-	client15, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.GenesisNetworkVersion(network.Version15))
+	client15, _, ens := kit.EnsembleMinimal(t, kit.MockProofs()) //, kit.GenesisNetworkVersion(network.Version15))
	ens.InterconnectAll().BeginMining(10 * time.Millisecond)

	bal, err := client15.WalletBalance(ctx, client15.DefaultKey.Address)


@@ -1,11 +1,10 @@
package consensus

import (
-	"io/ioutil"
-	"time"
+	"github.com/filecoin-project/lotus/node/config"

	hraft "github.com/hashicorp/raft"
-	"github.com/libp2p/go-libp2p/core/peer"
+	"golang.org/x/xerrors"
+	"time"
)

// ConfigKey is the default configuration key for holding this component's
@@ -27,42 +26,42 @@
// Config allows to configure the Raft Consensus component for ipfs-cluster.
// The component's configuration section is represented by ConfigJSON.
// Config implements the ComponentConfig interface.
-type Config struct {
-	//config.Saver
-	//
-	//// will shutdown libp2p host on shutdown. Useful for testing
-	hostShutdown bool
-
-	// A folder to store Raft's data.
-	DataFolder string
-
-	// InitPeerset provides the list of initial cluster peers for new Raft
-	// peers (with no prior state). It is ignored when Raft was already
-	// initialized or when starting in staging mode.
-	InitPeerset []peer.ID
-	// LeaderTimeout specifies how long to wait for a leader before
-	// failing an operation.
-	WaitForLeaderTimeout time.Duration
-	// NetworkTimeout specifies how long before a Raft network
-	// operation is timed out
-	NetworkTimeout time.Duration
-	// CommitRetries specifies how many times we retry a failed commit until
-	// we give up.
-	CommitRetries int
-	// How long to wait between retries
-	CommitRetryDelay time.Duration
-	// BackupsRotate specifies the maximum number of Raft's DataFolder
-	// copies that we keep as backups (renaming) after cleanup.
-	BackupsRotate int
-	// Namespace to use when writing keys to the datastore
-	DatastoreNamespace string
-
-	// A Hashicorp Raft's configuration object.
-	RaftConfig *hraft.Config
-
-	// Tracing enables propagation of contexts across binary boundaries.
-	Tracing bool
-}
+//type Config struct {
+//	//config.Saver
+//	//
+//	//// will shutdown libp2p host on shutdown. Useful for testing
+//	hostShutdown bool
+//
+//	// A folder to store Raft's data.
+//	DataFolder string
+//
+//	// InitPeerset provides the list of initial cluster peers for new Raft
+//	// peers (with no prior state). It is ignored when Raft was already
+//	// initialized or when starting in staging mode.
+//	InitPeerset []peer.ID
+//	// LeaderTimeout specifies how long to wait for a leader before
+//	// failing an operation.
+//	WaitForLeaderTimeout time.Duration
+//	// NetworkTimeout specifies how long before a Raft network
+//	// operation is timed out
+//	NetworkTimeout time.Duration
+//	// CommitRetries specifies how many times we retry a failed commit until
+//	// we give up.
+//	CommitRetries int
+//	// How long to wait between retries
+//	CommitRetryDelay time.Duration
+//	// BackupsRotate specifies the maximum number of Raft's DataFolder
+//	// copies that we keep as backups (renaming) after cleanup.
+//	BackupsRotate int
+//	// Namespace to use when writing keys to the datastore
+//	DatastoreNamespace string
+//
+//	// A Hashicorp Raft's configuration object.
+//	RaftConfig *hraft.Config
+//
+//	// Tracing enables propagation of contexts across binary boundaries.
+//	Tracing bool
+//}
// ConfigJSON represents a human-friendly Config
// object which can be saved to JSON. Most configuration keys are converted
@@ -138,38 +137,38 @@ type Config struct {
//}

// ConfigKey returns a human-friendly indentifier for this Config.
-func (cfg *Config) ConfigKey() string {
-	return configKey
-}
+//func (cfg *config.ClusterRaftConfig) ConfigKey() string {
+//	return configKey
+//}

//// Validate checks that this configuration has working values,
//// at least in appearance.
-//func (cfg *Config) Validate() error {
-//	if cfg.RaftConfig == nil {
-//		return errors.New("no hashicorp/raft.Config")
-//	}
-//	if cfg.WaitForLeaderTimeout <= 0 {
-//		return errors.New("wait_for_leader_timeout <= 0")
-//	}
-//
-//	if cfg.NetworkTimeout <= 0 {
-//		return errors.New("network_timeout <= 0")
-//	}
-//
-//	if cfg.CommitRetries < 0 {
-//		return errors.New("commit_retries is invalid")
-//	}
-//
-//	if cfg.CommitRetryDelay <= 0 {
-//		return errors.New("commit_retry_delay is invalid")
-//	}
-//
-//	if cfg.BackupsRotate <= 0 {
-//		return errors.New("backups_rotate should be larger than 0")
-//	}
-//
-//	return hraft.ValidateConfig(cfg.RaftConfig)
-//}
+func ValidateConfig(cfg *config.ClusterRaftConfig) error {
+	if cfg.RaftConfig == nil {
+		return xerrors.Errorf("no hashicorp/raft.Config")
+	}
+	if cfg.WaitForLeaderTimeout <= 0 {
+		return xerrors.Errorf("wait_for_leader_timeout <= 0")
+	}
+	if cfg.NetworkTimeout <= 0 {
+		return xerrors.Errorf("network_timeout <= 0")
+	}
+	if cfg.CommitRetries < 0 {
+		return xerrors.Errorf("commit_retries is invalid")
+	}
+	if cfg.CommitRetryDelay <= 0 {
+		return xerrors.Errorf("commit_retry_delay is invalid")
+	}
+	if cfg.BackupsRotate <= 0 {
+		return xerrors.Errorf("backups_rotate should be larger than 0")
+	}
+	return hraft.ValidateConfig(cfg.RaftConfig)
+}
// LoadJSON parses a json-encoded configuration (see jsonConfig).
// The Config will have default values for all fields not explicited
@@ -264,31 +263,31 @@ func (cfg *Config) ConfigKey() string {
//}
//
// Default initializes this configuration with working defaults.
-func (cfg *Config) Default() {
-	cfg.DataFolder = "" // empty so it gets omitted
-	cfg.InitPeerset = []peer.ID{}
-	cfg.WaitForLeaderTimeout = DefaultWaitForLeaderTimeout
-	cfg.NetworkTimeout = DefaultNetworkTimeout
-	cfg.CommitRetries = DefaultCommitRetries
-	cfg.CommitRetryDelay = DefaultCommitRetryDelay
-	cfg.BackupsRotate = DefaultBackupsRotate
-	cfg.DatastoreNamespace = DefaultDatastoreNamespace
-	cfg.RaftConfig = hraft.DefaultConfig()
-
-	// These options are imposed over any Default Raft Config.
-	cfg.RaftConfig.ShutdownOnRemove = false
-	cfg.RaftConfig.LocalID = "will_be_set_automatically"
-
-	// Set up logging
-	cfg.RaftConfig.LogOutput = ioutil.Discard
-	//cfg.RaftConfig.Logger = &hcLogToLogger{}
-}
-
-func NewDefaultConfig() *Config {
-	var cfg Config
-	cfg.Default()
-	return &cfg
-}
+//func (cfg *config.ClusterRaftConfig) Default() {
+//	cfg.DataFolder = "" // empty so it gets omitted
+//	cfg.InitPeerset = []peer.ID{}
+//	cfg.WaitForLeaderTimeout = DefaultWaitForLeaderTimeout
+//	cfg.NetworkTimeout = DefaultNetworkTimeout
+//	cfg.CommitRetries = DefaultCommitRetries
+//	cfg.CommitRetryDelay = DefaultCommitRetryDelay
+//	cfg.BackupsRotate = DefaultBackupsRotate
+//	cfg.DatastoreNamespace = DefaultDatastoreNamespace
+//	cfg.RaftConfig = hraft.DefaultConfig()
+//
+//	// These options are imposed over any Default Raft Config.
+//	cfg.RaftConfig.ShutdownOnRemove = false
+//	cfg.RaftConfig.LocalID = "will_be_set_automatically"
+//
+//	// Set up logging
+//	cfg.RaftConfig.LogOutput = ioutil.Discard
+//	//cfg.RaftConfig.Logger = &hcLogToLogger{}
+//}
+//
+//func NewDefaultConfig() *config.ClusterRaftConfig {
+//	var cfg config.ClusterRaftConfig
+//	cfg.Default()
+//	return &cfg
+//}
//
//// ApplyEnvVars fills in any Config fields found

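For context, a minimal sketch (illustration only, not part of this commit) of how the new standalone validator pairs with the defaults that now live in node/config. The import path for the raft consensus package is an assumption, since the hunks above only show "package consensus":

package main

import (
	"fmt"

	consensus "github.com/filecoin-project/lotus/lib/consensus/raft" // assumed path
	"github.com/filecoin-project/lotus/node/config"
)

func main() {
	// config.DefaultClusterRaftConfig() replaces the old consensus.NewDefaultConfig().
	cfg := config.DefaultClusterRaftConfig()
	cfg.CommitRetries = -1 // deliberately invalid

	// ValidateConfig replaces the previously commented-out (cfg *Config).Validate method.
	if err := consensus.ValidateConfig(cfg); err != nil {
		fmt.Println("config rejected:", err) // "commit_retries is invalid"
	}
}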

@@ -6,6 +6,7 @@ import (
	"context"
	"errors"
	"fmt"
+	"github.com/filecoin-project/lotus/node/config"
	"sort"
	"time"
@@ -58,7 +59,7 @@ var _ consensus.Op = &ConsensusOp{}
type Consensus struct {
	ctx    context.Context
	cancel func()
-	config *Config
+	config *config.ClusterRaftConfig

	host host.Host
@@ -83,11 +84,11 @@ type Consensus struct {
//
// The staging parameter controls if the Raft peer should start in
// staging mode (used when joining a new Raft peerset with other peers).
-func NewConsensus(host host.Host, cfg *Config, staging bool) (*Consensus, error) {
-	//err := cfg.Validate()
-	//if err != nil {
-	//	return nil, err
-	//}
+func NewConsensus(host host.Host, cfg *config.ClusterRaftConfig, staging bool) (*Consensus, error) {
+	err := ValidateConfig(cfg)
+	if err != nil {
+		return nil, err
+	}

	ctx, cancel := context.WithCancel(context.Background())
@@ -124,11 +125,11 @@ func NewConsensus(host host.Host, cfg *Config, staging bool) (*Consensus, error)
}

func NewConsensusWithRPCClient(staging bool) func(host host.Host,
-	cfg *Config,
+	cfg *config.ClusterRaftConfig,
	rpcClient *rpc.Client,
) (*Consensus, error) {
-	return func(host host.Host, cfg *Config, rpcClient *rpc.Client) (*Consensus, error) {
+	return func(host host.Host, cfg *config.ClusterRaftConfig, rpcClient *rpc.Client) (*Consensus, error) {
		cc, err := NewConsensus(host, cfg, staging)
		if err != nil {
			return nil, err
@@ -230,7 +231,7 @@ func (cc *Consensus) Shutdown(ctx context.Context) error {
		logger.Error(err)
	}

-	if cc.config.hostShutdown {
+	if cc.config.HostShutdown {
		cc.host.Close()
	}


@@ -4,6 +4,7 @@ import (
	"context"
	"errors"
	"fmt"
+	"github.com/filecoin-project/lotus/node/config"
	"io"
	"os"
	"time"
@@ -42,7 +43,7 @@ type raftWrapper struct {
	ctx          context.Context
	cancel       context.CancelFunc
	raft         *hraft.Raft
-	config       *Config
+	config       *config.ClusterRaftConfig
	host         host.Host
	serverConfig hraft.Configuration
	transport    *hraft.NetworkTransport
@@ -58,7 +59,7 @@ type raftWrapper struct {
// to make sure the raft instance is usable.
func newRaftWrapper(
	host host.Host,
-	cfg *Config,
+	cfg *config.ClusterRaftConfig,
	fsm hraft.FSM,
	staging bool,
) (*raftWrapper, error) {

localnet.json (new file)

@@ -0,0 +1,112 @@
{
"NetworkVersion": 16,
"Accounts": [
{
"Type": "account",
"Balance": "50000000000000000000000000",
"Meta": {
"Owner": "t3v26vdulzazde5z2fvxtyy7pm24qn2ztxp6jvqsfh3yqe3vkj44w4mlmvfbwuxp2c7n3yc44o7sbge2snbqha"
}
}
],
"Miners": [
{
"ID": "t01000",
"Owner": "t3v26vdulzazde5z2fvxtyy7pm24qn2ztxp6jvqsfh3yqe3vkj44w4mlmvfbwuxp2c7n3yc44o7sbge2snbqha",
"Worker": "t3v26vdulzazde5z2fvxtyy7pm24qn2ztxp6jvqsfh3yqe3vkj44w4mlmvfbwuxp2c7n3yc44o7sbge2snbqha",
"PeerId": "12D3KooWPp8ochucGCrspFfuYqEf26wz3DZUUWAd7dGSw5e1bqio",
"MarketBalance": "0",
"PowerBalance": "0",
"SectorSize": 2048,
"Sectors": [
{
"CommR": {
"/": "bagboea4b5abcbiikcvp43xiaxqwvzs56vmefz72hj5zpv4myynrsymnsfhv3luzh"
},
"CommD": {
"/": "baga6ea4seaqlaz34kjgrpb5l6isyjjsodxzs74rkq6ym5rsw67zqpk2hsxyruky"
},
"SectorID": 0,
"Deal": {
"PieceCID": {
"/": "baga6ea4seaqlaz34kjgrpb5l6isyjjsodxzs74rkq6ym5rsw67zqpk2hsxyruky"
},
"PieceSize": 2048,
"VerifiedDeal": false,
"Client": "t3v26vdulzazde5z2fvxtyy7pm24qn2ztxp6jvqsfh3yqe3vkj44w4mlmvfbwuxp2c7n3yc44o7sbge2snbqha",
"Provider": "t01000",
"Label": "0",
"StartEpoch": 0,
"EndEpoch": 9001,
"StoragePricePerEpoch": "0",
"ProviderCollateral": "0",
"ClientCollateral": "0"
},
"DealClientKey": {
"Type": "bls",
"PrivateKey": "jM6TtJBVoBleVAABOX7oQxbOqxiK2e0cjyEo34VOPjw=",
"PublicKey": "rr1R0XkGRk7nRa3njH3s1yDdZnd/k1hIp94gTdVJ5y3GLZUobUu/Qvt3gXOO/IJi",
"Address": "t3v26vdulzazde5z2fvxtyy7pm24qn2ztxp6jvqsfh3yqe3vkj44w4mlmvfbwuxp2c7n3yc44o7sbge2snbqha"
},
"ProofType": 5
},
{
"CommR": {
"/": "bagboea4b5abcazkra3xmise6fchdg3ownnmvsahz5gonqjiil2sgdgjy63rz4qkb"
},
"CommD": {
"/": "baga6ea4seaqbxdb2tltuyvx3i7o74ups4jn3vtmzpdwlh4oqzcfo7fxxapviqha"
},
"SectorID": 1,
"Deal": {
"PieceCID": {
"/": "baga6ea4seaqbxdb2tltuyvx3i7o74ups4jn3vtmzpdwlh4oqzcfo7fxxapviqha"
},
"PieceSize": 2048,
"VerifiedDeal": false,
"Client": "t3v26vdulzazde5z2fvxtyy7pm24qn2ztxp6jvqsfh3yqe3vkj44w4mlmvfbwuxp2c7n3yc44o7sbge2snbqha",
"Provider": "t01000",
"Label": "1",
"StartEpoch": 0,
"EndEpoch": 9001,
"StoragePricePerEpoch": "0",
"ProviderCollateral": "0",
"ClientCollateral": "0"
},
"DealClientKey": {
"Type": "bls",
"PrivateKey": "jM6TtJBVoBleVAABOX7oQxbOqxiK2e0cjyEo34VOPjw=",
"PublicKey": "rr1R0XkGRk7nRa3njH3s1yDdZnd/k1hIp94gTdVJ5y3GLZUobUu/Qvt3gXOO/IJi",
"Address": "t3v26vdulzazde5z2fvxtyy7pm24qn2ztxp6jvqsfh3yqe3vkj44w4mlmvfbwuxp2c7n3yc44o7sbge2snbqha"
},
"ProofType": 5
}
]
}
],
"NetworkName": "localnet-2602dfef-e23d-4d09-9893-3010f961e1b9",
"VerifregRootKey": {
"Type": "multisig",
"Balance": "0",
"Meta": {
"Signers": [
"t1ceb34gnsc6qk5dt6n7xg6ycwzasjhbxm3iylkiy"
],
"Threshold": 1,
"VestingDuration": 0,
"VestingStart": 0
}
},
"RemainderAccount": {
"Type": "multisig",
"Balance": "0",
"Meta": {
"Signers": [
"t1ceb34gnsc6qk5dt6n7xg6ycwzasjhbxm3iylkiy"
],
"Threshold": 1,
"VestingDuration": 0,
"VestingStart": 0
}
}
}
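
For orientation (not part of the commit itself): the file above looks like the 2k-sector local devnet genesis template that pairs with the devgen.car snapshot added earlier. A minimal sketch that loads it, assuming it mirrors lotus's genesis.Template type (an assumption; the diff never names the consuming Go type):

package main

import (
	"encoding/json"
	"fmt"
	"os"

	genesis "github.com/filecoin-project/lotus/genesis" // assumed package and type
)

func main() {
	raw, err := os.ReadFile("localnet.json")
	if err != nil {
		panic(err)
	}

	var tmpl genesis.Template
	if err := json.Unmarshal(raw, &tmpl); err != nil {
		panic(err)
	}

	// Spot-check a few fields of the devnet template.
	fmt.Println(tmpl.NetworkName)            // localnet-2602dfef-...
	fmt.Println(tmpl.NetworkVersion)         // 16
	fmt.Println(len(tmpl.Miners[0].Sectors)) // 2 preseal sectors for t01000
}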


@@ -2,6 +2,9 @@ package config
import (
	"encoding"
+	hraft "github.com/hashicorp/raft"
+	"github.com/libp2p/go-libp2p/core/peer"
+	"io/ioutil"
	"os"
	"strconv"
	"time"
@@ -99,6 +102,7 @@ func DefaultFullNode() *FullNode {
				ColdStoreFullGCFrequency: 7,
			},
		},
+		Raft: *DefaultClusterRaftConfig(),
	}
}
@@ -274,3 +278,35 @@ func (dur Duration) MarshalText() ([]byte, error) {
	d := time.Duration(dur)
	return []byte(d.String()), nil
}
var (
DefaultDataSubFolder = "raft"
DefaultWaitForLeaderTimeout = 15 * time.Second
DefaultCommitRetries = 1
DefaultNetworkTimeout = 100 * time.Second
DefaultCommitRetryDelay = 200 * time.Millisecond
DefaultBackupsRotate = 6
DefaultDatastoreNamespace = "/r" // from "/raft"
)
func DefaultClusterRaftConfig() *ClusterRaftConfig {
var cfg ClusterRaftConfig
cfg.DataFolder = "" // empty so it gets omitted
cfg.InitPeerset = []peer.ID{}
cfg.WaitForLeaderTimeout = DefaultWaitForLeaderTimeout
cfg.NetworkTimeout = DefaultNetworkTimeout
cfg.CommitRetries = DefaultCommitRetries
cfg.CommitRetryDelay = DefaultCommitRetryDelay
cfg.BackupsRotate = DefaultBackupsRotate
cfg.DatastoreNamespace = DefaultDatastoreNamespace
cfg.RaftConfig = hraft.DefaultConfig()
// These options are imposed over any Default Raft Config.
cfg.RaftConfig.ShutdownOnRemove = false
cfg.RaftConfig.LocalID = "will_be_set_automatically"
// Set up logging
cfg.RaftConfig.LogOutput = ioutil.Discard
//cfg.RaftConfig.Logger = &hcLogToLogger{}
return &cfg
}
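
A small usage sketch (illustration only, not part of the diff): DefaultFullNode() now embeds these defaults under its Raft field, so callers can tweak individual knobs before wiring up a node:

package main

import (
	"fmt"
	"time"

	"github.com/filecoin-project/lotus/node/config"
)

func main() {
	cfg := config.DefaultFullNode() // Raft is pre-populated via DefaultClusterRaftConfig()

	// Override a couple of defaults, e.g. for a slow test network.
	cfg.Raft.WaitForLeaderTimeout = 30 * time.Second
	cfg.Raft.CommitRetries = 3

	fmt.Println(cfg.Raft.DatastoreNamespace) // "/r"
	fmt.Println(cfg.Raft.BackupsRotate)      // 6
}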


@@ -117,6 +117,82 @@ without existing payment channels with available funds will fail instead
of automatically performing on-chain operations.`,
},
},
"ClusterRaftConfig": []DocField{
{
Name: "HostShutdown",
Type: "bool",
Comment: `config.Saver
will shutdown libp2p host on shutdown. Useful for testing`,
},
{
Name: "DataFolder",
Type: "string",
Comment: `A folder to store Raft's data.`,
},
{
Name: "InitPeerset",
Type: "[]peer.ID",
Comment: `InitPeerset provides the list of initial cluster peers for new Raft
peers (with no prior state). It is ignored when Raft was already
initialized or when starting in staging mode.`,
},
{
Name: "WaitForLeaderTimeout",
Type: "time.Duration",
Comment: `LeaderTimeout specifies how long to wait for a leader before
failing an operation.`,
},
{
Name: "NetworkTimeout",
Type: "time.Duration",
Comment: `NetworkTimeout specifies how long before a Raft network
operation is timed out`,
},
{
Name: "CommitRetries",
Type: "int",
Comment: `CommitRetries specifies how many times we retry a failed commit until
we give up.`,
},
{
Name: "CommitRetryDelay",
Type: "time.Duration",
Comment: `How long to wait between retries`,
},
{
Name: "BackupsRotate",
Type: "int",
Comment: `BackupsRotate specifies the maximum number of Raft's DataFolder
copies that we keep as backups (renaming) after cleanup.`,
},
{
Name: "DatastoreNamespace",
Type: "string",
Comment: `Namespace to use when writing keys to the datastore`,
},
{
Name: "RaftConfig",
Type: "*hraft.Config",
Comment: `A Hashicorp Raft's configuration object.`,
},
{
Name: "Tracing",
Type: "bool",
Comment: `Tracing enables propagation of contexts across binary boundaries.`,
},
},
"Common": []DocField{ "Common": []DocField{
{ {
Name: "API", Name: "API",
@@ -372,6 +448,12 @@ see https://docs.filecoin.io/mine/lotus/miner-configuration/#using-filters-for-f
{
Name: "Chainstore",
Type: "Chainstore",
Comment: ``,
},
+{
+Name: "Raft",
+Type: "ClusterRaftConfig",
+
+Comment: ``,
+},
},
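
The new entries make the Raft options visible to the config documentation generator. A lookup sketch, assuming the entries above live in the package-level config.Doc map (the map declaration itself is outside this diff, so the name is an assumption):

package main

import (
	"fmt"

	"github.com/filecoin-project/lotus/node/config"
)

func main() {
	// Doc maps a struct name to its documented fields (assumed variable name).
	for _, field := range config.Doc["ClusterRaftConfig"] {
		fmt.Printf("%-22s %-18s %s\n", field.Name, field.Type, field.Comment)
	}
}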


@@ -1,7 +1,10 @@
package config

import (
+	hraft "github.com/hashicorp/raft"
	"github.com/ipfs/go-cid"
+	"github.com/libp2p/go-libp2p/core/peer"
+	"time"

	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/storage/sealer"
@@ -27,6 +30,7 @@ type FullNode struct {
	Wallet     Wallet
	Fees       FeeConfig
	Chainstore Chainstore
+	Raft       ClusterRaftConfig
}
// // Common
@@ -590,3 +594,37 @@ type Wallet struct {
type FeeConfig struct {
	DefaultMaxFee types.FIL
}
// ClusterRaftConfig allows to configure the Raft Consensus component for the node cluster.
type ClusterRaftConfig struct {
// will shutdown libp2p host on shutdown. Useful for testing
HostShutdown bool
// A folder to store Raft's data.
DataFolder string
// InitPeerset provides the list of initial cluster peers for new Raft
// peers (with no prior state). It is ignored when Raft was already
// initialized or when starting in staging mode.
InitPeerset []peer.ID
// LeaderTimeout specifies how long to wait for a leader before
// failing an operation.
WaitForLeaderTimeout time.Duration
// NetworkTimeout specifies how long before a Raft network
// operation is timed out
NetworkTimeout time.Duration
// CommitRetries specifies how many times we retry a failed commit until
// we give up.
CommitRetries int
// How long to wait between retries
CommitRetryDelay time.Duration
// BackupsRotate specifies the maximum number of Raft's DataFolder
// copies that we keep as backups (renaming) after cleanup.
BackupsRotate int
// Namespace to use when writing keys to the datastore
DatastoreNamespace string
// A Hashicorp Raft's configuration object.
RaftConfig *hraft.Config
// Tracing enables propagation of contexts across binary boundaries.
Tracing bool
}


@@ -3,7 +3,6 @@ package full
import (
	"context"
	"encoding/json"
-
	"github.com/ipfs/go-cid"
	consensus "github.com/libp2p/go-libp2p-consensus"
	"github.com/libp2p/go-libp2p/core/peer"