WIP: rest of the stuff

This commit is contained in:
Shrenuj Bansal 2022-09-13 13:05:48 -04:00
parent 4171be0b98
commit 3441224b2f
14 changed files with 432 additions and 114 deletions

BIN
devgen.car Normal file

Binary file not shown.

2
extern/filecoin-ffi vendored

@ -1 +1 @@
Subproject commit f997fe6c77632c0bc58d0b1fdf53ee7a93f6027c
Subproject commit c70caaf14f8ff5b15eb5166093d9e57055e571f8

View File

@ -37,10 +37,6 @@ type TestFullNode struct {
ListenAddr multiaddr.Multiaddr
DefaultKey *key.Key
//Libp2p struct {
// PeerID peer.ID
// PrivKey libp2pcrypto.PrivKey
//}
Pkey *Libp2p
Stop node.StopFunc

View File

@ -0,0 +1,53 @@
package itests
import (
"context"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/node/config"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/itests/kit"
)
// TestMpoolPushWithoutUuidWithMaxFee pushes two self-send messages (half and
// then a quarter of the default wallet's balance) through the mempool with an
// explicit MaxFee, and asserts both execute on chain with exit code Ok.
// NOTE(review): nothing in the body disables UUIDs explicitly — presumably
// "without uuid" means the MessageSendSpec carries no uuid field; confirm
// against the api.MessageSendSpec definition.
func TestMpoolPushWithoutUuidWithMaxFee(t *testing.T) {
//stm: @TOKEN_WALLET_SIGN_001, @CHAIN_MEMPOOL_PUSH_001
ctx := context.Background()
kit.QuietMiningLogs()
// Single-node ensemble with mocked proofs, mining a block every 10ms.
client15, _, ens := kit.EnsembleMinimal(t, kit.MockProofs())
ens.InterconnectAll().BeginMining(10 * time.Millisecond)
bal, err := client15.WalletBalance(ctx, client15.DefaultKey.Address)
require.NoError(t, err)
// send self half of account balance
msgHalfBal := &types.Message{
From: client15.DefaultKey.Address,
To: client15.DefaultKey.Address,
Value: big.Div(bal, big.NewInt(2)),
}
// Cap gas spend at the node's default maximum fee.
smHalfBal, err := client15.MpoolPushMessage(ctx, msgHalfBal, &api.MessageSendSpec{MaxFee: abi.TokenAmount(config.DefaultDefaultMaxFee)})
require.NoError(t, err)
// Wait for 3 confirmations before inspecting the receipt.
mLookup, err := client15.StateWaitMsg(ctx, smHalfBal.Cid(), 3, api.LookbackNoLimit, true)
require.NoError(t, err)
require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode)
// Second self-send: a quarter of the original balance, same fee cap.
msgQuarterBal := &types.Message{
From: client15.DefaultKey.Address,
To: client15.DefaultKey.Address,
Value: big.Div(bal, big.NewInt(4)),
}
smcid, err := client15.MpoolPushMessage(ctx, msgQuarterBal, &api.MessageSendSpec{MaxFee: abi.TokenAmount(config.DefaultDefaultMaxFee)})
require.NoError(t, err)
mLookup, err = client15.StateWaitMsg(ctx, smcid.Cid(), 3, api.LookbackNoLimit, true)
require.NoError(t, err)
require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode)
}

View File

@ -4,6 +4,7 @@ import (
"context"
"crypto/rand"
"fmt"
"github.com/filecoin-project/lotus/node/config"
"reflect"
"testing"
"time"
@ -59,8 +60,8 @@ func setup(ctx context.Context, t *testing.T, node0 *kit.TestFullNode, node1 *ki
raftOps := kit.ConstructorOpts(
node.Override(new(*gorpc.Client), modules.NewRPCClient),
node.Override(new(*consensus.Config), func() *consensus.Config {
cfg := consensus.NewDefaultConfig()
node.Override(new(*config.ClusterRaftConfig), func() *config.ClusterRaftConfig {
cfg := config.DefaultClusterRaftConfig()
cfg.InitPeerset = initPeerSet
return cfg
}),

View File

@ -26,7 +26,7 @@ func TestSelfSentTxnV15(t *testing.T) {
kit.QuietMiningLogs()
client15, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.GenesisNetworkVersion(network.Version15))
client15, _, ens := kit.EnsembleMinimal(t, kit.MockProofs()) //, kit.GenesisNetworkVersion(network.Version15))
ens.InterconnectAll().BeginMining(10 * time.Millisecond)
bal, err := client15.WalletBalance(ctx, client15.DefaultKey.Address)

View File

@ -1,11 +1,10 @@
package consensus
import (
"io/ioutil"
"time"
"github.com/filecoin-project/lotus/node/config"
hraft "github.com/hashicorp/raft"
"github.com/libp2p/go-libp2p/core/peer"
"golang.org/x/xerrors"
"time"
)
// ConfigKey is the default configuration key for holding this component's
@ -27,42 +26,42 @@ var (
// Config allows to configure the Raft Consensus component for ipfs-cluster.
// The component's configuration section is represented by ConfigJSON.
// Config implements the ComponentConfig interface.
// NOTE(review): this is the pre-change definition; the commit replaces it with
// config.ClusterRaftConfig (same fields, exported HostShutdown).
type Config struct {
//config.Saver
//
//// will shutdown libp2p host on shutdown. Useful for testing
// hostShutdown is unexported, so it is only settable from within this
// consensus package (e.g. by tests).
hostShutdown bool
// A folder to store Raft's data.
DataFolder string
// InitPeerset provides the list of initial cluster peers for new Raft
// peers (with no prior state). It is ignored when Raft was already
// initialized or when starting in staging mode.
InitPeerset []peer.ID
// LeaderTimeout specifies how long to wait for a leader before
// failing an operation.
WaitForLeaderTimeout time.Duration
// NetworkTimeout specifies how long before a Raft network
// operation is timed out
NetworkTimeout time.Duration
// CommitRetries specifies how many times we retry a failed commit until
// we give up.
CommitRetries int
// How long to wait between retries
CommitRetryDelay time.Duration
// BackupsRotate specifies the maximum number of Raft's DataFolder
// copies that we keep as backups (renaming) after cleanup.
BackupsRotate int
// Namespace to use when writing keys to the datastore
DatastoreNamespace string
// A Hashicorp Raft's configuration object.
RaftConfig *hraft.Config
// Tracing enables propagation of contexts across binary boundaries.
Tracing bool
}
//type Config struct {
// //config.Saver
// //
// //// will shutdown libp2p host on shutdown. Useful for testing
// hostShutdown bool
//
// // A folder to store Raft's data.
// DataFolder string
//
// // InitPeerset provides the list of initial cluster peers for new Raft
// // peers (with no prior state). It is ignored when Raft was already
// // initialized or when starting in staging mode.
// InitPeerset []peer.ID
// // LeaderTimeout specifies how long to wait for a leader before
// // failing an operation.
// WaitForLeaderTimeout time.Duration
// // NetworkTimeout specifies how long before a Raft network
// // operation is timed out
// NetworkTimeout time.Duration
// // CommitRetries specifies how many times we retry a failed commit until
// // we give up.
// CommitRetries int
// // How long to wait between retries
// CommitRetryDelay time.Duration
// // BackupsRotate specifies the maximum number of Raft's DataFolder
// // copies that we keep as backups (renaming) after cleanup.
// BackupsRotate int
// // Namespace to use when writing keys to the datastore
// DatastoreNamespace string
//
// // A Hashicorp Raft's configuration object.
// RaftConfig *hraft.Config
//
// // Tracing enables propagation of contexts across binary boundaries.
// Tracing bool
//}
// ConfigJSON represents a human-friendly Config
// object which can be saved to JSON. Most configuration keys are converted
@ -138,38 +137,38 @@ type Config struct {
//}
// ConfigKey returns a human-friendly indentifier for this Config.
func (cfg *Config) ConfigKey() string {
return configKey
}
//func (cfg *config.ClusterRaftConfig) ConfigKey() string {
// return configKey
//}
//// Validate checks that this configuration has working values,
//// at least in appearance.
//func (cfg *Config) Validate() error {
// if cfg.RaftConfig == nil {
// return errors.New("no hashicorp/raft.Config")
// }
// if cfg.WaitForLeaderTimeout <= 0 {
// return errors.New("wait_for_leader_timeout <= 0")
// }
//
// if cfg.NetworkTimeout <= 0 {
// return errors.New("network_timeout <= 0")
// }
//
// if cfg.CommitRetries < 0 {
// return errors.New("commit_retries is invalid")
// }
//
// if cfg.CommitRetryDelay <= 0 {
// return errors.New("commit_retry_delay is invalid")
// }
//
// if cfg.BackupsRotate <= 0 {
// return errors.New("backups_rotate should be larger than 0")
// }
//
// return hraft.ValidateConfig(cfg.RaftConfig)
//}
// ValidateConfig checks that a ClusterRaftConfig has working values, at least
// in appearance: the embedded hashicorp raft config must be non-nil, the
// leader/network timeouts and retry delay must be positive, CommitRetries
// non-negative, and BackupsRotate larger than zero. If all local checks pass,
// validation is delegated to hraft.ValidateConfig for the raft-internal
// settings. Returns the first violation found, nil otherwise.
func ValidateConfig(cfg *config.ClusterRaftConfig) error {
if cfg.RaftConfig == nil {
return xerrors.Errorf("no hashicorp/raft.Config")
}
if cfg.WaitForLeaderTimeout <= 0 {
return xerrors.Errorf("wait_for_leader_timeout <= 0")
}
if cfg.NetworkTimeout <= 0 {
return xerrors.Errorf("network_timeout <= 0")
}
if cfg.CommitRetries < 0 {
return xerrors.Errorf("commit_retries is invalid")
}
if cfg.CommitRetryDelay <= 0 {
return xerrors.Errorf("commit_retry_delay is invalid")
}
if cfg.BackupsRotate <= 0 {
return xerrors.Errorf("backups_rotate should be larger than 0")
}
return hraft.ValidateConfig(cfg.RaftConfig)
}
// LoadJSON parses a json-encoded configuration (see jsonConfig).
// The Config will have default values for all fields not explicited
@ -264,31 +263,31 @@ func (cfg *Config) ConfigKey() string {
//}
//
// Default initializes this configuration with working defaults.
// Default resets every field of cfg to the package-level Default* values and
// a fresh hraft.DefaultConfig, then overrides a few raft settings regardless
// of what the raft defaults were.
func (cfg *Config) Default() {
cfg.DataFolder = "" // empty so it gets omitted
cfg.InitPeerset = []peer.ID{}
cfg.WaitForLeaderTimeout = DefaultWaitForLeaderTimeout
cfg.NetworkTimeout = DefaultNetworkTimeout
cfg.CommitRetries = DefaultCommitRetries
cfg.CommitRetryDelay = DefaultCommitRetryDelay
cfg.BackupsRotate = DefaultBackupsRotate
cfg.DatastoreNamespace = DefaultDatastoreNamespace
cfg.RaftConfig = hraft.DefaultConfig()
// These options are imposed over any Default Raft Config.
cfg.RaftConfig.ShutdownOnRemove = false
// Placeholder; presumably replaced with the real peer ID at startup.
cfg.RaftConfig.LocalID = "will_be_set_automatically"
// Set up logging
cfg.RaftConfig.LogOutput = ioutil.Discard
//cfg.RaftConfig.Logger = &hcLogToLogger{}
}
// NewDefaultConfig returns a freshly allocated Config populated via Default().
func NewDefaultConfig() *Config {
var cfg Config
cfg.Default()
return &cfg
}
//func (cfg *config.ClusterRaftConfig) Default() {
// cfg.DataFolder = "" // empty so it gets omitted
// cfg.InitPeerset = []peer.ID{}
// cfg.WaitForLeaderTimeout = DefaultWaitForLeaderTimeout
// cfg.NetworkTimeout = DefaultNetworkTimeout
// cfg.CommitRetries = DefaultCommitRetries
// cfg.CommitRetryDelay = DefaultCommitRetryDelay
// cfg.BackupsRotate = DefaultBackupsRotate
// cfg.DatastoreNamespace = DefaultDatastoreNamespace
// cfg.RaftConfig = hraft.DefaultConfig()
//
// // These options are imposed over any Default Raft Config.
// cfg.RaftConfig.ShutdownOnRemove = false
// cfg.RaftConfig.LocalID = "will_be_set_automatically"
//
// // Set up logging
// cfg.RaftConfig.LogOutput = ioutil.Discard
// //cfg.RaftConfig.Logger = &hcLogToLogger{}
//}
//
//func NewDefaultConfig() *config.ClusterRaftConfig {
// var cfg config.ClusterRaftConfig
// cfg.Default()
// return &cfg
//}
//
//// ApplyEnvVars fills in any Config fields found

View File

@ -6,6 +6,7 @@ import (
"context"
"errors"
"fmt"
"github.com/filecoin-project/lotus/node/config"
"sort"
"time"
@ -58,7 +59,7 @@ var _ consensus.Op = &ConsensusOp{}
type Consensus struct {
ctx context.Context
cancel func()
config *Config
config *config.ClusterRaftConfig
host host.Host
@ -83,11 +84,11 @@ type Consensus struct {
//
// The staging parameter controls if the Raft peer should start in
// staging mode (used when joining a new Raft peerset with other peers).
func NewConsensus(host host.Host, cfg *Config, staging bool) (*Consensus, error) {
//err := cfg.Validate()
//if err != nil {
// return nil, err
//}
func NewConsensus(host host.Host, cfg *config.ClusterRaftConfig, staging bool) (*Consensus, error) {
err := ValidateConfig(cfg)
if err != nil {
return nil, err
}
ctx, cancel := context.WithCancel(context.Background())
@ -124,11 +125,11 @@ func NewConsensus(host host.Host, cfg *Config, staging bool) (*Consensus, error)
}
func NewConsensusWithRPCClient(staging bool) func(host host.Host,
cfg *Config,
cfg *config.ClusterRaftConfig,
rpcClient *rpc.Client,
) (*Consensus, error) {
return func(host host.Host, cfg *Config, rpcClient *rpc.Client) (*Consensus, error) {
return func(host host.Host, cfg *config.ClusterRaftConfig, rpcClient *rpc.Client) (*Consensus, error) {
cc, err := NewConsensus(host, cfg, staging)
if err != nil {
return nil, err
@ -230,7 +231,7 @@ func (cc *Consensus) Shutdown(ctx context.Context) error {
logger.Error(err)
}
if cc.config.hostShutdown {
if cc.config.HostShutdown {
cc.host.Close()
}

View File

@ -4,6 +4,7 @@ import (
"context"
"errors"
"fmt"
"github.com/filecoin-project/lotus/node/config"
"io"
"os"
"time"
@ -42,7 +43,7 @@ type raftWrapper struct {
ctx context.Context
cancel context.CancelFunc
raft *hraft.Raft
config *Config
config *config.ClusterRaftConfig
host host.Host
serverConfig hraft.Configuration
transport *hraft.NetworkTransport
@ -58,7 +59,7 @@ type raftWrapper struct {
// to make sure the raft instance is usable.
func newRaftWrapper(
host host.Host,
cfg *Config,
cfg *config.ClusterRaftConfig,
fsm hraft.FSM,
staging bool,
) (*raftWrapper, error) {

112
localnet.json Normal file
View File

@ -0,0 +1,112 @@
{
"NetworkVersion": 16,
"Accounts": [
{
"Type": "account",
"Balance": "50000000000000000000000000",
"Meta": {
"Owner": "t3v26vdulzazde5z2fvxtyy7pm24qn2ztxp6jvqsfh3yqe3vkj44w4mlmvfbwuxp2c7n3yc44o7sbge2snbqha"
}
}
],
"Miners": [
{
"ID": "t01000",
"Owner": "t3v26vdulzazde5z2fvxtyy7pm24qn2ztxp6jvqsfh3yqe3vkj44w4mlmvfbwuxp2c7n3yc44o7sbge2snbqha",
"Worker": "t3v26vdulzazde5z2fvxtyy7pm24qn2ztxp6jvqsfh3yqe3vkj44w4mlmvfbwuxp2c7n3yc44o7sbge2snbqha",
"PeerId": "12D3KooWPp8ochucGCrspFfuYqEf26wz3DZUUWAd7dGSw5e1bqio",
"MarketBalance": "0",
"PowerBalance": "0",
"SectorSize": 2048,
"Sectors": [
{
"CommR": {
"/": "bagboea4b5abcbiikcvp43xiaxqwvzs56vmefz72hj5zpv4myynrsymnsfhv3luzh"
},
"CommD": {
"/": "baga6ea4seaqlaz34kjgrpb5l6isyjjsodxzs74rkq6ym5rsw67zqpk2hsxyruky"
},
"SectorID": 0,
"Deal": {
"PieceCID": {
"/": "baga6ea4seaqlaz34kjgrpb5l6isyjjsodxzs74rkq6ym5rsw67zqpk2hsxyruky"
},
"PieceSize": 2048,
"VerifiedDeal": false,
"Client": "t3v26vdulzazde5z2fvxtyy7pm24qn2ztxp6jvqsfh3yqe3vkj44w4mlmvfbwuxp2c7n3yc44o7sbge2snbqha",
"Provider": "t01000",
"Label": "0",
"StartEpoch": 0,
"EndEpoch": 9001,
"StoragePricePerEpoch": "0",
"ProviderCollateral": "0",
"ClientCollateral": "0"
},
"DealClientKey": {
"Type": "bls",
"PrivateKey": "jM6TtJBVoBleVAABOX7oQxbOqxiK2e0cjyEo34VOPjw=",
"PublicKey": "rr1R0XkGRk7nRa3njH3s1yDdZnd/k1hIp94gTdVJ5y3GLZUobUu/Qvt3gXOO/IJi",
"Address": "t3v26vdulzazde5z2fvxtyy7pm24qn2ztxp6jvqsfh3yqe3vkj44w4mlmvfbwuxp2c7n3yc44o7sbge2snbqha"
},
"ProofType": 5
},
{
"CommR": {
"/": "bagboea4b5abcazkra3xmise6fchdg3ownnmvsahz5gonqjiil2sgdgjy63rz4qkb"
},
"CommD": {
"/": "baga6ea4seaqbxdb2tltuyvx3i7o74ups4jn3vtmzpdwlh4oqzcfo7fxxapviqha"
},
"SectorID": 1,
"Deal": {
"PieceCID": {
"/": "baga6ea4seaqbxdb2tltuyvx3i7o74ups4jn3vtmzpdwlh4oqzcfo7fxxapviqha"
},
"PieceSize": 2048,
"VerifiedDeal": false,
"Client": "t3v26vdulzazde5z2fvxtyy7pm24qn2ztxp6jvqsfh3yqe3vkj44w4mlmvfbwuxp2c7n3yc44o7sbge2snbqha",
"Provider": "t01000",
"Label": "1",
"StartEpoch": 0,
"EndEpoch": 9001,
"StoragePricePerEpoch": "0",
"ProviderCollateral": "0",
"ClientCollateral": "0"
},
"DealClientKey": {
"Type": "bls",
"PrivateKey": "jM6TtJBVoBleVAABOX7oQxbOqxiK2e0cjyEo34VOPjw=",
"PublicKey": "rr1R0XkGRk7nRa3njH3s1yDdZnd/k1hIp94gTdVJ5y3GLZUobUu/Qvt3gXOO/IJi",
"Address": "t3v26vdulzazde5z2fvxtyy7pm24qn2ztxp6jvqsfh3yqe3vkj44w4mlmvfbwuxp2c7n3yc44o7sbge2snbqha"
},
"ProofType": 5
}
]
}
],
"NetworkName": "localnet-2602dfef-e23d-4d09-9893-3010f961e1b9",
"VerifregRootKey": {
"Type": "multisig",
"Balance": "0",
"Meta": {
"Signers": [
"t1ceb34gnsc6qk5dt6n7xg6ycwzasjhbxm3iylkiy"
],
"Threshold": 1,
"VestingDuration": 0,
"VestingStart": 0
}
},
"RemainderAccount": {
"Type": "multisig",
"Balance": "0",
"Meta": {
"Signers": [
"t1ceb34gnsc6qk5dt6n7xg6ycwzasjhbxm3iylkiy"
],
"Threshold": 1,
"VestingDuration": 0,
"VestingStart": 0
}
}
}

View File

@ -2,6 +2,9 @@ package config
import (
"encoding"
hraft "github.com/hashicorp/raft"
"github.com/libp2p/go-libp2p/core/peer"
"io/ioutil"
"os"
"strconv"
"time"
@ -99,6 +102,7 @@ func DefaultFullNode() *FullNode {
ColdStoreFullGCFrequency: 7,
},
},
Raft: *DefaultClusterRaftConfig(),
}
}
@ -274,3 +278,35 @@ func (dur Duration) MarshalText() ([]byte, error) {
d := time.Duration(dur)
return []byte(d.String()), nil
}
// Default values for ClusterRaftConfig fields; consumed by
// DefaultClusterRaftConfig.
var (
DefaultDataSubFolder = "raft"
DefaultWaitForLeaderTimeout = 15 * time.Second
DefaultCommitRetries = 1
DefaultNetworkTimeout = 100 * time.Second
DefaultCommitRetryDelay = 200 * time.Millisecond
DefaultBackupsRotate = 6
DefaultDatastoreNamespace = "/r" // from "/raft"
)
// DefaultClusterRaftConfig returns a ClusterRaftConfig populated with the
// package-level Default* values and a default hashicorp raft configuration,
// with a few raft settings overridden below.
func DefaultClusterRaftConfig() *ClusterRaftConfig {
var cfg ClusterRaftConfig
cfg.DataFolder = "" // empty so it gets omitted
cfg.InitPeerset = []peer.ID{}
cfg.WaitForLeaderTimeout = DefaultWaitForLeaderTimeout
cfg.NetworkTimeout = DefaultNetworkTimeout
cfg.CommitRetries = DefaultCommitRetries
cfg.CommitRetryDelay = DefaultCommitRetryDelay
cfg.BackupsRotate = DefaultBackupsRotate
cfg.DatastoreNamespace = DefaultDatastoreNamespace
cfg.RaftConfig = hraft.DefaultConfig()
// These options are imposed over any Default Raft Config.
cfg.RaftConfig.ShutdownOnRemove = false
// Placeholder; presumably replaced with the real peer ID at startup.
cfg.RaftConfig.LocalID = "will_be_set_automatically"
// Set up logging
cfg.RaftConfig.LogOutput = ioutil.Discard
//cfg.RaftConfig.Logger = &hcLogToLogger{}
return &cfg
}

View File

@ -117,6 +117,82 @@ without existing payment channels with available funds will fail instead
of automatically performing on-chain operations.`,
},
},
"ClusterRaftConfig": []DocField{
{
Name: "HostShutdown",
Type: "bool",
Comment: `config.Saver
will shutdown libp2p host on shutdown. Useful for testing`,
},
{
Name: "DataFolder",
Type: "string",
Comment: `A folder to store Raft's data.`,
},
{
Name: "InitPeerset",
Type: "[]peer.ID",
Comment: `InitPeerset provides the list of initial cluster peers for new Raft
peers (with no prior state). It is ignored when Raft was already
initialized or when starting in staging mode.`,
},
{
Name: "WaitForLeaderTimeout",
Type: "time.Duration",
Comment: `LeaderTimeout specifies how long to wait for a leader before
failing an operation.`,
},
{
Name: "NetworkTimeout",
Type: "time.Duration",
Comment: `NetworkTimeout specifies how long before a Raft network
operation is timed out`,
},
{
Name: "CommitRetries",
Type: "int",
Comment: `CommitRetries specifies how many times we retry a failed commit until
we give up.`,
},
{
Name: "CommitRetryDelay",
Type: "time.Duration",
Comment: `How long to wait between retries`,
},
{
Name: "BackupsRotate",
Type: "int",
Comment: `BackupsRotate specifies the maximum number of Raft's DataFolder
copies that we keep as backups (renaming) after cleanup.`,
},
{
Name: "DatastoreNamespace",
Type: "string",
Comment: `Namespace to use when writing keys to the datastore`,
},
{
Name: "RaftConfig",
Type: "*hraft.Config",
Comment: `A Hashicorp Raft's configuration object.`,
},
{
Name: "Tracing",
Type: "bool",
Comment: `Tracing enables propagation of contexts across binary boundaries.`,
},
},
"Common": []DocField{
{
Name: "API",
@ -372,6 +448,12 @@ see https://docs.filecoin.io/mine/lotus/miner-configuration/#using-filters-for-f
Name: "Chainstore",
Type: "Chainstore",
Comment: ``,
},
{
Name: "Raft",
Type: "ClusterRaftConfig",
Comment: ``,
},
},

View File

@ -1,7 +1,10 @@
package config
import (
hraft "github.com/hashicorp/raft"
"github.com/ipfs/go-cid"
"github.com/libp2p/go-libp2p/core/peer"
"time"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/storage/sealer"
@ -27,6 +30,7 @@ type FullNode struct {
Wallet Wallet
Fees FeeConfig
Chainstore Chainstore
Raft ClusterRaftConfig
}
// // Common
@ -590,3 +594,37 @@ type Wallet struct {
type FeeConfig struct {
DefaultMaxFee types.FIL
}
// ClusterRaftConfig allows to configure the Raft Consensus component for the node cluster.
// Construct via DefaultClusterRaftConfig rather than using the zero value:
// ValidateConfig rejects a nil RaftConfig and non-positive timeouts.
type ClusterRaftConfig struct {
// will shutdown libp2p host on shutdown. Useful for testing
HostShutdown bool
// A folder to store Raft's data.
DataFolder string
// InitPeerset provides the list of initial cluster peers for new Raft
// peers (with no prior state). It is ignored when Raft was already
// initialized or when starting in staging mode.
InitPeerset []peer.ID
// LeaderTimeout specifies how long to wait for a leader before
// failing an operation.
WaitForLeaderTimeout time.Duration
// NetworkTimeout specifies how long before a Raft network
// operation is timed out
NetworkTimeout time.Duration
// CommitRetries specifies how many times we retry a failed commit until
// we give up.
CommitRetries int
// How long to wait between retries
CommitRetryDelay time.Duration
// BackupsRotate specifies the maximum number of Raft's DataFolder
// copies that we keep as backups (renaming) after cleanup.
BackupsRotate int
// Namespace to use when writing keys to the datastore
DatastoreNamespace string
// A Hashicorp Raft's configuration object.
RaftConfig *hraft.Config
// Tracing enables propagation of contexts across binary boundaries.
Tracing bool
}

View File

@ -3,7 +3,6 @@ package full
import (
"context"
"encoding/json"
"github.com/ipfs/go-cid"
consensus "github.com/libp2p/go-libp2p-consensus"
"github.com/libp2p/go-libp2p/core/peer"