some cleanup

Shrenuj Bansal 2022-10-17 16:13:30 -04:00
parent b77ca54719
commit 900525f8c2
10 changed files with 8 additions and 102 deletions

View File

@@ -238,17 +238,17 @@ func FullNodeProxy[T api.FullNode](ins []T, outstr *api.FullNodeStruct) {
}
for _, out := range outs {
rint := reflect.ValueOf(out).Elem()
rProxyInternal := reflect.ValueOf(out).Elem()
for f := 0; f < rint.NumField(); f++ {
field := rint.Type().Field(f)
for f := 0; f < rProxyInternal.NumField(); f++ {
field := rProxyInternal.Type().Field(f)
var fns []reflect.Value
for _, rin := range rins {
fns = append(fns, rin.MethodByName(field.Name))
}
rint.Field(f).Set(reflect.MakeFunc(field.Type, func(args []reflect.Value) (results []reflect.Value) {
rProxyInternal.Field(f).Set(reflect.MakeFunc(field.Type, func(args []reflect.Value) (results []reflect.Value) {
errorsToRetry := []error{&jsonrpc.RPCConnectionError{}, &jsonrpc.ErrClient{}}
initialBackoff, err := time.ParseDuration("1s")
if err != nil {
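The context lines above (errorsToRetry, initialBackoff) indicate that FullNodeProxy wraps every generated method in retry logic for transient RPC failures. A minimal, self-contained sketch of that retry-with-backoff pattern follows; the connError type, the doubling backoff, and the attempt count are illustrative assumptions, not the lotus retry helper.

package main

import (
	"errors"
	"fmt"
	"time"
)

// connError stands in for a retryable RPC failure such as
// jsonrpc.RPCConnectionError; it is an assumption made for this sketch.
type connError struct{ msg string }

func (e *connError) Error() string { return e.msg }

// retryCall runs fn up to attempts times, doubling the backoff after each
// retryable failure and returning immediately on success or on any error
// that is not a connError.
func retryCall[T any](attempts int, backoff time.Duration, fn func() (T, error)) (T, error) {
	var zero T
	var err error
	for i := 0; i < attempts; i++ {
		var res T
		res, err = fn()
		if err == nil {
			return res, nil
		}
		var ce *connError
		if !errors.As(err, &ce) {
			return zero, err // non-retryable: give up immediately
		}
		if i == attempts-1 {
			break
		}
		time.Sleep(backoff)
		backoff *= 2
	}
	return zero, err
}

func main() {
	calls := 0
	out, err := retryCall(5, time.Second, func() (string, error) {
		calls++
		if calls < 3 {
			return "", &connError{msg: "connection dropped"}
		}
		return "ok", nil
	})
	fmt.Printf("result=%q err=%v after %d calls\n", out, err, calls)
}

Bailing out immediately on a non-retryable error keeps genuine API errors from being masked by repeated attempts.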

View File

@@ -81,7 +81,7 @@ func init() {
//
// var full TestFullNode
// var miner TestMiner
// ens.FullNodes(&full, opts...) // populates a full node
// ens.FullNode(&full, opts...) // populates a full node
// ens.Miner(&miner, &full, opts...) // populates a miner, using the full node as its chain daemon
//
// It is possible to pass functional options to set initial balances,
@@ -101,7 +101,7 @@ func init() {
//
// The API is chainable, so it's possible to do a lot in a very succinct way:
//
// kit.NewEnsemble().FullNodes(&full).Miner(&miner, &full).Start().InterconnectAll().BeginMining()
// kit.NewEnsemble().FullNode(&full).Miner(&miner, &full).Start().InterconnectAll().BeginMining()
//
// You can also find convenient fullnode:miner presets, such as 1:1, 1:2,
// and 2:1, e.g.:
@@ -303,8 +303,6 @@ func (n *Ensemble) MinerEnroll(minerNode *TestMiner, full *TestFullNode, opts ..
minerNode.Libp2p.PeerID = peerId
minerNode.Libp2p.PrivKey = privkey
//n.inactive.miners = append(n.inactive.miners, minerNode)
return n
}
@@ -318,12 +316,6 @@ func (n *Ensemble) Miner(minerNode *TestMiner, full *TestFullNode, opts ...NodeO
return n
}
//func (n *Ensemble) MinerWithMultipleNodes(minerNode *TestMiner, full []*TestFullNode, opts ...NodeOpt) *Ensemble {
// n.MinerEnroll(minerNode, full, opts...)
// n.AddInactiveMiner(minerNode)
// return n
//}
// Worker enrolls a new worker, using the provided full node for chain
// interactions.
func (n *Ensemble) Worker(minerNode *TestMiner, worker *TestWorker, opts ...NodeOpt) *Ensemble {
@@ -576,7 +568,7 @@ func (n *Ensemble) Start() *Ensemble {
}
// // Set it as the default address.
// err = m.FullNodes.WalletSetDefault(ctx, m.OwnerAddr.Address)
// err = m.FullNode.WalletSetDefault(ctx, m.OwnerAddr.Address)
// require.NoError(n.t, err)
r := repo.NewMemory(nil)
@@ -676,13 +668,6 @@ func (n *Ensemble) Start() *Ensemble {
assigner := m.options.minerAssigner
disallowRemoteFinalize := m.options.disallowRemoteFinalize
//var wrappedFullNode api.FullNodeStruct
//var fullNodes []api.FullNode
//for _, fn := range m.FullNodes {
// fullNodes = append(fullNodes, fn.FullNode)
//}
//proxy(fullNodes, &wrappedFullNode)
var mineBlock = make(chan lotusminer.MineReq)
copy := *m.FullNode

View File

@@ -47,11 +47,6 @@ type TestFullNode struct {
func MergeFullNodes(fullNodes []*TestFullNode) *TestFullNode {
var wrappedFullNode TestFullNode
//var fnapis []api.FullNode
//for _, fullNode := range fullNodes {
// fnapis = append(fnapis, fullNode)
//}
var fns api.FullNodeStruct
wrappedFullNode.FullNode = &fns
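MergeFullNodes here and the rProxyInternal loop in the first hunk are two halves of one mechanism: api.FullNodeStruct is a struct of function fields, and the proxy fills each field with a reflect.MakeFunc closure over the backing nodes. A simplified, self-contained sketch of that wiring is below; the backend and proxyStruct types are stand-ins, and it simply calls the first backend instead of retrying across several, so it is an illustration rather than the lotus implementation.

package main

import (
	"fmt"
	"reflect"
	"strings"
)

// backend is a stand-in for a concrete full-node implementation.
type backend struct{ name string }

func (b *backend) Hello(who string) string { return b.name + " says hello to " + who }
func (b *backend) Upper(s string) string   { return strings.ToUpper(s) }

// proxyStruct is a stand-in for api.FullNodeStruct: a struct of function
// fields that the proxy populates at runtime.
type proxyStruct struct {
	Hello func(string) string
	Upper func(string) string
}

// proxy fills every function field of out with a closure that forwards
// the call to a backend method of the same name.
func proxy(backends []*backend, out *proxyStruct) {
	outv := reflect.ValueOf(out).Elem()
	for f := 0; f < outv.NumField(); f++ {
		field := outv.Type().Field(f)
		var fns []reflect.Value
		for _, b := range backends {
			fns = append(fns, reflect.ValueOf(b).MethodByName(field.Name))
		}
		target := fns[0] // the real proxy picks/retries across nodes; this sketch takes the first
		outv.Field(f).Set(reflect.MakeFunc(field.Type, func(args []reflect.Value) []reflect.Value {
			return target.Call(args)
		}))
	}
}

func main() {
	var ps proxyStruct
	proxy([]*backend{{name: "node0"}, {name: "node1"}}, &ps)
	fmt.Println(ps.Hello("world"))
	fmt.Println(ps.Upper("chain"))
}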

View File

@@ -27,11 +27,6 @@ func CreateRPCServer(t *testing.T, handler http.Handler, listener net.Listener)
}
testServ.Start()
//t.Cleanup(func() {
// waitUpTo(testServ.Close, time.Second, "Gave up waiting for RPC server to close after 1s")
//})
//t.Cleanup(testServ.CloseClientConnections)
addr := testServ.Listener.Addr()
maddr, err := manet.FromNetAddr(addr)
require.NoError(t, err)
@@ -71,7 +66,6 @@ func fullRpc(t *testing.T, f *TestFullNode) (*TestFullNode, Closer) {
cl, stop, err := client.NewFullNodeRPCV1(context.Background(), "ws://"+srv.Listener.Addr().String()+"/rpc/v1", nil)
require.NoError(t, err)
//t.Cleanup(stop)
f.ListenAddr, f.FullNode = maddr, cl
return f, func() { stop(); rpcCloser() }

View File

@@ -137,6 +137,7 @@ func TestRaftState(t *testing.T) {
To: node0.DefaultKey.Address,
Value: big.Div(bal, big.NewInt(2)),
}
mu := uuid.New()
smHalfBal, err := node0.MpoolPushMessage(ctx, msgHalfBal, &api.MessageSendSpec{
MsgUuid: mu,
@@ -150,8 +151,6 @@ func TestRaftState(t *testing.T) {
rstate1 := getRaftState(ctx, t, &node1)
rstate2 := getRaftState(ctx, t, &node2)
require.Equal(t, rstate0.NonceMap[miner.OwnerKey.Address], uint64(0))
require.EqualValues(t, rstate0, rstate1)
require.EqualValues(t, rstate0, rstate2)
}

View File

@@ -287,28 +287,6 @@ var (
DefaultDatastoreNamespace = "/raft"
)
//func DefaultClusterRaftConfig() *ClusterRaftConfig {
// var cfg ClusterRaftConfig
// cfg.DataFolder = "" // empty so it gets omitted
// cfg.InitPeerset = []peer.ID{}
// cfg.WaitForLeaderTimeout = Duration(DefaultWaitForLeaderTimeout)
// cfg.NetworkTimeout = Duration(DefaultNetworkTimeout)
// cfg.CommitRetries = DefaultCommitRetries
// cfg.CommitRetryDelay = Duration(DefaultCommitRetryDelay)
// cfg.BackupsRotate = DefaultBackupsRotate
// cfg.DatastoreNamespace = DefaultDatastoreNamespace
// cfg.RaftConfig = hraft.DefaultConfig()
//
// // These options are imposed over any Default Raft Config.
// cfg.RaftConfig.ShutdownOnRemove = false
// cfg.RaftConfig.LocalID = "will_be_set_automatically"
//
// // Set up logging
// cfg.RaftConfig.LogOutput = ioutil.Discard
// //cfg.RaftConfig.Logger = &hcLogToLogger{}
// return &cfg
//}
func DefaultUserRaftConfig() *UserRaftConfig {
var cfg UserRaftConfig
cfg.DataFolder = "" // empty so it gets omitted

View File

@@ -610,42 +610,6 @@ type FeeConfig struct {
DefaultMaxFee types.FIL
}
//// ClusterRaftConfig allows to configure the Raft Consensus component for the node cluster.
//type ClusterRaftConfig struct {
// // config to enabled node cluster with raft consensus
// ClusterModeEnabled bool
// // will shutdown libp2p host on shutdown. Useful for testing
// HostShutdown bool
// // A folder to store Raft's data.
// DataFolder string
// // InitPeerset provides the list of initial cluster peers for new Raft
// // peers (with no prior state). It is ignored when Raft was already
// // initialized or when starting in staging mode.
// InitPeerset []peer.ID
// // LeaderTimeout specifies how long to wait for a leader before
// // failing an operation.
// WaitForLeaderTimeout Duration
// // NetworkTimeout specifies how long before a Raft network
// // operation is timed out
// NetworkTimeout Duration
// // CommitRetries specifies how many times we retry a failed commit until
// // we give up.
// CommitRetries int
// // How long to wait between retries
// CommitRetryDelay Duration
// // BackupsRotate specifies the maximum number of Raft's DataFolder
// // copies that we keep as backups (renaming) after cleanup.
// BackupsRotate int
// // Namespace to use when writing keys to the datastore
// DatastoreNamespace string
//
// // A Hashicorp Raft's configuration object.
// RaftConfig *hraft.Config
//
// // Tracing enables propagation of contexts across binary boundaries.
// Tracing bool
//}
type UserRaftConfig struct {
// config to enabled node cluster with raft consensus
ClusterModeEnabled bool

View File

@@ -10,7 +10,6 @@ import (
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
//consensus2 "github.com/filecoin-project/lotus/lib/consensus/raft"
"github.com/filecoin-project/lotus/node/impl/client"
"github.com/filecoin-project/lotus/node/impl/common"
"github.com/filecoin-project/lotus/node/impl/full"

View File

@@ -47,7 +47,6 @@ type MpoolAPI struct {
RaftAPI
MessageSigner messagesigner.MsgSigner
// MessageSigner *messagesigner.MessageSigner
PushLocks *dtypes.MpoolLocker
}

View File

@@ -88,13 +88,6 @@ func (a *UuidWrapper) MpoolPushMessage(ctx context.Context, msg *types.Message,
}
spec.MsgUuid = uuid.New()
return a.FullNode.MpoolPushMessage(ctx, msg, spec)
//errorsToRetry := []error{&jsonrpc.RPCConnectionError{}, &jsonrpc.ErrClient{}}
//initialBackoff, err := time.ParseDuration("1s")
//if err != nil {
// return nil, err
//}
//return retry.Retry(5, initialBackoff, errorsToRetry, func() (*types.SignedMessage, error) { return a.FullNode.MpoolPushMessage(ctx, msg, spec) })
}
func MakeUuidWrapper(a v1api.RawFullNodeAPI) v1api.FullNode {
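The MpoolPushMessage hunk above shows the wrapper stamping each outgoing message spec with a fresh UUID before delegating to the underlying node, presumably so a push that gets retried can still be recognized as the same logical message. A stripped-down sketch of that decorator is below; pusher and sendSpec are simplified stand-ins for the FullNode API and api.MessageSendSpec, and the nil guard is part of the sketch rather than a claim about the elided lines.

package main

import (
	"fmt"

	"github.com/google/uuid"
)

// sendSpec is a simplified stand-in for api.MessageSendSpec.
type sendSpec struct {
	MsgUuid uuid.UUID
}

// pusher is a simplified stand-in for the FullNode mpool API.
type pusher interface {
	MpoolPushMessage(msg string, spec *sendSpec) (string, error)
}

// uuidWrapper decorates a pusher so every outgoing message carries a
// unique identifier that survives retries of the same call.
type uuidWrapper struct {
	inner pusher
}

func (w *uuidWrapper) MpoolPushMessage(msg string, spec *sendSpec) (string, error) {
	if spec == nil {
		spec = &sendSpec{} // callers without a spec still get a UUID attached
	}
	spec.MsgUuid = uuid.New()
	return w.inner.MpoolPushMessage(msg, spec)
}

// printPusher just echoes what it was asked to push.
type printPusher struct{}

func (printPusher) MpoolPushMessage(msg string, spec *sendSpec) (string, error) {
	return fmt.Sprintf("%s (uuid %s)", msg, spec.MsgUuid), nil
}

func main() {
	w := &uuidWrapper{inner: printPusher{}}
	out, _ := w.MpoolPushMessage("transfer half balance", nil)
	fmt.Println(out)
}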