Retries within proxy working
parent 559c2c6d34
commit 570f61438a
@@ -4,6 +4,8 @@ import (
	"context"
	"errors"
	"fmt"
	"github.com/filecoin-project/lotus/lib/retry"
	"go.uber.org/atomic"
	"net/http"
	"net/url"
	"os"
@@ -11,6 +13,7 @@ import (
	"reflect"
	"strings"
	"syscall"
	"time"

	"github.com/mitchellh/go-homedir"
	"github.com/urfave/cli/v2"
@@ -256,9 +259,6 @@ func FullNodeProxy[T api.FullNode](ins []T, outstr *api.FullNodeStruct) {
		}
		//fn := ra.MethodByName(field.Name)

		//curr := 0
		//total := len(rins)

		//retryFunc := func(args []reflect.Value) (results []reflect.Value) {
		//	//ctx := args[0].Interface().(context.Context)
		//	//
@@ -273,26 +273,34 @@ func FullNodeProxy[T api.FullNode](ins []T, outstr *api.FullNodeStruct) {
		//}

		rint.Field(f).Set(reflect.MakeFunc(field.Type, func(args []reflect.Value) (results []reflect.Value) {
			//errorsToRetry := []error{&jsonrpc.RPCConnectionError{}}
			//initialBackoff, err := time.ParseDuration("1s")
			//if err != nil {
			//	return nil
			//}
			//result, err := retry.Retry(5, initialBackoff, errorsToRetry, func() (results []reflect.Value, err2 error) {
			//	//ctx := args[0].Interface().(context.Context)
			//	//
			//	//rin := peertoNode[ins[0].Leader(ctx)]
			//	//fn := rin.MethodByName(field.Name)
			//	//
			//	//return fn.Call(args)
			//
			//	toCall := curr
			//	curr += 1 % total
			//	result := fns[toCall].Call(args)
			//	return result, results[len(results)-1].Interface().(error)
			//})
			//return result
			return fns[0].Call(args)
			errorsToRetry := []error{&jsonrpc.RPCConnectionError{}, &jsonrpc.ErrClient{}}
			initialBackoff, err := time.ParseDuration("1s")
			if err != nil {
				return nil
			}
			var curr atomic.Int64
			curr.Store(-1)
			total := len(rins)
			ctx := args[0].Interface().(context.Context)
			result, err := retry.Retry(ctx, 5, initialBackoff, errorsToRetry, func() (results []reflect.Value, err2 error) {
				//ctx := args[0].Interface().(context.Context)
				//
				//rin := peertoNode[ins[0].Leader(ctx)]
				//fn := rin.MethodByName(field.Name)
				//
				//return fn.Call(args)

				toCall := curr.Inc() % int64(total)

				result := fns[toCall].Call(args)
				if result[len(result)-1].IsNil() {
					return result, nil
				}
				e := result[len(result)-1].Interface().(error)
				return result, e
			})
			return result
			//return fns[0].Call(args)
		}))
	}
}
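The proxy methods above now round-robin across the configured nodes and retry connection-level failures instead of always calling fns[0]. As a standalone illustration of that pattern (the function and error names below are simplified stand-ins, not the actual lotus types), each call starts at the first backend and advances an atomic counter on every retry:

package main

import (
	"errors"
	"fmt"
	"sync/atomic"
	"time"
)

// errTransient stands in for the connection-type errors (e.g. jsonrpc.RPCConnectionError)
// that are considered safe to retry.
var errTransient = errors.New("connection error")

// callWithRetry mirrors the shape of the proxy code: pick the next backend with an
// atomic counter, retry transient failures, and double the backoff between attempts.
func callWithRetry(backends []func() (string, error), attempts int, backoff time.Duration) (string, error) {
	var curr int64 = -1
	var res string
	var err error
	for i := 0; i < attempts; i++ {
		if i > 0 {
			time.Sleep(backoff)
			backoff *= 2
		}
		next := atomic.AddInt64(&curr, 1) % int64(len(backends))
		res, err = backends[next]()
		if err == nil || !errors.Is(err, errTransient) {
			return res, err
		}
	}
	return res, err
}

func main() {
	backends := []func() (string, error){
		func() (string, error) { return "", errTransient },      // node 0 unreachable
		func() (string, error) { return "ok from node 1", nil }, // node 1 answers
	}
	out, err := callWithRetry(backends, 5, 100*time.Millisecond)
	fmt.Println(out, err)
}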
@@ -51,7 +51,7 @@ func getRaftState(ctx context.Context, t *testing.T, node *kit.TestFullNode) *co

func setup(ctx context.Context, t *testing.T, node0 *kit.TestFullNode, node1 *kit.TestFullNode, node2 *kit.TestFullNode, miner *kit.TestMiner) *kit.Ensemble {

	blockTime := 1000 * time.Millisecond
	//blockTime := 1000 * time.Millisecond

	pkey0, _ := generatePrivKey()
	pkey1, _ := generatePrivKey()
@@ -304,6 +304,108 @@ func TestRaftStateLeaderDisconnectsMiner(t *testing.T) {
		peerToNode[n.Pkey.PeerID] = n
	}

	//bal, err := node0.WalletBalance(ctx, node0.DefaultKey.Address)
	//require.NoError(t, err)

	//msgHalfBal := &types.Message{
	//	From:  miner.OwnerKey.Address,
	//	To:    node0.DefaultKey.Address,
	//	Value: big.Div(bal, big.NewInt(2)),
	//}
	//mu := uuid.New()
	//smHalfBal, err := miner.FullNode.MpoolPushMessage(ctx, msgHalfBal, &api.MessageSendSpec{
	//	MsgUuid: mu,
	//})
	//require.NoError(t, err)
	//mLookup, err := node0.StateWaitMsg(ctx, smHalfBal.Cid(), 3, api.LookbackNoLimit, true)
	//require.NoError(t, err)
	//require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode)
	//
	//rstate0 := getRaftState(ctx, t, &node0)
	//rstate1 := getRaftState(ctx, t, &node1)
	//rstate2 := getRaftState(ctx, t, &node2)
	//
	//require.True(t, reflect.DeepEqual(rstate0, rstate1))
	//require.True(t, reflect.DeepEqual(rstate0, rstate2))

	// Take leader node down
	leader, err := node1.RaftLeader(ctx)
	require.NoError(t, err)
	leaderNode := peerToNode[leader]

	err = leaderNode.Stop(ctx)
	require.NoError(t, err)
	oldLeaderNode := leaderNode

	time.Sleep(5 * time.Second)

	newLeader := leader
	for _, n := range nodes {
		if n != leaderNode {
			newLeader, err = n.RaftLeader(ctx)
			require.NoError(t, err)
			require.NotEqual(t, newLeader, leader)
		}
	}

	require.NotEqual(t, newLeader, leader)
	leaderNode = peerToNode[newLeader]

	fmt.Println("New leader: ", newLeader)

	//err = node0.Stop(ctx)
	//require.NoError(t, err)

	msg2 := &types.Message{
		From:  miner.OwnerKey.Address,
		To:    node0.DefaultKey.Address,
		Value: big.NewInt(100000),
	}
	mu2 := uuid.New()

	signedMsg2, err := miner.FullNode.MpoolPushMessage(ctx, msg2, &api.MessageSendSpec{
		MaxFee:  abi.TokenAmount(config.DefaultDefaultMaxFee),
		MsgUuid: mu2,
	})
	require.NoError(t, err)

	mLookup, err := leaderNode.StateWaitMsg(ctx, signedMsg2.Cid(), 3, api.LookbackNoLimit, true)
	require.NoError(t, err)
	require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode)

	fmt.Println("!!!!!!!!!!!!!!!!TEST FINISHED!!!!!!!!!!!!!!!!!!!")

	rstate := getRaftState(ctx, t, leaderNode)

	for _, n := range nodes {
		if n != oldLeaderNode {
			rs := getRaftState(ctx, t, n)
			require.True(t, reflect.DeepEqual(rs, rstate))
		}
	}
}

func TestLeaderDisconnectsCheckMsgStateOnNewLeader(t *testing.T) {

	kit.QuietMiningLogs()
	ctx := context.Background()

	var (
		node0 kit.TestFullNode
		node1 kit.TestFullNode
		node2 kit.TestFullNode
		miner kit.TestMiner
	)

	nodes := []*kit.TestFullNode{&node0, &node1, &node2}

	setup(ctx, t, &node0, &node1, &node2, &miner)

	peerToNode := make(map[peer.ID]*kit.TestFullNode)
	for _, n := range nodes {
		peerToNode[n.Pkey.PeerID] = n
	}

	bal, err := node0.WalletBalance(ctx, node0.DefaultKey.Address)
	require.NoError(t, err)

@@ -317,65 +419,55 @@ func TestRaftStateLeaderDisconnectsMiner(t *testing.T) {
		MsgUuid: mu,
	})
	require.NoError(t, err)
	mLookup, err := node0.StateWaitMsg(ctx, smHalfBal.Cid(), 3, api.LookbackNoLimit, true)
	require.NoError(t, err)
	require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode)

	rstate0 := getRaftState(ctx, t, &node0)
	rstate1 := getRaftState(ctx, t, &node1)
	rstate2 := getRaftState(ctx, t, &node2)

	require.True(t, reflect.DeepEqual(rstate0, rstate1))
	require.True(t, reflect.DeepEqual(rstate0, rstate2))
	//
	//rstate0 := getRaftState(ctx, t, &node0)
	//rstate1 := getRaftState(ctx, t, &node1)
	//rstate2 := getRaftState(ctx, t, &node2)
	//
	//require.True(t, reflect.DeepEqual(rstate0, rstate1))
	//require.True(t, reflect.DeepEqual(rstate0, rstate2))

	// Take leader node down
	leader, err := node1.RaftLeader(ctx)
	require.NoError(t, err)
	leaderNode := peerToNode[leader]
	//
	//err = leaderNode.Stop(ctx)
	//require.NoError(t, err)
	//oldLeaderNode := leaderNode
	//

	err = leaderNode.Stop(ctx)
	require.NoError(t, err)
	oldLeaderNode := leaderNode

	//time.Sleep(5 * time.Second)
	//
	//newLeader := leader
	//for _, n := range nodes {
	//	if n != leaderNode {
	//		newLeader, err = n.RaftLeader(ctx)
	//		require.NoError(t, err)
	//		require.NotEqual(t, newLeader, leader)
	//	}
	//}
	//
	//require.NotEqual(t, newLeader, leader)
	//leaderNode = peerToNode[newLeader]

	err = node0.Stop(ctx)
	require.NoError(t, err)

	msg2 := &types.Message{
		From:  miner.OwnerKey.Address,
		To:    node0.DefaultKey.Address,
		Value: big.NewInt(100000),
	newLeader := leader
	for _, n := range nodes {
		if n != leaderNode {
			newLeader, err = n.RaftLeader(ctx)
			require.NoError(t, err)
			require.NotEqual(t, newLeader, leader)
		}
	}
	mu2 := uuid.New()

	time.Sleep(5 * time.Second)
	signedMsg2, err := miner.FullNode.MpoolPushMessage(ctx, msg2, &api.MessageSendSpec{
		MaxFee:  abi.TokenAmount(config.DefaultDefaultMaxFee),
		MsgUuid: mu2,
	})
	require.NoError(t, err)
	mLookup, err = leaderNode.StateWaitMsg(ctx, signedMsg2.Cid(), 3, api.LookbackNoLimit, true)
	require.NotEqual(t, newLeader, leader)
	leaderNode = peerToNode[newLeader]

	fmt.Println("New leader: ", newLeader)

	mLookup, err := leaderNode.StateWaitMsg(ctx, smHalfBal.Cid(), 3, api.LookbackNoLimit, true)
	require.NoError(t, err)
	require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode)

	//rstate := getRaftState(ctx, t, leaderNode)
	//err = node0.Stop(ctx)
	//require.NoError(t, err)

	//for _, n := range nodes {
	//	if n != oldLeaderNode {
	//		rs := getRaftState(ctx, t, n)
	//		require.True(t, reflect.DeepEqual(rs, rstate))
	//	}
	//}
	fmt.Println("!!!!!!!!!!!!!!!!TEST FINISHED!!!!!!!!!!!!!!!!!!!")

	rstate := getRaftState(ctx, t, leaderNode)

	for _, n := range nodes {
		if n != oldLeaderNode {
			rs := getRaftState(ctx, t, n)
			require.True(t, reflect.DeepEqual(rs, rstate))
		}
	}
}
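One observation on the test flow above: both variants wait a fixed time.Sleep(5 * time.Second) for Raft to elect a new leader, which can be flaky on slow CI machines. A hedged alternative is to poll the surviving nodes until they agree on a different leader. waitForNewLeader below is a hypothetical helper (it assumes it lives alongside the tests in the itests package and that testify's require.Eventually is available), not part of the kit package:

package itests

import (
	"context"
	"testing"
	"time"

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/lotus/itests/kit"
)

// waitForNewLeader polls every node that is still up until it reports a leader
// different from the one that was stopped, instead of sleeping a fixed interval.
func waitForNewLeader(ctx context.Context, t *testing.T, nodes []*kit.TestFullNode, down *kit.TestFullNode, old peer.ID) peer.ID {
	var newLeader peer.ID
	require.Eventually(t, func() bool {
		for _, n := range nodes {
			if n == down {
				continue
			}
			l, err := n.RaftLeader(ctx)
			if err != nil || l == old || l == "" {
				return false
			}
			newLeader = l
		}
		return true
	}, 30*time.Second, 500*time.Millisecond, "raft did not elect a new leader")
	return newLeader
}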
@@ -1,6 +1,7 @@
package retry

import (
	"context"
	"time"

	logging "github.com/ipfs/go-log/v2"
@@ -10,10 +11,11 @@ import (

var log = logging.Logger("retry")

func Retry[T any](attempts int, initialBackoff time.Duration, errorTypes []error, f func() (T, error)) (result T, err error) {
func Retry[T any](ctx context.Context, attempts int, initialBackoff time.Duration, errorTypes []error, f func() (T, error)) (result T, err error) {
	for i := 0; i < attempts; i++ {
		if i > 0 {
			log.Info("Retrying after error:", err)
			//debug.PrintStack()
			time.Sleep(initialBackoff)
			initialBackoff *= 2
		}
@@ -21,6 +23,9 @@ func Retry[T any](attempts int, initialBackoff time.Duration, errorTypes []error
		if err == nil || !api.ErrorIsIn(err, errorTypes) {
			return result, err
		}
		if ctx.Err() != nil {
			return result, ctx.Err()
		}
	}
	log.Errorf("Failed after %d attempts, last error: %s", attempts, err)
	return result, err
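Retry is now context-aware: after each failed attempt it checks ctx.Err() and gives up early if the caller has gone away. A small usage sketch of the new signature follows; the flaky callback is illustrative, and it assumes the lotus module and its go-jsonrpc dependency are on the module path:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/filecoin-project/go-jsonrpc"
	"github.com/filecoin-project/lotus/lib/retry"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Only connection-level errors are retried; any other error returns immediately.
	errorsToRetry := []error{&jsonrpc.RPCConnectionError{}}

	attempt := 0
	height, err := retry.Retry(ctx, 5, time.Second, errorsToRetry, func() (int64, error) {
		attempt++
		if attempt < 3 {
			// Hypothetical flaky call standing in for a real RPC request.
			return 0, &jsonrpc.RPCConnectionError{}
		}
		return 123456, nil
	})
	fmt.Println(height, err) // 123456 <nil> after two retried attempts
}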
@@ -2,7 +2,6 @@ package full

import (
	"context"

	"github.com/libp2p/go-libp2p/core/peer"
	"go.uber.org/fx"
	"golang.org/x/xerrors"
@@ -37,7 +37,6 @@ import (
	storageimpl "github.com/filecoin-project/go-fil-markets/storagemarket/impl"
	"github.com/filecoin-project/go-fil-markets/storagemarket/impl/storedask"
	smnet "github.com/filecoin-project/go-fil-markets/storagemarket/network"
	"github.com/filecoin-project/go-jsonrpc"
	"github.com/filecoin-project/go-jsonrpc/auth"
	"github.com/filecoin-project/go-paramfetch"
	"github.com/filecoin-project/go-state-types/abi"
@@ -56,7 +55,6 @@ import (
	"github.com/filecoin-project/lotus/chain/gen/slashfilter"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/journal"
	"github.com/filecoin-project/lotus/lib/retry"
	"github.com/filecoin-project/lotus/markets"
	"github.com/filecoin-project/lotus/markets/dagstore"
	"github.com/filecoin-project/lotus/markets/idxprov"
@@ -89,12 +87,14 @@ func (a *UuidWrapper) MpoolPushMessage(ctx context.Context, msg *types.Message,
		spec = new(api.MessageSendSpec)
	}
	spec.MsgUuid = uuid.New()
	errorsToRetry := []error{&jsonrpc.RPCConnectionError{}}
	initialBackoff, err := time.ParseDuration("1s")
	if err != nil {
		return nil, err
	}
	return retry.Retry(5, initialBackoff, errorsToRetry, func() (*types.SignedMessage, error) { return a.FullNode.MpoolPushMessage(ctx, msg, spec) })
	return a.FullNode.MpoolPushMessage(ctx, msg, spec)

	//errorsToRetry := []error{&jsonrpc.RPCConnectionError{}, &jsonrpc.ErrClient{}}
	//initialBackoff, err := time.ParseDuration("1s")
	//if err != nil {
	//	return nil, err
	//}
	//return retry.Retry(5, initialBackoff, errorsToRetry, func() (*types.SignedMessage, error) { return a.FullNode.MpoolPushMessage(ctx, msg, spec) })
}

func MakeUuidWrapper(a v1api.RawFullNodeAPI) v1api.FullNode {
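With retries moved into FullNodeProxy, MpoolPushMessage on the node side no longer wraps itself in retry.Retry (the old call is left commented out); the wrapper's remaining job is to stamp a fresh MsgUuid, presumably so that a resent push can be de-duplicated downstream. The decorator shape, reduced to a toy interface (Pusher, node and uuidWrapper are illustrative stand-ins, not lotus types):

package main

import (
	"fmt"

	"github.com/google/uuid"
)

// Pusher is a toy stand-in for the MpoolPushMessage surface of the FullNode API.
type Pusher interface {
	Push(msg string, msgUuid uuid.UUID) string
}

type node struct{}

func (node) Push(msg string, msgUuid uuid.UUID) string {
	return fmt.Sprintf("pushed %q with uuid %s", msg, msgUuid)
}

// uuidWrapper mirrors the UuidWrapper shape: embed the underlying API and
// override one method to stamp a fresh UUID before delegating. Retrying is
// left to the caller-side proxy, which can resend the same logical message.
type uuidWrapper struct {
	Pusher
}

func (w uuidWrapper) Push(msg string, _ uuid.UUID) string {
	return w.Pusher.Push(msg, uuid.New())
}

func main() {
	var p Pusher = uuidWrapper{node{}}
	fmt.Println(p.Push("hello", uuid.Nil))
}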