forked from cerc-io/plugeth
les: renamed lespay to vflux (#22347)
parent d36276d85e
commit c027507e03
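This is a mechanical rename of the les/lespay package tree to les/vflux: every hunk below swaps the old import aliases and paths (and the matching comments) for the new ones, and the client RPC namespace moves from "lespay" to "vflux". As a reading aid (not part of the commit), the mapping is:

	lpc "github.com/ethereum/go-ethereum/les/lespay/client"  ->  vfc "github.com/ethereum/go-ethereum/les/vflux/client"
	lps "github.com/ethereum/go-ethereum/les/lespay/server"  ->  vfs "github.com/ethereum/go-ethereum/les/vflux/server"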
@@ -23,7 +23,7 @@ import (
 
 	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/common/mclock"
-	lps "github.com/ethereum/go-ethereum/les/lespay/server"
+	vfs "github.com/ethereum/go-ethereum/les/vflux/server"
 	"github.com/ethereum/go-ethereum/p2p/enode"
 )
 
@@ -37,7 +37,7 @@ var (
 // PrivateLightServerAPI provides an API to access the LES light server.
 type PrivateLightServerAPI struct {
 	server                               *LesServer
-	defaultPosFactors, defaultNegFactors lps.PriceFactors
+	defaultPosFactors, defaultNegFactors vfs.PriceFactors
 }
 
 // NewPrivateLightServerAPI creates a new LES light server API.
@@ -107,7 +107,7 @@ func (api *PrivateLightServerAPI) clientInfo(c *clientInfo) map[string]interface
 
 // setParams either sets the given parameters for a single connected client (if specified)
 // or the default parameters applicable to clients connected in the future
-func (api *PrivateLightServerAPI) setParams(params map[string]interface{}, client *clientInfo, posFactors, negFactors *lps.PriceFactors) (updateFactors bool, err error) {
+func (api *PrivateLightServerAPI) setParams(params map[string]interface{}, client *clientInfo, posFactors, negFactors *vfs.PriceFactors) (updateFactors bool, err error) {
 	defParams := client == nil
 	for name, value := range params {
 		errValue := func() error {

@@ -36,7 +36,7 @@ import (
 	"github.com/ethereum/go-ethereum/eth/gasprice"
 	"github.com/ethereum/go-ethereum/event"
 	"github.com/ethereum/go-ethereum/internal/ethapi"
-	lpc "github.com/ethereum/go-ethereum/les/lespay/client"
+	vfc "github.com/ethereum/go-ethereum/les/vflux/client"
 	"github.com/ethereum/go-ethereum/light"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/node"
@@ -58,7 +58,7 @@ type LightEthereum struct {
 	txPool         *light.TxPool
 	blockchain     *light.LightChain
 	serverPool     *serverPool
-	valueTracker   *lpc.ValueTracker
+	valueTracker   *vfc.ValueTracker
 	dialCandidates enode.Iterator
 	pruner         *pruner
 
@@ -108,7 +108,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) {
 		engine:         ethconfig.CreateConsensusEngine(stack, chainConfig, &config.Ethash, nil, false, chainDb),
 		bloomRequests:  make(chan chan *bloombits.Retrieval),
 		bloomIndexer:   core.NewBloomIndexer(chainDb, params.BloomBitsBlocksClient, params.HelperTrieConfirmations),
-		valueTracker:   lpc.NewValueTracker(lespayDb, &mclock.System{}, requestList, time.Minute, 1/float64(time.Hour), 1/float64(time.Hour*100), 1/float64(time.Hour*1000)),
+		valueTracker:   vfc.NewValueTracker(lespayDb, &mclock.System{}, requestList, time.Minute, 1/float64(time.Hour), 1/float64(time.Hour*100), 1/float64(time.Hour*1000)),
 		p2pServer:      stack.Server(),
 		p2pConfig:      &stack.Config().P2P,
 	}
@@ -193,18 +193,18 @@ func New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) {
 }
 
 // vtSubscription implements serverPeerSubscriber
-type vtSubscription lpc.ValueTracker
+type vtSubscription vfc.ValueTracker
 
 // registerPeer implements serverPeerSubscriber
 func (v *vtSubscription) registerPeer(p *serverPeer) {
-	vt := (*lpc.ValueTracker)(v)
+	vt := (*vfc.ValueTracker)(v)
 	p.setValueTracker(vt, vt.Register(p.ID()))
 	p.updateVtParams()
 }
 
 // unregisterPeer implements serverPeerSubscriber
 func (v *vtSubscription) unregisterPeer(p *serverPeer) {
-	vt := (*lpc.ValueTracker)(v)
+	vt := (*vfc.ValueTracker)(v)
 	vt.Unregister(p.ID())
 	p.setValueTracker(nil, nil)
 }
@@ -263,9 +263,9 @@ func (s *LightEthereum) APIs() []rpc.API {
 			Service:   NewPrivateLightAPI(&s.lesCommons),
 			Public:    false,
 		}, {
-			Namespace: "lespay",
+			Namespace: "vflux",
 			Version:   "1.0",
-			Service:   lpc.NewPrivateClientAPI(s.valueTracker),
+			Service:   vfc.NewPrivateClientAPI(s.valueTracker),
 			Public:    false,
 		},
 	}...)

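Assembled from the APIs() hunk above, the light client now registers the value-tracker API under the "vflux" namespace; a minimal sketch of the resulting entry, with all field values exactly as shown in the diff:

	rpc.API{
		Namespace: "vflux", // previously "lespay"
		Version:   "1.0",
		Service:   vfc.NewPrivateClientAPI(s.valueTracker),
		Public:    false,
	}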
@@ -23,8 +23,8 @@ import (
 
 	"github.com/ethereum/go-ethereum/common/mclock"
 	"github.com/ethereum/go-ethereum/ethdb"
-	lps "github.com/ethereum/go-ethereum/les/lespay/server"
 	"github.com/ethereum/go-ethereum/les/utils"
+	vfs "github.com/ethereum/go-ethereum/les/vflux/server"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p/enode"
 	"github.com/ethereum/go-ethereum/p2p/enr"
@@ -64,17 +64,17 @@ const (
 // and negative banalce. Boeth positive balance and negative balance will decrease
 // exponentially. If the balance is low enough, then the record will be dropped.
 type clientPool struct {
-	lps.BalanceTrackerSetup
-	lps.PriorityPoolSetup
+	vfs.BalanceTrackerSetup
+	vfs.PriorityPoolSetup
 	lock       sync.Mutex
 	clock      mclock.Clock
 	closed     bool
 	removePeer func(enode.ID)
 	ns         *nodestate.NodeStateMachine
-	pp         *lps.PriorityPool
-	bt         *lps.BalanceTracker
+	pp         *vfs.PriorityPool
+	bt         *vfs.BalanceTracker
 
-	defaultPosFactors, defaultNegFactors lps.PriceFactors
+	defaultPosFactors, defaultNegFactors vfs.PriceFactors
 	posExpTC, negExpTC                   uint64
 	minCap                               uint64 // The minimal capacity value allowed for any client
 	connectedBias                        time.Duration
@@ -101,7 +101,7 @@ type clientInfo struct {
 	peer                clientPoolPeer
 	connected, priority bool
 	connectedAt         mclock.AbsTime
-	balance             *lps.NodeBalance
+	balance             *vfs.NodeBalance
 }
 
 // newClientPool creates a new client pool
@@ -115,8 +115,8 @@ func newClientPool(ns *nodestate.NodeStateMachine, lespayDb ethdb.Database, minC
 		connectedBias: connectedBias,
 		removePeer:    removePeer,
 	}
-	pool.bt = lps.NewBalanceTracker(ns, balanceTrackerSetup, lespayDb, clock, &utils.Expirer{}, &utils.Expirer{})
-	pool.pp = lps.NewPriorityPool(ns, priorityPoolSetup, clock, minCap, connectedBias, 4)
+	pool.bt = vfs.NewBalanceTracker(ns, balanceTrackerSetup, lespayDb, clock, &utils.Expirer{}, &utils.Expirer{})
+	pool.pp = vfs.NewPriorityPool(ns, priorityPoolSetup, clock, minCap, connectedBias, 4)
 
 	// set default expiration constants used by tests
 	// Note: server overwrites this if token sale is active
@@ -221,7 +221,7 @@ func (f *clientPool) connect(peer clientPoolPeer) (uint64, error) {
 		}
 		f.ns.SetField(node, clientInfoField, c)
 		f.ns.SetField(node, connAddressField, freeID)
-		if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*lps.NodeBalance); c.balance == nil {
+		if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*vfs.NodeBalance); c.balance == nil {
 			f.disconnect(peer)
 			return 0, nil
 		}
@@ -266,7 +266,7 @@ func (f *clientPool) disconnectNode(node *enode.Node) {
 }
 
 // setDefaultFactors sets the default price factors applied to subsequently connected clients
-func (f *clientPool) setDefaultFactors(posFactors, negFactors lps.PriceFactors) {
+func (f *clientPool) setDefaultFactors(posFactors, negFactors vfs.PriceFactors) {
 	f.lock.Lock()
 	defer f.lock.Unlock()
 
@@ -305,7 +305,7 @@ func (f *clientPool) setCapacity(node *enode.Node, freeID string, capacity uint6
 			c = &clientInfo{node: node}
 			f.ns.SetField(node, clientInfoField, c)
 			f.ns.SetField(node, connAddressField, freeID)
-			if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*lps.NodeBalance); c.balance == nil {
+			if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*vfs.NodeBalance); c.balance == nil {
 				log.Error("BalanceField is missing", "node", node.ID())
 				return 0, fmt.Errorf("BalanceField of %064x is missing", node.ID())
 			}
@@ -371,7 +371,7 @@ func (f *clientPool) forClients(ids []enode.ID, cb func(client *clientInfo)) {
 			c = &clientInfo{node: node}
 			f.ns.SetField(node, clientInfoField, c)
 			f.ns.SetField(node, connAddressField, "")
-			if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*lps.NodeBalance); c.balance != nil {
+			if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*vfs.NodeBalance); c.balance != nil {
 				cb(c)
 			} else {
 				log.Error("BalanceField is missing")

@@ -24,7 +24,7 @@ import (
 
 	"github.com/ethereum/go-ethereum/common/mclock"
 	"github.com/ethereum/go-ethereum/core/rawdb"
-	lps "github.com/ethereum/go-ethereum/les/lespay/server"
+	vfs "github.com/ethereum/go-ethereum/les/vflux/server"
 	"github.com/ethereum/go-ethereum/p2p/enode"
 	"github.com/ethereum/go-ethereum/p2p/enr"
 	"github.com/ethereum/go-ethereum/p2p/nodestate"
@@ -100,7 +100,7 @@ func getBalance(pool *clientPool, p *poolTestPeer) (pos, neg uint64) {
 	if temp {
 		pool.ns.SetField(p.node, connAddressField, p.freeClientId())
 	}
-	n, _ := pool.ns.GetField(p.node, pool.BalanceField).(*lps.NodeBalance)
+	n, _ := pool.ns.GetField(p.node, pool.BalanceField).(*vfs.NodeBalance)
 	pos, neg = n.GetBalance()
 	if temp {
 		pool.ns.SetField(p.node, connAddressField, nil)
@@ -138,7 +138,7 @@ func testClientPool(t *testing.T, activeLimit, clientCount, paidCount int, rando
 	pool.ns.Start()
 
 	pool.setLimits(activeLimit, uint64(activeLimit))
-	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
+	pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
 
 	// pool should accept new peers up to its connected limit
 	for i := 0; i < activeLimit; i++ {
@@ -243,7 +243,7 @@ func TestConnectPaidClient(t *testing.T) {
 	pool.ns.Start()
 	defer pool.stop()
 	pool.setLimits(10, uint64(10))
-	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
+	pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
 
 	// Add balance for an external client and mark it as paid client
 	addBalance(pool, newPoolTestPeer(0, nil).node.ID(), int64(time.Minute))
@@ -259,7 +259,7 @@ func TestConnectPaidClientToSmallPool(t *testing.T) {
 	pool.ns.Start()
 	defer pool.stop()
 	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
-	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
+	pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
 
 	// Add balance for an external client and mark it as paid client
 	addBalance(pool, newPoolTestPeer(0, nil).node.ID(), int64(time.Minute))
@@ -278,7 +278,7 @@ func TestConnectPaidClientToFullPool(t *testing.T) {
 	pool.ns.Start()
 	defer pool.stop()
 	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
-	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
+	pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
 
 	for i := 0; i < 10; i++ {
 		addBalance(pool, newPoolTestPeer(i, nil).node.ID(), int64(time.Second*20))
@@ -309,7 +309,7 @@ func TestPaidClientKickedOut(t *testing.T) {
 	pool.bt.SetExpirationTCs(0, 0)
 	defer pool.stop()
 	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
-	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
+	pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
 
 	for i := 0; i < 10; i++ {
 		addBalance(pool, newPoolTestPeer(i, kickedCh).node.ID(), 10000000000) // 10 second allowance
@@ -339,7 +339,7 @@ func TestConnectFreeClient(t *testing.T) {
 	pool.ns.Start()
 	defer pool.stop()
 	pool.setLimits(10, uint64(10))
-	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
+	pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
 	if cap, _ := pool.connect(newPoolTestPeer(0, nil)); cap == 0 {
 		t.Fatalf("Failed to connect free client")
 	}
@@ -356,7 +356,7 @@ func TestConnectFreeClientToFullPool(t *testing.T) {
 	pool.ns.Start()
 	defer pool.stop()
 	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
-	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
+	pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
 
 	for i := 0; i < 10; i++ {
 		pool.connect(newPoolTestPeer(i, nil))
@@ -386,7 +386,7 @@ func TestFreeClientKickedOut(t *testing.T) {
 	pool.ns.Start()
 	defer pool.stop()
 	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
-	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
+	pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
 
 	for i := 0; i < 10; i++ {
 		pool.connect(newPoolTestPeer(i, kicked))
@@ -428,7 +428,7 @@ func TestPositiveBalanceCalculation(t *testing.T) {
 	pool.ns.Start()
 	defer pool.stop()
 	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
-	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
+	pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
 
 	addBalance(pool, newPoolTestPeer(0, kicked).node.ID(), int64(time.Minute*3))
 	testPriorityConnect(t, pool, newPoolTestPeer(0, kicked), 10, true)
@@ -452,7 +452,7 @@ func TestDowngradePriorityClient(t *testing.T) {
 	pool.ns.Start()
 	defer pool.stop()
 	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
-	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
+	pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
 
 	p := newPoolTestPeer(0, kicked)
 	addBalance(pool, p.node.ID(), int64(time.Minute))
@@ -487,7 +487,7 @@ func TestNegativeBalanceCalculation(t *testing.T) {
 	pool.ns.Start()
 	defer pool.stop()
 	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
-	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1})
+	pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1})
 
 	for i := 0; i < 10; i++ {
 		pool.connect(newPoolTestPeer(i, nil))
@@ -564,7 +564,7 @@ func TestInactiveClient(t *testing.T) {
 	if p2.cap != 0 {
 		t.Fatalf("Failed to deactivate peer #2")
 	}
-	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0})
+	pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0})
 	p4 := newPoolTestPeer(4, nil)
 	addBalance(pool, p4.node.ID(), 1500*int64(time.Second))
 	// p1: 1000 p2: 500 p3: 2000 p4: 1500

les/peer.go (20 changed lines)
@@ -32,9 +32,9 @@ import (
 	"github.com/ethereum/go-ethereum/core/forkid"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/les/flowcontrol"
-	lpc "github.com/ethereum/go-ethereum/les/lespay/client"
-	lps "github.com/ethereum/go-ethereum/les/lespay/server"
 	"github.com/ethereum/go-ethereum/les/utils"
+	vfc "github.com/ethereum/go-ethereum/les/vflux/client"
+	vfs "github.com/ethereum/go-ethereum/les/vflux/server"
 	"github.com/ethereum/go-ethereum/light"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/params"
@@ -349,8 +349,8 @@ type serverPeer struct {
 
 	fcServer         *flowcontrol.ServerNode // Client side mirror token bucket.
 	vtLock           sync.Mutex
-	valueTracker     *lpc.ValueTracker
-	nodeValueTracker *lpc.NodeValueTracker
+	valueTracker     *vfc.ValueTracker
+	nodeValueTracker *vfc.NodeValueTracker
 	sentReqs         map[uint64]sentReqEntry
 
 	// Statistics
@@ -676,7 +676,7 @@ func (p *serverPeer) Handshake(genesis common.Hash, forkid forkid.ID, forkFilter
 
 // setValueTracker sets the value tracker references for connected servers. Note that the
 // references should be removed upon disconnection by setValueTracker(nil, nil).
-func (p *serverPeer) setValueTracker(vt *lpc.ValueTracker, nvt *lpc.NodeValueTracker) {
+func (p *serverPeer) setValueTracker(vt *vfc.ValueTracker, nvt *vfc.NodeValueTracker) {
 	p.vtLock.Lock()
 	p.valueTracker = vt
 	p.nodeValueTracker = nvt
@@ -739,17 +739,17 @@ func (p *serverPeer) answeredRequest(id uint64) {
 		return
 	}
 	var (
-		vtReqs   [2]lpc.ServedRequest
+		vtReqs   [2]vfc.ServedRequest
 		reqCount int
 	)
 	m := requestMapping[e.reqType]
 	if m.rest == -1 || e.amount <= 1 {
 		reqCount = 1
-		vtReqs[0] = lpc.ServedRequest{ReqType: uint32(m.first), Amount: e.amount}
+		vtReqs[0] = vfc.ServedRequest{ReqType: uint32(m.first), Amount: e.amount}
 	} else {
 		reqCount = 2
-		vtReqs[0] = lpc.ServedRequest{ReqType: uint32(m.first), Amount: 1}
-		vtReqs[1] = lpc.ServedRequest{ReqType: uint32(m.rest), Amount: e.amount - 1}
+		vtReqs[0] = vfc.ServedRequest{ReqType: uint32(m.first), Amount: 1}
+		vtReqs[1] = vfc.ServedRequest{ReqType: uint32(m.rest), Amount: e.amount - 1}
 	}
 	dt := time.Duration(mclock.Now() - e.at)
 	vt.Served(nvt, vtReqs[:reqCount], dt)
@@ -765,7 +765,7 @@ type clientPeer struct {
 	responseLock  sync.Mutex
 	responseCount uint64 // Counter to generate an unique id for request processing.
 
-	balance *lps.NodeBalance
+	balance *vfs.NodeBalance
 
 	// invalidLock is used for protecting invalidCount.
 	invalidLock sync.RWMutex

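The answeredRequest hunk above only switches the ServedRequest type to the vfc package; the reporting logic is unchanged. As an illustration (this helper is not part of the commit, and it assumes the reqMapping type from the protocol hunks below and uint32 amounts), the first element of a multi-request is credited to the ".first" basket entry and the remaining amount to ".rest":

	// Illustrative helper, not in the commit: split a served multi-request into
	// the value-tracker entries that answeredRequest reports via vt.Served.
	func splitServedRequest(m reqMapping, amount uint32) []vfc.ServedRequest {
		if m.rest == -1 || amount <= 1 {
			// single request, or no ".rest" entry defined for this request type
			return []vfc.ServedRequest{{ReqType: uint32(m.first), Amount: amount}}
		}
		return []vfc.ServedRequest{
			{ReqType: uint32(m.first), Amount: 1},
			{ReqType: uint32(m.rest), Amount: amount - 1},
		}
	}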
@@ -25,7 +25,7 @@ import (
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
-	lpc "github.com/ethereum/go-ethereum/les/lespay/client"
+	vfc "github.com/ethereum/go-ethereum/les/vflux/client"
 	"github.com/ethereum/go-ethereum/p2p/enode"
 	"github.com/ethereum/go-ethereum/rlp"
 )
@@ -89,7 +89,7 @@ type requestInfo struct {
 	refBasketFirst, refBasketRest float64
 }
 
-// reqMapping maps an LES request to one or two lespay service vector entries.
+// reqMapping maps an LES request to one or two vflux service vector entries.
 // If rest != -1 and the request type is used with amounts larger than one then the
 // first one of the multi-request is mapped to first while the rest is mapped to rest.
 type reqMapping struct {
@@ -98,7 +98,7 @@ type reqMapping struct {
 
 var (
 	// requests describes the available LES request types and their initializing amounts
-	// in the lespay/client.ValueTracker reference basket. Initial values are estimates
+	// in the vfc.ValueTracker reference basket. Initial values are estimates
 	// based on the same values as the server's default cost estimates (reqAvgTimeCost).
 	requests = map[uint64]requestInfo{
 		GetBlockHeadersMsg: {"GetBlockHeaders", MaxHeaderFetch, 10, 1000},
@@ -110,25 +110,25 @@ var (
 		SendTxV2Msg:        {"SendTxV2", MaxTxSend, 1, 0},
 		GetTxStatusMsg:     {"GetTxStatus", MaxTxStatus, 10, 0},
 	}
-	requestList    []lpc.RequestInfo
+	requestList    []vfc.RequestInfo
 	requestMapping map[uint32]reqMapping
 )
 
-// init creates a request list and mapping between protocol message codes and lespay
+// init creates a request list and mapping between protocol message codes and vflux
 // service vector indices.
 func init() {
 	requestMapping = make(map[uint32]reqMapping)
 	for code, req := range requests {
 		cost := reqAvgTimeCost[code]
 		rm := reqMapping{len(requestList), -1}
-		requestList = append(requestList, lpc.RequestInfo{
+		requestList = append(requestList, vfc.RequestInfo{
 			Name:       req.name + ".first",
 			InitAmount: req.refBasketFirst,
 			InitValue:  float64(cost.baseCost + cost.reqCost),
 		})
 		if req.refBasketRest != 0 {
 			rm.rest = len(requestList)
-			requestList = append(requestList, lpc.RequestInfo{
+			requestList = append(requestList, vfc.RequestInfo{
 				Name:       req.name + ".rest",
 				InitAmount: req.refBasketRest,
 				InitValue:  float64(cost.reqCost),

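For concreteness, a sketch of what the init() above produces for one request type; the exact indices depend on Go's randomized map iteration order, and the final requestMapping assignment sits outside the hunk shown:

	// Illustrative only. For GetBlockHeadersMsg = {"GetBlockHeaders", MaxHeaderFetch, 10, 1000}
	// and cost := reqAvgTimeCost[GetBlockHeadersMsg], the loop appends two entries:
	//
	//	vfc.RequestInfo{Name: "GetBlockHeaders.first", InitAmount: 10, InitValue: float64(cost.baseCost + cost.reqCost)}
	//	vfc.RequestInfo{Name: "GetBlockHeaders.rest", InitAmount: 1000, InitValue: float64(cost.reqCost)}
	//
	// and records their positions for that message code in requestMapping.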
@@ -26,7 +26,7 @@ import (
 	"github.com/ethereum/go-ethereum/eth/ethconfig"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/les/flowcontrol"
-	lps "github.com/ethereum/go-ethereum/les/lespay/server"
+	vfs "github.com/ethereum/go-ethereum/les/vflux/server"
 	"github.com/ethereum/go-ethereum/light"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/node"
@@ -43,8 +43,8 @@ var (
 	clientPeerField     = serverSetup.NewField("clientPeer", reflect.TypeOf(&clientPeer{}))
 	clientInfoField     = serverSetup.NewField("clientInfo", reflect.TypeOf(&clientInfo{}))
 	connAddressField    = serverSetup.NewField("connAddr", reflect.TypeOf(""))
-	balanceTrackerSetup = lps.NewBalanceTrackerSetup(serverSetup)
-	priorityPoolSetup   = lps.NewPriorityPoolSetup(serverSetup)
+	balanceTrackerSetup = vfs.NewBalanceTrackerSetup(serverSetup)
+	priorityPoolSetup   = vfs.NewPriorityPoolSetup(serverSetup)
 )
 
 func init() {
@@ -137,7 +137,7 @@ func NewLesServer(node *node.Node, e ethBackend, config *ethconfig.Config) (*Les
 	}
 	srv.fcManager.SetCapacityLimits(srv.minCapacity, srv.maxCapacity, srv.minCapacity*2)
 	srv.clientPool = newClientPool(ns, srv.chainDb, srv.minCapacity, defaultConnectedBias, mclock.System{}, srv.dropClient)
-	srv.clientPool.setDefaultFactors(lps.PriceFactors{TimeFactor: 0, CapacityFactor: 1, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 0, CapacityFactor: 1, RequestFactor: 1})
+	srv.clientPool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 0, CapacityFactor: 1, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 0, CapacityFactor: 1, RequestFactor: 1})
 
 	checkpoint := srv.latestLocalCheckpoint()
 	if !checkpoint.Empty() {

@@ -33,7 +33,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/ethdb"
-	lps "github.com/ethereum/go-ethereum/les/lespay/server"
+	vfs "github.com/ethereum/go-ethereum/les/vflux/server"
 	"github.com/ethereum/go-ethereum/light"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/metrics"
@@ -169,7 +169,7 @@ func (h *serverHandler) handle(p *clientPeer) error {
 		p.Log().Debug("Light Ethereum peer rejected", "err", errFullClientPool)
 		return errFullClientPool
 	}
	p.balance, _ = h.server.ns.GetField(p.Node(), h.server.clientPool.BalanceField).(*lps.NodeBalance)
+	p.balance, _ = h.server.ns.GetField(p.Node(), h.server.clientPool.BalanceField).(*vfs.NodeBalance)
 	if p.balance == nil {
 		return p2p.DiscRequested
 	}

@@ -26,8 +26,8 @@ import (
 
 	"github.com/ethereum/go-ethereum/common/mclock"
 	"github.com/ethereum/go-ethereum/ethdb"
-	lpc "github.com/ethereum/go-ethereum/les/lespay/client"
 	"github.com/ethereum/go-ethereum/les/utils"
+	vfc "github.com/ethereum/go-ethereum/les/vflux/client"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p/enode"
 	"github.com/ethereum/go-ethereum/p2p/enr"
@@ -58,23 +58,23 @@ type serverPool struct {
 	db    ethdb.KeyValueStore
 
 	ns           *nodestate.NodeStateMachine
-	vt           *lpc.ValueTracker
+	vt           *vfc.ValueTracker
 	mixer        *enode.FairMix
 	mixSources   []enode.Iterator
 	dialIterator enode.Iterator
 	validSchemes enr.IdentityScheme
 	trustedURLs  []string
-	fillSet      *lpc.FillSet
+	fillSet      *vfc.FillSet
 	queryFails   uint32
 
 	timeoutLock      sync.RWMutex
 	timeout          time.Duration
-	timeWeights      lpc.ResponseTimeWeights
+	timeWeights      vfc.ResponseTimeWeights
 	timeoutRefreshed mclock.AbsTime
 }
 
 // nodeHistory keeps track of dial costs which determine node weight together with the
-// service value calculated by lpc.ValueTracker.
+// service value calculated by vfc.ValueTracker.
 type nodeHistory struct {
 	dialCost                       utils.ExpiredValue
 	redialWaitStart, redialWaitEnd int64 // unix time (seconds)
@@ -127,11 +127,11 @@ var (
 		},
 	)
 	sfiNodeWeight     = serverPoolSetup.NewField("nodeWeight", reflect.TypeOf(uint64(0)))
-	sfiConnectedStats = serverPoolSetup.NewField("connectedStats", reflect.TypeOf(lpc.ResponseTimeStats{}))
+	sfiConnectedStats = serverPoolSetup.NewField("connectedStats", reflect.TypeOf(vfc.ResponseTimeStats{}))
 )
 
 // newServerPool creates a new server pool
-func newServerPool(db ethdb.KeyValueStore, dbKey []byte, vt *lpc.ValueTracker, mixTimeout time.Duration, query queryFunc, clock mclock.Clock, trustedURLs []string) *serverPool {
+func newServerPool(db ethdb.KeyValueStore, dbKey []byte, vt *vfc.ValueTracker, mixTimeout time.Duration, query queryFunc, clock mclock.Clock, trustedURLs []string) *serverPool {
 	s := &serverPool{
 		db:    db,
 		clock: clock,
@@ -143,8 +143,8 @@ func newServerPool(db ethdb.KeyValueStore, dbKey []byte, vt *lpc.ValueTracker, m
 	}
 	s.recalTimeout()
 	s.mixer = enode.NewFairMix(mixTimeout)
-	knownSelector := lpc.NewWrsIterator(s.ns, sfHasValue, sfDisableSelection, sfiNodeWeight)
-	alwaysConnect := lpc.NewQueueIterator(s.ns, sfAlwaysConnect, sfDisableSelection, true, nil)
+	knownSelector := vfc.NewWrsIterator(s.ns, sfHasValue, sfDisableSelection, sfiNodeWeight)
+	alwaysConnect := vfc.NewQueueIterator(s.ns, sfAlwaysConnect, sfDisableSelection, true, nil)
 	s.mixSources = append(s.mixSources, knownSelector)
 	s.mixSources = append(s.mixSources, alwaysConnect)
 
@@ -183,7 +183,7 @@ func (s *serverPool) addSource(source enode.Iterator) {
 // Nodes that are filtered out and does not appear on the output iterator are put back
 // into redialWait state.
 func (s *serverPool) addPreNegFilter(input enode.Iterator, query queryFunc) enode.Iterator {
-	s.fillSet = lpc.NewFillSet(s.ns, input, sfQueried)
+	s.fillSet = vfc.NewFillSet(s.ns, input, sfQueried)
 	s.ns.SubscribeState(sfQueried, func(n *enode.Node, oldState, newState nodestate.Flags) {
 		if newState.Equals(sfQueried) {
 			fails := atomic.LoadUint32(&s.queryFails)
@@ -221,7 +221,7 @@ func (s *serverPool) addPreNegFilter(input enode.Iterator, query queryFunc) enod
 			}()
 		}
 	})
-	return lpc.NewQueueIterator(s.ns, sfCanDial, nodestate.Flags{}, false, func(waiting bool) {
+	return vfc.NewQueueIterator(s.ns, sfCanDial, nodestate.Flags{}, false, func(waiting bool) {
 		if waiting {
 			s.fillSet.SetTarget(preNegLimit)
 		} else {
@@ -330,7 +330,7 @@ func (s *serverPool) recalTimeout() {
 	s.timeoutLock.Lock()
 	if s.timeout != timeout {
 		s.timeout = timeout
-		s.timeWeights = lpc.TimeoutWeights(s.timeout)
+		s.timeWeights = vfc.TimeoutWeights(s.timeout)
 
 		suggestedTimeoutGauge.Update(int64(s.timeout / time.Millisecond))
 		totalValueGauge.Update(int64(rts.Value(s.timeWeights, s.vt.StatsExpFactor())))
@@ -349,7 +349,7 @@ func (s *serverPool) getTimeout() time.Duration {
 
 // getTimeoutAndWeight returns the recommended request timeout as well as the
 // response time weight which is necessary to calculate service value.
-func (s *serverPool) getTimeoutAndWeight() (time.Duration, lpc.ResponseTimeWeights) {
+func (s *serverPool) getTimeoutAndWeight() (time.Duration, vfc.ResponseTimeWeights) {
 	s.recalTimeout()
 	s.timeoutLock.RLock()
 	defer s.timeoutLock.RUnlock()
@@ -381,7 +381,7 @@ func (s *serverPool) serviceValue(node *enode.Node) (sessionValue, totalValue fl
 	expFactor := s.vt.StatsExpFactor()
 
 	totalValue = currentStats.Value(timeWeights, expFactor)
-	if connStats, ok := s.ns.GetField(node, sfiConnectedStats).(lpc.ResponseTimeStats); ok {
+	if connStats, ok := s.ns.GetField(node, sfiConnectedStats).(vfc.ResponseTimeStats); ok {
 		diff := currentStats
 		diff.SubStats(&connStats)
 		sessionValue = diff.Value(timeWeights, expFactor)

@@ -25,7 +25,7 @@ import (
 	"github.com/ethereum/go-ethereum/common/mclock"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/ethdb/memorydb"
-	lpc "github.com/ethereum/go-ethereum/les/lespay/client"
+	vfc "github.com/ethereum/go-ethereum/les/vflux/client"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/p2p/enode"
 	"github.com/ethereum/go-ethereum/p2p/enr"
@@ -55,7 +55,7 @@ type serverPoolTest struct {
 	clock              *mclock.Simulated
 	quit               chan struct{}
 	preNeg, preNegFail bool
-	vt                 *lpc.ValueTracker
+	vt                 *vfc.ValueTracker
 	sp                 *serverPool
 	input              enode.Iterator
 	testNodes          []spTestNode
@@ -144,7 +144,7 @@ func (s *serverPoolTest) start() {
 		}
 	}
 
-	s.vt = lpc.NewValueTracker(s.db, s.clock, requestList, time.Minute, 1/float64(time.Hour), 1/float64(time.Hour*100), 1/float64(time.Hour*1000))
+	s.vt = vfc.NewValueTracker(s.db, s.clock, requestList, time.Minute, 1/float64(time.Hour), 1/float64(time.Hour*100), 1/float64(time.Hour*1000))
 	s.sp = newServerPool(s.db, []byte("serverpool:"), s.vt, 0, testQuery, s.clock, s.trusted)
 	s.sp.addSource(s.input)
 	s.sp.validSchemes = enode.ValidSchemesForTesting
@@ -224,7 +224,7 @@ func (s *serverPoolTest) run() {
 				n.peer = &serverPeer{peerCommons: peerCommons{Peer: p2p.NewPeer(id, "", nil)}}
 				s.sp.registerPeer(n.peer)
 				if n.service {
-					s.vt.Served(s.vt.GetNode(id), []lpc.ServedRequest{{ReqType: 0, Amount: 100}}, 0)
+					s.vt.Served(s.vt.GetNode(id), []vfc.ServedRequest{{ReqType: 0, Amount: 100}}, 0)
 				}
 			}
 		}

@@ -24,7 +24,7 @@ import (
 	"github.com/ethereum/go-ethereum/p2p/enode"
 )
 
-// PrivateClientAPI implements the lespay client side API
+// PrivateClientAPI implements the vflux client side API
type PrivateClientAPI struct {
 	vt *ValueTracker
 }