diff --git a/cmd/geth/les_test.go b/cmd/geth/les_test.go
index ae9ed5ecf..259d4a806 100644
--- a/cmd/geth/les_test.go
+++ b/cmd/geth/les_test.go
@@ -152,7 +152,7 @@ func TestPriorityClient(t *testing.T) {
defer prioCli.killAndWait()
// 3_000_000_000 once we move to Go 1.13
tokens := 3000000000
- lightServer.callRPC(nil, "les_addBalance", prioCli.getNodeInfo().ID, tokens, "foobar")
+ lightServer.callRPC(nil, "les_addBalance", prioCli.getNodeInfo().ID, tokens)
prioCli.addPeer(lightServer)
// Check if priority client is actually syncing and the regular client got kicked out
diff --git a/common/prque/lazyqueue.go b/common/prque/lazyqueue.go
index 92ddd77f6..52403df46 100644
--- a/common/prque/lazyqueue.go
+++ b/common/prque/lazyqueue.go
@@ -36,14 +36,15 @@ type LazyQueue struct {
// Items are stored in one of two internal queues ordered by estimated max
// priority until the next and the next-after-next refresh. Update and Refresh
// always places items in queue[1].
- queue [2]*sstack
- popQueue *sstack
- period time.Duration
- maxUntil mclock.AbsTime
- indexOffset int
- setIndex SetIndexCallback
- priority PriorityCallback
- maxPriority MaxPriorityCallback
+ queue [2]*sstack
+ popQueue *sstack
+ period time.Duration
+ maxUntil mclock.AbsTime
+ indexOffset int
+ setIndex SetIndexCallback
+ priority PriorityCallback
+ maxPriority MaxPriorityCallback
+ lastRefresh1, lastRefresh2 mclock.AbsTime
}
type (
@@ -54,14 +55,17 @@ type (
// NewLazyQueue creates a new lazy queue
func NewLazyQueue(setIndex SetIndexCallback, priority PriorityCallback, maxPriority MaxPriorityCallback, clock mclock.Clock, refreshPeriod time.Duration) *LazyQueue {
q := &LazyQueue{
- popQueue: newSstack(nil),
- setIndex: setIndex,
- priority: priority,
- maxPriority: maxPriority,
- clock: clock,
- period: refreshPeriod}
+ popQueue: newSstack(nil),
+ setIndex: setIndex,
+ priority: priority,
+ maxPriority: maxPriority,
+ clock: clock,
+ period: refreshPeriod,
+ lastRefresh1: clock.Now(),
+ lastRefresh2: clock.Now(),
+ }
q.Reset()
- q.Refresh()
+ q.refresh(clock.Now())
return q
}
@@ -71,9 +75,19 @@ func (q *LazyQueue) Reset() {
q.queue[1] = newSstack(q.setIndex1)
}
-// Refresh should be called at least with the frequency specified by the refreshPeriod parameter
+// Refresh performs queue re-evaluation if necessary
func (q *LazyQueue) Refresh() {
- q.maxUntil = q.clock.Now() + mclock.AbsTime(q.period)
+ now := q.clock.Now()
+ for time.Duration(now-q.lastRefresh2) >= q.period*2 {
+ q.refresh(now)
+ q.lastRefresh2 = q.lastRefresh1
+ q.lastRefresh1 = now
+ }
+}
+
+// refresh re-evaluates items in the older queue and swaps the two queues
+func (q *LazyQueue) refresh(now mclock.AbsTime) {
+ q.maxUntil = now + mclock.AbsTime(q.period)
for q.queue[0].Len() != 0 {
q.Push(heap.Pop(q.queue[0]).(*item).value)
}
@@ -139,6 +153,7 @@ func (q *LazyQueue) MultiPop(callback func(data interface{}, priority int64) boo
}
return
}
+ nextIndex = q.peekIndex() // re-check because callback is allowed to push items back
}
}
}
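
For orientation, a minimal sketch of how a `LazyQueue` is driven after this change, modeled on the old clientpool callbacks (the `queued` item type and the concrete priorities and durations are illustrative assumptions, not part of the patch). The point of the fix is that `Refresh` now tracks its own timing via `lastRefresh1`/`lastRefresh2`, so callers may invoke it at any frequency and re-evaluation only happens once twice the refresh period has elapsed:

```go
package main

import (
	"time"

	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/common/prque"
)

// queued is an illustrative item; queueIndex is maintained by the setIndex
// callback so the queue can later update or remove the item in place.
type queued struct {
	pri        int64
	queueIndex int
}

func main() {
	clock := &mclock.Simulated{}
	q := prque.NewLazyQueue(
		func(data interface{}, index int) { data.(*queued).queueIndex = index },          // setIndex
		func(data interface{}, now mclock.AbsTime) int64 { return data.(*queued).pri },   // actual priority
		func(data interface{}, until mclock.AbsTime) int64 { return data.(*queued).pri }, // upper estimate until the given time
		clock,
		10*time.Second, // refreshPeriod
	)
	q.Push(&queued{pri: 5, queueIndex: -1})
	clock.Run(25 * time.Second)
	// Safe to call at any frequency now; internally it re-evaluates the
	// older queue only while now-lastRefresh2 >= 2*refreshPeriod.
	q.Refresh()
	// MultiPop keeps popping while the callback returns true; the fix above
	// re-reads the top index because the callback may push items back.
	q.MultiPop(func(data interface{}, priority int64) bool { return false })
}
```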
diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go
index 41d865778..300c2b054 100644
--- a/internal/web3ext/web3ext.go
+++ b/internal/web3ext/web3ext.go
@@ -844,7 +844,7 @@ web3._extend({
new web3._extend.Method({
name: 'addBalance',
call: 'les_addBalance',
- params: 3
+ params: 2
}),
],
properties:
diff --git a/les/api.go b/les/api.go
index cd5c99a5f..66d133b85 100644
--- a/les/api.go
+++ b/les/api.go
@@ -19,11 +19,11 @@ package les
import (
"errors"
"fmt"
- "math"
"time"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/mclock"
+ lps "github.com/ethereum/go-ethereum/les/lespay/server"
"github.com/ethereum/go-ethereum/p2p/enode"
)
@@ -31,16 +31,13 @@ var (
errNoCheckpoint = errors.New("no local checkpoint provided")
errNotActivated = errors.New("checkpoint registrar is not activated")
errUnknownBenchmarkType = errors.New("unknown benchmark type")
- errBalanceOverflow = errors.New("balance overflow")
errNoPriority = errors.New("priority too low to raise capacity")
)
-const maxBalance = math.MaxInt64
-
// PrivateLightServerAPI provides an API to access the LES light server.
type PrivateLightServerAPI struct {
server *LesServer
- defaultPosFactors, defaultNegFactors priceFactors
+ defaultPosFactors, defaultNegFactors lps.PriceFactors
}
// NewPrivateLightServerAPI creates a new LES light server API.
@@ -57,7 +54,6 @@ func (api *PrivateLightServerAPI) ServerInfo() map[string]interface{} {
res := make(map[string]interface{})
res["minimumCapacity"] = api.server.minCapacity
res["maximumCapacity"] = api.server.maxCapacity
- res["freeClientCapacity"] = api.server.freeCapacity
res["totalCapacity"], res["totalConnectedCapacity"], res["priorityConnectedCapacity"] = api.server.clientPool.capacityInfo()
return res
}
@@ -65,9 +61,8 @@ func (api *PrivateLightServerAPI) ServerInfo() map[string]interface{} {
// ClientInfo returns information about clients listed in the ids list or matching the given tags
func (api *PrivateLightServerAPI) ClientInfo(ids []enode.ID) map[enode.ID]map[string]interface{} {
res := make(map[enode.ID]map[string]interface{})
- api.server.clientPool.forClients(ids, func(client *clientInfo, id enode.ID) error {
- res[id] = api.clientInfo(client, id)
- return nil
+ api.server.clientPool.forClients(ids, func(client *clientInfo) {
+ res[client.node.ID()] = api.clientInfo(client)
})
return res
}
@@ -80,48 +75,40 @@ func (api *PrivateLightServerAPI) ClientInfo(ids []enode.ID) map[enode.ID]map[st
// assigned to it.
func (api *PrivateLightServerAPI) PriorityClientInfo(start, stop enode.ID, maxCount int) map[enode.ID]map[string]interface{} {
res := make(map[enode.ID]map[string]interface{})
- ids := api.server.clientPool.ndb.getPosBalanceIDs(start, stop, maxCount+1)
+ ids := api.server.clientPool.bt.GetPosBalanceIDs(start, stop, maxCount+1)
if len(ids) > maxCount {
res[ids[maxCount]] = make(map[string]interface{})
ids = ids[:maxCount]
}
if len(ids) != 0 {
- api.server.clientPool.forClients(ids, func(client *clientInfo, id enode.ID) error {
- res[id] = api.clientInfo(client, id)
- return nil
+ api.server.clientPool.forClients(ids, func(client *clientInfo) {
+ res[client.node.ID()] = api.clientInfo(client)
})
}
return res
}
// clientInfo creates a client info data structure
-func (api *PrivateLightServerAPI) clientInfo(c *clientInfo, id enode.ID) map[string]interface{} {
+func (api *PrivateLightServerAPI) clientInfo(c *clientInfo) map[string]interface{} {
info := make(map[string]interface{})
- if c != nil {
- now := mclock.Now()
- info["isConnected"] = true
- info["connectionTime"] = float64(now-c.connectedAt) / float64(time.Second)
- info["capacity"] = c.capacity
- pb, nb := c.balanceTracker.getBalance(now)
- info["pricing/balance"], info["pricing/negBalance"] = pb, nb
- info["pricing/balanceMeta"] = c.balanceMetaInfo
- info["priority"] = pb != 0
- } else {
- info["isConnected"] = false
- pb := api.server.clientPool.ndb.getOrNewPB(id)
- info["pricing/balance"], info["pricing/balanceMeta"] = pb.value, pb.meta
- info["priority"] = pb.value != 0
+ pb, nb := c.balance.GetBalance()
+ info["isConnected"] = c.connected
+ info["pricing/balance"] = pb
+ info["priority"] = pb != 0
+ // cb := api.server.clientPool.ndb.getCurrencyBalance(id)
+ // info["pricing/currency"] = cb.amount
+ if c.connected {
+ info["connectionTime"] = float64(mclock.Now()-c.connectedAt) / float64(time.Second)
+ info["capacity"], _ = api.server.clientPool.ns.GetField(c.node, priorityPoolSetup.CapacityField).(uint64)
+ info["pricing/negBalance"] = nb
}
return info
}
// setParams either sets the given parameters for a single connected client (if specified)
// or the default parameters applicable to clients connected in the future
-func (api *PrivateLightServerAPI) setParams(params map[string]interface{}, client *clientInfo, posFactors, negFactors *priceFactors) (updateFactors bool, err error) {
+func (api *PrivateLightServerAPI) setParams(params map[string]interface{}, client *clientInfo, posFactors, negFactors *lps.PriceFactors) (updateFactors bool, err error) {
defParams := client == nil
- if !defParams {
- posFactors, negFactors = &client.posFactors, &client.negFactors
- }
for name, value := range params {
errValue := func() error {
return fmt.Errorf("invalid value for parameter '%s'", name)
@@ -137,20 +124,20 @@ func (api *PrivateLightServerAPI) setParams(params map[string]interface{}, clien
switch {
case name == "pricing/timeFactor":
- setFactor(&posFactors.timeFactor)
+ setFactor(&posFactors.TimeFactor)
case name == "pricing/capacityFactor":
- setFactor(&posFactors.capacityFactor)
+ setFactor(&posFactors.CapacityFactor)
case name == "pricing/requestCostFactor":
- setFactor(&posFactors.requestFactor)
+ setFactor(&posFactors.RequestFactor)
case name == "pricing/negative/timeFactor":
- setFactor(&negFactors.timeFactor)
+ setFactor(&negFactors.TimeFactor)
case name == "pricing/negative/capacityFactor":
- setFactor(&negFactors.capacityFactor)
+ setFactor(&negFactors.CapacityFactor)
case name == "pricing/negative/requestCostFactor":
- setFactor(&negFactors.requestFactor)
+ setFactor(&negFactors.RequestFactor)
case !defParams && name == "capacity":
if capacity, ok := value.(float64); ok && uint64(capacity) >= api.server.minCapacity {
- err = api.server.clientPool.setCapacity(client, uint64(capacity))
+ _, err = api.server.clientPool.setCapacity(client.node, client.address, uint64(capacity), 0, true)
// Don't have to call factor update explicitly. It's already done
// in setCapacity function.
} else {
@@ -170,27 +157,25 @@ func (api *PrivateLightServerAPI) setParams(params map[string]interface{}, clien
return
}
-// AddBalance updates the balance of a client (either overwrites it or adds to it).
-// It also updates the balance meta info string.
-func (api *PrivateLightServerAPI) AddBalance(id enode.ID, value int64, meta string) ([2]uint64, error) {
- oldBalance, newBalance, err := api.server.clientPool.addBalance(id, value, meta)
- return [2]uint64{oldBalance, newBalance}, err
-}
-
// SetClientParams sets client parameters for all clients listed in the ids list
// or all connected clients if the list is empty
func (api *PrivateLightServerAPI) SetClientParams(ids []enode.ID, params map[string]interface{}) error {
- return api.server.clientPool.forClients(ids, func(client *clientInfo, id enode.ID) error {
- if client != nil {
- update, err := api.setParams(params, client, nil, nil)
+ var err error
+ api.server.clientPool.forClients(ids, func(client *clientInfo) {
+ if client.connected {
+ posFactors, negFactors := client.balance.GetPriceFactors()
+ update, e := api.setParams(params, client, &posFactors, &negFactors)
if update {
- client.updatePriceFactors()
+ client.balance.SetPriceFactors(posFactors, negFactors)
+ }
+ if e != nil {
+ err = e
}
- return err
} else {
- return fmt.Errorf("client %064x is not connected", id[:])
+ err = fmt.Errorf("client %064x is not connected", client.node.ID())
}
})
+ return err
}
// SetDefaultParams sets the default parameters applicable to clients connected in the future
@@ -214,6 +199,15 @@ func (api *PrivateLightServerAPI) SetConnectedBias(bias time.Duration) error {
return nil
}
+// AddBalance adds the given amount to the balance of a client if possible and returns
+// the balance before and after the operation
+func (api *PrivateLightServerAPI) AddBalance(id enode.ID, amount int64) (balance [2]uint64, err error) {
+ api.server.clientPool.forClients([]enode.ID{id}, func(c *clientInfo) {
+ balance[0], balance[1], err = c.balance.AddBalance(amount)
+ })
+ return
+}
+
// Benchmark runs a request performance benchmark with a given set of measurement setups
// in multiple passes specified by passCount. The measurement time for each setup in each
// pass is specified in milliseconds by length.
@@ -304,13 +298,15 @@ func NewPrivateDebugAPI(server *LesServer) *PrivateDebugAPI {
// FreezeClient forces a temporary client freeze which normally happens when the server is overloaded
func (api *PrivateDebugAPI) FreezeClient(id enode.ID) error {
- return api.server.clientPool.forClients([]enode.ID{id}, func(c *clientInfo, id enode.ID) error {
- if c == nil {
- return fmt.Errorf("client %064x is not connected", id[:])
+ var err error
+ api.server.clientPool.forClients([]enode.ID{id}, func(c *clientInfo) {
+ if c.connected {
+ c.peer.freeze()
+ } else {
+ err = fmt.Errorf("client %064x is not connected", id[:])
}
- c.peer.freezeClient()
- return nil
})
+ return err
}
// PrivateLightAPI provides an API to access the LES light server or light client.
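
To illustrate the API change, here is a hedged sketch of driving the updated methods over RPC from Go (the endpoint, the amount, and the chosen parameter key are assumptions). `les_addBalance` now takes two parameters instead of three and returns the balance before and after the operation:

```go
package main

import (
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("http://127.0.0.1:8545") // assumed endpoint
	if err != nil {
		panic(err)
	}
	defer client.Close()

	var (
		id      enode.ID // the client's node ID, obtained elsewhere
		balance [2]uint64
	)
	// Two parameters instead of three: the meta string argument is gone.
	if err := client.Call(&balance, "les_addBalance", id, int64(3000000000)); err != nil {
		panic(err)
	}
	// balance[0] holds the value before the operation, balance[1] after.

	// Default price factors for future clients are still set via a params map.
	params := map[string]interface{}{"pricing/timeFactor": 1.0}
	if err := client.Call(nil, "les_setDefaultParams", params); err != nil {
		panic(err)
	}
}
```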
diff --git a/les/api_test.go b/les/api_test.go
index 7f6aca55c..2895264f6 100644
--- a/les/api_test.go
+++ b/les/api_test.go
@@ -107,7 +107,7 @@ func testCapacityAPI(t *testing.T, clientCount int) {
t.Fatalf("Failed to obtain rpc client: %v", err)
}
headNum, headHash := getHead(ctx, t, serverRpcClient)
- minCap, freeCap, totalCap := getCapacityInfo(ctx, t, serverRpcClient)
+ minCap, totalCap := getCapacityInfo(ctx, t, serverRpcClient)
testCap := totalCap * 3 / 4
t.Logf("Server testCap: %d minCap: %d head number: %d head hash: %064x\n", testCap, minCap, headNum, headHash)
reqMinCap := uint64(float64(testCap) * minRelCap / (minRelCap + float64(len(clients)-1)))
@@ -202,7 +202,7 @@ func testCapacityAPI(t *testing.T, clientCount int) {
weights := make([]float64, len(clients))
for c := 0; c < 5; c++ {
- setCapacity(ctx, t, serverRpcClient, clients[freeIdx].ID(), freeCap)
+ setCapacity(ctx, t, serverRpcClient, clients[freeIdx].ID(), minCap)
freeIdx = rand.Intn(len(clients))
var sum float64
for i := range clients {
@@ -214,7 +214,7 @@ func testCapacityAPI(t *testing.T, clientCount int) {
sum += weights[i]
}
for i, client := range clients {
- weights[i] *= float64(testCap-freeCap-100) / sum
+ weights[i] *= float64(testCap-minCap-100) / sum
capacity := uint64(weights[i])
if i != freeIdx && capacity < getCapacity(ctx, t, serverRpcClient, client.ID()) {
setCapacity(ctx, t, serverRpcClient, client.ID(), capacity)
@@ -227,7 +227,7 @@ func testCapacityAPI(t *testing.T, clientCount int) {
setCapacity(ctx, t, serverRpcClient, client.ID(), capacity)
}
}
- weights[freeIdx] = float64(freeCap)
+ weights[freeIdx] = float64(minCap)
for i := range clients {
weights[i] /= float64(testCap)
}
@@ -247,7 +247,7 @@ func testCapacityAPI(t *testing.T, clientCount int) {
default:
}
- _, _, totalCap = getCapacityInfo(ctx, t, serverRpcClient)
+ _, totalCap = getCapacityInfo(ctx, t, serverRpcClient)
if totalCap < testCap {
t.Log("Total capacity underrun")
close(stop)
@@ -370,7 +370,7 @@ func getCapacity(ctx context.Context, t *testing.T, server *rpc.Client, clientID
return uint64(vv)
}
-func getCapacityInfo(ctx context.Context, t *testing.T, server *rpc.Client) (minCap, freeCap, totalCap uint64) {
+func getCapacityInfo(ctx context.Context, t *testing.T, server *rpc.Client) (minCap, totalCap uint64) {
var res map[string]interface{}
if err := server.CallContext(ctx, &res, "les_serverInfo"); err != nil {
t.Fatalf("Failed to query server info: %v", err)
@@ -387,7 +387,6 @@ func getCapacityInfo(ctx context.Context, t *testing.T, server *rpc.Client) (min
return uint64(vv)
}
minCap = decode("minimumCapacity")
- freeCap = decode("freeClientCapacity")
totalCap = decode("totalCapacity")
return
}
diff --git a/les/balance.go b/les/balance.go
deleted file mode 100644
index 51cef15c8..000000000
--- a/les/balance.go
+++ /dev/null
@@ -1,389 +0,0 @@
-// Copyright 2019 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package les
-
-import (
- "sync"
- "time"
-
- "github.com/ethereum/go-ethereum/common/mclock"
-)
-
-const (
- balanceCallbackQueue = iota
- balanceCallbackZero
- balanceCallbackCount
-)
-
-// balanceTracker keeps track of the positive and negative balances of a connected
-// client and calculates actual and projected future priority values required by
-// prque.LazyQueue.
-type balanceTracker struct {
- lock sync.Mutex
- clock mclock.Clock
- stopped bool
- capacity uint64
- balance balance
- timeFactor, requestFactor float64
- negTimeFactor, negRequestFactor float64
- sumReqCost uint64
- lastUpdate, nextUpdate, initTime mclock.AbsTime
- updateEvent mclock.Timer
- // since only a limited and fixed number of callbacks are needed, they are
- // stored in a fixed size array ordered by priority threshold.
- callbacks [balanceCallbackCount]balanceCallback
- // callbackIndex maps balanceCallback constants to callbacks array indexes (-1 if not active)
- callbackIndex [balanceCallbackCount]int
- callbackCount int // number of active callbacks
-}
-
-// balance represents a pair of positive and negative balances
-type balance struct {
- pos, neg uint64
-}
-
-// balanceCallback represents a single callback that is activated when client priority
-// reaches the given threshold
-type balanceCallback struct {
- id int
- threshold int64
- callback func()
-}
-
-// init initializes balanceTracker
-func (bt *balanceTracker) init(clock mclock.Clock, capacity uint64) {
- bt.clock = clock
- bt.initTime, bt.lastUpdate = clock.Now(), clock.Now() // Init timestamps
- for i := range bt.callbackIndex {
- bt.callbackIndex[i] = -1
- }
- bt.capacity = capacity
-}
-
-// stop shuts down the balance tracker
-func (bt *balanceTracker) stop(now mclock.AbsTime) {
- bt.lock.Lock()
- defer bt.lock.Unlock()
-
- bt.stopped = true
- bt.addBalance(now)
- bt.negTimeFactor = 0
- bt.negRequestFactor = 0
- bt.timeFactor = 0
- bt.requestFactor = 0
- if bt.updateEvent != nil {
- bt.updateEvent.Stop()
- bt.updateEvent = nil
- }
-}
-
-// balanceToPriority converts a balance to a priority value. Higher priority means
-// first to disconnect. Positive balance translates to negative priority. If positive
-// balance is zero then negative balance translates to a positive priority.
-func (bt *balanceTracker) balanceToPriority(b balance) int64 {
- if b.pos > 0 {
- return ^int64(b.pos / bt.capacity)
- }
- return int64(b.neg)
-}
-
-// reducedBalance estimates the reduced balance at a given time in the fututre based
-// on the current balance, the time factor and an estimated average request cost per time ratio
-func (bt *balanceTracker) reducedBalance(at mclock.AbsTime, avgReqCost float64) balance {
- dt := float64(at - bt.lastUpdate)
- b := bt.balance
- if b.pos != 0 {
- factor := bt.timeFactor + bt.requestFactor*avgReqCost
- diff := uint64(dt * factor)
- if diff <= b.pos {
- b.pos -= diff
- dt = 0
- } else {
- dt -= float64(b.pos) / factor
- b.pos = 0
- }
- }
- if dt != 0 {
- factor := bt.negTimeFactor + bt.negRequestFactor*avgReqCost
- b.neg += uint64(dt * factor)
- }
- return b
-}
-
-// timeUntil calculates the remaining time needed to reach a given priority level
-// assuming that no requests are processed until then. If the given level is never
-// reached then (0, false) is returned.
-// Note: the function assumes that the balance has been recently updated and
-// calculates the time starting from the last update.
-func (bt *balanceTracker) timeUntil(priority int64) (time.Duration, bool) {
- var dt float64
- if bt.balance.pos != 0 {
- if bt.timeFactor < 1e-100 {
- return 0, false
- }
- if priority < 0 {
- newBalance := uint64(^priority) * bt.capacity
- if newBalance > bt.balance.pos {
- return 0, false
- }
- dt = float64(bt.balance.pos-newBalance) / bt.timeFactor
- return time.Duration(dt), true
- } else {
- dt = float64(bt.balance.pos) / bt.timeFactor
- }
- } else {
- if priority < 0 {
- return 0, false
- }
- }
- // if we have a positive balance then dt equals the time needed to get it to zero
- if uint64(priority) > bt.balance.neg {
- if bt.negTimeFactor < 1e-100 {
- return 0, false
- }
- dt += float64(uint64(priority)-bt.balance.neg) / bt.negTimeFactor
- }
- return time.Duration(dt), true
-}
-
-// setCapacity updates the capacity value used for priority calculation
-func (bt *balanceTracker) setCapacity(capacity uint64) {
- bt.lock.Lock()
- defer bt.lock.Unlock()
-
- bt.capacity = capacity
-}
-
-// getPriority returns the actual priority based on the current balance
-func (bt *balanceTracker) getPriority(now mclock.AbsTime) int64 {
- bt.lock.Lock()
- defer bt.lock.Unlock()
-
- bt.addBalance(now)
- return bt.balanceToPriority(bt.balance)
-}
-
-// estimatedPriority gives an upper estimate for the priority at a given time in the future.
-// If addReqCost is true then an average request cost per time is assumed that is twice the
-// average cost per time in the current session. If false, zero request cost is assumed.
-func (bt *balanceTracker) estimatedPriority(at mclock.AbsTime, addReqCost bool) int64 {
- bt.lock.Lock()
- defer bt.lock.Unlock()
-
- var avgReqCost float64
- if addReqCost {
- dt := time.Duration(bt.lastUpdate - bt.initTime)
- if dt > time.Second {
- avgReqCost = float64(bt.sumReqCost) * 2 / float64(dt)
- }
- }
- return bt.balanceToPriority(bt.reducedBalance(at, avgReqCost))
-}
-
-// addBalance updates balance based on the time factor
-func (bt *balanceTracker) addBalance(now mclock.AbsTime) {
- if now > bt.lastUpdate {
- bt.balance = bt.reducedBalance(now, 0)
- bt.lastUpdate = now
- }
-}
-
-// checkCallbacks checks whether the threshold of any of the active callbacks
-// have been reached and calls them if necessary. It also sets up or updates
-// a scheduled event to ensure that is will be called again just after the next
-// threshold has been reached.
-// Note: checkCallbacks assumes that the balance has been recently updated.
-func (bt *balanceTracker) checkCallbacks(now mclock.AbsTime) {
- if bt.callbackCount == 0 {
- return
- }
- pri := bt.balanceToPriority(bt.balance)
- for bt.callbackCount != 0 && bt.callbacks[bt.callbackCount-1].threshold <= pri {
- bt.callbackCount--
- bt.callbackIndex[bt.callbacks[bt.callbackCount].id] = -1
- go bt.callbacks[bt.callbackCount].callback()
- }
- if bt.callbackCount != 0 {
- d, ok := bt.timeUntil(bt.callbacks[bt.callbackCount-1].threshold)
- if !ok {
- bt.nextUpdate = 0
- bt.updateAfter(0)
- return
- }
- if bt.nextUpdate == 0 || bt.nextUpdate > now+mclock.AbsTime(d) {
- if d > time.Second {
- // Note: if the scheduled update is not in the very near future then we
- // schedule the update a bit earlier. This way we do need to update a few
- // extra times but don't need to reschedule every time a processed request
- // brings the expected firing time a little bit closer.
- d = ((d - time.Second) * 7 / 8) + time.Second
- }
- bt.nextUpdate = now + mclock.AbsTime(d)
- bt.updateAfter(d)
- }
- } else {
- bt.nextUpdate = 0
- bt.updateAfter(0)
- }
-}
-
-// updateAfter schedules a balance update and callback check in the future
-func (bt *balanceTracker) updateAfter(dt time.Duration) {
- if bt.updateEvent == nil || bt.updateEvent.Stop() {
- if dt == 0 {
- bt.updateEvent = nil
- } else {
- bt.updateEvent = bt.clock.AfterFunc(dt, func() {
- bt.lock.Lock()
- defer bt.lock.Unlock()
-
- if bt.callbackCount != 0 {
- now := bt.clock.Now()
- bt.addBalance(now)
- bt.checkCallbacks(now)
- }
- })
- }
- }
-}
-
-// requestCost should be called after serving a request for the given peer
-func (bt *balanceTracker) requestCost(cost uint64) {
- bt.lock.Lock()
- defer bt.lock.Unlock()
-
- if bt.stopped {
- return
- }
- now := bt.clock.Now()
- bt.addBalance(now)
- fcost := float64(cost)
-
- if bt.balance.pos != 0 {
- if bt.requestFactor != 0 {
- c := uint64(fcost * bt.requestFactor)
- if bt.balance.pos >= c {
- bt.balance.pos -= c
- fcost = 0
- } else {
- fcost *= 1 - float64(bt.balance.pos)/float64(c)
- bt.balance.pos = 0
- }
- bt.checkCallbacks(now)
- } else {
- fcost = 0
- }
- }
- if fcost > 0 {
- if bt.negRequestFactor != 0 {
- bt.balance.neg += uint64(fcost * bt.negRequestFactor)
- bt.checkCallbacks(now)
- }
- }
- bt.sumReqCost += cost
-}
-
-// getBalance returns the current positive and negative balance
-func (bt *balanceTracker) getBalance(now mclock.AbsTime) (uint64, uint64) {
- bt.lock.Lock()
- defer bt.lock.Unlock()
-
- bt.addBalance(now)
- return bt.balance.pos, bt.balance.neg
-}
-
-// setBalance sets the positive and negative balance to the given values
-func (bt *balanceTracker) setBalance(pos, neg uint64) error {
- bt.lock.Lock()
- defer bt.lock.Unlock()
-
- now := bt.clock.Now()
- bt.addBalance(now)
- bt.balance.pos = pos
- bt.balance.neg = neg
- bt.checkCallbacks(now)
- return nil
-}
-
-// setFactors sets the price factors. timeFactor is the price of a nanosecond of
-// connection while requestFactor is the price of a "realCost" unit.
-func (bt *balanceTracker) setFactors(neg bool, timeFactor, requestFactor float64) {
- bt.lock.Lock()
- defer bt.lock.Unlock()
-
- if bt.stopped {
- return
- }
- now := bt.clock.Now()
- bt.addBalance(now)
- if neg {
- bt.negTimeFactor = timeFactor
- bt.negRequestFactor = requestFactor
- } else {
- bt.timeFactor = timeFactor
- bt.requestFactor = requestFactor
- }
- bt.checkCallbacks(now)
-}
-
-// setCallback sets up a one-time callback to be called when priority reaches
-// the threshold. If it has already reached the threshold the callback is called
-// immediately.
-func (bt *balanceTracker) addCallback(id int, threshold int64, callback func()) {
- bt.lock.Lock()
- defer bt.lock.Unlock()
-
- bt.removeCb(id)
- idx := 0
- for idx < bt.callbackCount && threshold < bt.callbacks[idx].threshold {
- idx++
- }
- for i := bt.callbackCount - 1; i >= idx; i-- {
- bt.callbackIndex[bt.callbacks[i].id]++
- bt.callbacks[i+1] = bt.callbacks[i]
- }
- bt.callbackCount++
- bt.callbackIndex[id] = idx
- bt.callbacks[idx] = balanceCallback{id, threshold, callback}
- now := bt.clock.Now()
- bt.addBalance(now)
- bt.checkCallbacks(now)
-}
-
-// removeCallback removes the given callback and returns true if it was active
-func (bt *balanceTracker) removeCallback(id int) bool {
- bt.lock.Lock()
- defer bt.lock.Unlock()
-
- return bt.removeCb(id)
-}
-
-// removeCb removes the given callback and returns true if it was active
-// Note: should be called while bt.lock is held
-func (bt *balanceTracker) removeCb(id int) bool {
- idx := bt.callbackIndex[id]
- if idx == -1 {
- return false
- }
- bt.callbackIndex[id] = -1
- for i := idx; i < bt.callbackCount-1; i++ {
- bt.callbackIndex[bt.callbacks[i+1].id]--
- bt.callbacks[i] = bt.callbacks[i+1]
- }
- bt.callbackCount--
- return true
-}
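
Since the tests below still exercise the mapping, a worked restatement of the deleted `balanceToPriority` may help: positive balance is divided by capacity and bitwise-NOTed, so a larger balance yields a smaller (better) priority value, while pure negative balance maps directly to a positive (worse) one:

```go
// Restates the deleted balanceToPriority; the values in the comments are
// the same ones checked by TestBalanceToPriority below.
func balanceToPriority(pos, neg, capacity uint64) int64 {
	if pos > 0 {
		return ^int64(pos / capacity) // pos=1000, cap=1000 -> ^1 = -2; pos=2000 -> ^2 = -3
	}
	return int64(neg) // pos=0, neg=1000 -> 1000 (highest value, first to disconnect)
}
```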
diff --git a/les/balance_test.go b/les/balance_test.go
deleted file mode 100644
index b571c2cc5..000000000
--- a/les/balance_test.go
+++ /dev/null
@@ -1,260 +0,0 @@
-// Copyright 2019 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package les
-
-import (
- "testing"
- "time"
-
- "github.com/ethereum/go-ethereum/common/mclock"
-)
-
-func TestSetBalance(t *testing.T) {
- var clock = &mclock.Simulated{}
- var inputs = []struct {
- pos uint64
- neg uint64
- }{
- {1000, 0},
- {0, 1000},
- {1000, 1000},
- }
-
- tracker := balanceTracker{}
- tracker.init(clock, 1000)
- defer tracker.stop(clock.Now())
-
- for _, i := range inputs {
- tracker.setBalance(i.pos, i.neg)
- pos, neg := tracker.getBalance(clock.Now())
- if pos != i.pos {
- t.Fatalf("Positive balance mismatch, want %v, got %v", i.pos, pos)
- }
- if neg != i.neg {
- t.Fatalf("Negative balance mismatch, want %v, got %v", i.neg, neg)
- }
- }
-}
-
-func TestBalanceTimeCost(t *testing.T) {
- var (
- clock = &mclock.Simulated{}
- tracker = balanceTracker{}
- )
- tracker.init(clock, 1000)
- defer tracker.stop(clock.Now())
- tracker.setFactors(false, 1, 1)
- tracker.setFactors(true, 1, 1)
-
- tracker.setBalance(uint64(time.Minute), 0) // 1 minute time allowance
-
- var inputs = []struct {
- runTime time.Duration
- expPos uint64
- expNeg uint64
- }{
- {time.Second, uint64(time.Second * 59), 0},
- {0, uint64(time.Second * 59), 0},
- {time.Second * 59, 0, 0},
- {time.Second, 0, uint64(time.Second)},
- }
- for _, i := range inputs {
- clock.Run(i.runTime)
- if pos, _ := tracker.getBalance(clock.Now()); pos != i.expPos {
- t.Fatalf("Positive balance mismatch, want %v, got %v", i.expPos, pos)
- }
- if _, neg := tracker.getBalance(clock.Now()); neg != i.expNeg {
- t.Fatalf("Negative balance mismatch, want %v, got %v", i.expNeg, neg)
- }
- }
-
- tracker.setBalance(uint64(time.Minute), 0) // Refill 1 minute time allowance
- for _, i := range inputs {
- clock.Run(i.runTime)
- if pos, _ := tracker.getBalance(clock.Now()); pos != i.expPos {
- t.Fatalf("Positive balance mismatch, want %v, got %v", i.expPos, pos)
- }
- if _, neg := tracker.getBalance(clock.Now()); neg != i.expNeg {
- t.Fatalf("Negative balance mismatch, want %v, got %v", i.expNeg, neg)
- }
- }
-}
-
-func TestBalanceReqCost(t *testing.T) {
- var (
- clock = &mclock.Simulated{}
- tracker = balanceTracker{}
- )
- tracker.init(clock, 1000)
- defer tracker.stop(clock.Now())
- tracker.setFactors(false, 1, 1)
- tracker.setFactors(true, 1, 1)
-
- tracker.setBalance(uint64(time.Minute), 0) // 1 minute time serving time allowance
- var inputs = []struct {
- reqCost uint64
- expPos uint64
- expNeg uint64
- }{
- {uint64(time.Second), uint64(time.Second * 59), 0},
- {0, uint64(time.Second * 59), 0},
- {uint64(time.Second * 59), 0, 0},
- {uint64(time.Second), 0, uint64(time.Second)},
- }
- for _, i := range inputs {
- tracker.requestCost(i.reqCost)
- if pos, _ := tracker.getBalance(clock.Now()); pos != i.expPos {
- t.Fatalf("Positive balance mismatch, want %v, got %v", i.expPos, pos)
- }
- if _, neg := tracker.getBalance(clock.Now()); neg != i.expNeg {
- t.Fatalf("Negative balance mismatch, want %v, got %v", i.expNeg, neg)
- }
- }
-}
-
-func TestBalanceToPriority(t *testing.T) {
- var (
- clock = &mclock.Simulated{}
- tracker = balanceTracker{}
- )
- tracker.init(clock, 1000) // cap = 1000
- defer tracker.stop(clock.Now())
- tracker.setFactors(false, 1, 1)
- tracker.setFactors(true, 1, 1)
-
- var inputs = []struct {
- pos uint64
- neg uint64
- priority int64
- }{
- {1000, 0, ^int64(1)},
- {2000, 0, ^int64(2)}, // Higher balance, lower priority value
- {0, 0, 0},
- {0, 1000, 1000},
- }
- for _, i := range inputs {
- tracker.setBalance(i.pos, i.neg)
- priority := tracker.getPriority(clock.Now())
- if priority != i.priority {
- t.Fatalf("Priority mismatch, want %v, got %v", i.priority, priority)
- }
- }
-}
-
-func TestEstimatedPriority(t *testing.T) {
- var (
- clock = &mclock.Simulated{}
- tracker = balanceTracker{}
- )
- tracker.init(clock, 1000000000) // cap = 1000,000,000
- defer tracker.stop(clock.Now())
- tracker.setFactors(false, 1, 1)
- tracker.setFactors(true, 1, 1)
-
- tracker.setBalance(uint64(time.Minute), 0)
- var inputs = []struct {
- runTime time.Duration // time cost
- futureTime time.Duration // diff of future time
- reqCost uint64 // single request cost
- priority int64 // expected estimated priority
- }{
- {time.Second, time.Second, 0, ^int64(58)},
- {0, time.Second, 0, ^int64(58)},
-
- // 2 seconds time cost, 1 second estimated time cost, 10^9 request cost,
- // 10^9 estimated request cost per second.
- {time.Second, time.Second, 1000000000, ^int64(55)},
-
- // 3 seconds time cost, 3 second estimated time cost, 10^9*2 request cost,
- // 4*10^9 estimated request cost.
- {time.Second, 3 * time.Second, 1000000000, ^int64(48)},
-
- // All positive balance is used up
- {time.Second * 55, 0, 0, 0},
-
- // 1 minute estimated time cost, 4/58 * 10^9 estimated request cost per sec.
- {0, time.Minute, 0, int64(time.Minute) + int64(time.Second)*120/29},
- }
- for _, i := range inputs {
- clock.Run(i.runTime)
- tracker.requestCost(i.reqCost)
- priority := tracker.estimatedPriority(clock.Now()+mclock.AbsTime(i.futureTime), true)
- if priority != i.priority {
- t.Fatalf("Estimated priority mismatch, want %v, got %v", i.priority, priority)
- }
- }
-}
-
-func TestCallbackChecking(t *testing.T) {
- var (
- clock = &mclock.Simulated{}
- tracker = balanceTracker{}
- )
- tracker.init(clock, 1000000) // cap = 1000,000
- defer tracker.stop(clock.Now())
- tracker.setFactors(false, 1, 1)
- tracker.setFactors(true, 1, 1)
-
- var inputs = []struct {
- priority int64
- expDiff time.Duration
- }{
- {^int64(500), time.Millisecond * 500},
- {0, time.Second},
- {int64(time.Second), 2 * time.Second},
- }
- tracker.setBalance(uint64(time.Second), 0)
- for _, i := range inputs {
- diff, _ := tracker.timeUntil(i.priority)
- if diff != i.expDiff {
- t.Fatalf("Time difference mismatch, want %v, got %v", i.expDiff, diff)
- }
- }
-}
-
-func TestCallback(t *testing.T) {
- var (
- clock = &mclock.Simulated{}
- tracker = balanceTracker{}
- )
- tracker.init(clock, 1000) // cap = 1000
- defer tracker.stop(clock.Now())
- tracker.setFactors(false, 1, 1)
- tracker.setFactors(true, 1, 1)
-
- callCh := make(chan struct{}, 1)
- tracker.setBalance(uint64(time.Minute), 0)
- tracker.addCallback(balanceCallbackZero, 0, func() { callCh <- struct{}{} })
-
- clock.Run(time.Minute)
- select {
- case <-callCh:
- case <-time.NewTimer(time.Second).C:
- t.Fatalf("Callback hasn't been called yet")
- }
-
- tracker.setBalance(uint64(time.Minute), 0)
- tracker.addCallback(balanceCallbackZero, 0, func() { callCh <- struct{}{} })
- tracker.removeCallback(balanceCallbackZero)
-
- clock.Run(time.Minute)
- select {
- case <-callCh:
- t.Fatalf("Callback shouldn't be called")
- case <-time.NewTimer(time.Millisecond * 100).C:
- }
-}
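
The rewrite below replaces the bespoke tracker with the `lps` balance/priority pools driven by a `nodestate.NodeStateMachine`. As a hedged sketch of the resulting flow (the `demoPeer` type and all concrete numbers are assumptions; this would live inside the `les` package since the pool types are unexported):

```go
package les

import (
	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/ethdb"
	lps "github.com/ethereum/go-ethereum/les/lespay/server"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

// demoPeer is a minimal clientPoolPeer implementation for illustration.
type demoPeer struct {
	node *enode.Node
}

func (p *demoPeer) Node() *enode.Node              { return p.node }
func (p *demoPeer) freeClientId() string           { return "127.0.0.1" }
func (p *demoPeer) updateCapacity(capacity uint64) {}
func (p *demoPeer) freeze()                        {}
func (p *demoPeer) allowInactive() bool            { return false }

func clientPoolExample(db ethdb.Database, node *enode.Node) {
	pool := newClientPool(db, 100, defaultConnectedBias, mclock.System{}, func(id enode.ID) {
		log.Debug("Pool requested disconnection", "id", id)
	})
	defer pool.stop()

	pool.setLimits(10, 1000) // at most 10 active clients, 1000 total capacity
	pool.setDefaultFactors(
		lps.PriceFactors{TimeFactor: 1e-9}, // positive balance pricing
		lps.PriceFactors{TimeFactor: 1e-9}, // negative balance accumulation
	)

	peer := &demoPeer{node: node}
	if capacity, err := pool.connect(peer); err == nil && capacity > 0 {
		// The peer was activated with at least the minimal capacity; the
		// nodestate subscriptions wired up in newClientPool take care of
		// metering, capacity updates and eviction from here on.
		defer pool.disconnect(peer)
	}
}
```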
diff --git a/les/clientpool.go b/les/clientpool.go
index 9c4060fc2..4f6e3fafe 100644
--- a/les/clientpool.go
+++ b/les/clientpool.go
@@ -17,34 +17,48 @@
package les
import (
- "bytes"
- "encoding/binary"
"fmt"
- "io"
- "math"
+ "reflect"
"sync"
"time"
- "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/mclock"
- "github.com/ethereum/go-ethereum/common/prque"
"github.com/ethereum/go-ethereum/ethdb"
+ lps "github.com/ethereum/go-ethereum/les/lespay/server"
+ "github.com/ethereum/go-ethereum/les/utils"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/enode"
- "github.com/ethereum/go-ethereum/rlp"
- lru "github.com/hashicorp/golang-lru"
+ "github.com/ethereum/go-ethereum/p2p/enr"
+ "github.com/ethereum/go-ethereum/p2p/nodestate"
)
const (
- negBalanceExpTC = time.Hour // time constant for exponentially reducing negative balance
- fixedPointMultiplier = 0x1000000 // constant to convert logarithms to fixed point format
- lazyQueueRefresh = time.Second * 10 // refresh period of the connected queue
- persistCumulativeTimeRefresh = time.Minute * 5 // refresh period of the cumulative running time persistence
- posBalanceCacheLimit = 8192 // the maximum number of cached items in positive balance queue
- negBalanceCacheLimit = 8192 // the maximum number of cached items in negative balance queue
- defaultConnectedBias = time.Minute * 3 // the default connectedBias used in clientPool
+ defaultNegExpTC = 3600 // default time constant (in seconds) for exponentially reducing negative balance
+
+	// defaultConnectedBias is applied to already connected clients so that
+	// they are not kicked out too soon, ensuring all connected clients
+	// have enough time to request or sync some data.
+ //
+ // todo(rjl493456442) make it configurable. It can be the option of
+ // free trial time!
+ defaultConnectedBias = time.Minute * 3
+ inactiveTimeout = time.Second * 10
)
+var (
+ clientPoolSetup = &nodestate.Setup{}
+ clientField = clientPoolSetup.NewField("clientInfo", reflect.TypeOf(&clientInfo{}))
+ connAddressField = clientPoolSetup.NewField("connAddr", reflect.TypeOf(""))
+ balanceTrackerSetup = lps.NewBalanceTrackerSetup(clientPoolSetup)
+ priorityPoolSetup = lps.NewPriorityPoolSetup(clientPoolSetup)
+)
+
+func init() {
+ balanceTrackerSetup.Connect(connAddressField, priorityPoolSetup.CapacityField)
+ priorityPoolSetup.Connect(balanceTrackerSetup.BalanceField, balanceTrackerSetup.UpdateFlag) // NodeBalance implements nodePriority
+}
+
// clientPool implements a client database that assigns a priority to each client
// based on a positive and negative balance. Positive balance is externally assigned
// to prioritized clients and is decreased with connection time and processed
@@ -52,7 +66,7 @@ const (
// then negative balance is accumulated.
//
// Balance tracking and priority calculation for connected clients is done by
-// balanceTracker. connectedQueue ensures that clients with the lowest positive or
+// balanceTracker. activeQueue ensures that clients with the lowest positive or
// highest negative balance get evicted when the total capacity allowance is full
// and new clients with a better balance want to connect.
//
@@ -61,32 +75,24 @@ const (
// each client can have several minutes of connection time.
//
// Balances of disconnected clients are stored in nodeDB including positive balance
-// and negative banalce. Negative balance is transformed into a logarithmic form
-// with a constantly shifting linear offset in order to implement an exponential
-// decrease. Besides nodeDB will have a background thread to check the negative
-// balance of disconnected client. If the balance is low enough, then the record
-// will be dropped.
+// and negative balance. Both positive and negative balances will decrease
+// exponentially. If the balance is low enough, then the record will be dropped.
type clientPool struct {
- ndb *nodeDB
+ lps.BalanceTrackerSetup
+ lps.PriorityPoolSetup
lock sync.Mutex
clock mclock.Clock
- stopCh chan struct{}
closed bool
removePeer func(enode.ID)
+ ns *nodestate.NodeStateMachine
+ pp *lps.PriorityPool
+ bt *lps.BalanceTracker
- connectedMap map[enode.ID]*clientInfo
- connectedQueue *prque.LazyQueue
-
- defaultPosFactors, defaultNegFactors priceFactors
-
- connLimit int // The maximum number of connections that clientpool can support
- capLimit uint64 // The maximum cumulative capacity that clientpool can support
- connectedCap uint64 // The sum of the capacity of the current clientpool connected
- priorityConnected uint64 // The sum of the capacity of currently connected priority clients
- freeClientCap uint64 // The capacity value of each free client
- startTime mclock.AbsTime // The timestamp at which the clientpool started running
- cumulativeTime int64 // The cumulative running time of clientpool at the start point.
- connectedBias time.Duration // The connection bias. 0: Disable connection bias(used in testing)
+ defaultPosFactors, defaultNegFactors lps.PriceFactors
+ posExpTC, negExpTC uint64
+ minCap uint64 // The minimal capacity value allowed for any client
+ connectedBias time.Duration
+ capLimit uint64
}
// clientPoolPeer represents a client peer in the pool.
@@ -95,269 +101,162 @@ type clientPool struct {
// clients have a limited access to IP addresses while new node keys can be easily
// generated so it would be useless to assign a negative value to them.
type clientPoolPeer interface {
- ID() enode.ID
+ Node() *enode.Node
freeClientId() string
updateCapacity(uint64)
- freezeClient()
+ freeze()
+ allowInactive() bool
}
-// clientInfo represents a connected client
+// clientInfo defines all information required by clientpool.
type clientInfo struct {
- address string
- id enode.ID
- connectedAt mclock.AbsTime
- capacity uint64
- priority bool
- pool *clientPool
- peer clientPoolPeer
- queueIndex int // position in connectedQueue
- balanceTracker balanceTracker
- posFactors, negFactors priceFactors
- balanceMetaInfo string
-}
-
-// connSetIndex callback updates clientInfo item index in connectedQueue
-func connSetIndex(a interface{}, index int) {
- a.(*clientInfo).queueIndex = index
-}
-
-// connPriority callback returns actual priority of clientInfo item in connectedQueue
-func connPriority(a interface{}, now mclock.AbsTime) int64 {
- c := a.(*clientInfo)
- return c.balanceTracker.getPriority(now)
-}
-
-// connMaxPriority callback returns estimated maximum priority of clientInfo item in connectedQueue
-func connMaxPriority(a interface{}, until mclock.AbsTime) int64 {
- c := a.(*clientInfo)
- pri := c.balanceTracker.estimatedPriority(until, true)
- c.balanceTracker.addCallback(balanceCallbackQueue, pri+1, func() {
- c.pool.lock.Lock()
- if c.queueIndex != -1 {
- c.pool.connectedQueue.Update(c.queueIndex)
- }
- c.pool.lock.Unlock()
- })
- return pri
-}
-
-// priceFactors determine the pricing policy (may apply either to positive or
-// negative balances which may have different factors).
-// - timeFactor is cost unit per nanosecond of connection time
-// - capacityFactor is cost unit per nanosecond of connection time per 1000000 capacity
-// - requestFactor is cost unit per request "realCost" unit
-type priceFactors struct {
- timeFactor, capacityFactor, requestFactor float64
+ node *enode.Node
+ address string
+ peer clientPoolPeer
+ connected, priority bool
+ connectedAt mclock.AbsTime
+ balance *lps.NodeBalance
}
// newClientPool creates a new client pool
-func newClientPool(db ethdb.Database, freeClientCap uint64, clock mclock.Clock, removePeer func(enode.ID)) *clientPool {
- ndb := newNodeDB(db, clock)
+func newClientPool(lespayDb ethdb.Database, minCap uint64, connectedBias time.Duration, clock mclock.Clock, removePeer func(enode.ID)) *clientPool {
+ ns := nodestate.NewNodeStateMachine(nil, nil, clock, clientPoolSetup)
pool := &clientPool{
- ndb: ndb,
- clock: clock,
- connectedMap: make(map[enode.ID]*clientInfo),
- connectedQueue: prque.NewLazyQueue(connSetIndex, connPriority, connMaxPriority, clock, lazyQueueRefresh),
- freeClientCap: freeClientCap,
- removePeer: removePeer,
- startTime: clock.Now(),
- cumulativeTime: ndb.getCumulativeTime(),
- stopCh: make(chan struct{}),
- connectedBias: defaultConnectedBias,
+ ns: ns,
+ BalanceTrackerSetup: balanceTrackerSetup,
+ PriorityPoolSetup: priorityPoolSetup,
+ clock: clock,
+ minCap: minCap,
+ connectedBias: connectedBias,
+ removePeer: removePeer,
}
- // If the negative balance of free client is even lower than 1,
- // delete this entry.
- ndb.nbEvictCallBack = func(now mclock.AbsTime, b negBalance) bool {
- balance := math.Exp(float64(b.logValue-pool.logOffset(now)) / fixedPointMultiplier)
- return balance <= 1
- }
- go func() {
- for {
- select {
- case <-clock.After(lazyQueueRefresh):
- pool.lock.Lock()
- pool.connectedQueue.Refresh()
- pool.lock.Unlock()
- case <-clock.After(persistCumulativeTimeRefresh):
- pool.ndb.setCumulativeTime(pool.logOffset(clock.Now()))
- case <-pool.stopCh:
- return
+ pool.bt = lps.NewBalanceTracker(ns, balanceTrackerSetup, lespayDb, clock, &utils.Expirer{}, &utils.Expirer{})
+ pool.pp = lps.NewPriorityPool(ns, priorityPoolSetup, clock, minCap, connectedBias, 4)
+
+ // set default expiration constants used by tests
+ // Note: server overwrites this if token sale is active
+ pool.bt.SetExpirationTCs(0, defaultNegExpTC)
+
+ ns.SubscribeState(pool.InactiveFlag.Or(pool.PriorityFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
+ if newState.Equals(pool.InactiveFlag) {
+ ns.AddTimeout(node, pool.InactiveFlag, inactiveTimeout)
+ }
+ if oldState.Equals(pool.InactiveFlag) && newState.Equals(pool.InactiveFlag.Or(pool.PriorityFlag)) {
+ ns.SetStateSub(node, pool.InactiveFlag, nodestate.Flags{}, 0) // remove timeout
+ }
+ })
+
+ ns.SubscribeState(pool.ActiveFlag.Or(pool.PriorityFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
+ c, _ := ns.GetField(node, clientField).(*clientInfo)
+ if c == nil {
+ return
+ }
+ c.priority = newState.HasAll(pool.PriorityFlag)
+ if newState.Equals(pool.ActiveFlag) {
+ cap, _ := ns.GetField(node, pool.CapacityField).(uint64)
+ if cap > minCap {
+ pool.pp.RequestCapacity(node, minCap, 0, true)
}
}
- }()
+ })
+
+ ns.SubscribeState(pool.InactiveFlag.Or(pool.ActiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
+ if oldState.IsEmpty() {
+ clientConnectedMeter.Mark(1)
+ log.Debug("Client connected", "id", node.ID())
+ }
+ if oldState.Equals(pool.InactiveFlag) && newState.Equals(pool.ActiveFlag) {
+ clientActivatedMeter.Mark(1)
+ log.Debug("Client activated", "id", node.ID())
+ }
+ if oldState.Equals(pool.ActiveFlag) && newState.Equals(pool.InactiveFlag) {
+ clientDeactivatedMeter.Mark(1)
+ log.Debug("Client deactivated", "id", node.ID())
+ c, _ := ns.GetField(node, clientField).(*clientInfo)
+ if c == nil || !c.peer.allowInactive() {
+ pool.removePeer(node.ID())
+ }
+ }
+ if newState.IsEmpty() {
+ clientDisconnectedMeter.Mark(1)
+ log.Debug("Client disconnected", "id", node.ID())
+ pool.removePeer(node.ID())
+ }
+ })
+
+ var totalConnected uint64
+ ns.SubscribeField(pool.CapacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {
+ oldCap, _ := oldValue.(uint64)
+ newCap, _ := newValue.(uint64)
+ totalConnected += newCap - oldCap
+ totalConnectedGauge.Update(int64(totalConnected))
+ c, _ := ns.GetField(node, clientField).(*clientInfo)
+ if c != nil {
+ c.peer.updateCapacity(newCap)
+ }
+ })
+
+ ns.Start()
return pool
}
// stop shuts the client pool down
func (f *clientPool) stop() {
- close(f.stopCh)
f.lock.Lock()
f.closed = true
f.lock.Unlock()
- f.ndb.setCumulativeTime(f.logOffset(f.clock.Now()))
- f.ndb.close()
+ f.ns.ForEach(nodestate.Flags{}, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) {
+ // enforces saving all balances in BalanceTracker
+ f.disconnectNode(node)
+ })
+ f.bt.Stop()
+ f.ns.Stop()
}
// connect should be called after a successful handshake. If the connection was
// rejected, there is no need to call disconnect.
-func (f *clientPool) connect(peer clientPoolPeer, capacity uint64) bool {
+func (f *clientPool) connect(peer clientPoolPeer) (uint64, error) {
f.lock.Lock()
defer f.lock.Unlock()
// Short circuit if clientPool is already closed.
if f.closed {
- return false
+ return 0, fmt.Errorf("Client pool is already closed")
}
// Dedup connected peers.
- id, freeID := peer.ID(), peer.freeClientId()
- if _, ok := f.connectedMap[id]; ok {
- clientRejectedMeter.Mark(1)
- log.Debug("Client already connected", "address", freeID, "id", id.String())
- return false
+ node, freeID := peer.Node(), peer.freeClientId()
+ if f.ns.GetField(node, clientField) != nil {
+ log.Debug("Client already connected", "address", freeID, "id", node.ID().String())
+ return 0, fmt.Errorf("Client already connected address=%s id=%s", freeID, node.ID().String())
}
- // Create a clientInfo but do not add it yet
- var (
- posBalance uint64
- negBalance uint64
- now = f.clock.Now()
- )
- pb := f.ndb.getOrNewPB(id)
- posBalance = pb.value
-
- nb := f.ndb.getOrNewNB(freeID)
- if nb.logValue != 0 {
- negBalance = uint64(math.Exp(float64(nb.logValue-f.logOffset(now))/fixedPointMultiplier) * float64(time.Second))
+ now := f.clock.Now()
+ c := &clientInfo{
+ node: node,
+ address: freeID,
+ peer: peer,
+ connected: true,
+ connectedAt: now,
}
- e := &clientInfo{
- pool: f,
- peer: peer,
- address: freeID,
- queueIndex: -1,
- id: id,
- connectedAt: now,
- priority: posBalance != 0,
- posFactors: f.defaultPosFactors,
- negFactors: f.defaultNegFactors,
- balanceMetaInfo: pb.meta,
+ f.ns.SetField(node, clientField, c)
+ f.ns.SetField(node, connAddressField, freeID)
+ if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*lps.NodeBalance); c.balance == nil {
+ f.disconnect(peer)
+ return 0, nil
}
- // If the client is a free client, assign with a low free capacity,
- // Otherwise assign with the given value(priority client)
- if !e.priority || capacity == 0 {
- capacity = f.freeClientCap
+ c.balance.SetPriceFactors(f.defaultPosFactors, f.defaultNegFactors)
+
+ f.ns.SetState(node, f.InactiveFlag, nodestate.Flags{}, 0)
+ var allowed bool
+ f.ns.Operation(func() {
+ _, allowed = f.pp.RequestCapacity(node, f.minCap, f.connectedBias, true)
+ })
+ if allowed {
+ return f.minCap, nil
}
- e.capacity = capacity
-
- // Starts a balance tracker
- e.balanceTracker.init(f.clock, capacity)
- e.balanceTracker.setBalance(posBalance, negBalance)
- e.updatePriceFactors()
-
- // If the number of clients already connected in the clientpool exceeds its
- // capacity, evict some clients with lowest priority.
- //
- // If the priority of the newly added client is lower than the priority of
- // all connected clients, the client is rejected.
- newCapacity := f.connectedCap + capacity
- newCount := f.connectedQueue.Size() + 1
- if newCapacity > f.capLimit || newCount > f.connLimit {
- var (
- kickList []*clientInfo
- kickPriority int64
- )
- f.connectedQueue.MultiPop(func(data interface{}, priority int64) bool {
- c := data.(*clientInfo)
- kickList = append(kickList, c)
- kickPriority = priority
- newCapacity -= c.capacity
- newCount--
- return newCapacity > f.capLimit || newCount > f.connLimit
- })
- if newCapacity > f.capLimit || newCount > f.connLimit || (e.balanceTracker.estimatedPriority(now+mclock.AbsTime(f.connectedBias), false)-kickPriority) > 0 {
- for _, c := range kickList {
- f.connectedQueue.Push(c)
- }
- clientRejectedMeter.Mark(1)
- log.Debug("Client rejected", "address", freeID, "id", id.String())
- return false
- }
- // accept new client, drop old ones
- for _, c := range kickList {
- f.dropClient(c, now, true)
- }
+ if !peer.allowInactive() {
+ f.disconnect(peer)
}
-
- // Register new client to connection queue.
- f.connectedMap[id] = e
- f.connectedQueue.Push(e)
- f.connectedCap += e.capacity
-
- // If the current client is a paid client, monitor the status of client,
- // downgrade it to normal client if positive balance is used up.
- if e.priority {
- f.priorityConnected += capacity
- e.balanceTracker.addCallback(balanceCallbackZero, 0, func() { f.balanceExhausted(id) })
- }
- // If the capacity of client is not the default value(free capacity), notify
- // it to update capacity.
- if e.capacity != f.freeClientCap {
- e.peer.updateCapacity(e.capacity)
- }
- totalConnectedGauge.Update(int64(f.connectedCap))
- clientConnectedMeter.Mark(1)
- log.Debug("Client accepted", "address", freeID)
- return true
-}
-
-// disconnect should be called when a connection is terminated. If the disconnection
-// was initiated by the pool itself using disconnectFn then calling disconnect is
-// not necessary but permitted.
-func (f *clientPool) disconnect(p clientPoolPeer) {
- f.lock.Lock()
- defer f.lock.Unlock()
-
- // Short circuit if client pool is already closed.
- if f.closed {
- return
- }
- // Short circuit if the peer hasn't been registered.
- e := f.connectedMap[p.ID()]
- if e == nil {
- log.Debug("Client not connected", "address", p.freeClientId(), "id", p.ID().String())
- return
- }
- f.dropClient(e, f.clock.Now(), false)
-}
-
-// forClients iterates through a list of clients, calling the callback for each one.
-// If a client is not connected then clientInfo is nil. If the specified list is empty
-// then the callback is called for all connected clients.
-func (f *clientPool) forClients(ids []enode.ID, callback func(*clientInfo, enode.ID) error) error {
- f.lock.Lock()
- defer f.lock.Unlock()
-
- if len(ids) > 0 {
- for _, id := range ids {
- if err := callback(f.connectedMap[id], id); err != nil {
- return err
- }
- }
- } else {
- for _, c := range f.connectedMap {
- if err := callback(c, c.id); err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-// setDefaultFactors sets the default price factors applied to subsequently connected clients
-func (f *clientPool) setDefaultFactors(posFactors, negFactors priceFactors) {
- f.lock.Lock()
- defer f.lock.Unlock()
-
- f.defaultPosFactors = posFactors
- f.defaultNegFactors = negFactors
+ return 0, nil
}
// setConnectedBias sets the connection bias, which is applied to already connected clients
@@ -368,30 +267,29 @@ func (f *clientPool) setConnectedBias(bias time.Duration) {
defer f.lock.Unlock()
f.connectedBias = bias
+ f.pp.SetActiveBias(bias)
}
-// dropClient removes a client from the connected queue and finalizes its balance.
-// If kick is true then it also initiates the disconnection.
-func (f *clientPool) dropClient(e *clientInfo, now mclock.AbsTime, kick bool) {
- if _, ok := f.connectedMap[e.id]; !ok {
- return
- }
- f.finalizeBalance(e, now)
- f.connectedQueue.Remove(e.queueIndex)
- delete(f.connectedMap, e.id)
- f.connectedCap -= e.capacity
- if e.priority {
- f.priorityConnected -= e.capacity
- }
- totalConnectedGauge.Update(int64(f.connectedCap))
- if kick {
- clientKickedMeter.Mark(1)
- log.Debug("Client kicked out", "address", e.address)
- f.removePeer(e.id)
- } else {
- clientDisconnectedMeter.Mark(1)
- log.Debug("Client disconnected", "address", e.address)
- }
+// disconnect should be called when a connection is terminated. If the disconnection
+// was initiated by the pool itself using disconnectFn then calling disconnect is
+// not necessary but permitted.
+func (f *clientPool) disconnect(p clientPoolPeer) {
+ f.disconnectNode(p.Node())
+}
+
+// disconnectNode removes node fields and flags related to connected status
+func (f *clientPool) disconnectNode(node *enode.Node) {
+ f.ns.SetField(node, connAddressField, nil)
+ f.ns.SetField(node, clientField, nil)
+}
+
+// setDefaultFactors sets the default price factors applied to subsequently connected clients
+func (f *clientPool) setDefaultFactors(posFactors, negFactors lps.PriceFactors) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ f.defaultPosFactors = posFactors
+ f.defaultNegFactors = negFactors
}
// capacityInfo returns the total capacity allowance, the total capacity of connected
@@ -400,472 +298,104 @@ func (f *clientPool) capacityInfo() (uint64, uint64, uint64) {
f.lock.Lock()
defer f.lock.Unlock()
- return f.capLimit, f.connectedCap, f.priorityConnected
+ // total priority active cap will be supported when the token issuer module is added
+ return f.capLimit, f.pp.ActiveCapacity(), 0
}
-// finalizeBalance stops the balance tracker, retrieves the final balances and
-// stores them in posBalanceQueue and negBalanceQueue
-func (f *clientPool) finalizeBalance(c *clientInfo, now mclock.AbsTime) {
- c.balanceTracker.stop(now)
- pos, neg := c.balanceTracker.getBalance(now)
-
- pb, nb := f.ndb.getOrNewPB(c.id), f.ndb.getOrNewNB(c.address)
- pb.value = pos
- f.ndb.setPB(c.id, pb)
-
- neg /= uint64(time.Second) // Convert the expanse to second level.
- if neg > 1 {
- nb.logValue = int64(math.Log(float64(neg))*fixedPointMultiplier) + f.logOffset(now)
- f.ndb.setNB(c.address, nb)
- } else {
- f.ndb.delNB(c.address) // Negative balance is small enough, drop it directly.
- }
-}
-
-// balanceExhausted callback is called by balanceTracker when positive balance is exhausted.
-// It revokes priority status and also reduces the client capacity if necessary.
-func (f *clientPool) balanceExhausted(id enode.ID) {
- f.lock.Lock()
- defer f.lock.Unlock()
-
- c := f.connectedMap[id]
- if c == nil || !c.priority {
- return
- }
- if c.priority {
- f.priorityConnected -= c.capacity
- }
- c.priority = false
- if c.capacity != f.freeClientCap {
- f.connectedCap += f.freeClientCap - c.capacity
- totalConnectedGauge.Update(int64(f.connectedCap))
- c.capacity = f.freeClientCap
- c.balanceTracker.setCapacity(c.capacity)
- c.peer.updateCapacity(c.capacity)
- }
- pb := f.ndb.getOrNewPB(id)
- pb.value = 0
- f.ndb.setPB(id, pb)
-}
-
-// setConnLimit sets the maximum number and total capacity of connected clients,
+// setLimits sets the maximum number and total capacity of connected clients,
// dropping some of them if necessary.
func (f *clientPool) setLimits(totalConn int, totalCap uint64) {
f.lock.Lock()
defer f.lock.Unlock()
- f.connLimit = totalConn
f.capLimit = totalCap
- if f.connectedCap > f.capLimit || f.connectedQueue.Size() > f.connLimit {
- f.connectedQueue.MultiPop(func(data interface{}, priority int64) bool {
- f.dropClient(data.(*clientInfo), mclock.Now(), true)
- return f.connectedCap > f.capLimit || f.connectedQueue.Size() > f.connLimit
- })
- }
+ f.pp.SetLimits(uint64(totalConn), totalCap)
}
// setCapacity sets the assigned capacity of a connected client
-func (f *clientPool) setCapacity(c *clientInfo, capacity uint64) error {
- if f.connectedMap[c.id] != c {
- return fmt.Errorf("client %064x is not connected", c.id[:])
- }
- if c.capacity == capacity {
- return nil
- }
- if !c.priority {
- return errNoPriority
- }
- oldCapacity := c.capacity
- c.capacity = capacity
- f.connectedCap += capacity - oldCapacity
- c.balanceTracker.setCapacity(capacity)
- f.connectedQueue.Update(c.queueIndex)
- if f.connectedCap > f.capLimit {
- var kickList []*clientInfo
- kick := true
- f.connectedQueue.MultiPop(func(data interface{}, priority int64) bool {
- client := data.(*clientInfo)
- kickList = append(kickList, client)
- f.connectedCap -= client.capacity
- if client == c {
- kick = false
- }
- return kick && (f.connectedCap > f.capLimit)
- })
- if kick {
- now := mclock.Now()
- for _, c := range kickList {
- f.dropClient(c, now, true)
- }
- } else {
- c.capacity = oldCapacity
- c.balanceTracker.setCapacity(oldCapacity)
- for _, c := range kickList {
- f.connectedCap += c.capacity
- f.connectedQueue.Push(c)
- }
- return errNoPriority
+func (f *clientPool) setCapacity(node *enode.Node, freeID string, capacity uint64, bias time.Duration, setCap bool) (uint64, error) {
+ c, _ := f.ns.GetField(node, clientField).(*clientInfo)
+ if c == nil {
+ if setCap {
+ return 0, fmt.Errorf("client %064x is not connected", node.ID())
}
- }
- totalConnectedGauge.Update(int64(f.connectedCap))
- f.priorityConnected += capacity - oldCapacity
- c.updatePriceFactors()
- c.peer.updateCapacity(c.capacity)
- return nil
-}
-
-// requestCost feeds request cost after serving a request from the given peer.
-func (f *clientPool) requestCost(p *clientPeer, cost uint64) {
- f.lock.Lock()
- defer f.lock.Unlock()
-
- info, exist := f.connectedMap[p.ID()]
- if !exist || f.closed {
- return
- }
- info.balanceTracker.requestCost(cost)
-}
-
-// logOffset calculates the time-dependent offset for the logarithmic
-// representation of negative balance
-//
-// From another point of view, the result returned by the function represents
-// the total time that the clientpool is cumulatively running(total_hours/multiplier).
-func (f *clientPool) logOffset(now mclock.AbsTime) int64 {
- // Note: fixedPointMultiplier acts as a multiplier here; the reason for dividing the divisor
- // is to avoid int64 overflow. We assume that int64(negBalanceExpTC) >> fixedPointMultiplier.
- cumulativeTime := int64((time.Duration(now - f.startTime)) / (negBalanceExpTC / fixedPointMultiplier))
- return f.cumulativeTime + cumulativeTime
-}
-
-// setClientPriceFactors sets the pricing factors for an individual connected client
-func (c *clientInfo) updatePriceFactors() {
- c.balanceTracker.setFactors(true, c.negFactors.timeFactor+float64(c.capacity)*c.negFactors.capacityFactor/1000000, c.negFactors.requestFactor)
- c.balanceTracker.setFactors(false, c.posFactors.timeFactor+float64(c.capacity)*c.posFactors.capacityFactor/1000000, c.posFactors.requestFactor)
-}
-
-// getPosBalance retrieves a single positive balance entry from cache or the database
-func (f *clientPool) getPosBalance(id enode.ID) posBalance {
- f.lock.Lock()
- defer f.lock.Unlock()
-
- return f.ndb.getOrNewPB(id)
-}
-
-// addBalance updates the balance of a client (either overwrites it or adds to it).
-// It also updates the balance meta info string.
-func (f *clientPool) addBalance(id enode.ID, amount int64, meta string) (uint64, uint64, error) {
- f.lock.Lock()
- defer f.lock.Unlock()
-
- pb := f.ndb.getOrNewPB(id)
- var negBalance uint64
- c := f.connectedMap[id]
- if c != nil {
- pb.value, negBalance = c.balanceTracker.getBalance(f.clock.Now())
- }
- oldBalance := pb.value
- if amount > 0 {
- if amount > maxBalance || pb.value > maxBalance-uint64(amount) {
- return oldBalance, oldBalance, errBalanceOverflow
- }
- pb.value += uint64(amount)
- } else {
- if uint64(-amount) > pb.value {
- pb.value = 0
- } else {
- pb.value -= uint64(-amount)
+ c = &clientInfo{node: node}
+ f.ns.SetField(node, clientField, c)
+ f.ns.SetField(node, connAddressField, freeID)
+ if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*lps.NodeBalance); c.balance == nil {
+ log.Error("BalanceField is missing", "node", node.ID())
+ return 0, fmt.Errorf("BalanceField of %064x is missing", node.ID())
}
+ defer func() {
+ f.ns.SetField(node, connAddressField, nil)
+ f.ns.SetField(node, clientField, nil)
+ }()
}
- pb.meta = meta
- f.ndb.setPB(id, pb)
- if c != nil {
- c.balanceTracker.setBalance(pb.value, negBalance)
- if !c.priority && pb.value > 0 {
- // The capacity should be adjusted based on the requirement,
- // but we have no idea about the new capacity, need a second
- // call to update it.
- c.priority = true
- f.priorityConnected += c.capacity
- c.balanceTracker.addCallback(balanceCallbackZero, 0, func() { f.balanceExhausted(id) })
- }
- // if balance is set to zero then reverting to non-priority status
- // is handled by the balanceExhausted callback
- c.balanceMetaInfo = meta
- }
- return oldBalance, pb.value, nil
-}
-
-// posBalance represents a recently accessed positive balance entry
-type posBalance struct {
- value uint64
- meta string
-}
-
-// EncodeRLP implements rlp.Encoder
-func (e *posBalance) EncodeRLP(w io.Writer) error {
- return rlp.Encode(w, []interface{}{e.value, e.meta})
-}
-
-// DecodeRLP implements rlp.Decoder
-func (e *posBalance) DecodeRLP(s *rlp.Stream) error {
- var entry struct {
- Value uint64
- Meta string
- }
- if err := s.Decode(&entry); err != nil {
- return err
- }
- e.value = entry.Value
- e.meta = entry.Meta
- return nil
-}
-
-// negBalance represents a negative balance entry of a disconnected client
-type negBalance struct{ logValue int64 }
-
-// EncodeRLP implements rlp.Encoder
-func (e *negBalance) EncodeRLP(w io.Writer) error {
- return rlp.Encode(w, []interface{}{uint64(e.logValue)})
-}
-
-// DecodeRLP implements rlp.Decoder
-func (e *negBalance) DecodeRLP(s *rlp.Stream) error {
- var entry struct {
- LogValue uint64
- }
- if err := s.Decode(&entry); err != nil {
- return err
- }
- e.logValue = int64(entry.LogValue)
- return nil
-}
-
-const (
- // nodeDBVersion is the version identifier of the node data in db
- //
- // Changelog:
- // * Replace `lastTotal` with `meta` in positive balance: version 0=>1
- nodeDBVersion = 1
-
- // dbCleanupCycle is the cycle of db for useless data cleanup
- dbCleanupCycle = time.Hour
-)
-
-var (
- positiveBalancePrefix = []byte("pb:") // dbVersion(uint16 big endian) + positiveBalancePrefix + id -> balance
- negativeBalancePrefix = []byte("nb:") // dbVersion(uint16 big endian) + negativeBalancePrefix + ip -> balance
- cumulativeRunningTimeKey = []byte("cumulativeTime:") // dbVersion(uint16 big endian) + cumulativeRunningTimeKey -> cumulativeTime
-)
-
-type nodeDB struct {
- db ethdb.Database
- pcache *lru.Cache
- ncache *lru.Cache
- auxbuf []byte // 37-byte auxiliary buffer for key encoding
- verbuf [2]byte // 2-byte auxiliary buffer for db version
- nbEvictCallBack func(mclock.AbsTime, negBalance) bool // Callback to determine whether the negative balance can be evicted.
- clock mclock.Clock
- closeCh chan struct{}
- cleanupHook func() // Test hook used for testing
-}
-
-func newNodeDB(db ethdb.Database, clock mclock.Clock) *nodeDB {
- pcache, _ := lru.New(posBalanceCacheLimit)
- ncache, _ := lru.New(negBalanceCacheLimit)
- ndb := &nodeDB{
- db: db,
- pcache: pcache,
- ncache: ncache,
- auxbuf: make([]byte, 37),
- clock: clock,
- closeCh: make(chan struct{}),
- }
- binary.BigEndian.PutUint16(ndb.verbuf[:], uint16(nodeDBVersion))
- go ndb.expirer()
- return ndb
-}
-
-func (db *nodeDB) close() {
- close(db.closeCh)
-}
-
-func (db *nodeDB) getPrefix(neg bool) []byte {
- prefix := positiveBalancePrefix
- if neg {
- prefix = negativeBalancePrefix
- }
- return append(db.verbuf[:], prefix...)
-}
-
-func (db *nodeDB) key(id []byte, neg bool) []byte {
- prefix := positiveBalancePrefix
- if neg {
- prefix = negativeBalancePrefix
- }
- if len(prefix)+len(db.verbuf)+len(id) > len(db.auxbuf) {
- db.auxbuf = append(db.auxbuf, make([]byte, len(prefix)+len(db.verbuf)+len(id)-len(db.auxbuf))...)
- }
- copy(db.auxbuf[:len(db.verbuf)], db.verbuf[:])
- copy(db.auxbuf[len(db.verbuf):len(db.verbuf)+len(prefix)], prefix)
- copy(db.auxbuf[len(prefix)+len(db.verbuf):len(prefix)+len(db.verbuf)+len(id)], id)
- return db.auxbuf[:len(prefix)+len(db.verbuf)+len(id)]
-}
-
-func (db *nodeDB) getCumulativeTime() int64 {
- blob, err := db.db.Get(append(cumulativeRunningTimeKey, db.verbuf[:]...))
- if err != nil || len(blob) == 0 {
- return 0
- }
- return int64(binary.BigEndian.Uint64(blob))
-}
-
-func (db *nodeDB) setCumulativeTime(v int64) {
- binary.BigEndian.PutUint64(db.auxbuf[:8], uint64(v))
- db.db.Put(append(cumulativeRunningTimeKey, db.verbuf[:]...), db.auxbuf[:8])
-}
-
-func (db *nodeDB) getOrNewPB(id enode.ID) posBalance {
- key := db.key(id.Bytes(), false)
- item, exist := db.pcache.Get(string(key))
- if exist {
- return item.(posBalance)
- }
- var balance posBalance
- if enc, err := db.db.Get(key); err == nil {
- if err := rlp.DecodeBytes(enc, &balance); err != nil {
- log.Error("Failed to decode positive balance", "err", err)
- }
- }
- db.pcache.Add(string(key), balance)
- return balance
-}
-
-func (db *nodeDB) setPB(id enode.ID, b posBalance) {
- if b.value == 0 && len(b.meta) == 0 {
- db.delPB(id)
- return
- }
- key := db.key(id.Bytes(), false)
- enc, err := rlp.EncodeToBytes(&(b))
- if err != nil {
- log.Error("Failed to encode positive balance", "err", err)
- return
- }
- db.db.Put(key, enc)
- db.pcache.Add(string(key), b)
-}
-
-func (db *nodeDB) delPB(id enode.ID) {
- key := db.key(id.Bytes(), false)
- db.db.Delete(key)
- db.pcache.Remove(string(key))
-}
-
-// getPosBalanceIDs returns a lexicographically ordered list of IDs of accounts
-// with a positive balance
-func (db *nodeDB) getPosBalanceIDs(start, stop enode.ID, maxCount int) (result []enode.ID) {
- if maxCount <= 0 {
- return
- }
- prefix := db.getPrefix(false)
- it := db.db.NewIterator(prefix, start.Bytes())
- defer it.Release()
- for i := len(stop[:]) - 1; i >= 0; i-- {
- stop[i]--
- if stop[i] != 255 {
- break
- }
- }
- stopKey := db.key(stop.Bytes(), false)
- keyLen := len(stopKey)
-
- for it.Next() {
- var id enode.ID
- if len(it.Key()) != keyLen || bytes.Compare(it.Key(), stopKey) == 1 {
- return
- }
- copy(id[:], it.Key()[keyLen-len(id):])
- result = append(result, id)
- if len(result) == maxCount {
- return
- }
- }
- return
-}
-
-func (db *nodeDB) getOrNewNB(id string) negBalance {
- key := db.key([]byte(id), true)
- item, exist := db.ncache.Get(string(key))
- if exist {
- return item.(negBalance)
- }
- var balance negBalance
- if enc, err := db.db.Get(key); err == nil {
- if err := rlp.DecodeBytes(enc, &balance); err != nil {
- log.Error("Failed to decode negative balance", "err", err)
- }
- }
- db.ncache.Add(string(key), balance)
- return balance
-}
-
-func (db *nodeDB) setNB(id string, b negBalance) {
- key := db.key([]byte(id), true)
- enc, err := rlp.EncodeToBytes(&(b))
- if err != nil {
- log.Error("Failed to encode negative balance", "err", err)
- return
- }
- db.db.Put(key, enc)
- db.ncache.Add(string(key), b)
-}
-
-func (db *nodeDB) delNB(id string) {
- key := db.key([]byte(id), true)
- db.db.Delete(key)
- db.ncache.Remove(string(key))
-}
-
-func (db *nodeDB) expirer() {
- for {
- select {
- case <-db.clock.After(dbCleanupCycle):
- db.expireNodes()
- case <-db.closeCh:
- return
- }
- }
-}
-
-// expireNodes iterates the whole node db and checks whether the negative balance
-// entry can deleted.
-//
-// The rationale behind this is: server doesn't need to keep the negative balance
-// records if they are low enough.
-func (db *nodeDB) expireNodes() {
var (
- visited int
- deleted int
- start = time.Now()
- prefix = db.getPrefix(true)
+ minPriority int64
+ allowed bool
)
- iter := db.db.NewIterator(prefix, nil)
- for iter.Next() {
- visited += 1
- var balance negBalance
- if err := rlp.DecodeBytes(iter.Value(), &balance); err != nil {
- log.Error("Failed to decode negative balance", "err", err)
- continue
- }
- if db.nbEvictCallBack != nil && db.nbEvictCallBack(db.clock.Now(), balance) {
- deleted += 1
- db.db.Delete(iter.Key())
+ f.ns.Operation(func() {
+ if !setCap || c.priority {
+ // check clientInfo.priority inside Operation to ensure thread safety
+ minPriority, allowed = f.pp.RequestCapacity(node, capacity, bias, setCap)
}
+ })
+ if allowed {
+ return 0, nil
}
- // Invoke testing hook if it's not nil.
- if db.cleanupHook != nil {
- db.cleanupHook()
+ missing := c.balance.PosBalanceMissing(minPriority, capacity, bias)
+ if missing < 1 {
+ // ensure that we never return a zero missing amount together with an insufficient priority error
+ missing = 1
+ }
+ return missing, errNoPriority
+}
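setCapacity now doubles as a price oracle: with setCap=false it only computes what a capacity change would cost, with setCap=true it applies it. A hedged usage sketch (the capacity value and freeID are illustrative):

	// Quote first: how much balance is missing for capacity 4?
	missing, err := pool.setCapacity(node, freeID, 4, defaultConnectedBias, false)
	if err == errNoPriority {
		log.Debug("Capacity 4 not yet affordable", "missingBalance", missing)
	} else if err == nil {
		// Sufficient priority: apply the change for real.
		if _, err := pool.setCapacity(node, freeID, 4, defaultConnectedBias, true); err != nil {
			log.Warn("Capacity update failed", "err", err)
		}
	}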
+
+// setCapacityLocked is the locking wrapper around setCapacity; it acquires f.lock before delegating
+func (f *clientPool) setCapacityLocked(node *enode.Node, freeID string, capacity uint64, minConnTime time.Duration, setCap bool) (uint64, error) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ return f.setCapacity(node, freeID, capacity, minConnTime, setCap)
+}
+
+// forClients calls the supplied callback for either the listed node IDs or all connected
+// nodes. It passes a valid clientInfo to the callback and ensures that the necessary
+// fields and flags are set in order for BalanceTracker and PriorityPool to work even if
+// the node is not connected.
+func (f *clientPool) forClients(ids []enode.ID, cb func(client *clientInfo)) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if len(ids) == 0 {
+ f.ns.ForEach(nodestate.Flags{}, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) {
+ c, _ := f.ns.GetField(node, clientField).(*clientInfo)
+ if c != nil {
+ cb(c)
+ }
+ })
+ } else {
+ for _, id := range ids {
+ node := f.ns.GetNode(id)
+ if node == nil {
+ node = enode.SignNull(&enr.Record{}, id)
+ }
+ c, _ := f.ns.GetField(node, clientField).(*clientInfo)
+ if c != nil {
+ cb(c)
+ } else {
+ c = &clientInfo{node: node}
+ f.ns.SetField(node, clientField, c)
+ f.ns.SetField(node, connAddressField, "")
+ if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*lps.NodeBalance); c.balance != nil {
+ cb(c)
+ } else {
+ log.Error("BalanceField is missing")
+ }
+ f.ns.SetField(node, connAddressField, nil)
+ f.ns.SetField(node, clientField, nil)
+ }
+ }
}
- log.Debug("Expire nodes", "visited", visited, "deleted", deleted, "elapsed", common.PrettyDuration(time.Since(start)))
}
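forClients is the hook the RPC layer builds on: it fabricates a temporary clientInfo (with a usable balance field) for nodes that are not currently connected. A sketch of a two-parameter les_addBalance handler on top of it, matching the params change in web3ext above; the handler name is hypothetical:

	func (api *PrivateLightServerAPI) addBalanceSketch(id enode.ID, amount int64) ([2]uint64, error) {
		var result [2]uint64
		var err error
		api.server.clientPool.forClients([]enode.ID{id}, func(c *clientInfo) {
			// balance before and after the update
			result[0], result[1], err = c.balance.AddBalance(amount)
		})
		return result, err
	}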
diff --git a/les/clientpool_test.go b/les/clientpool_test.go
index f8c4ef0c0..cfd1486b4 100644
--- a/les/clientpool_test.go
+++ b/les/clientpool_test.go
@@ -17,17 +17,17 @@
package les
import (
- "bytes"
"fmt"
- "math"
"math/rand"
- "reflect"
"testing"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/core/rawdb"
+ lps "github.com/ethereum/go-ethereum/les/lespay/server"
"github.com/ethereum/go-ethereum/p2p/enode"
+ "github.com/ethereum/go-ethereum/p2p/enr"
+ "github.com/ethereum/go-ethereum/p2p/nodestate"
)
func TestClientPoolL10C100Free(t *testing.T) {
@@ -56,29 +56,68 @@ func TestClientPoolL100C300P20(t *testing.T) {
const testClientPoolTicks = 100000
-type poolTestPeer int
-
-func (i poolTestPeer) ID() enode.ID {
- return enode.ID{byte(i % 256), byte(i >> 8)}
+type poolTestPeer struct {
+ node *enode.Node
+ index int
+ disconnCh chan int
+ cap uint64
+ inactiveAllowed bool
}
-func (i poolTestPeer) freeClientId() string {
- return fmt.Sprintf("addr #%d", i)
+func newPoolTestPeer(i int, disconnCh chan int) *poolTestPeer {
+ return &poolTestPeer{
+ index: i,
+ disconnCh: disconnCh,
+ node: enode.SignNull(&enr.Record{}, enode.ID{byte(i % 256), byte(i >> 8)}),
+ }
}
-func (i poolTestPeer) updateCapacity(uint64) {}
-
-type poolTestPeerWithCap struct {
- poolTestPeer
-
- cap uint64
+func (i *poolTestPeer) Node() *enode.Node {
+ return i.node
}
-func (i *poolTestPeerWithCap) updateCapacity(cap uint64) { i.cap = cap }
+func (i *poolTestPeer) freeClientId() string {
+ return fmt.Sprintf("addr #%d", i.index)
+}
-func (i poolTestPeer) freezeClient() {}
+func (i *poolTestPeer) updateCapacity(cap uint64) {
+ i.cap = cap
+}
-func testClientPool(t *testing.T, connLimit, clientCount, paidCount int, randomDisconnect bool) {
+func (i *poolTestPeer) freeze() {}
+
+func (i *poolTestPeer) allowInactive() bool {
+ return i.inactiveAllowed
+}
+
+func getBalance(pool *clientPool, p *poolTestPeer) (pos, neg uint64) {
+ temp := pool.ns.GetField(p.node, clientField) == nil
+ if temp {
+ pool.ns.SetField(p.node, connAddressField, p.freeClientId())
+ }
+ n, _ := pool.ns.GetField(p.node, pool.BalanceField).(*lps.NodeBalance)
+ pos, neg = n.GetBalance()
+ if temp {
+ pool.ns.SetField(p.node, connAddressField, nil)
+ }
+ return
+}
+
+func addBalance(pool *clientPool, id enode.ID, amount int64) {
+ pool.forClients([]enode.ID{id}, func(c *clientInfo) {
+ c.balance.AddBalance(amount)
+ })
+}
+
+func checkDiff(a, b uint64) bool {
+ maxDiff := (a + b) / 2000
+ if maxDiff < 1 {
+ maxDiff = 1
+ }
+ return a > b+maxDiff || b > a+maxDiff
+}
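checkDiff reports a mismatch only when the two values differ by more than (a+b)/2000, i.e. roughly 0.1% of their mean (floored at 1), which absorbs the rounding introduced by the exponential balance expiration. For example:

	a := uint64(2 * time.Minute)                     // ~1.2e11 ns, so the tolerance is ~120ms
	_ = checkDiff(a, a+uint64(100*time.Millisecond)) // false: inside tolerance
	_ = checkDiff(a, a+uint64(200*time.Millisecond)) // true: outside tolerance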
+
+func testClientPool(t *testing.T, activeLimit, clientCount, paidCount int, randomDisconnect bool) {
rand.Seed(time.Now().UnixNano())
var (
clock mclock.Simulated
@@ -89,15 +128,15 @@ func testClientPool(t *testing.T, connLimit, clientCount, paidCount int, randomD
disconnFn = func(id enode.ID) {
disconnCh <- int(id[0]) + int(id[1])<<8
}
- pool = newClientPool(db, 1, &clock, disconnFn)
+ pool = newClientPool(db, 1, 0, &clock, disconnFn)
)
- pool.setConnectedBias(0)
- pool.setLimits(connLimit, uint64(connLimit))
- pool.setDefaultFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1})
+
+ pool.setLimits(activeLimit, uint64(activeLimit))
+ pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
// pool should accept new peers up to its connected limit
- for i := 0; i < connLimit; i++ {
- if pool.connect(poolTestPeer(i), 0) {
+ for i := 0; i < activeLimit; i++ {
+ if cap, _ := pool.connect(newPoolTestPeer(i, disconnCh)); cap != 0 {
connected[i] = true
} else {
t.Fatalf("Test peer #%d rejected", i)
@@ -111,28 +150,30 @@ func testClientPool(t *testing.T, connLimit, clientCount, paidCount int, randomD
// give a positive balance to some of the peers
amount := testClientPoolTicks / 2 * int64(time.Second) // enough for half of the simulation period
for i := 0; i < paidCount; i++ {
- pool.addBalance(poolTestPeer(i).ID(), amount, "")
+ addBalance(pool, newPoolTestPeer(i, disconnCh).node.ID(), amount)
}
}
i := rand.Intn(clientCount)
if connected[i] {
if randomDisconnect {
- pool.disconnect(poolTestPeer(i))
+ pool.disconnect(newPoolTestPeer(i, disconnCh))
connected[i] = false
connTicks[i] += tickCounter
}
} else {
- if pool.connect(poolTestPeer(i), 0) {
+ if cap, _ := pool.connect(newPoolTestPeer(i, disconnCh)); cap != 0 {
connected[i] = true
connTicks[i] -= tickCounter
+ } else {
+ pool.disconnect(newPoolTestPeer(i, disconnCh))
}
}
pollDisconnects:
for {
select {
case i := <-disconnCh:
- pool.disconnect(poolTestPeer(i))
+ pool.disconnect(newPoolTestPeer(i, disconnCh))
if connected[i] {
connTicks[i] += tickCounter
connected[i] = false
@@ -143,10 +184,10 @@ func testClientPool(t *testing.T, connLimit, clientCount, paidCount int, randomD
}
}
- expTicks := testClientPoolTicks/2*connLimit/clientCount + testClientPoolTicks/2*(connLimit-paidCount)/(clientCount-paidCount)
+ expTicks := testClientPoolTicks/2*activeLimit/clientCount + testClientPoolTicks/2*(activeLimit-paidCount)/(clientCount-paidCount)
expMin := expTicks - expTicks/5
expMax := expTicks + expTicks/5
- paidTicks := testClientPoolTicks/2*connLimit/clientCount + testClientPoolTicks/2
+ paidTicks := testClientPoolTicks/2*activeLimit/clientCount + testClientPoolTicks/2
paidMin := paidTicks - paidTicks/5
paidMax := paidTicks + paidTicks/5
@@ -167,22 +208,39 @@ func testClientPool(t *testing.T, connLimit, clientCount, paidCount int, randomD
pool.stop()
}
+func testPriorityConnect(t *testing.T, pool *clientPool, p *poolTestPeer, cap uint64, expSuccess bool) {
+ if cap, _ := pool.connect(p); cap == 0 {
+ if expSuccess {
+ t.Fatalf("Failed to connect paid client")
+ } else {
+ return
+ }
+ }
+ if _, err := pool.setCapacity(p.node, "", cap, defaultConnectedBias, true); err != nil {
+ if expSuccess {
+ t.Fatalf("Failed to raise capacity of paid client")
+ } else {
+ return
+ }
+ }
+ if !expSuccess {
+ t.Fatalf("Should reject high capacity paid client")
+ }
+}
+
func TestConnectPaidClient(t *testing.T) {
var (
clock mclock.Simulated
db = rawdb.NewMemoryDatabase()
)
- pool := newClientPool(db, 1, &clock, nil)
+ pool := newClientPool(db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
defer pool.stop()
pool.setLimits(10, uint64(10))
- pool.setDefaultFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1})
+ pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
// Add balance for an external client and mark it as paid client
- pool.addBalance(poolTestPeer(0).ID(), 1000, "")
-
- if !pool.connect(poolTestPeer(0), 10) {
- t.Fatalf("Failed to connect paid client")
- }
+ addBalance(pool, newPoolTestPeer(0, nil).node.ID(), int64(time.Minute))
+ testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 10, true)
}
func TestConnectPaidClientToSmallPool(t *testing.T) {
@@ -190,18 +248,16 @@ func TestConnectPaidClientToSmallPool(t *testing.T) {
clock mclock.Simulated
db = rawdb.NewMemoryDatabase()
)
- pool := newClientPool(db, 1, &clock, nil)
+ pool := newClientPool(db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
defer pool.stop()
pool.setLimits(10, uint64(10)) // Total capacity limit is 10
- pool.setDefaultFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1})
+ pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
// Add balance for an external client and mark it as paid client
- pool.addBalance(poolTestPeer(0).ID(), 1000, "")
+ addBalance(pool, newPoolTestPeer(0, nil).node.ID(), int64(time.Minute))
// Connect a fat paid client to pool, should reject it.
- if pool.connect(poolTestPeer(0), 100) {
- t.Fatalf("Connected fat paid client, should reject it")
- }
+ testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 100, false)
}
func TestConnectPaidClientToFullPool(t *testing.T) {
@@ -210,23 +266,23 @@ func TestConnectPaidClientToFullPool(t *testing.T) {
db = rawdb.NewMemoryDatabase()
)
removeFn := func(enode.ID) {} // Noop
- pool := newClientPool(db, 1, &clock, removeFn)
+ pool := newClientPool(db, 1, defaultConnectedBias, &clock, removeFn)
defer pool.stop()
pool.setLimits(10, uint64(10)) // Total capacity limit is 10
- pool.setDefaultFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1})
+ pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
for i := 0; i < 10; i++ {
- pool.addBalance(poolTestPeer(i).ID(), 1000000000, "")
- pool.connect(poolTestPeer(i), 1)
+ addBalance(pool, newPoolTestPeer(i, nil).node.ID(), int64(time.Second*20))
+ pool.connect(newPoolTestPeer(i, nil))
}
- pool.addBalance(poolTestPeer(11).ID(), 1000, "") // Add low balance to new paid client
- if pool.connect(poolTestPeer(11), 1) {
+ addBalance(pool, newPoolTestPeer(11, nil).node.ID(), int64(time.Second*2)) // Add low balance to new paid client
+ if cap, _ := pool.connect(newPoolTestPeer(11, nil)); cap != 0 {
t.Fatalf("Low balance paid client should be rejected")
}
clock.Run(time.Second)
- pool.addBalance(poolTestPeer(12).ID(), 1000000000*60*3, "") // Add high balance to new paid client
- if !pool.connect(poolTestPeer(12), 1) {
- t.Fatalf("High balance paid client should be accpected")
+ addBalance(pool, newPoolTestPeer(12, nil).node.ID(), int64(time.Minute*5)) // Add high balance to new paid client
+ if cap, _ := pool.connect(newPoolTestPeer(12, nil)); cap == 0 {
+ t.Fatalf("High balance paid client should be accepted")
}
}
@@ -234,23 +290,25 @@ func TestPaidClientKickedOut(t *testing.T) {
var (
clock mclock.Simulated
db = rawdb.NewMemoryDatabase()
- kickedCh = make(chan int, 1)
+ kickedCh = make(chan int, 100)
)
- removeFn := func(id enode.ID) { kickedCh <- int(id[0]) }
- pool := newClientPool(db, 1, &clock, removeFn)
+ removeFn := func(id enode.ID) {
+ kickedCh <- int(id[0])
+ }
+ pool := newClientPool(db, 1, defaultConnectedBias, &clock, removeFn)
+ pool.bt.SetExpirationTCs(0, 0)
defer pool.stop()
pool.setLimits(10, uint64(10)) // Total capacity limit is 10
- pool.setDefaultFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1})
+ pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
for i := 0; i < 10; i++ {
- pool.addBalance(poolTestPeer(i).ID(), 1000000000, "") // 1 second allowance
- pool.connect(poolTestPeer(i), 1)
+ addBalance(pool, newPoolTestPeer(i, kickedCh).node.ID(), 10000000000) // 10 second allowance
+ pool.connect(newPoolTestPeer(i, kickedCh))
clock.Run(time.Millisecond)
}
- clock.Run(time.Second)
- clock.Run(defaultConnectedBias)
- if !pool.connect(poolTestPeer(11), 0) {
- t.Fatalf("Free client should be accectped")
+ clock.Run(defaultConnectedBias + time.Second*11)
+ if cap, _ := pool.connect(newPoolTestPeer(11, kickedCh)); cap == 0 {
+ t.Fatalf("Free client should be accepted")
}
select {
case id := <-kickedCh:
@@ -267,13 +325,14 @@ func TestConnectFreeClient(t *testing.T) {
clock mclock.Simulated
db = rawdb.NewMemoryDatabase()
)
- pool := newClientPool(db, 1, &clock, nil)
+ pool := newClientPool(db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
defer pool.stop()
pool.setLimits(10, uint64(10))
- pool.setDefaultFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1})
- if !pool.connect(poolTestPeer(0), 10) {
+ pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
+ if cap, _ := pool.connect(newPoolTestPeer(0, nil)); cap == 0 {
t.Fatalf("Failed to connect free client")
}
+ testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 2, false)
}
func TestConnectFreeClientToFullPool(t *testing.T) {
@@ -282,24 +341,24 @@ func TestConnectFreeClientToFullPool(t *testing.T) {
db = rawdb.NewMemoryDatabase()
)
removeFn := func(enode.ID) {} // Noop
- pool := newClientPool(db, 1, &clock, removeFn)
+ pool := newClientPool(db, 1, defaultConnectedBias, &clock, removeFn)
defer pool.stop()
pool.setLimits(10, uint64(10)) // Total capacity limit is 10
- pool.setDefaultFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1})
+ pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
for i := 0; i < 10; i++ {
- pool.connect(poolTestPeer(i), 1)
+ pool.connect(newPoolTestPeer(i, nil))
}
- if pool.connect(poolTestPeer(11), 1) {
+ if cap, _ := pool.connect(newPoolTestPeer(11, nil)); cap != 0 {
t.Fatalf("New free client should be rejected")
}
clock.Run(time.Minute)
- if pool.connect(poolTestPeer(12), 1) {
+ if cap, _ := pool.connect(newPoolTestPeer(12, nil)); cap != 0 {
t.Fatalf("New free client should be rejected")
}
clock.Run(time.Millisecond)
clock.Run(4 * time.Minute)
- if !pool.connect(poolTestPeer(13), 1) {
+ if cap, _ := pool.connect(newPoolTestPeer(13, nil)); cap == 0 {
t.Fatalf("Old client connects more than 5min should be kicked")
}
}
@@ -308,24 +367,30 @@ func TestFreeClientKickedOut(t *testing.T) {
var (
clock mclock.Simulated
db = rawdb.NewMemoryDatabase()
- kicked = make(chan int, 10)
+ kicked = make(chan int, 100)
)
removeFn := func(id enode.ID) { kicked <- int(id[0]) }
- pool := newClientPool(db, 1, &clock, removeFn)
+ pool := newClientPool(db, 1, defaultConnectedBias, &clock, removeFn)
defer pool.stop()
pool.setLimits(10, uint64(10)) // Total capacity limit is 10
- pool.setDefaultFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1})
+ pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
for i := 0; i < 10; i++ {
- pool.connect(poolTestPeer(i), 1)
+ pool.connect(newPoolTestPeer(i, kicked))
clock.Run(time.Millisecond)
}
- if pool.connect(poolTestPeer(10), 1) {
+ if cap, _ := pool.connect(newPoolTestPeer(10, kicked)); cap != 0 {
t.Fatalf("New free client should be rejected")
}
+ select {
+ case <-kicked:
+ case <-time.NewTimer(time.Second).C:
+ t.Fatalf("timeout")
+ }
+ pool.disconnect(newPoolTestPeer(10, kicked))
clock.Run(5 * time.Minute)
for i := 0; i < 10; i++ {
- pool.connect(poolTestPeer(i+10), 1)
+ pool.connect(newPoolTestPeer(i+10, kicked))
}
for i := 0; i < 10; i++ {
select {
@@ -346,19 +411,19 @@ func TestPositiveBalanceCalculation(t *testing.T) {
kicked = make(chan int, 10)
)
removeFn := func(id enode.ID) { kicked <- int(id[0]) } // Noop
- pool := newClientPool(db, 1, &clock, removeFn)
+ pool := newClientPool(db, 1, defaultConnectedBias, &clock, removeFn)
defer pool.stop()
pool.setLimits(10, uint64(10)) // Total capacity limit is 10
- pool.setDefaultFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1})
+ pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
- pool.addBalance(poolTestPeer(0).ID(), int64(time.Minute*3), "")
- pool.connect(poolTestPeer(0), 10)
+ addBalance(pool, newPoolTestPeer(0, kicked).node.ID(), int64(time.Minute*3))
+ testPriorityConnect(t, pool, newPoolTestPeer(0, kicked), 10, true)
clock.Run(time.Minute)
- pool.disconnect(poolTestPeer(0))
- pb := pool.ndb.getOrNewPB(poolTestPeer(0).ID())
- if pb.value != uint64(time.Minute*2) {
- t.Fatalf("Positive balance mismatch, want %v, got %v", uint64(time.Minute*2), pb.value)
+ pool.disconnect(newPoolTestPeer(0, kicked))
+ pb, _ := getBalance(pool, newPoolTestPeer(0, kicked))
+ if checkDiff(pb, uint64(time.Minute*2)) {
+ t.Fatalf("Positive balance mismatch, want %v, got %v", uint64(time.Minute*2), pb)
}
}
@@ -369,18 +434,16 @@ func TestDowngradePriorityClient(t *testing.T) {
kicked = make(chan int, 10)
)
removeFn := func(id enode.ID) { kicked <- int(id[0]) } // Noop
- pool := newClientPool(db, 1, &clock, removeFn)
+ pool := newClientPool(db, 1, defaultConnectedBias, &clock, removeFn)
defer pool.stop()
pool.setLimits(10, uint64(10)) // Total capacity limit is 10
- pool.setDefaultFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1})
+ pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
- p := &poolTestPeerWithCap{
- poolTestPeer: poolTestPeer(0),
- }
- pool.addBalance(p.ID(), int64(time.Minute), "")
- pool.connect(p, 10)
+ p := newPoolTestPeer(0, kicked)
+ addBalance(pool, p.node.ID(), int64(time.Minute))
+ testPriorityConnect(t, pool, p, 10, true)
if p.cap != 10 {
- t.Fatalf("The capcacity of priority peer hasn't been updated, got: %d", p.cap)
+ t.Fatalf("The capacity of priority peer hasn't been updated, got: %d", p.cap)
}
clock.Run(time.Minute) // All positive balance should be used up.
@@ -388,156 +451,131 @@ func TestDowngradePriorityClient(t *testing.T) {
if p.cap != 1 {
t.Fatalf("The capcacity of peer should be downgraded, got: %d", p.cap)
}
- pb := pool.ndb.getOrNewPB(poolTestPeer(0).ID())
- if pb.value != 0 {
- t.Fatalf("Positive balance mismatch, want %v, got %v", 0, pb.value)
+ pb, _ := getBalance(pool, newPoolTestPeer(0, kicked))
+ if pb != 0 {
+ t.Fatalf("Positive balance mismatch, want %v, got %v", 0, pb)
}
- pool.addBalance(poolTestPeer(0).ID(), int64(time.Minute), "")
- pb = pool.ndb.getOrNewPB(poolTestPeer(0).ID())
- if pb.value != uint64(time.Minute) {
- t.Fatalf("Positive balance mismatch, want %v, got %v", uint64(time.Minute), pb.value)
+ addBalance(pool, newPoolTestPeer(0, kicked).node.ID(), int64(time.Minute))
+ pb, _ = getBalance(pool, newPoolTestPeer(0, kicked))
+ if checkDiff(pb, uint64(time.Minute)) {
+ t.Fatalf("Positive balance mismatch, want %v, got %v", uint64(time.Minute), pb)
}
}
func TestNegativeBalanceCalculation(t *testing.T) {
var (
- clock mclock.Simulated
- db = rawdb.NewMemoryDatabase()
- kicked = make(chan int, 10)
+ clock mclock.Simulated
+ db = rawdb.NewMemoryDatabase()
)
- removeFn := func(id enode.ID) { kicked <- int(id[0]) } // Noop
- pool := newClientPool(db, 1, &clock, removeFn)
+ pool := newClientPool(db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
defer pool.stop()
pool.setLimits(10, uint64(10)) // Total capacity limit is 10
- pool.setDefaultFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1})
+ pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1})
for i := 0; i < 10; i++ {
- pool.connect(poolTestPeer(i), 1)
+ pool.connect(newPoolTestPeer(i, nil))
}
clock.Run(time.Second)
for i := 0; i < 10; i++ {
- pool.disconnect(poolTestPeer(i))
- nb := pool.ndb.getOrNewNB(poolTestPeer(i).freeClientId())
- if nb.logValue != 0 {
+ pool.disconnect(newPoolTestPeer(i, nil))
+ _, nb := getBalance(pool, newPoolTestPeer(i, nil))
+ if nb != 0 {
t.Fatalf("Short connection shouldn't be recorded")
}
}
-
for i := 0; i < 10; i++ {
- pool.connect(poolTestPeer(i), 1)
+ pool.connect(newPoolTestPeer(i, nil))
}
clock.Run(time.Minute)
for i := 0; i < 10; i++ {
- pool.disconnect(poolTestPeer(i))
- nb := pool.ndb.getOrNewNB(poolTestPeer(i).freeClientId())
- nb.logValue -= pool.logOffset(clock.Now())
- nb.logValue /= fixedPointMultiplier
- if nb.logValue != int64(math.Log(float64(time.Minute/time.Second))) {
- t.Fatalf("Negative balance mismatch, want %v, got %v", int64(math.Log(float64(time.Minute/time.Second))), nb.logValue)
+ pool.disconnect(newPoolTestPeer(i, nil))
+ _, nb := getBalance(pool, newPoolTestPeer(i, nil))
+ if checkDiff(nb, uint64(time.Minute)/1000) {
+ t.Fatalf("Negative balance mismatch, want %v, got %v", uint64(time.Minute)/1000, nb)
}
}
}
-func TestNodeDB(t *testing.T) {
- ndb := newNodeDB(rawdb.NewMemoryDatabase(), mclock.System{})
- defer ndb.close()
-
- if !bytes.Equal(ndb.verbuf[:], []byte{0x00, nodeDBVersion}) {
- t.Fatalf("version buffer mismatch, want %v, got %v", []byte{0x00, nodeDBVersion}, ndb.verbuf)
- }
- var cases = []struct {
- id enode.ID
- ip string
- balance interface{}
- positive bool
- }{
- {enode.ID{0x00, 0x01, 0x02}, "", posBalance{value: 100}, true},
- {enode.ID{0x00, 0x01, 0x02}, "", posBalance{value: 200}, true},
- {enode.ID{}, "127.0.0.1", negBalance{logValue: 10}, false},
- {enode.ID{}, "127.0.0.1", negBalance{logValue: 20}, false},
- }
- for _, c := range cases {
- if c.positive {
- ndb.setPB(c.id, c.balance.(posBalance))
- if pb := ndb.getOrNewPB(c.id); !reflect.DeepEqual(pb, c.balance.(posBalance)) {
- t.Fatalf("Positive balance mismatch, want %v, got %v", c.balance.(posBalance), pb)
- }
- } else {
- ndb.setNB(c.ip, c.balance.(negBalance))
- if nb := ndb.getOrNewNB(c.ip); !reflect.DeepEqual(nb, c.balance.(negBalance)) {
- t.Fatalf("Negative balance mismatch, want %v, got %v", c.balance.(negBalance), nb)
- }
- }
- }
- for _, c := range cases {
- if c.positive {
- ndb.delPB(c.id)
- if pb := ndb.getOrNewPB(c.id); !reflect.DeepEqual(pb, posBalance{}) {
- t.Fatalf("Positive balance mismatch, want %v, got %v", posBalance{}, pb)
- }
- } else {
- ndb.delNB(c.ip)
- if nb := ndb.getOrNewNB(c.ip); !reflect.DeepEqual(nb, negBalance{}) {
- t.Fatalf("Negative balance mismatch, want %v, got %v", negBalance{}, nb)
- }
- }
- }
- ndb.setCumulativeTime(100)
- if ndb.getCumulativeTime() != 100 {
- t.Fatalf("Cumulative time mismatch, want %v, got %v", 100, ndb.getCumulativeTime())
- }
-}
-
-func TestNodeDBExpiration(t *testing.T) {
+func TestInactiveClient(t *testing.T) {
var (
- iterated int
- done = make(chan struct{}, 1)
+ clock mclock.Simulated
+ db = rawdb.NewMemoryDatabase()
)
- callback := func(now mclock.AbsTime, b negBalance) bool {
- iterated += 1
- return true
- }
- clock := &mclock.Simulated{}
- ndb := newNodeDB(rawdb.NewMemoryDatabase(), clock)
- defer ndb.close()
- ndb.nbEvictCallBack = callback
- ndb.cleanupHook = func() { done <- struct{}{} }
+ pool := newClientPool(db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
+ defer pool.stop()
+ pool.setLimits(2, uint64(2))
- var cases = []struct {
- ip string
- balance negBalance
- }{
- {"127.0.0.1", negBalance{logValue: 1}},
- {"127.0.0.2", negBalance{logValue: 1}},
- {"127.0.0.3", negBalance{logValue: 1}},
- {"127.0.0.4", negBalance{logValue: 1}},
+ p1 := newPoolTestPeer(1, nil)
+ p1.inactiveAllowed = true
+ p2 := newPoolTestPeer(2, nil)
+ p2.inactiveAllowed = true
+ p3 := newPoolTestPeer(3, nil)
+ p3.inactiveAllowed = true
+ addBalance(pool, p1.node.ID(), 1000*int64(time.Second))
+ addBalance(pool, p3.node.ID(), 2000*int64(time.Second))
+ // p1: 1000 p2: 0 p3: 2000
+ p1.cap, _ = pool.connect(p1)
+ if p1.cap != 1 {
+ t.Fatalf("Failed to connect peer #1")
}
- for _, c := range cases {
- ndb.setNB(c.ip, c.balance)
+ p2.cap, _ = pool.connect(p2)
+ if p2.cap != 1 {
+ t.Fatalf("Failed to connect peer #2")
}
- clock.WaitForTimers(1)
- clock.Run(time.Hour + time.Minute)
- select {
- case <-done:
- case <-time.NewTimer(time.Second).C:
- t.Fatalf("timeout")
+ p3.cap, _ = pool.connect(p3)
+ if p3.cap != 1 {
+ t.Fatalf("Failed to connect peer #3")
}
- if iterated != 4 {
- t.Fatalf("Failed to evict useless negative balances, want %v, got %d", 4, iterated)
+ if p2.cap != 0 {
+ t.Fatalf("Failed to deactivate peer #2")
}
- clock.WaitForTimers(1)
- for _, c := range cases {
- ndb.setNB(c.ip, c.balance)
+ addBalance(pool, p2.node.ID(), 3000*int64(time.Second))
+ // p1: 1000 p2: 3000 p3: 2000
+ if p2.cap != 1 {
+ t.Fatalf("Failed to activate peer #2")
}
- clock.Run(time.Hour + time.Minute)
- select {
- case <-done:
- case <-time.NewTimer(time.Second).C:
- t.Fatalf("timeout")
+ if p1.cap != 0 {
+ t.Fatalf("Failed to deactivate peer #1")
}
- if iterated != 8 {
- t.Fatalf("Failed to evict useless negative balances, want %v, got %d", 4, iterated)
+ addBalance(pool, p2.node.ID(), -2500*int64(time.Second))
+ // p1: 1000 p2: 500 p3: 2000
+ if p1.cap != 1 {
+ t.Fatalf("Failed to activate peer #1")
+ }
+ if p2.cap != 0 {
+ t.Fatalf("Failed to deactivate peer #2")
+ }
+ pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0})
+ p4 := newPoolTestPeer(4, nil)
+ addBalance(pool, p4.node.ID(), 1500*int64(time.Second))
+ // p1: 1000 p2: 500 p3: 2000 p4: 1500
+ p4.cap, _ = pool.connect(p4)
+ if p4.cap != 1 {
+ t.Fatalf("Failed to activate peer #4")
+ }
+ if p1.cap != 0 {
+ t.Fatalf("Failed to deactivate peer #1")
+ }
+ clock.Run(time.Second * 600)
+ // manually trigger a check to avoid a long real-time wait
+ pool.ns.SetState(p1.node, pool.UpdateFlag, nodestate.Flags{}, 0)
+ pool.ns.SetState(p1.node, nodestate.Flags{}, pool.UpdateFlag, 0)
+ // p1: 1000 p2: 500 p3: 2000 p4: 900
+ if p1.cap != 1 {
+ t.Fatalf("Failed to activate peer #1")
+ }
+ if p4.cap != 0 {
+ t.Fatalf("Failed to deactivate peer #4")
+ }
+ pool.disconnect(p2)
+ pool.disconnect(p4)
+ addBalance(pool, p1.node.ID(), -1000*int64(time.Second))
+ if p1.cap != 1 {
+ t.Fatalf("Should not deactivate peer #1")
+ }
+ if p2.cap != 0 {
+ t.Fatalf("Should not activate peer #2")
}
}
diff --git a/les/lespay/server/balance.go b/les/lespay/server/balance.go
new file mode 100644
index 000000000..f820a4ad0
--- /dev/null
+++ b/les/lespay/server/balance.go
@@ -0,0 +1,609 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package server
+
+import (
+ "errors"
+ "math"
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common/mclock"
+ "github.com/ethereum/go-ethereum/les/utils"
+ "github.com/ethereum/go-ethereum/p2p/enode"
+ "github.com/ethereum/go-ethereum/p2p/nodestate"
+)
+
+var errBalanceOverflow = errors.New("balance overflow")
+
+const maxBalance = math.MaxInt64 // maximum allowed balance value
+
+const (
+ balanceCallbackUpdate = iota // called when priority drops below the last minimum estimate
+ balanceCallbackZero // called when priority drops to zero (positive balance exhausted)
+ balanceCallbackCount // total number of balance callbacks
+)
+
+// PriceFactors determine the pricing policy (may apply either to positive or
+// negative balances which may have different factors).
+// - TimeFactor is cost unit per nanosecond of connection time
+// - CapacityFactor is cost unit per nanosecond of connection time per 1000000 capacity
+// - RequestFactor is cost unit per request "realCost" unit
+type PriceFactors struct {
+ TimeFactor, CapacityFactor, RequestFactor float64
+}
+
+// timePrice returns the price of connection per nanosecond at the given capacity
+func (p PriceFactors) timePrice(cap uint64) float64 {
+ return p.TimeFactor + float64(cap)*p.CapacityFactor/1000000
+}
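The capacity term is divided by 1000000 so that the factors stay in a convenient range. A worked example with assumed numbers:

	p := PriceFactors{TimeFactor: 1, CapacityFactor: 0.5, RequestFactor: 0}
	// cost per nanosecond at capacity 2_000_000:
	//   1 + 2_000_000*0.5/1_000_000 = 2
	_ = p.timePrice(2000000) // == 2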
+
+// NodeBalance keeps track of the positive and negative balances of a connected
+// client and calculates actual and projected future priority values.
+// Implements nodePriority interface.
+type NodeBalance struct {
+ bt *BalanceTracker
+ lock sync.RWMutex
+ node *enode.Node
+ connAddress string
+ active bool
+ priority bool
+ capacity uint64
+ balance balance
+ posFactor, negFactor PriceFactors
+ sumReqCost uint64
+ lastUpdate, nextUpdate, initTime mclock.AbsTime
+ updateEvent mclock.Timer
+ // since only a limited and fixed number of callbacks are needed, they are
+ // stored in a fixed size array ordered by priority threshold.
+ callbacks [balanceCallbackCount]balanceCallback
+ // callbackIndex maps balanceCallback constants to callbacks array indexes (-1 if not active)
+ callbackIndex [balanceCallbackCount]int
+ callbackCount int // number of active callbacks
+}
+
+// balance represents a pair of positive and negative balances
+type balance struct {
+ pos, neg utils.ExpiredValue
+}
+
+// balanceCallback represents a single callback that is activated when client priority
+// falls to or below the given threshold
+type balanceCallback struct {
+ id int
+ threshold int64
+ callback func()
+}
+
+// GetBalance returns the current positive and negative balance.
+func (n *NodeBalance) GetBalance() (uint64, uint64) {
+ n.lock.Lock()
+ defer n.lock.Unlock()
+
+ now := n.bt.clock.Now()
+ n.updateBalance(now)
+ return n.balance.pos.Value(n.bt.posExp.LogOffset(now)), n.balance.neg.Value(n.bt.negExp.LogOffset(now))
+}
+
+// GetRawBalance returns the current positive and negative balance
+// but in the raw(expired value) format.
+func (n *NodeBalance) GetRawBalance() (utils.ExpiredValue, utils.ExpiredValue) {
+ n.lock.Lock()
+ defer n.lock.Unlock()
+
+ now := n.bt.clock.Now()
+ n.updateBalance(now)
+ return n.balance.pos, n.balance.neg
+}
+
+// AddBalance adds the given amount to the positive balance and returns the balance
+// before and after the operation. Exceeding maxBalance results in an error (balance is
+// unchanged) while adding a negative amount higher than the current balance results in
+// zero balance.
+func (n *NodeBalance) AddBalance(amount int64) (uint64, uint64, error) {
+ var (
+ err error
+ old, new uint64
+ )
+ n.bt.ns.Operation(func() {
+ var (
+ callbacks []func()
+ setPriority bool
+ )
+ n.bt.updateTotalBalance(n, func() bool {
+ now := n.bt.clock.Now()
+ n.updateBalance(now)
+
+ // Ensure the given amount is valid to apply.
+ offset := n.bt.posExp.LogOffset(now)
+ old = n.balance.pos.Value(offset)
+ if amount > 0 && (amount > maxBalance || old > maxBalance-uint64(amount)) {
+ err = errBalanceOverflow
+ return false
+ }
+
+ // Update the total positive balance counter.
+ n.balance.pos.Add(amount, offset)
+ callbacks = n.checkCallbacks(now)
+ setPriority = n.checkPriorityStatus()
+ new = n.balance.pos.Value(offset)
+ n.storeBalance(true, false)
+ return true
+ })
+ for _, cb := range callbacks {
+ cb()
+ }
+ if setPriority {
+ n.bt.ns.SetStateSub(n.node, n.bt.PriorityFlag, nodestate.Flags{}, 0)
+ }
+ n.signalPriorityUpdate()
+ })
+ if err != nil {
+ return old, old, err
+ }
+
+ return old, new, nil
+}
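Callers can rely on AddBalance being atomic with respect to priority status: the flag flip and any triggered callbacks run inside the same NodeStateMachine operation. A minimal usage sketch:

	old, new, err := n.AddBalance(int64(time.Minute))
	switch {
	case err == errBalanceOverflow:
		// balance unchanged; old == new
	case err == nil && old == 0 && new > 0:
		// the node just gained priority status (PriorityFlag was set)
	}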
+
+// SetBalance sets the positive and negative balance to the given values
+func (n *NodeBalance) SetBalance(pos, neg uint64) error {
+ if pos > maxBalance || neg > maxBalance {
+ return errBalanceOverflow
+ }
+ n.bt.ns.Operation(func() {
+ var (
+ callbacks []func()
+ setPriority bool
+ )
+ n.bt.updateTotalBalance(n, func() bool {
+ now := n.bt.clock.Now()
+ n.updateBalance(now)
+
+ var pb, nb utils.ExpiredValue
+ pb.Add(int64(pos), n.bt.posExp.LogOffset(now))
+ nb.Add(int64(neg), n.bt.negExp.LogOffset(now))
+ n.balance.pos = pb
+ n.balance.neg = nb
+ callbacks = n.checkCallbacks(now)
+ setPriority = n.checkPriorityStatus()
+ n.storeBalance(true, true)
+ return true
+ })
+ for _, cb := range callbacks {
+ cb()
+ }
+ if setPriority {
+ n.bt.ns.SetStateSub(n.node, n.bt.PriorityFlag, nodestate.Flags{}, 0)
+ }
+ n.signalPriorityUpdate()
+ })
+ return nil
+}
+
+// RequestServed should be called after serving a request for the given peer
+func (n *NodeBalance) RequestServed(cost uint64) uint64 {
+ n.lock.Lock()
+ var callbacks []func()
+ defer func() {
+ n.lock.Unlock()
+ if callbacks != nil {
+ n.bt.ns.Operation(func() {
+ for _, cb := range callbacks {
+ cb()
+ }
+ })
+ }
+ }()
+
+ now := n.bt.clock.Now()
+ n.updateBalance(now)
+ fcost := float64(cost)
+
+ posExp := n.bt.posExp.LogOffset(now)
+ var check bool
+ if !n.balance.pos.IsZero() {
+ if n.posFactor.RequestFactor != 0 {
+ c := -int64(fcost * n.posFactor.RequestFactor)
+ cc := n.balance.pos.Add(c, posExp)
+ if c == cc {
+ fcost = 0
+ } else {
+ fcost *= 1 - float64(cc)/float64(c)
+ }
+ check = true
+ } else {
+ fcost = 0
+ }
+ }
+ if fcost > 0 {
+ if n.negFactor.RequestFactor != 0 {
+ n.balance.neg.Add(int64(fcost*n.negFactor.RequestFactor), n.bt.negExp.LogOffset(now))
+ check = true
+ }
+ }
+ if check {
+ callbacks = n.checkCallbacks(now)
+ }
+ n.sumReqCost += cost
+ return n.balance.pos.Value(posExp)
+}
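The partial-spend arithmetic above drains the positive balance first and charges whatever could not be absorbed to the negative one; with RequestFactor 1 on both sides this reduces to a plain split. A simplified illustration (ignores expiration offsets and non-unit factors):

	// splitCost mirrors RequestServed for RequestFactor == 1: c = -cost,
	// cc is what the positive balance actually absorbed, and the remainder
	// cost*(1-cc/c) is charged to the negative balance.
	func splitCost(pos, cost uint64) (newPos, negCharge uint64) {
		if pos >= cost {
			return pos - cost, 0
		}
		return 0, cost - pos
	}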
+
+// Priority returns the actual priority based on the current balance
+func (n *NodeBalance) Priority(now mclock.AbsTime, capacity uint64) int64 {
+ n.lock.Lock()
+ defer n.lock.Unlock()
+
+ n.updateBalance(now)
+ return n.balanceToPriority(n.balance, capacity)
+}
+
+// EstMinPriority gives a lower estimate for the priority at a given time in the future.
+// An average request cost per time is assumed that is twice the average cost per time
+// in the current session.
+// If update is true then a priority callback is added that turns UpdateFlag on and off
+// in case the priority goes below the estimated minimum.
+func (n *NodeBalance) EstMinPriority(at mclock.AbsTime, capacity uint64, update bool) int64 {
+ n.lock.Lock()
+ defer n.lock.Unlock()
+
+ var avgReqCost float64
+ dt := time.Duration(n.lastUpdate - n.initTime)
+ if dt > time.Second {
+ avgReqCost = float64(n.sumReqCost) * 2 / float64(dt)
+ }
+ pri := n.balanceToPriority(n.reducedBalance(at, capacity, avgReqCost), capacity)
+ if update {
+ n.addCallback(balanceCallbackUpdate, pri, n.signalPriorityUpdate)
+ }
+ return pri
+}
+
+// PosBalanceMissing calculates the missing amount of positive balance in order to
+// connect at targetCapacity, stay connected for the given amount of time and then
+// still have a priority of targetPriority
+func (n *NodeBalance) PosBalanceMissing(targetPriority int64, targetCapacity uint64, after time.Duration) uint64 {
+ n.lock.Lock()
+ defer n.lock.Unlock()
+
+ now := n.bt.clock.Now()
+ if targetPriority < 0 {
+ timePrice := n.negFactor.timePrice(targetCapacity)
+ timeCost := uint64(float64(after) * timePrice)
+ negBalance := n.balance.neg.Value(n.bt.negExp.LogOffset(now))
+ if timeCost+negBalance < uint64(-targetPriority) {
+ return 0
+ }
+ if uint64(-targetPriority) > negBalance && timePrice > 1e-100 {
+ if negTime := time.Duration(float64(uint64(-targetPriority)-negBalance) / timePrice); negTime < after {
+ after -= negTime
+ } else {
+ after = 0
+ }
+ }
+ targetPriority = 0
+ }
+ timePrice := n.posFactor.timePrice(targetCapacity)
+ posRequired := uint64(float64(targetPriority)*float64(targetCapacity)+float64(after)*timePrice) + 1
+ if posRequired >= maxBalance {
+ return math.MaxUint64 // target not reachable
+ }
+ posBalance := n.balance.pos.Value(n.bt.posExp.LogOffset(now))
+ if posRequired > posBalance {
+ return posRequired - posBalance
+ }
+ return 0
+}
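PosBalanceMissing inverts the pricing formula: it first prices the time needed to climb from a negative priority back to zero, then prices the target priority and the remaining connection time. A worked example with assumed factors:

	missing := n.PosBalanceMissing(0, 1, time.Second)
	// with TimeFactor=1, CapacityFactor=0 and a current positive balance of 4e8:
	//   posRequired = 0*1 + 1e9*1 + 1 = 1_000_000_001
	//   missing    == 1_000_000_001 - 400_000_000 = 600_000_001
	_ = missing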
+
+// SetPriceFactors sets the price factors. TimeFactor is the price of a nanosecond of
+// connection while RequestFactor is the price of a request cost unit.
+func (n *NodeBalance) SetPriceFactors(posFactor, negFactor PriceFactors) {
+ n.lock.Lock()
+ now := n.bt.clock.Now()
+ n.updateBalance(now)
+ n.posFactor, n.negFactor = posFactor, negFactor
+ callbacks := n.checkCallbacks(now)
+ n.lock.Unlock()
+ if callbacks != nil {
+ n.bt.ns.Operation(func() {
+ for _, cb := range callbacks {
+ cb()
+ }
+ })
+ }
+}
+
+// GetPriceFactors returns the price factors
+func (n *NodeBalance) GetPriceFactors() (posFactor, negFactor PriceFactors) {
+ n.lock.Lock()
+ defer n.lock.Unlock()
+
+ return n.posFactor, n.negFactor
+}
+
+// activate starts time/capacity cost deduction.
+func (n *NodeBalance) activate() {
+ n.bt.updateTotalBalance(n, func() bool {
+ if n.active {
+ return false
+ }
+ n.active = true
+ n.lastUpdate = n.bt.clock.Now()
+ return true
+ })
+}
+
+// deactivate stops time/capacity cost deduction and saves the balances in the database
+func (n *NodeBalance) deactivate() {
+ n.bt.updateTotalBalance(n, func() bool {
+ if !n.active {
+ return false
+ }
+ n.updateBalance(n.bt.clock.Now())
+ if n.updateEvent != nil {
+ n.updateEvent.Stop()
+ n.updateEvent = nil
+ }
+ n.storeBalance(true, true)
+ n.active = false
+ return true
+ })
+}
+
+// updateBalance updates balance based on the time factor
+func (n *NodeBalance) updateBalance(now mclock.AbsTime) {
+ if n.active && now > n.lastUpdate {
+ n.balance = n.reducedBalance(now, n.capacity, 0)
+ n.lastUpdate = now
+ }
+}
+
+// storeBalance stores the positive and/or negative balance of the node in the database
+func (n *NodeBalance) storeBalance(pos, neg bool) {
+ if pos {
+ n.bt.storeBalance(n.node.ID().Bytes(), false, n.balance.pos)
+ }
+ if neg {
+ n.bt.storeBalance([]byte(n.connAddress), true, n.balance.neg)
+ }
+}
+
+// addCallback sets up a one-time callback to be called when priority reaches
+// the threshold. If it has already reached the threshold the callback is called
+// immediately.
+// Note: should be called while n.lock is held
+// Note 2: the callback function runs inside a NodeStateMachine operation
+func (n *NodeBalance) addCallback(id int, threshold int64, callback func()) {
+ n.removeCallback(id)
+ idx := 0
+ for idx < n.callbackCount && threshold > n.callbacks[idx].threshold {
+ idx++
+ }
+ for i := n.callbackCount - 1; i >= idx; i-- {
+ n.callbackIndex[n.callbacks[i].id]++
+ n.callbacks[i+1] = n.callbacks[i]
+ }
+ n.callbackCount++
+ n.callbackIndex[id] = idx
+ n.callbacks[idx] = balanceCallback{id, threshold, callback}
+ now := n.bt.clock.Now()
+ n.updateBalance(now)
+ n.scheduleCheck(now)
+}
+
+// removeCallback removes the given callback and returns true if it was active
+// Note: should be called while n.lock is held
+func (n *NodeBalance) removeCallback(id int) bool {
+ idx := n.callbackIndex[id]
+ if idx == -1 {
+ return false
+ }
+ n.callbackIndex[id] = -1
+ for i := idx; i < n.callbackCount-1; i++ {
+ n.callbackIndex[n.callbacks[i+1].id]--
+ n.callbacks[i] = n.callbacks[i+1]
+ }
+ n.callbackCount--
+ return true
+}
+
+// checkCallbacks checks whether the threshold of any of the active callbacks
+// have been reached and returns triggered callbacks.
+// Note: checkCallbacks assumes that the balance has been recently updated.
+func (n *NodeBalance) checkCallbacks(now mclock.AbsTime) (callbacks []func()) {
+ if n.callbackCount == 0 || n.capacity == 0 {
+ return
+ }
+ pri := n.balanceToPriority(n.balance, n.capacity)
+ for n.callbackCount != 0 && n.callbacks[n.callbackCount-1].threshold >= pri {
+ n.callbackCount--
+ n.callbackIndex[n.callbacks[n.callbackCount].id] = -1
+ callbacks = append(callbacks, n.callbacks[n.callbackCount].callback)
+ }
+ n.scheduleCheck(now)
+ return
+}
+
+// scheduleCheck sets up or updates a scheduled event to ensure that it will be called
+// again just after the next threshold has been reached.
+func (n *NodeBalance) scheduleCheck(now mclock.AbsTime) {
+ if n.callbackCount != 0 {
+ d, ok := n.timeUntil(n.callbacks[n.callbackCount-1].threshold)
+ if !ok {
+ n.nextUpdate = 0
+ n.updateAfter(0)
+ return
+ }
+ if n.nextUpdate == 0 || n.nextUpdate > now+mclock.AbsTime(d) {
+ if d > time.Second {
+ // Note: if the scheduled update is not in the very near future then we
+ // schedule the update a bit earlier. This way we may do a few extra
+ // updates but don't need to reschedule every time a processed request
+ // brings the expected firing time a little bit closer.
+ d = ((d - time.Second) * 7 / 8) + time.Second
+ }
+ n.nextUpdate = now + mclock.AbsTime(d)
+ n.updateAfter(d)
+ }
+ } else {
+ n.nextUpdate = 0
+ n.updateAfter(0)
+ }
+}
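The early-firing adjustment trades a few extra wakeups for scheduling stability. Concretely:

	d := 9 * time.Second
	d = (d-time.Second)*7/8 + time.Second
	// d == 8s: the timer fires 1s before the estimated threshold crossing,
	// so a request that nudges the crossing slightly closer rarely forces
	// a reschedule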
+
+// updateAfter schedules a balance update and callback check in the future
+func (n *NodeBalance) updateAfter(dt time.Duration) {
+ if n.updateEvent == nil || n.updateEvent.Stop() {
+ if dt == 0 {
+ n.updateEvent = nil
+ } else {
+ n.updateEvent = n.bt.clock.AfterFunc(dt, func() {
+ var callbacks []func()
+ n.lock.Lock()
+ if n.callbackCount != 0 {
+ now := n.bt.clock.Now()
+ n.updateBalance(now)
+ callbacks = n.checkCallbacks(now)
+ }
+ n.lock.Unlock()
+ if callbacks != nil {
+ n.bt.ns.Operation(func() {
+ for _, cb := range callbacks {
+ cb()
+ }
+ })
+ }
+ })
+ }
+ }
+}
+
+// balanceExhausted should be called when the positive balance is exhausted (priority goes to zero/negative)
+// Note: this function should run inside a NodeStateMachine operation
+func (n *NodeBalance) balanceExhausted() {
+ n.lock.Lock()
+ n.storeBalance(true, false)
+ n.priority = false
+ n.lock.Unlock()
+ n.bt.ns.SetStateSub(n.node, nodestate.Flags{}, n.bt.PriorityFlag, 0)
+}
+
+// checkPriorityStatus checks whether the node has gained priority status and sets the priority
+// callback and flag if necessary. It assumes that the balance has been recently updated.
+// Note that the priority flag has to be set by the caller after the mutex has been released.
+func (n *NodeBalance) checkPriorityStatus() bool {
+ if !n.priority && !n.balance.pos.IsZero() {
+ n.priority = true
+ n.addCallback(balanceCallbackZero, 0, func() { n.balanceExhausted() })
+ return true
+ }
+ return false
+}
+
+// signalPriorityUpdate signals that the priority fell below the previous minimum estimate
+// Note: this function should run inside a NodeStateMachine operation
+func (n *NodeBalance) signalPriorityUpdate() {
+ n.bt.ns.SetStateSub(n.node, n.bt.UpdateFlag, nodestate.Flags{}, 0)
+ n.bt.ns.SetStateSub(n.node, nodestate.Flags{}, n.bt.UpdateFlag, 0)
+}
+
+// setCapacity updates the capacity value used for priority calculation
+// Note: capacity should never be zero
+// Note 2: this function should run inside a NodeStateMachine operation
+func (n *NodeBalance) setCapacity(capacity uint64) {
+ n.lock.Lock()
+ now := n.bt.clock.Now()
+ n.updateBalance(now)
+ n.capacity = capacity
+ callbacks := n.checkCallbacks(now)
+ n.lock.Unlock()
+ for _, cb := range callbacks {
+ cb()
+ }
+}
+
+// balanceToPriority converts a balance to a priority value. Lower priority means
+// first to disconnect. Positive balance translates to positive priority. If positive
+// balance is zero then negative balance translates to a negative priority.
+func (n *NodeBalance) balanceToPriority(b balance, capacity uint64) int64 {
+ if !b.pos.IsZero() {
+ return int64(b.pos.Value(n.bt.posExp.LogOffset(n.bt.clock.Now())) / capacity)
+ }
+ return -int64(b.neg.Value(n.bt.negExp.LogOffset(n.bt.clock.Now())))
+}
+
+// reducedBalance estimates the reduced balance at a given time in the future based
+// on the current balance, the time factor and an estimated average request cost per time ratio
+func (n *NodeBalance) reducedBalance(at mclock.AbsTime, capacity uint64, avgReqCost float64) balance {
+ dt := float64(at - n.lastUpdate)
+ b := n.balance
+ if !b.pos.IsZero() {
+ factor := n.posFactor.timePrice(capacity) + n.posFactor.RequestFactor*avgReqCost
+ diff := -int64(dt * factor)
+ dd := b.pos.Add(diff, n.bt.posExp.LogOffset(at))
+ if dd == diff {
+ dt = 0
+ } else {
+ dt += float64(dd) / factor
+ }
+ }
+ if dt > 0 {
+ factor := n.negFactor.timePrice(capacity) + n.negFactor.RequestFactor*avgReqCost
+ b.neg.Add(int64(dt*factor), n.bt.negExp.LogOffset(at))
+ }
+ return b
+}
+
+// timeUntil calculates the remaining time needed to reach a given priority level
+// assuming that no requests are processed until then. If the given level is never
+// reached then (0, false) is returned.
+// Note: the function assumes that the balance has been recently updated and
+// calculates the time starting from the last update.
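+// For example, with time factor 1 and capacity 10^6, a positive balance of 10^9
+// reaches priority level 500 after (10^9 - 500*10^6) / 1 nanoseconds, i.e. 500ms
+// (mirroring TestCallbackChecking).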
+func (n *NodeBalance) timeUntil(priority int64) (time.Duration, bool) {
+ now := n.bt.clock.Now()
+ var dt float64
+ if !n.balance.pos.IsZero() {
+ posBalance := n.balance.pos.Value(n.bt.posExp.LogOffset(now))
+ timePrice := n.posFactor.timePrice(n.capacity)
+ if timePrice < 1e-100 {
+ return 0, false
+ }
+ if priority > 0 {
+ newBalance := uint64(priority) * n.capacity
+ if newBalance > posBalance {
+ return 0, false
+ }
+ dt = float64(posBalance-newBalance) / timePrice
+ return time.Duration(dt), true
+ } else {
+ dt = float64(posBalance) / timePrice
+ }
+ } else {
+ if priority > 0 {
+ return 0, false
+ }
+ }
+ // if we have a positive balance then dt equals the time needed to get it to zero
+ negBalance := n.balance.neg.Value(n.bt.negExp.LogOffset(now))
+ timePrice := n.negFactor.timePrice(n.capacity)
+ if uint64(-priority) > negBalance {
+ if timePrice < 1e-100 {
+ return 0, false
+ }
+ dt += float64(uint64(-priority)-negBalance) / timePrice
+ }
+ return time.Duration(dt), true
+}
diff --git a/les/lespay/server/balance_test.go b/les/lespay/server/balance_test.go
new file mode 100644
index 000000000..67e194437
--- /dev/null
+++ b/les/lespay/server/balance_test.go
@@ -0,0 +1,400 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package server
+
+import (
+ "math/rand"
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common/mclock"
+ "github.com/ethereum/go-ethereum/ethdb/memorydb"
+ "github.com/ethereum/go-ethereum/les/utils"
+ "github.com/ethereum/go-ethereum/p2p/enode"
+ "github.com/ethereum/go-ethereum/p2p/enr"
+ "github.com/ethereum/go-ethereum/p2p/nodestate"
+)
+
+var (
+ testFlag = testSetup.NewFlag("testFlag")
+ connAddrField = testSetup.NewField("connAddr", reflect.TypeOf(""))
+ btTestSetup = NewBalanceTrackerSetup(testSetup)
+)
+
+func init() {
+ btTestSetup.Connect(connAddrField, ppTestSetup.CapacityField)
+}
+
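+// zeroExpirer is a ValueExpirer that never expires anything (its log offset is
+// always zero), keeping test balances constant over time.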
+type zeroExpirer struct{}
+
+func (z zeroExpirer) SetRate(now mclock.AbsTime, rate float64) {}
+func (z zeroExpirer) SetLogOffset(now mclock.AbsTime, logOffset utils.Fixed64) {}
+func (z zeroExpirer) LogOffset(now mclock.AbsTime) utils.Fixed64 { return 0 }
+
+type balanceTestSetup struct {
+ clock *mclock.Simulated
+ ns *nodestate.NodeStateMachine
+ bt *BalanceTracker
+}
+
+func newBalanceTestSetup() *balanceTestSetup {
+ clock := &mclock.Simulated{}
+ ns := nodestate.NewNodeStateMachine(nil, nil, clock, testSetup)
+ db := memorydb.New()
+ bt := NewBalanceTracker(ns, btTestSetup, db, clock, zeroExpirer{}, zeroExpirer{})
+ ns.Start()
+ return &balanceTestSetup{
+ clock: clock,
+ ns: ns,
+ bt: bt,
+ }
+}
+
+func (b *balanceTestSetup) newNode(capacity uint64) *NodeBalance {
+ node := enode.SignNull(&enr.Record{}, enode.ID{})
+ b.ns.SetState(node, testFlag, nodestate.Flags{}, 0)
+ b.ns.SetField(node, btTestSetup.connAddressField, "")
+ b.ns.SetField(node, ppTestSetup.CapacityField, capacity)
+ n, _ := b.ns.GetField(node, btTestSetup.BalanceField).(*NodeBalance)
+ return n
+}
+
+func (b *balanceTestSetup) stop() {
+ b.bt.Stop()
+ b.ns.Stop()
+}
+
+func TestAddBalance(t *testing.T) {
+ b := newBalanceTestSetup()
+ defer b.stop()
+
+ node := b.newNode(1000)
+ var inputs = []struct {
+ delta int64
+ expect [2]uint64
+ total uint64
+ expectErr bool
+ }{
+ {100, [2]uint64{0, 100}, 100, false},
+ {-100, [2]uint64{100, 0}, 0, false},
+ {-100, [2]uint64{0, 0}, 0, false},
+ {1, [2]uint64{0, 1}, 1, false},
+ {maxBalance, [2]uint64{0, 0}, 0, true},
+ }
+ for _, i := range inputs {
+ old, new, err := node.AddBalance(i.delta)
+ if i.expectErr {
+ if err == nil {
+ t.Fatalf("Expect get error but nil")
+ }
+ continue
+ } else if err != nil {
+ t.Fatalf("Expect get no error but %v", err)
+ }
+ if old != i.expect[0] || new != i.expect[1] {
+ t.Fatalf("Positive balance mismatch, got %v -> %v", old, new)
+ }
+ if b.bt.TotalTokenAmount() != i.total {
+ t.Fatalf("Total positive balance mismatch, want %v, got %v", i.total, b.bt.TotalTokenAmount())
+ }
+ }
+}
+
+func TestSetBalance(t *testing.T) {
+ b := newBalanceTestSetup()
+ defer b.stop()
+ node := b.newNode(1000)
+
+ var inputs = []struct {
+ pos, neg uint64
+ }{
+ {1000, 0},
+ {0, 1000},
+ {1000, 1000},
+ }
+
+ for _, i := range inputs {
+ node.SetBalance(i.pos, i.neg)
+ pos, neg := node.GetBalance()
+ if pos != i.pos {
+ t.Fatalf("Positive balance mismatch, want %v, got %v", i.pos, pos)
+ }
+ if neg != i.neg {
+ t.Fatalf("Negative balance mismatch, want %v, got %v", i.neg, neg)
+ }
+ }
+}
+
+func TestBalanceTimeCost(t *testing.T) {
+ b := newBalanceTestSetup()
+ defer b.stop()
+ node := b.newNode(1000)
+
+ b.ns.SetField(node.node, ppTestSetup.CapacityField, uint64(1))
+ node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1})
+ node.SetBalance(uint64(time.Minute), 0) // 1 minute time allowance
+
+ var inputs = []struct {
+ runTime time.Duration
+ expPos uint64
+ expNeg uint64
+ }{
+ {time.Second, uint64(time.Second * 59), 0},
+ {0, uint64(time.Second * 59), 0},
+ {time.Second * 59, 0, 0},
+ {time.Second, 0, uint64(time.Second)},
+ }
+ for _, i := range inputs {
+ b.clock.Run(i.runTime)
+ if pos, _ := node.GetBalance(); pos != i.expPos {
+ t.Fatalf("Positive balance mismatch, want %v, got %v", i.expPos, pos)
+ }
+ if _, neg := node.GetBalance(); neg != i.expNeg {
+ t.Fatalf("Negative balance mismatch, want %v, got %v", i.expNeg, neg)
+ }
+ }
+
+ node.SetBalance(uint64(time.Minute), 0) // Refill 1 minute time allowance
+ for _, i := range inputs {
+ b.clock.Run(i.runTime)
+ if pos, _ := node.GetBalance(); pos != i.expPos {
+ t.Fatalf("Positive balance mismatch, want %v, got %v", i.expPos, pos)
+ }
+ if _, neg := node.GetBalance(); neg != i.expNeg {
+ t.Fatalf("Negative balance mismatch, want %v, got %v", i.expNeg, neg)
+ }
+ }
+}
+
+func TestBalanceReqCost(t *testing.T) {
+ b := newBalanceTestSetup()
+ defer b.stop()
+ node := b.newNode(1000)
+ node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1})
+
+ b.ns.SetField(node.node, ppTestSetup.CapacityField, uint64(1))
+ node.SetBalance(uint64(time.Minute), 0) // 1 minute of serving time allowance
+ var inputs = []struct {
+ reqCost uint64
+ expPos uint64
+ expNeg uint64
+ }{
+ {uint64(time.Second), uint64(time.Second * 59), 0},
+ {0, uint64(time.Second * 59), 0},
+ {uint64(time.Second * 59), 0, 0},
+ {uint64(time.Second), 0, uint64(time.Second)},
+ }
+ for _, i := range inputs {
+ node.RequestServed(i.reqCost)
+ if pos, _ := node.GetBalance(); pos != i.expPos {
+ t.Fatalf("Positive balance mismatch, want %v, got %v", i.expPos, pos)
+ }
+ if _, neg := node.GetBalance(); neg != i.expNeg {
+ t.Fatalf("Negative balance mismatch, want %v, got %v", i.expNeg, neg)
+ }
+ }
+}
+
+func TestBalanceToPriority(t *testing.T) {
+ b := newBalanceTestSetup()
+ defer b.stop()
+ node := b.newNode(1000)
+ node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1})
+
+ var inputs = []struct {
+ pos uint64
+ neg uint64
+ priority int64
+ }{
+ {1000, 0, 1},
+ {2000, 0, 2}, // Higher balance, higher priority value
+ {0, 0, 0},
+ {0, 1000, -1000},
+ }
+ for _, i := range inputs {
+ node.SetBalance(i.pos, i.neg)
+ priority := node.Priority(b.clock.Now(), 1000)
+ if priority != i.priority {
+ t.Fatalf("Priority mismatch, want %v, got %v", i.priority, priority)
+ }
+ }
+}
+
+func TestEstimatedPriority(t *testing.T) {
+ b := newBalanceTestSetup()
+ defer b.stop()
+ node := b.newNode(1000000000)
+ node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1})
+
+ b.ns.SetField(node.node, ppTestSetup.CapacityField, uint64(1))
+ node.SetBalance(uint64(time.Minute), 0)
+ var inputs = []struct {
+ runTime time.Duration // simulated time elapsed before the check
+ futureTime time.Duration // offset of the estimation time into the future
+ reqCost uint64 // single request cost
+ priority int64 // expected estimated priority
+ }{
+ {time.Second, time.Second, 0, 58},
+ {0, time.Second, 0, 58},
+
+ // 2 seconds time cost, 1 second estimated time cost, 10^9 request cost,
+ // 10^9 estimated request cost per second.
+ {time.Second, time.Second, 1000000000, 55},
+
+ // 3 seconds time cost, 3 second estimated time cost, 10^9*2 request cost,
+ // 4*10^9 estimated request cost.
+ {time.Second, 3 * time.Second, 1000000000, 48},
+
+ // All positive balance is used up
+ {time.Second * 55, 0, 0, 0},
+
+ // 1 minute estimated time cost, 4/58 * 10^9 estimated request cost per sec.
+ {0, time.Minute, 0, -int64(time.Minute) - int64(time.Second)*120/29},
+ }
+ for _, i := range inputs {
+ b.clock.Run(i.runTime)
+ node.RequestServed(i.reqCost)
+ priority := node.EstMinPriority(b.clock.Now()+mclock.AbsTime(i.futureTime), 1000000000, false)
+ if priority != i.priority {
+ t.Fatalf("Estimated priority mismatch, want %v, got %v", i.priority, priority)
+ }
+ }
+}
+
+func TestPosBalanceMissing(t *testing.T) {
+ b := newBalanceTestSetup()
+ defer b.stop()
+ node := b.newNode(1000)
+ node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1})
+
+ b.ns.SetField(node.node, ppTestSetup.CapacityField, uint64(1))
+ var inputs = []struct {
+ pos, neg uint64
+ priority int64
+ cap uint64
+ after time.Duration
+ expect uint64
+ }{
+ {uint64(time.Second * 2), 0, 0, 1, time.Second, 0},
+ {uint64(time.Second * 2), 0, 0, 1, 2 * time.Second, 1},
+ {uint64(time.Second * 2), 0, int64(time.Second), 1, 2 * time.Second, uint64(time.Second) + 1},
+ {0, 0, int64(time.Second), 1, time.Second, uint64(2*time.Second) + 1},
+ {0, 0, -int64(time.Second), 1, time.Second, 1},
+ }
+ for _, i := range inputs {
+ node.SetBalance(i.pos, i.neg)
+ got := node.PosBalanceMissing(i.priority, i.cap, i.after)
+ if got != i.expect {
+ t.Fatalf("Missing budget mismatch, want %v, got %v", i.expect, got)
+ }
+ }
+}
+
+func TestPositiveBalanceCounting(t *testing.T) {
+ b := newBalanceTestSetup()
+ defer b.stop()
+
+ var nodes []*NodeBalance
+ for i := 0; i < 100; i += 1 {
+ node := b.newNode(1000000)
+ node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1})
+ nodes = append(nodes, node)
+ }
+
+ // Allocate service tokens
+ var sum uint64
+ for i := 0; i < 100; i += 1 {
+ amount := int64(rand.Intn(100) + 100)
+ nodes[i].AddBalance(amount)
+ sum += uint64(amount)
+ }
+ if b.bt.TotalTokenAmount() != sum {
+ t.Fatalf("Invalid token amount")
+ }
+
+ // Change client status
+ for i := 0; i < 100; i += 1 {
+ if rand.Intn(2) == 0 {
+ b.ns.SetField(nodes[i].node, ppTestSetup.CapacityField, uint64(1))
+ }
+ }
+ if b.bt.TotalTokenAmount() != sum {
+ t.Fatalf("Invalid token amount")
+ }
+ for i := 0; i < 100; i += 1 {
+ if rand.Intn(2) == 0 {
+ b.ns.SetField(nodes[i].node, ppTestSetup.CapacityField, uint64(1))
+ }
+ }
+ if b.bt.TotalTokenAmount() != sum {
+ t.Fatalf("Invalid token amount")
+ }
+}
+
+func TestCallbackChecking(t *testing.T) {
+ b := newBalanceTestSetup()
+ defer b.stop()
+ node := b.newNode(1000000)
+ node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1})
+
+ var inputs = []struct {
+ priority int64
+ expDiff time.Duration
+ }{
+ {500, time.Millisecond * 500},
+ {0, time.Second},
+ {-int64(time.Second), 2 * time.Second},
+ }
+ node.SetBalance(uint64(time.Second), 0)
+ for _, i := range inputs {
+ diff, _ := node.timeUntil(i.priority)
+ if diff != i.expDiff {
+ t.Fatalf("Time difference mismatch, want %v, got %v", i.expDiff, diff)
+ }
+ }
+}
+
+func TestCallback(t *testing.T) {
+ b := newBalanceTestSetup()
+ defer b.stop()
+ node := b.newNode(1000)
+ node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1})
+ b.ns.SetField(node.node, ppTestSetup.CapacityField, uint64(1))
+
+ callCh := make(chan struct{}, 1)
+ node.SetBalance(uint64(time.Minute), 0)
+ node.addCallback(balanceCallbackZero, 0, func() { callCh <- struct{}{} })
+
+ b.clock.Run(time.Minute)
+ select {
+ case <-callCh:
+ case <-time.NewTimer(time.Second).C:
+ t.Fatalf("Callback hasn't been called yet")
+ }
+
+ node.SetBalance(uint64(time.Minute), 0)
+ node.addCallback(balanceCallbackZero, 0, func() { callCh <- struct{}{} })
+ node.removeCallback(balanceCallbackZero)
+
+ b.clock.Run(time.Minute)
+ select {
+ case <-callCh:
+ t.Fatalf("Callback shouldn't be called")
+ case <-time.NewTimer(time.Millisecond * 100).C:
+ }
+}
diff --git a/les/lespay/server/balance_tracker.go b/les/lespay/server/balance_tracker.go
new file mode 100644
index 000000000..c1ea3c649
--- /dev/null
+++ b/les/lespay/server/balance_tracker.go
@@ -0,0 +1,291 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package server
+
+import (
+ "reflect"
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common/mclock"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/les/utils"
+ "github.com/ethereum/go-ethereum/p2p/enode"
+ "github.com/ethereum/go-ethereum/p2p/nodestate"
+)
+
+const (
+ posThreshold = 1000000 // minimum positive balance that is persisted in the database
+ negThreshold = 1000000 // minimum negative balance that is persisted in the database
+ persistExpirationRefresh = time.Minute * 5 // refresh period of the token expiration persistence
+)
+
+// BalanceTrackerSetup contains node state flags and fields used by BalanceTracker
+type BalanceTrackerSetup struct {
+ // controlled by PriorityPool
+ PriorityFlag, UpdateFlag nodestate.Flags
+ BalanceField nodestate.Field
+ // external connections
+ connAddressField, capacityField nodestate.Field
+}
+
+// NewBalanceTrackerSetup creates a new BalanceTrackerSetup and initializes the fields
+// and flags controlled by BalanceTracker
+func NewBalanceTrackerSetup(setup *nodestate.Setup) BalanceTrackerSetup {
+ return BalanceTrackerSetup{
+ // PriorityFlag is set if the node has a positive balance
+ PriorityFlag: setup.NewFlag("priorityNode"),
+ // UpdateFlag is set and then immediately reset if the balance has been updated and
+ // therefore the priority has suddenly changed
+ UpdateFlag: setup.NewFlag("balanceUpdate"),
+ // BalanceField contains the NodeBalance struct which implements nodePriority,
+ // allowing on-demand priority calculation and future priority estimation
+ BalanceField: setup.NewField("balance", reflect.TypeOf(&NodeBalance{})),
+ }
+}
+
+// Connect sets the fields used by BalanceTracker as an input
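+//
+// Typical wiring, mirroring the test setup (field names illustrative):
+//
+//	btSetup := NewBalanceTrackerSetup(setup)
+//	btSetup.Connect(connAddrField, ppSetup.CapacityField)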
+func (bts *BalanceTrackerSetup) Connect(connAddressField, capacityField nodestate.Field) {
+ bts.connAddressField = connAddressField
+ bts.capacityField = capacityField
+}
+
+// BalanceTracker tracks positive and negative balances for connected nodes.
+// After connAddressField is set externally, a NodeBalance is created and previous
+// balance values are loaded from the database. Both balances are exponentially expired
+// values. Costs are deducted from the positive balance if present, otherwise added to
+// the negative balance. If the capacity is non-zero then a time cost is applied
+// continuously while individual request costs are applied immediately.
+// The two balances are translated into a single priority value that also depends
+// on the actual capacity.
+type BalanceTracker struct {
+ BalanceTrackerSetup
+ clock mclock.Clock
+ lock sync.Mutex
+ ns *nodestate.NodeStateMachine
+ ndb *nodeDB
+ posExp, negExp utils.ValueExpirer
+ posExpTC, negExpTC uint64
+
+ active, inactive utils.ExpiredValue
+ balanceTimer *utils.UpdateTimer
+ quit chan struct{}
+}
+
+// NewBalanceTracker creates a new BalanceTracker
+func NewBalanceTracker(ns *nodestate.NodeStateMachine, setup BalanceTrackerSetup, db ethdb.KeyValueStore, clock mclock.Clock, posExp, negExp utils.ValueExpirer) *BalanceTracker {
+ ndb := newNodeDB(db, clock)
+ bt := &BalanceTracker{
+ ns: ns,
+ BalanceTrackerSetup: setup,
+ ndb: ndb,
+ clock: clock,
+ posExp: posExp,
+ negExp: negExp,
+ balanceTimer: utils.NewUpdateTimer(clock, time.Second*10),
+ quit: make(chan struct{}),
+ }
+ bt.ndb.forEachBalance(false, func(id enode.ID, balance utils.ExpiredValue) bool {
+ bt.inactive.AddExp(balance)
+ return true
+ })
+
+ ns.SubscribeField(bt.capacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {
+ n, _ := ns.GetField(node, bt.BalanceField).(*NodeBalance)
+ if n == nil {
+ return
+ }
+
+ ov, _ := oldValue.(uint64)
+ nv, _ := newValue.(uint64)
+ if ov == 0 && nv != 0 {
+ n.activate()
+ }
+ if nv != 0 {
+ n.setCapacity(nv)
+ }
+ if ov != 0 && nv == 0 {
+ n.deactivate()
+ }
+ })
+ ns.SubscribeField(bt.connAddressField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {
+ if newValue != nil {
+ ns.SetFieldSub(node, bt.BalanceField, bt.newNodeBalance(node, newValue.(string)))
+ } else {
+ ns.SetStateSub(node, nodestate.Flags{}, bt.PriorityFlag, 0)
+ if b, _ := ns.GetField(node, bt.BalanceField).(*NodeBalance); b != nil {
+ b.deactivate()
+ }
+ ns.SetFieldSub(node, bt.BalanceField, nil)
+ }
+ })
+
+ // The positive and negative balances of clients are stored in the database
+ // and both of these decay exponentially over time. Delete them if the
+ // value is small enough.
+ bt.ndb.evictCallBack = bt.canDropBalance
+
+ go func() {
+ for {
+ select {
+ case <-clock.After(persistExpirationRefresh):
+ now := clock.Now()
+ bt.ndb.setExpiration(posExp.LogOffset(now), negExp.LogOffset(now))
+ case <-bt.quit:
+ return
+ }
+ }
+ }()
+ return bt
+}
+
+// Stop saves expiration offset and unsaved node balances and shuts BalanceTracker down
+func (bt *BalanceTracker) Stop() {
+ now := bt.clock.Now()
+ bt.ndb.setExpiration(bt.posExp.LogOffset(now), bt.negExp.LogOffset(now))
+ close(bt.quit)
+ bt.ns.ForEach(nodestate.Flags{}, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) {
+ if n, ok := bt.ns.GetField(node, bt.BalanceField).(*NodeBalance); ok {
+ n.lock.Lock()
+ n.storeBalance(true, true)
+ n.lock.Unlock()
+ bt.ns.SetField(node, bt.BalanceField, nil)
+ }
+ })
+ bt.ndb.close()
+}
+
+// TotalTokenAmount returns the current total amount of service tokens in existence
+func (bt *BalanceTracker) TotalTokenAmount() uint64 {
+ bt.lock.Lock()
+ defer bt.lock.Unlock()
+
+ bt.balanceTimer.Update(func(_ time.Duration) bool {
+ bt.active = utils.ExpiredValue{}
+ bt.ns.ForEach(nodestate.Flags{}, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) {
+ if n, ok := bt.ns.GetField(node, bt.BalanceField).(*NodeBalance); ok {
+ pos, _ := n.GetRawBalance()
+ bt.active.AddExp(pos)
+ }
+ })
+ return true
+ })
+ total := bt.active
+ total.AddExp(bt.inactive)
+ return total.Value(bt.posExp.LogOffset(bt.clock.Now()))
+}
+
+// GetPosBalanceIDs lists node IDs with an associated positive balance
+func (bt *BalanceTracker) GetPosBalanceIDs(start, stop enode.ID, maxCount int) (result []enode.ID) {
+ return bt.ndb.getPosBalanceIDs(start, stop, maxCount)
+}
+
+// SetExpirationTCs sets positive and negative token expiration time constants.
+// Specified in seconds, 0 means infinite (no expiration).
+func (bt *BalanceTracker) SetExpirationTCs(pos, neg uint64) {
+ bt.lock.Lock()
+ defer bt.lock.Unlock()
+
+ bt.posExpTC, bt.negExpTC = pos, neg
+ now := bt.clock.Now()
+ if pos > 0 {
+ bt.posExp.SetRate(now, 1/float64(pos*uint64(time.Second)))
+ } else {
+ bt.posExp.SetRate(now, 0)
+ }
+ if neg > 0 {
+ bt.negExp.SetRate(now, 1/float64(neg*uint64(time.Second)))
+ } else {
+ bt.negExp.SetRate(now, 0)
+ }
+}
+
+// GetExpirationTCs returns the current positive and negative token expiration
+// time constants
+func (bt *BalanceTracker) GetExpirationTCs() (pos, neg uint64) {
+ bt.lock.Lock()
+ defer bt.lock.Unlock()
+
+ return bt.posExpTC, bt.negExpTC
+}
+
+// newNodeBalance loads balances from the database and creates a NodeBalance instance
+// for the given node. It also sets the PriorityFlag and adds balanceCallbackZero if
+// the node has a positive balance.
+// Note: this function should run inside a NodeStateMachine operation
+func (bt *BalanceTracker) newNodeBalance(node *enode.Node, negBalanceKey string) *NodeBalance {
+ pb := bt.ndb.getOrNewBalance(node.ID().Bytes(), false)
+ nb := bt.ndb.getOrNewBalance([]byte(negBalanceKey), true)
+ n := &NodeBalance{
+ bt: bt,
+ node: node,
+ connAddress: negBalanceKey,
+ balance: balance{pos: pb, neg: nb},
+ initTime: bt.clock.Now(),
+ lastUpdate: bt.clock.Now(),
+ }
+ for i := range n.callbackIndex {
+ n.callbackIndex[i] = -1
+ }
+ if n.checkPriorityStatus() {
+ n.bt.ns.SetStateSub(n.node, n.bt.PriorityFlag, nodestate.Flags{}, 0)
+ }
+ return n
+}
+
+// storeBalance stores either a positive or a negative balance in the database
+func (bt *BalanceTracker) storeBalance(id []byte, neg bool, value utils.ExpiredValue) {
+ if bt.canDropBalance(bt.clock.Now(), neg, value) {
+ bt.ndb.delBalance(id, neg) // balance is small enough, drop it directly.
+ } else {
+ bt.ndb.setBalance(id, neg, value)
+ }
+}
+
+// canDropBalance tells whether a positive or negative balance is below the threshold
+// and therefore can be dropped from the database
+func (bt *BalanceTracker) canDropBalance(now mclock.AbsTime, neg bool, b utils.ExpiredValue) bool {
+ if neg {
+ return b.Value(bt.negExp.LogOffset(now)) <= negThreshold
+ } else {
+ return b.Value(bt.posExp.LogOffset(now)) <= posThreshold
+ }
+}
+
+// updateTotalBalance adjusts the total balance after executing the given callback.
+func (bt *BalanceTracker) updateTotalBalance(n *NodeBalance, callback func() bool) {
+ bt.lock.Lock()
+ defer bt.lock.Unlock()
+
+ n.lock.Lock()
+ defer n.lock.Unlock()
+
+ original, active := n.balance.pos, n.active
+ if !callback() {
+ return
+ }
+ if active {
+ bt.active.SubExp(original)
+ } else {
+ bt.inactive.SubExp(original)
+ }
+ if n.active {
+ bt.active.AddExp(n.balance.pos)
+ } else {
+ bt.inactive.AddExp(n.balance.pos)
+ }
+}
diff --git a/les/lespay/server/clientdb.go b/les/lespay/server/clientdb.go
new file mode 100644
index 000000000..c23f1e3b2
--- /dev/null
+++ b/les/lespay/server/clientdb.go
@@ -0,0 +1,245 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package server
+
+import (
+ "bytes"
+ "encoding/binary"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/mclock"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/les/utils"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/p2p/enode"
+ "github.com/ethereum/go-ethereum/rlp"
+ lru "github.com/hashicorp/golang-lru"
+)
+
+const (
+ balanceCacheLimit = 8192 // the maximum number of cached service token balance entries
+
+ // nodeDBVersion is the version identifier of the node data in db
+ //
+ // Changelog:
+ // * Replace `lastTotal` with `meta` in positive balance: version 0=>1
+ nodeDBVersion = 1
+
+ // dbCleanupCycle is the cycle of db for useless data cleanup
+ dbCleanupCycle = time.Hour
+)
+
+var (
+ positiveBalancePrefix = []byte("pb:") // dbVersion(uint16 big endian) + positiveBalancePrefix + id -> balance
+ negativeBalancePrefix = []byte("nb:") // dbVersion(uint16 big endian) + negativeBalancePrefix + ip -> balance
+ expirationKey = []byte("expiration:") // dbVersion(uint16 big endian) + expirationKey -> posExp, negExp
+)
+
+type nodeDB struct {
+ db ethdb.KeyValueStore
+ cache *lru.Cache
+ auxbuf []byte // 37-byte auxiliary buffer for key encoding
+ verbuf [2]byte // 2-byte auxiliary buffer for db version
+ evictCallBack func(mclock.AbsTime, bool, utils.ExpiredValue) bool // Callback to determine whether the balance can be evicted.
+ clock mclock.Clock
+ closeCh chan struct{}
+ cleanupHook func() // Test hook used for testing
+}
+
+func newNodeDB(db ethdb.KeyValueStore, clock mclock.Clock) *nodeDB {
+ cache, _ := lru.New(balanceCacheLimit)
+ ndb := &nodeDB{
+ db: db,
+ cache: cache,
+ auxbuf: make([]byte, 37),
+ clock: clock,
+ closeCh: make(chan struct{}),
+ }
+ binary.BigEndian.PutUint16(ndb.verbuf[:], uint16(nodeDBVersion))
+ go ndb.expirer()
+ return ndb
+}
+
+func (db *nodeDB) close() {
+ close(db.closeCh)
+}
+
+func (db *nodeDB) getPrefix(neg bool) []byte {
+ prefix := positiveBalancePrefix
+ if neg {
+ prefix = negativeBalancePrefix
+ }
+ return append(db.verbuf[:], prefix...)
+}
+
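+// key returns the database key for the given id: dbVersion (2 bytes) followed by
+// the "pb:"/"nb:" prefix and the id, assembled in auxbuf to avoid allocations.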
+func (db *nodeDB) key(id []byte, neg bool) []byte {
+ prefix := positiveBalancePrefix
+ if neg {
+ prefix = negativeBalancePrefix
+ }
+ if len(prefix)+len(db.verbuf)+len(id) > len(db.auxbuf) {
+ db.auxbuf = append(db.auxbuf, make([]byte, len(prefix)+len(db.verbuf)+len(id)-len(db.auxbuf))...)
+ }
+ copy(db.auxbuf[:len(db.verbuf)], db.verbuf[:])
+ copy(db.auxbuf[len(db.verbuf):len(db.verbuf)+len(prefix)], prefix)
+ copy(db.auxbuf[len(prefix)+len(db.verbuf):len(prefix)+len(db.verbuf)+len(id)], id)
+ return db.auxbuf[:len(prefix)+len(db.verbuf)+len(id)]
+}
+
+func (db *nodeDB) getExpiration() (utils.Fixed64, utils.Fixed64) {
+ blob, err := db.db.Get(append(db.verbuf[:], expirationKey...))
+ if err != nil || len(blob) != 16 {
+ return 0, 0
+ }
+ return utils.Fixed64(binary.BigEndian.Uint64(blob[:8])), utils.Fixed64(binary.BigEndian.Uint64(blob[8:16]))
+}
+
+func (db *nodeDB) setExpiration(pos, neg utils.Fixed64) {
+ var buff [16]byte
+ binary.BigEndian.PutUint64(buff[:8], uint64(pos))
+ binary.BigEndian.PutUint64(buff[8:16], uint64(neg))
+ db.db.Put(append(db.verbuf[:], expirationKey...), buff[:16])
+}
+
+func (db *nodeDB) getOrNewBalance(id []byte, neg bool) utils.ExpiredValue {
+ key := db.key(id, neg)
+ item, exist := db.cache.Get(string(key))
+ if exist {
+ return item.(utils.ExpiredValue)
+ }
+ var b utils.ExpiredValue
+ enc, err := db.db.Get(key)
+ if err != nil || len(enc) == 0 {
+ return b
+ }
+ if err := rlp.DecodeBytes(enc, &b); err != nil {
+ log.Crit("Failed to decode positive balance", "err", err)
+ }
+ db.cache.Add(string(key), b)
+ return b
+}
+
+func (db *nodeDB) setBalance(id []byte, neg bool, b utils.ExpiredValue) {
+ key := db.key(id, neg)
+ enc, err := rlp.EncodeToBytes(&(b))
+ if err != nil {
+ log.Crit("Failed to encode positive balance", "err", err)
+ }
+ db.db.Put(key, enc)
+ db.cache.Add(string(key), b)
+}
+
+func (db *nodeDB) delBalance(id []byte, neg bool) {
+ key := db.key(id, neg)
+ db.db.Delete(key)
+ db.cache.Remove(string(key))
+}
+
+// getPosBalanceIDs returns a lexicographically ordered list of IDs of accounts
+// with a positive balance
+func (db *nodeDB) getPosBalanceIDs(start, stop enode.ID, maxCount int) (result []enode.ID) {
+ if maxCount <= 0 {
+ return
+ }
+ prefix := db.getPrefix(false)
+ keylen := len(prefix) + len(enode.ID{})
+
+ it := db.db.NewIterator(prefix, start.Bytes())
+ defer it.Release()
+
+ for it.Next() {
+ var id enode.ID
+ if len(it.Key()) != keylen {
+ return
+ }
+ copy(id[:], it.Key()[keylen-len(id):])
+ if bytes.Compare(id.Bytes(), stop.Bytes()) >= 0 {
+ return
+ }
+ result = append(result, id)
+ if len(result) == maxCount {
+ return
+ }
+ }
+ return
+}
+
+// forEachBalance iterates all balances and passes values to callback.
+func (db *nodeDB) forEachBalance(neg bool, callback func(id enode.ID, balance utils.ExpiredValue) bool) {
+ prefix := db.getPrefix(neg)
+ keylen := len(prefix) + len(enode.ID{})
+
+ it := db.db.NewIterator(prefix, nil)
+ defer it.Release()
+
+ for it.Next() {
+ var id enode.ID
+ if len(it.Key()) != keylen {
+ return
+ }
+ copy(id[:], it.Key()[keylen-len(id):])
+
+ var b utils.ExpiredValue
+ if err := rlp.DecodeBytes(it.Value(), &b); err != nil {
+ continue
+ }
+ if !callback(id, b) {
+ return
+ }
+ }
+}
+
+func (db *nodeDB) expirer() {
+ for {
+ select {
+ case <-db.clock.After(dbCleanupCycle):
+ db.expireNodes()
+ case <-db.closeCh:
+ return
+ }
+ }
+}
+
+// expireNodes iterates the whole node db and checks whether the
+// token balances can be deleted.
+func (db *nodeDB) expireNodes() {
+ var (
+ visited int
+ deleted int
+ start = time.Now()
+ )
+ for _, neg := range []bool{false, true} {
+ iter := db.db.NewIterator(db.getPrefix(neg), nil)
+ for iter.Next() {
+ visited++
+ var balance utils.ExpiredValue
+ if err := rlp.DecodeBytes(iter.Value(), &balance); err != nil {
+ log.Crit("Failed to decode negative balance", "err", err)
+ }
+ if db.evictCallBack != nil && db.evictCallBack(db.clock.Now(), neg, balance) {
+ deleted++
+ db.db.Delete(iter.Key())
+ }
+ }
+ }
+ // Invoke testing hook if it's not nil.
+ if db.cleanupHook != nil {
+ db.cleanupHook()
+ }
+ log.Debug("Expire nodes", "visited", visited, "deleted", deleted, "elapsed", common.PrettyDuration(time.Since(start)))
+}
diff --git a/les/lespay/server/clientdb_test.go b/les/lespay/server/clientdb_test.go
new file mode 100644
index 000000000..353d84aea
--- /dev/null
+++ b/les/lespay/server/clientdb_test.go
@@ -0,0 +1,144 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package server
+
+import (
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common/mclock"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/les/utils"
+ "github.com/ethereum/go-ethereum/p2p/enode"
+)
+
+func expval(v uint64) utils.ExpiredValue {
+ return utils.ExpiredValue{Base: v}
+}
+
+func TestNodeDB(t *testing.T) {
+ ndb := newNodeDB(rawdb.NewMemoryDatabase(), mclock.System{})
+ defer ndb.close()
+
+ var cases = []struct {
+ id enode.ID
+ ip string
+ balance utils.ExpiredValue
+ positive bool
+ }{
+ {enode.ID{0x00, 0x01, 0x02}, "", expval(100), true},
+ {enode.ID{0x00, 0x01, 0x02}, "", expval(200), true},
+ {enode.ID{}, "127.0.0.1", expval(100), false},
+ {enode.ID{}, "127.0.0.1", expval(200), false},
+ }
+ for _, c := range cases {
+ if c.positive {
+ ndb.setBalance(c.id.Bytes(), false, c.balance)
+ if pb := ndb.getOrNewBalance(c.id.Bytes(), false); !reflect.DeepEqual(pb, c.balance) {
+ t.Fatalf("Positive balance mismatch, want %v, got %v", c.balance, pb)
+ }
+ } else {
+ ndb.setBalance([]byte(c.ip), true, c.balance)
+ if nb := ndb.getOrNewBalance([]byte(c.ip), true); !reflect.DeepEqual(nb, c.balance) {
+ t.Fatalf("Negative balance mismatch, want %v, got %v", c.balance, nb)
+ }
+ }
+ }
+ for _, c := range cases {
+ if c.positive {
+ ndb.delBalance(c.id.Bytes(), false)
+ if pb := ndb.getOrNewBalance(c.id.Bytes(), false); !reflect.DeepEqual(pb, utils.ExpiredValue{}) {
+ t.Fatalf("Positive balance mismatch, want %v, got %v", utils.ExpiredValue{}, pb)
+ }
+ } else {
+ ndb.delBalance([]byte(c.ip), true)
+ if nb := ndb.getOrNewBalance([]byte(c.ip), true); !reflect.DeepEqual(nb, utils.ExpiredValue{}) {
+ t.Fatalf("Negative balance mismatch, want %v, got %v", utils.ExpiredValue{}, nb)
+ }
+ }
+ }
+ posExp, negExp := utils.Fixed64(1000), utils.Fixed64(2000)
+ ndb.setExpiration(posExp, negExp)
+ if pos, neg := ndb.getExpiration(); pos != posExp || neg != negExp {
+ t.Fatalf("Expiration mismatch, want %v / %v, got %v / %v", posExp, negExp, pos, neg)
+ }
+}
+
+func TestNodeDBExpiration(t *testing.T) {
+ var (
+ iterated int
+ done = make(chan struct{}, 1)
+ )
+ callback := func(now mclock.AbsTime, neg bool, b utils.ExpiredValue) bool {
+ iterated++
+ return true
+ }
+ clock := &mclock.Simulated{}
+ ndb := newNodeDB(rawdb.NewMemoryDatabase(), clock)
+ defer ndb.close()
+ ndb.evictCallBack = callback
+ ndb.cleanupHook = func() { done <- struct{}{} }
+
+ var cases = []struct {
+ id []byte
+ neg bool
+ balance utils.ExpiredValue
+ }{
+ {[]byte{0x01, 0x02}, false, expval(1)},
+ {[]byte{0x03, 0x04}, false, expval(1)},
+ {[]byte{0x05, 0x06}, false, expval(1)},
+ {[]byte{0x07, 0x08}, false, expval(1)},
+
+ {[]byte("127.0.0.1"), true, expval(1)},
+ {[]byte("127.0.0.2"), true, expval(1)},
+ {[]byte("127.0.0.3"), true, expval(1)},
+ {[]byte("127.0.0.4"), true, expval(1)},
+ }
+ for _, c := range cases {
+ ndb.setBalance(c.id, c.neg, c.balance)
+ }
+ clock.WaitForTimers(1)
+ clock.Run(time.Hour + time.Minute)
+ select {
+ case <-done:
+ case <-time.NewTimer(time.Second).C:
+ t.Fatalf("timeout")
+ }
+ if iterated != 8 {
+ t.Fatalf("Failed to evict useless balances, want %v, got %d", 8, iterated)
+ }
+
+ for _, c := range cases {
+ ndb.setBalance(c.id, c.neg, c.balance)
+ }
+ clock.WaitForTimers(1)
+ clock.Run(time.Hour + time.Minute)
+ select {
+ case <-done:
+ case <-time.NewTimer(time.Second).C:
+ t.Fatalf("timeout")
+ }
+ if iterated != 16 {
+ t.Fatalf("Failed to evict useless balances, want %v, got %d", 16, iterated)
+ }
+}
diff --git a/les/lespay/server/prioritypool.go b/les/lespay/server/prioritypool.go
new file mode 100644
index 000000000..52224e093
--- /dev/null
+++ b/les/lespay/server/prioritypool.go
@@ -0,0 +1,503 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package server
+
+import (
+ "math"
+ "reflect"
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common/mclock"
+ "github.com/ethereum/go-ethereum/common/prque"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/p2p/enode"
+ "github.com/ethereum/go-ethereum/p2p/nodestate"
+)
+
+const (
+ lazyQueueRefresh = time.Second * 10 // refresh period of the active queue
+)
+
+// PriorityPoolSetup contains node state flags and fields used by PriorityPool
+// Note: ActiveFlag and InactiveFlag can be controlled both externally and by the pool,
+// see PriorityPool description for details.
+type PriorityPoolSetup struct {
+ // controlled by PriorityPool
+ ActiveFlag, InactiveFlag nodestate.Flags
+ CapacityField, ppNodeInfoField nodestate.Field
+ // external connections
+ updateFlag nodestate.Flags
+ priorityField nodestate.Field
+}
+
+// NewPriorityPoolSetup creates a new PriorityPoolSetup and initializes the fields
+// and flags controlled by PriorityPool
+func NewPriorityPoolSetup(setup *nodestate.Setup) PriorityPoolSetup {
+ return PriorityPoolSetup{
+ ActiveFlag: setup.NewFlag("active"),
+ InactiveFlag: setup.NewFlag("inactive"),
+ CapacityField: setup.NewField("capacity", reflect.TypeOf(uint64(0))),
+ ppNodeInfoField: setup.NewField("ppNodeInfo", reflect.TypeOf(&ppNodeInfo{})),
+ }
+}
+
+// Connect sets the fields and flags used by PriorityPool as an input
+func (pps *PriorityPoolSetup) Connect(priorityField nodestate.Field, updateFlag nodestate.Flags) {
+ pps.priorityField = priorityField // should implement nodePriority
+ pps.updateFlag = updateFlag // triggers an immediate priority update
+}
+
+// PriorityPool handles a set of nodes where each node has a capacity (a scalar value)
+// and a priority (which can change over time and can also depend on the capacity).
+// A node is active if it has at least the necessary minimal amount of capacity while
+// inactive nodes have 0 capacity (values between 0 and the minimum are not allowed).
+// The pool ensures that the number and total capacity of all active nodes are limited
+// and the highest priority nodes are active at all times (limits can be changed
+// during operation with immediate effect).
+//
+// When activating clients a priority bias is applied in favor of the already active
+// nodes in order to avoid nodes quickly alternating between active and inactive states
+// when their priorities are close to each other. The bias is specified in terms of
+// duration (time) because priorities are expected to usually get lower over time and
+// therefore a future minimum prediction (see EstMinPriority) should monotonically
+// decrease with the specified time parameter.
+// This time bias can be interpreted as minimum expected active time at the given
+// capacity (if the threshold priority stays the same).
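+// For example, with a 10 second bias an inactive node is only activated if its
+// estimated minimum priority 10 seconds from now still exceeds the deactivation
+// threshold, so it can expect at least 10 seconds of active time.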
+//
+// Nodes in the pool always have either InactiveFlag or ActiveFlag set. A new node is
+// added to the pool by externally setting InactiveFlag. PriorityPool can switch a node
+// between InactiveFlag and ActiveFlag at any time. Nodes can be removed from the pool
+// by externally resetting both flags. ActiveFlag should not be set externally.
+//
+// The highest priority nodes in "inactive" state are moved to "active" state as soon as
+// the minimum capacity can be granted for them. The capacity of lower priority active
+// nodes is reduced or they are demoted to "inactive" state if their priority is
+// insufficient even at minimal capacity.
+type PriorityPool struct {
+ PriorityPoolSetup
+ ns *nodestate.NodeStateMachine
+ clock mclock.Clock
+ lock sync.Mutex
+ activeQueue *prque.LazyQueue
+ inactiveQueue *prque.Prque
+ changed []*ppNodeInfo
+ activeCount, activeCap uint64
+ maxCount, maxCap uint64
+ minCap uint64
+ activeBias time.Duration
+ capacityStepDiv uint64
+}
+
+// nodePriority interface provides current and estimated future priorities on demand
+type nodePriority interface {
+ // Priority should return the current priority of the node (higher is better)
+ Priority(now mclock.AbsTime, cap uint64) int64
+ // EstMinPriority should return a lower estimate for the minimum of the node priority
+ // value starting from the current moment until the given time. If the priority goes
+ // under the returned estimate before the specified moment then it is the caller's
+ // responsibility to signal with updateFlag.
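+ // A minimal implementation can simply return balance/capacity from both
+ // methods, as ppTestClient does in the tests.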
+ EstMinPriority(until mclock.AbsTime, cap uint64, update bool) int64
+}
+
+// ppNodeInfo is the internal node descriptor of PriorityPool
+type ppNodeInfo struct {
+ nodePriority nodePriority
+ node *enode.Node
+ connected bool
+ capacity, origCap uint64
+ bias time.Duration
+ forced, changed bool
+ activeIndex, inactiveIndex int
+}
+
+// NewPriorityPool creates a new PriorityPool
+func NewPriorityPool(ns *nodestate.NodeStateMachine, setup PriorityPoolSetup, clock mclock.Clock, minCap uint64, activeBias time.Duration, capacityStepDiv uint64) *PriorityPool {
+ pp := &PriorityPool{
+ ns: ns,
+ PriorityPoolSetup: setup,
+ clock: clock,
+ activeQueue: prque.NewLazyQueue(activeSetIndex, activePriority, activeMaxPriority, clock, lazyQueueRefresh),
+ inactiveQueue: prque.New(inactiveSetIndex),
+ minCap: minCap,
+ activeBias: activeBias,
+ capacityStepDiv: capacityStepDiv,
+ }
+
+ ns.SubscribeField(pp.priorityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {
+ if newValue != nil {
+ c := &ppNodeInfo{
+ node: node,
+ nodePriority: newValue.(nodePriority),
+ activeIndex: -1,
+ inactiveIndex: -1,
+ }
+ ns.SetFieldSub(node, pp.ppNodeInfoField, c)
+ } else {
+ ns.SetStateSub(node, nodestate.Flags{}, pp.ActiveFlag.Or(pp.InactiveFlag), 0)
+ if n, _ := pp.ns.GetField(node, pp.ppNodeInfoField).(*ppNodeInfo); n != nil {
+ pp.disconnectedNode(n)
+ }
+ ns.SetFieldSub(node, pp.CapacityField, nil)
+ ns.SetFieldSub(node, pp.ppNodeInfoField, nil)
+ }
+ })
+ ns.SubscribeState(pp.ActiveFlag.Or(pp.InactiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
+ if c, _ := pp.ns.GetField(node, pp.ppNodeInfoField).(*ppNodeInfo); c != nil {
+ if oldState.IsEmpty() {
+ pp.connectedNode(c)
+ }
+ if newState.IsEmpty() {
+ pp.disconnectedNode(c)
+ }
+ }
+ })
+ ns.SubscribeState(pp.updateFlag, func(node *enode.Node, oldState, newState nodestate.Flags) {
+ if !newState.IsEmpty() {
+ pp.updatePriority(node)
+ }
+ })
+ return pp
+}
+
+// RequestCapacity checks whether changing the capacity of a node to the given target
+// is possible (bias is applied in favor of other active nodes if the target is higher
+// than the current capacity).
+// If setCap is true then it also performs the change if possible. The function returns
+// the minimum priority needed to do the change and whether it is currently allowed.
+// If setCap and allowed are both true then the caller can assume that the change was
+// successful.
+// Note: priorityField should always be set before calling RequestCapacity. If setCap
+// is false then both InactiveFlag and ActiveFlag can be unset and they are not changed
+// by this function call either.
+// Note 2: this function should run inside a NodeStateMachine operation
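+//
+// A typical call, mirroring the tests, looks like:
+//
+//	ns.Operation(func() {
+//		_, allowed = pp.RequestCapacity(node, newCap, 0, true)
+//	})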
+func (pp *PriorityPool) RequestCapacity(node *enode.Node, targetCap uint64, bias time.Duration, setCap bool) (minPriority int64, allowed bool) {
+ pp.lock.Lock()
+ pp.activeQueue.Refresh()
+ var updates []capUpdate
+ defer func() {
+ pp.lock.Unlock()
+ pp.updateFlags(updates)
+ }()
+
+ if targetCap < pp.minCap {
+ targetCap = pp.minCap
+ }
+ c, _ := pp.ns.GetField(node, pp.ppNodeInfoField).(*ppNodeInfo)
+ if c == nil {
+ log.Error("RequestCapacity called for unknown node", "id", node.ID())
+ return math.MaxInt64, false
+ }
+ var priority int64
+ if targetCap > c.capacity {
+ priority = c.nodePriority.EstMinPriority(pp.clock.Now()+mclock.AbsTime(bias), targetCap, false)
+ } else {
+ priority = c.nodePriority.Priority(pp.clock.Now(), targetCap)
+ }
+ pp.markForChange(c)
+ pp.setCapacity(c, targetCap)
+ c.forced = true
+ pp.activeQueue.Remove(c.activeIndex)
+ pp.inactiveQueue.Remove(c.inactiveIndex)
+ pp.activeQueue.Push(c)
+ minPriority = pp.enforceLimits()
+ // if capacity update is possible now then minPriority == math.MinInt64
+ // if it is not possible at all then minPriority == math.MaxInt64
+ allowed = priority > minPriority
+ updates = pp.finalizeChanges(setCap && allowed)
+ return
+}
+
+// SetLimits sets the maximum number and total capacity of simultaneously active nodes
+func (pp *PriorityPool) SetLimits(maxCount, maxCap uint64) {
+ pp.lock.Lock()
+ pp.activeQueue.Refresh()
+ var updates []capUpdate
+ defer func() {
+ pp.lock.Unlock()
+ pp.ns.Operation(func() { pp.updateFlags(updates) })
+ }()
+
+ inc := (maxCount > pp.maxCount) || (maxCap > pp.maxCap)
+ dec := (maxCount < pp.maxCount) || (maxCap < pp.maxCap)
+ pp.maxCount, pp.maxCap = maxCount, maxCap
+ if dec {
+ pp.enforceLimits()
+ updates = pp.finalizeChanges(true)
+ }
+ if inc {
+ updates = pp.tryActivate()
+ }
+}
+
+// SetActiveBias sets the bias applied when trying to activate inactive nodes
+func (pp *PriorityPool) SetActiveBias(bias time.Duration) {
+ pp.lock.Lock()
+ pp.activeBias = bias
+ updates := pp.tryActivate()
+ pp.lock.Unlock()
+ pp.ns.Operation(func() { pp.updateFlags(updates) })
+}
+
+// ActiveCapacity returns the total capacity of currently active nodes
+func (pp *PriorityPool) ActiveCapacity() uint64 {
+ pp.lock.Lock()
+ defer pp.lock.Unlock()
+
+ return pp.activeCap
+}
+
+// inactiveSetIndex callback updates ppNodeInfo item index in inactiveQueue
+func inactiveSetIndex(a interface{}, index int) {
+ a.(*ppNodeInfo).inactiveIndex = index
+}
+
+// activeSetIndex callback updates ppNodeInfo item index in activeQueue
+func activeSetIndex(a interface{}, index int) {
+ a.(*ppNodeInfo).activeIndex = index
+}
+
+// invertPriority inverts a priority value. The active queue uses inverted priorities
+// because the node on the top is the first to be deactivated.
+func invertPriority(p int64) int64 {
+ if p == math.MinInt64 {
+ return math.MaxInt64
+ }
+ return -p
+}
+
+// activePriority callback returns actual priority of ppNodeInfo item in activeQueue
+func activePriority(a interface{}, now mclock.AbsTime) int64 {
+ c := a.(*ppNodeInfo)
+ if c.forced {
+ return math.MinInt64
+ }
+ if c.bias == 0 {
+ return invertPriority(c.nodePriority.Priority(now, c.capacity))
+ } else {
+ return invertPriority(c.nodePriority.EstMinPriority(now+mclock.AbsTime(c.bias), c.capacity, true))
+ }
+}
+
+// activeMaxPriority callback returns estimated maximum priority of ppNodeInfo item in activeQueue
+func activeMaxPriority(a interface{}, until mclock.AbsTime) int64 {
+ c := a.(*ppNodeInfo)
+ if c.forced {
+ return math.MinInt64
+ }
+ return invertPriority(c.nodePriority.EstMinPriority(until+mclock.AbsTime(c.bias), c.capacity, false))
+}
+
+// inactivePriority callback returns actual priority of ppNodeInfo item in inactiveQueue
+func (pp *PriorityPool) inactivePriority(p *ppNodeInfo) int64 {
+ return p.nodePriority.Priority(pp.clock.Now(), pp.minCap)
+}
+
+// connectedNode is called when a new node has been added to the pool (InactiveFlag set)
+// Note: this function should run inside a NodeStateMachine operation
+func (pp *PriorityPool) connectedNode(c *ppNodeInfo) {
+ pp.lock.Lock()
+ pp.activeQueue.Refresh()
+ var updates []capUpdate
+ defer func() {
+ pp.lock.Unlock()
+ pp.updateFlags(updates)
+ }()
+
+ if c.connected {
+ return
+ }
+ c.connected = true
+ pp.inactiveQueue.Push(c, pp.inactivePriority(c))
+ updates = pp.tryActivate()
+}
+
+// disconnectedNode is called when a node has been removed from the pool (both InactiveFlag
+// and ActiveFlag reset)
+// Note: this function should run inside a NodeStateMachine operation
+func (pp *PriorityPool) disconnectedNode(c *ppNodeInfo) {
+ pp.lock.Lock()
+ pp.activeQueue.Refresh()
+ var updates []capUpdate
+ defer func() {
+ pp.lock.Unlock()
+ pp.updateFlags(updates)
+ }()
+
+ if !c.connected {
+ return
+ }
+ c.connected = false
+ pp.activeQueue.Remove(c.activeIndex)
+ pp.inactiveQueue.Remove(c.inactiveIndex)
+ if c.capacity != 0 {
+ pp.setCapacity(c, 0)
+ updates = pp.tryActivate()
+ }
+}
+
+// markForChange internally puts a node in a temporary state that can either be reverted
+// or confirmed later. This temporary state allows changing the capacity of a node and
+// moving it between the active and inactive queue. ActiveFlag/InactiveFlag and
+// CapacityField are not changed while the changes are still temporary.
+func (pp *PriorityPool) markForChange(c *ppNodeInfo) {
+ if c.changed {
+ return
+ }
+ c.changed = true
+ c.origCap = c.capacity
+ pp.changed = append(pp.changed, c)
+}
+
+// setCapacity changes the capacity of a node and adjusts activeCap and activeCount
+// accordingly. Note that this change is performed in the temporary state so it should
+// be called after markForChange and before finalizeChanges.
+func (pp *PriorityPool) setCapacity(n *ppNodeInfo, cap uint64) {
+ pp.activeCap += cap - n.capacity
+ if n.capacity == 0 {
+ pp.activeCount++
+ }
+ if cap == 0 {
+ pp.activeCount--
+ }
+ n.capacity = cap
+}
+
+// enforceLimits enforces active node count and total capacity limits. It returns the
+// lowest active node priority. Note that this function is performed on the temporary
+// internal state.
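+// For example, with capacityStepDiv == 100 each iteration shaves 1% off the
+// lowest-priority active node's capacity (deactivating it entirely once it is
+// already at minCap) until both limits are satisfied.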
+func (pp *PriorityPool) enforceLimits() int64 {
+ if pp.activeCap <= pp.maxCap && pp.activeCount <= pp.maxCount {
+ return math.MinInt64
+ }
+ var maxActivePriority int64
+ pp.activeQueue.MultiPop(func(data interface{}, priority int64) bool {
+ c := data.(*ppNodeInfo)
+ pp.markForChange(c)
+ maxActivePriority = priority
+ if c.capacity == pp.minCap {
+ pp.setCapacity(c, 0)
+ } else {
+ sub := c.capacity / pp.capacityStepDiv
+ if c.capacity-sub < pp.minCap {
+ sub = c.capacity - pp.minCap
+ }
+ pp.setCapacity(c, c.capacity-sub)
+ pp.activeQueue.Push(c)
+ }
+ return pp.activeCap > pp.maxCap || pp.activeCount > pp.maxCount
+ })
+ return invertPriority(maxActivePriority)
+}
+
+// finalizeChanges either commits or reverts temporary changes. The necessary capacity
+// field and corresponding flag updates are not performed here but returned in a list because
+// they should be performed while the mutex is not held.
+func (pp *PriorityPool) finalizeChanges(commit bool) (updates []capUpdate) {
+ for _, c := range pp.changed {
+ // always remove and push back in order to update biased/forced priority
+ pp.activeQueue.Remove(c.activeIndex)
+ pp.inactiveQueue.Remove(c.inactiveIndex)
+ c.bias = 0
+ c.forced = false
+ c.changed = false
+ if !commit {
+ pp.setCapacity(c, c.origCap)
+ }
+ if c.connected {
+ if c.capacity != 0 {
+ pp.activeQueue.Push(c)
+ } else {
+ pp.inactiveQueue.Push(c, pp.inactivePriority(c))
+ }
+ if c.capacity != c.origCap && commit {
+ updates = append(updates, capUpdate{c.node, c.origCap, c.capacity})
+ }
+ }
+ c.origCap = 0
+ }
+ pp.changed = nil
+ return
+}
+
+// capUpdate describes a CapacityField and ActiveFlag/InactiveFlag update
+type capUpdate struct {
+ node *enode.Node
+ oldCap, newCap uint64
+}
+
+// updateFlags performs CapacityField and ActiveFlag/InactiveFlag updates while the
+// pool mutex is not held
+// Note: this function should run inside a NodeStateMachine operation
+func (pp *PriorityPool) updateFlags(updates []capUpdate) {
+ for _, f := range updates {
+ if f.oldCap == 0 {
+ pp.ns.SetStateSub(f.node, pp.ActiveFlag, pp.InactiveFlag, 0)
+ }
+ if f.newCap == 0 {
+ pp.ns.SetStateSub(f.node, pp.InactiveFlag, pp.ActiveFlag, 0)
+ pp.ns.SetFieldSub(f.node, pp.CapacityField, nil)
+ } else {
+ pp.ns.SetFieldSub(f.node, pp.CapacityField, f.newCap)
+ }
+ }
+}
+
+// tryActivate tries to activate inactive nodes if possible
+func (pp *PriorityPool) tryActivate() []capUpdate {
+ var commit bool
+ for pp.inactiveQueue.Size() > 0 {
+ c := pp.inactiveQueue.PopItem().(*ppNodeInfo)
+ pp.markForChange(c)
+ pp.setCapacity(c, pp.minCap)
+ c.bias = pp.activeBias
+ pp.activeQueue.Push(c)
+ pp.enforceLimits()
+ if c.capacity > 0 {
+ commit = true
+ } else {
+ break
+ }
+ }
+ return pp.finalizeChanges(commit)
+}
+
+// updatePriority gets the current priority value of the given node from the nodePriority
+// interface and performs the necessary changes. It is triggered by updateFlag.
+// Note: this function should run inside a NodeStateMachine operation
+func (pp *PriorityPool) updatePriority(node *enode.Node) {
+ pp.lock.Lock()
+ pp.activeQueue.Refresh()
+ var updates []capUpdate
+ defer func() {
+ pp.lock.Unlock()
+ pp.updateFlags(updates)
+ }()
+
+ c, _ := pp.ns.GetField(node, pp.ppNodeInfoField).(*ppNodeInfo)
+ if c == nil || !c.connected {
+ return
+ }
+ pp.activeQueue.Remove(c.activeIndex)
+ pp.inactiveQueue.Remove(c.inactiveIndex)
+ if c.capacity != 0 {
+ pp.activeQueue.Push(c)
+ } else {
+ pp.inactiveQueue.Push(c, pp.inactivePriority(c))
+ }
+ updates = pp.tryActivate()
+}
diff --git a/les/lespay/server/prioritypool_test.go b/les/lespay/server/prioritypool_test.go
new file mode 100644
index 000000000..cbb3f5b37
--- /dev/null
+++ b/les/lespay/server/prioritypool_test.go
@@ -0,0 +1,129 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package server
+
+import (
+ "math/rand"
+ "reflect"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common/mclock"
+ "github.com/ethereum/go-ethereum/p2p/enode"
+ "github.com/ethereum/go-ethereum/p2p/enr"
+ "github.com/ethereum/go-ethereum/p2p/nodestate"
+)
+
+var (
+ testSetup = &nodestate.Setup{}
+ ppTestClientFlag = testSetup.NewFlag("ppTestClientFlag")
+ ppTestClientField = testSetup.NewField("ppTestClient", reflect.TypeOf(&ppTestClient{}))
+ ppUpdateFlag = testSetup.NewFlag("ppUpdateFlag")
+ ppTestSetup = NewPriorityPoolSetup(testSetup)
+)
+
+func init() {
+ ppTestSetup.Connect(ppTestClientField, ppUpdateFlag)
+}
+
+const (
+ testCapacityStepDiv = 100
+ testCapacityToleranceDiv = 10
+)
+
+type ppTestClient struct {
+ node *enode.Node
+ balance, cap uint64
+}
+
+func (c *ppTestClient) Priority(now mclock.AbsTime, cap uint64) int64 {
+ return int64(c.balance / cap)
+}
+
+func (c *ppTestClient) EstMinPriority(until mclock.AbsTime, cap uint64, update bool) int64 {
+ return int64(c.balance / cap)
+}
+
+func TestPriorityPool(t *testing.T) {
+ clock := &mclock.Simulated{}
+ ns := nodestate.NewNodeStateMachine(nil, nil, clock, testSetup)
+
+ ns.SubscribeField(ppTestSetup.CapacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {
+ if n := ns.GetField(node, ppTestSetup.priorityField); n != nil {
+ c := n.(*ppTestClient)
+ c.cap = newValue.(uint64)
+ }
+ })
+ pp := NewPriorityPool(ns, ppTestSetup, clock, 100, 0, testCapacityStepDiv)
+ ns.Start()
+ pp.SetLimits(100, 1000000)
+ clients := make([]*ppTestClient, 100)
+ raise := func(c *ppTestClient) {
+ for {
+ var ok bool
+ ns.Operation(func() {
+ _, ok = pp.RequestCapacity(c.node, c.cap+c.cap/testCapacityStepDiv, 0, true)
+ })
+ if !ok {
+ return
+ }
+ }
+ }
+ var sumBalance uint64
+ check := func(c *ppTestClient) {
+ expCap := 1000000 * c.balance / sumBalance
+ capTol := expCap / testCapacityToleranceDiv
+ if c.cap < expCap-capTol || c.cap > expCap+capTol {
+ t.Errorf("Wrong node capacity (expected %d, got %d)", expCap, c.cap)
+ }
+ }
+
+ for i := range clients {
+ c := &ppTestClient{
+ node: enode.SignNull(&enr.Record{}, enode.ID{byte(i)}),
+ balance: 1000000000,
+ cap: 1000,
+ }
+ sumBalance += c.balance
+ clients[i] = c
+ ns.SetState(c.node, ppTestClientFlag, nodestate.Flags{}, 0)
+ ns.SetField(c.node, ppTestSetup.priorityField, c)
+ ns.SetState(c.node, ppTestSetup.InactiveFlag, nodestate.Flags{}, 0)
+ raise(c)
+ check(c)
+ }
+
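+ // Randomly re-assign client balances and verify that the pool converges back
+ // to a balance-proportional capacity distribution after each change.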
+ for count := 0; count < 100; count++ {
+ c := clients[rand.Intn(len(clients))]
+ oldBalance := c.balance
+ c.balance = uint64(rand.Int63n(1000000000) + 1000000000)
+ sumBalance += c.balance - oldBalance
+ pp.ns.SetState(c.node, ppUpdateFlag, nodestate.Flags{}, 0)
+ pp.ns.SetState(c.node, nodestate.Flags{}, ppUpdateFlag, 0)
+ if c.balance > oldBalance {
+ raise(c)
+ } else {
+ for _, c := range clients {
+ raise(c)
+ }
+ }
+ for _, c := range clients {
+ check(c)
+ }
+ }
+
+ ns.Stop()
+}
diff --git a/les/metrics.go b/les/metrics.go
index c5edb61c3..9a79fd1bb 100644
--- a/les/metrics.go
+++ b/les/metrics.go
@@ -99,8 +99,8 @@ var (
sqQueuedGauge = metrics.NewRegisteredGauge("les/server/servingQueue/queued", nil)
clientConnectedMeter = metrics.NewRegisteredMeter("les/server/clientEvent/connected", nil)
- clientRejectedMeter = metrics.NewRegisteredMeter("les/server/clientEvent/rejected", nil)
- clientKickedMeter = metrics.NewRegisteredMeter("les/server/clientEvent/kicked", nil)
+ clientActivatedMeter = metrics.NewRegisteredMeter("les/server/clientEvent/activated", nil)
+ clientDeactivatedMeter = metrics.NewRegisteredMeter("les/server/clientEvent/deactivated", nil)
clientDisconnectedMeter = metrics.NewRegisteredMeter("les/server/clientEvent/disconnected", nil)
clientFreezeMeter = metrics.NewRegisteredMeter("les/server/clientEvent/freeze", nil)
clientErrorMeter = metrics.NewRegisteredMeter("les/server/clientEvent/error", nil)
diff --git a/les/peer.go b/les/peer.go
index c52914523..0549daf9a 100644
--- a/les/peer.go
+++ b/les/peer.go
@@ -33,6 +33,7 @@ import (
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/les/flowcontrol"
lpc "github.com/ethereum/go-ethereum/les/lespay/client"
+ lps "github.com/ethereum/go-ethereum/les/lespay/server"
"github.com/ethereum/go-ethereum/les/utils"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/p2p"
@@ -463,7 +464,7 @@ func (p *serverPeer) requestTxStatus(reqID uint64, txHashes []common.Hash) error
return p.sendRequest(GetTxStatusMsg, reqID, txHashes, len(txHashes))
}
-// SendTxStatus creates a reply with a batch of transactions to be added to the remote transaction pool.
+// sendTxs creates a reply with a batch of transactions to be added to the remote transaction pool.
func (p *serverPeer) sendTxs(reqID uint64, amount int, txs rlp.RawValue) error {
p.Log().Debug("Sending batch of transactions", "amount", amount, "size", len(txs))
sizeFactor := (len(txs) + txSizeCostLimit/2) / txSizeCostLimit
@@ -719,6 +720,8 @@ type clientPeer struct {
responseLock sync.Mutex
responseCount uint64 // Counter to generate a unique id for request processing.
+ balance *lps.NodeBalance
+
// invalidLock is used for protecting invalidCount.
invalidLock sync.RWMutex
invalidCount utils.LinearExpiredValue // Counts the invalid requests the client peer has made.
@@ -876,18 +879,25 @@ func (p *clientPeer) sendAnnounce(request announceData) error {
return p2p.Send(p.rw, AnnounceMsg, request)
}
+// allowInactive implements clientPoolPeer
+func (p *clientPeer) allowInactive() bool {
+ return false
+}
+
// updateCapacity updates the request serving capacity assigned to a given client
// and also sends an announcement about the updated flow control parameters
func (p *clientPeer) updateCapacity(cap uint64) {
p.lock.Lock()
defer p.lock.Unlock()
- p.fcParams = flowcontrol.ServerParams{MinRecharge: cap, BufLimit: cap * bufLimitRatio}
- p.fcClient.UpdateParams(p.fcParams)
- var kvList keyValueList
- kvList = kvList.add("flowControl/MRR", cap)
- kvList = kvList.add("flowControl/BL", cap*bufLimitRatio)
- p.queueSend(func() { p.sendAnnounce(announceData{Update: kvList}) })
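+ // Only update the flow control parameters and send the announcement if the
+ // assigned capacity actually changed.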
+ if cap != p.fcParams.MinRecharge {
+ p.fcParams = flowcontrol.ServerParams{MinRecharge: cap, BufLimit: cap * bufLimitRatio}
+ p.fcClient.UpdateParams(p.fcParams)
+ var kvList keyValueList
+ kvList = kvList.add("flowControl/MRR", cap)
+ kvList = kvList.add("flowControl/BL", cap*bufLimitRatio)
+ p.queueSend(func() { p.sendAnnounce(announceData{Update: kvList}) })
+ }
}
// freezeClient temporarily puts the client in a frozen state which means all
@@ -974,7 +984,7 @@ func (p *clientPeer) Handshake(td *big.Int, head common.Hash, headNum uint64, ge
// set default announceType on server side
p.announceType = announceTypeSimple
}
- p.fcClient = flowcontrol.NewClientNode(server.fcManager, server.defParams)
+ p.fcClient = flowcontrol.NewClientNode(server.fcManager, p.fcParams)
}
return nil
})
diff --git a/les/server.go b/les/server.go
index ecb65150a..225a7ad1f 100644
--- a/les/server.go
+++ b/les/server.go
@@ -23,6 +23,7 @@ import (
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/les/flowcontrol"
+ lps "github.com/ethereum/go-ethereum/les/lespay/server"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
@@ -51,9 +52,9 @@ type LesServer struct {
servingQueue *servingQueue
clientPool *clientPool
- minCapacity, maxCapacity, freeCapacity uint64
- threadsIdle int // Request serving threads count when system is idle.
- threadsBusy int // Request serving threads count when system is busy(block insertion).
+ minCapacity, maxCapacity uint64
+ threadsIdle int // Request serving threads count when system is idle.
+ threadsBusy int // Request serving threads count when system is busy (block insertion).
p2pSrv *p2p.Server
}
@@ -94,7 +95,6 @@ func NewLesServer(node *node.Node, e *eth.Ethereum, config *eth.Config) (*LesSer
}
srv.handler = newServerHandler(srv, e.BlockChain(), e.ChainDb(), e.TxPool(), e.Synced)
srv.costTracker, srv.minCapacity = newCostTracker(e.ChainDb(), config)
- srv.freeCapacity = srv.minCapacity
srv.oracle = srv.setupOracle(node, e.BlockChain().Genesis().Hash(), config)
// Initialize the bloom trie indexer.
@@ -102,8 +102,8 @@ func NewLesServer(node *node.Node, e *eth.Ethereum, config *eth.Config) (*LesSer
// Initialize server capacity management fields.
srv.defParams = flowcontrol.ServerParams{
- BufLimit: srv.freeCapacity * bufLimitRatio,
- MinRecharge: srv.freeCapacity,
+ BufLimit: srv.minCapacity * bufLimitRatio,
+ MinRecharge: srv.minCapacity,
}
// LES flow control tries to more or less guarantee the possibility for the
// clients to send a certain amount of requests at any time and get a quick
@@ -111,13 +111,13 @@ func NewLesServer(node *node.Node, e *eth.Ethereum, config *eth.Config) (*LesSer
// to send requests most of the time. Our goal is to serve as many clients as
// possible while the actually used server capacity does not exceed the limits
totalRecharge := srv.costTracker.totalRecharge()
- srv.maxCapacity = srv.freeCapacity * uint64(srv.config.LightPeers)
+ srv.maxCapacity = srv.minCapacity * uint64(srv.config.LightPeers)
if totalRecharge > srv.maxCapacity {
srv.maxCapacity = totalRecharge
}
- srv.fcManager.SetCapacityLimits(srv.freeCapacity, srv.maxCapacity, srv.freeCapacity*2)
- srv.clientPool = newClientPool(srv.chainDb, srv.freeCapacity, mclock.System{}, func(id enode.ID) { go srv.peers.unregister(id.String()) })
- srv.clientPool.setDefaultFactors(priceFactors{0, 1, 1}, priceFactors{0, 1, 1})
+ srv.fcManager.SetCapacityLimits(srv.minCapacity, srv.maxCapacity, srv.minCapacity*2)
+ srv.clientPool = newClientPool(srv.chainDb, srv.minCapacity, defaultConnectedBias, mclock.System{}, func(id enode.ID) { go srv.peers.unregister(id.String()) })
+ srv.clientPool.setDefaultFactors(lps.PriceFactors{TimeFactor: 0, CapacityFactor: 1, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 0, CapacityFactor: 1, RequestFactor: 1})
checkpoint := srv.latestLocalCheckpoint()
if !checkpoint.Empty() {
@@ -268,7 +268,7 @@ func (s *LesServer) capacityManagement() {
updateRecharge()
case totalCapacity = <-totalCapacityCh:
totalCapacityGauge.Update(int64(totalCapacity))
- newFreePeers := totalCapacity / s.freeCapacity
+ newFreePeers := totalCapacity / s.minCapacity
if newFreePeers < freePeers && newFreePeers < uint64(s.config.LightPeers) {
log.Warn("Reduced free peer connections", "from", freePeers, "to", newFreePeers)
}
diff --git a/les/server_handler.go b/les/server_handler.go
index 463f51cb4..583df9600 100644
--- a/les/server_handler.go
+++ b/les/server_handler.go
@@ -31,6 +31,7 @@ import (
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
+ lps "github.com/ethereum/go-ethereum/les/lespay/server"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
@@ -138,10 +139,14 @@ func (h *serverHandler) handle(p *clientPeer) error {
defer p.fcClient.Disconnect()
// Disconnect the inbound peer if it's rejected by clientPool
- if !h.server.clientPool.connect(p, 0) {
- p.Log().Debug("Light Ethereum peer registration failed", "err", errFullClientPool)
+ if cap, err := h.server.clientPool.connect(p); cap != p.fcParams.MinRecharge || err != nil {
+ p.Log().Debug("Light Ethereum peer rejected", "err", errFullClientPool)
return errFullClientPool
}
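+ // Fetch the NodeBalance tracker assigned by the client pool; a nil value
+ // indicates the node is no longer tracked by the pool, so drop the peer.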
+ p.balance, _ = h.server.clientPool.ns.GetField(p.Node(), h.server.clientPool.BalanceField).(*lps.NodeBalance)
+ if p.balance == nil {
+ return p2p.DiscRequested
+ }
// Register the peer locally
if err := h.server.peers.register(p); err != nil {
h.server.clientPool.disconnect(p)
@@ -157,6 +162,7 @@ func (h *serverHandler) handle(p *clientPeer) error {
wg.Wait() // Ensure all background task routines have exited.
h.server.peers.unregister(p.id)
h.server.clientPool.disconnect(p)
+ p.balance = nil
clientConnectionGauge.Update(int64(h.server.peers.len()))
connectionTimer.Update(time.Duration(mclock.Now() - connectedAt))
}()
@@ -256,13 +262,16 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error {
realCost = maxCost // Assign a fake cost for testing purpose
} else {
realCost = h.server.costTracker.realCost(servingTime, msg.Size, replySize)
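+ // Never charge more than the previously announced maximum cost, even if
+ // the measured serving cost turned out to be higher.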
+ if realCost > maxCost {
+ realCost = maxCost
+ }
}
bv := p.fcClient.RequestProcessed(reqID, responseCount, maxCost, realCost)
if amount != 0 {
// Feed cost tracker request serving statistic.
h.server.costTracker.updateStats(msg.Code, amount, servingTime, realCost)
// Reduce priority "balance" for the specific peer.
- h.server.clientPool.requestCost(p, realCost)
+ p.balance.RequestServed(realCost)
}
if reply != nil {
p.queueSend(func() {
@@ -380,7 +389,7 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error {
first = false
}
reply := p.replyBlockHeaders(req.ReqID, headers)
- sendResponse(req.ReqID, query.Amount, p.replyBlockHeaders(req.ReqID, headers), task.done())
+ sendResponse(req.ReqID, query.Amount, reply, task.done())
if metrics.EnabledExpensive {
miscOutHeaderPacketsMeter.Mark(1)
miscOutHeaderTrafficMeter.Mark(int64(reply.size()))
diff --git a/les/test_helper.go b/les/test_helper.go
index 4ce1d03c2..9f9b28721 100644
--- a/les/test_helper.go
+++ b/les/test_helper.go
@@ -282,9 +282,9 @@ func newTestServerHandler(blocks int, indexers []*core.ChainIndexer, db ethdb.Da
},
fcManager: flowcontrol.NewClientManager(nil, clock),
}
- server.costTracker, server.freeCapacity = newCostTracker(db, server.config)
+ server.costTracker, server.minCapacity = newCostTracker(db, server.config)
server.costTracker.testCostList = testCostList(0) // Disable flow control mechanism.
- server.clientPool = newClientPool(db, 1, clock, nil)
+ server.clientPool = newClientPool(db, testBufRecharge, defaultConnectedBias, clock, func(id enode.ID) {})
server.clientPool.setLimits(10000, 10000) // Assign enough capacity for clientpool
server.handler = newServerHandler(server, simulation.Blockchain(), db, txpool, func() bool { return true })
if server.oracle != nil {
diff --git a/les/utils/expiredvalue.go b/les/utils/expiredvalue.go
index 980156d21..55e82cee4 100644
--- a/les/utils/expiredvalue.go
+++ b/les/utils/expiredvalue.go
@@ -18,6 +18,7 @@ package utils
import (
"math"
+ "sync"
"github.com/ethereum/go-ethereum/common/mclock"
)
@@ -124,6 +125,11 @@ func (e *ExpiredValue) SubExp(a ExpiredValue) {
}
}
+// IsZero returns true if the value is zero
+func (e *ExpiredValue) IsZero() bool {
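+ // The stored value is Base scaled by 2^Exp, so it can only be zero if Base is zero.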
+ return e.Base == 0
+}
+
// LinearExpiredValue is very similar to ExpiredValue in that the value
// continuously expires, but here the expiration is linear.
type LinearExpiredValue struct {
@@ -168,12 +174,20 @@ func (e *LinearExpiredValue) Add(amount int64, now mclock.AbsTime) uint64 {
return e.Val
}
+// ValueExpirer controls value expiration rate
+type ValueExpirer interface {
+ SetRate(now mclock.AbsTime, rate float64)
+ SetLogOffset(now mclock.AbsTime, logOffset Fixed64)
+ LogOffset(now mclock.AbsTime) Fixed64
+}
+
-// Expirer changes logOffset with a linear rate which can be changed during operation.
-// It is not thread safe, if access by multiple goroutines is needed then it should be
-// encapsulated into a locked structure.
-// Note that if neither SetRate nor SetLogOffset are used during operation then LogOffset
-// is thread safe.
+// Expirer changes logOffset with a linear rate which can be changed during operation.
+// It is safe for concurrent use: SetRate, SetLogOffset and LogOffset are all
+// guarded by an internal lock.
type Expirer struct {
+ lock sync.RWMutex
logOffset Fixed64
rate float64
lastUpdate mclock.AbsTime
@@ -182,6 +196,9 @@ type Expirer struct {
// SetRate changes the expiration rate which is the inverse of the time constant in
// nanoseconds.
func (e *Expirer) SetRate(now mclock.AbsTime, rate float64) {
+ e.lock.Lock()
+ defer e.lock.Unlock()
+
dt := now - e.lastUpdate
if dt > 0 {
e.logOffset += Fixed64(logToFixedFactor * float64(dt) * e.rate)
@@ -192,12 +209,18 @@ func (e *Expirer) SetRate(now mclock.AbsTime, rate float64) {
// SetLogOffset sets logOffset instantly.
func (e *Expirer) SetLogOffset(now mclock.AbsTime, logOffset Fixed64) {
+ e.lock.Lock()
+ defer e.lock.Unlock()
+
e.lastUpdate = now
e.logOffset = logOffset
}
// LogOffset returns the current logarithmic offset.
func (e *Expirer) LogOffset(now mclock.AbsTime) Fixed64 {
+ e.lock.RLock()
+ defer e.lock.RUnlock()
+
dt := now - e.lastUpdate
if dt <= 0 {
return e.logOffset
diff --git a/les/utils/timeutils.go b/les/utils/timeutils.go
new file mode 100644
index 000000000..62a4285d1
--- /dev/null
+++ b/les/utils/timeutils.go
@@ -0,0 +1,69 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package utils
+
+import (
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common/mclock"
+)
+
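+// UpdateTimer rate-limits an update operation: the callback passed to Update or
+// UpdateAt only runs once at least a threshold of time has elapsed since the
+// last successful update.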
+type UpdateTimer struct {
+ clock mclock.Clock
+ lock sync.Mutex
+ last mclock.AbsTime
+ threshold time.Duration
+}
+
+func NewUpdateTimer(clock mclock.Clock, threshold time.Duration) *UpdateTimer {
+ // We don't accept an update threshold below zero.
+ if threshold < 0 {
+ return nil
+ }
+ // Don't panic for lazy users
+ if clock == nil {
+ clock = mclock.System{}
+ }
+ return &UpdateTimer{
+ clock: clock,
+ last: clock.Now(),
+ threshold: threshold,
+ }
+}
+
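+// Update is a convenience wrapper around UpdateAt that uses the timer's own
+// clock as the current time.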
+func (t *UpdateTimer) Update(callback func(diff time.Duration) bool) bool {
+ return t.UpdateAt(t.clock.Now(), callback)
+}
+
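+// UpdateAt runs the callback with the elapsed time since the last update if at
+// least the threshold has passed; the timer only advances when the callback
+// returns true, so a failed update is retried on the next call.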
+func (t *UpdateTimer) UpdateAt(at mclock.AbsTime, callback func(diff time.Duration) bool) bool {
+ t.lock.Lock()
+ defer t.lock.Unlock()
+
+ diff := time.Duration(at - t.last)
+ if diff < 0 {
+ diff = 0
+ }
+ if diff < t.threshold {
+ return false
+ }
+ if callback(diff) {
+ t.last = at
+ return true
+ }
+ return false
+}
diff --git a/les/utils/timeutils_test.go b/les/utils/timeutils_test.go
new file mode 100644
index 000000000..9f9e1c2dc
--- /dev/null
+++ b/les/utils/timeutils_test.go
@@ -0,0 +1,47 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package utils
+
+import (
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common/mclock"
+)
+
+func TestUpdateTimer(t *testing.T) {
+ timer := NewUpdateTimer(mclock.System{}, -1)
+ if timer != nil {
+ t.Fatalf("Created update timer with a negative threshold")
+ }
+ sim := &mclock.Simulated{}
+ timer = NewUpdateTimer(sim, time.Second)
+ if updated := timer.Update(func(diff time.Duration) bool { return true }); updated {
+ t.Fatalf("Timer updated before the threshold was reached")
+ }
+ sim.Run(time.Second)
+ if updated := timer.Update(func(diff time.Duration) bool { return true }); !updated {
+ t.Fatalf("Timer not updated after the threshold was reached")
+ }
+ if updated := timer.UpdateAt(sim.Now()+mclock.AbsTime(time.Second), func(diff time.Duration) bool { return true }); !updated {
+ t.Fatalf("Timer not updated after the threshold was reached")
+ }
+ timer = NewUpdateTimer(sim, 0)
+ if updated := timer.Update(func(diff time.Duration) bool { return true }); !updated {
+ t.Fatalf("Timer with zero threshold was not updated")
+ }
+}