les: remove half-finished priority pool APIs (#19780)

* les: remove half-finished APIs

gary rong 2019-07-04 02:23:06 +08:00 committed by Péter Szilágyi
parent 8d2cf028a5
commit 59a3198382
10 changed files with 153 additions and 890 deletions


@ -17,462 +17,16 @@
package les package les
import ( import (
"context"
"errors" "errors"
"fmt"
"sync"
"time"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/les/csvlogger"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/rpc"
) )
var ( var (
ErrMinCap = errors.New("capacity too small") errNoCheckpoint = errors.New("no local checkpoint provided")
ErrTotalCap = errors.New("total capacity exceeded") errNotActivated = errors.New("checkpoint registrar is not activated")
ErrUnknownBenchmarkType = errors.New("unknown benchmark type")
ErrNoCheckpoint = errors.New("no local checkpoint provided")
ErrNotActivated = errors.New("checkpoint registrar is not activated")
dropCapacityDelay = time.Second // delay applied to decreasing capacity changes
) )
// PrivateLightServerAPI provides an API to access the LES light server.
// It offers only methods that operate on public data that is freely available to anyone.
type PrivateLightServerAPI struct {
server *LesServer
}
// NewPrivateLightServerAPI creates a new LES light server API.
func NewPrivateLightServerAPI(server *LesServer) *PrivateLightServerAPI {
return &PrivateLightServerAPI{
server: server,
}
}
// TotalCapacity queries total available capacity for all clients
func (api *PrivateLightServerAPI) TotalCapacity() hexutil.Uint64 {
return hexutil.Uint64(api.server.priorityClientPool.totalCapacity())
}
// SubscribeTotalCapacity subscribes to changed total capacity events.
// If onlyUnderrun is true then a notification is sent only if the total capacity
// drops under the total capacity of connected priority clients.
//
// Note: actually applying decreasing total capacity values is delayed while the
// notification is sent instantly. This allows lowering the capacity of a priority client
// or choosing which one to drop before the system drops some of them automatically.
func (api *PrivateLightServerAPI) SubscribeTotalCapacity(ctx context.Context, onlyUnderrun bool) (*rpc.Subscription, error) {
notifier, supported := rpc.NotifierFromContext(ctx)
if !supported {
return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported
}
rpcSub := notifier.CreateSubscription()
api.server.priorityClientPool.subscribeTotalCapacity(&tcSubscription{notifier, rpcSub, onlyUnderrun})
return rpcSub, nil
}
type (
// tcSubscription represents a total capacity subscription
tcSubscription struct {
notifier *rpc.Notifier
rpcSub *rpc.Subscription
onlyUnderrun bool
}
tcSubs map[*tcSubscription]struct{}
)
// send sends a changed total capacity event to the subscribers
func (s tcSubs) send(tc uint64, underrun bool) {
for sub := range s {
select {
case <-sub.rpcSub.Err():
delete(s, sub)
case <-sub.notifier.Closed():
delete(s, sub)
default:
if underrun || !sub.onlyUnderrun {
sub.notifier.Notify(sub.rpcSub.ID, tc)
}
}
}
}
// MinimumCapacity queries minimum assignable capacity for a single client
func (api *PrivateLightServerAPI) MinimumCapacity() hexutil.Uint64 {
return hexutil.Uint64(api.server.minCapacity)
}
// FreeClientCapacity queries the capacity provided for free clients
func (api *PrivateLightServerAPI) FreeClientCapacity() hexutil.Uint64 {
return hexutil.Uint64(api.server.freeClientCap)
}
// SetClientCapacity sets the priority capacity assigned to a given client.
// If the assigned capacity is bigger than zero then the connection is always
// guaranteed. The sum of capacity assigned to priority clients cannot exceed
// the total available capacity.
//
// Note: assigned capacity can be changed while the client is connected with
// immediate effect.
func (api *PrivateLightServerAPI) SetClientCapacity(id enode.ID, cap uint64) error {
if cap != 0 && cap < api.server.minCapacity {
return ErrMinCap
}
return api.server.priorityClientPool.setClientCapacity(id, cap)
}
// GetClientCapacity returns the capacity assigned to a given client
func (api *PrivateLightServerAPI) GetClientCapacity(id enode.ID) hexutil.Uint64 {
api.server.priorityClientPool.lock.Lock()
defer api.server.priorityClientPool.lock.Unlock()
return hexutil.Uint64(api.server.priorityClientPool.clients[id].cap)
}
// clientPool is implemented by both the free and priority client pools
type clientPool interface {
peerSetNotify
setLimits(count int, totalCap uint64)
}
// priorityClientPool stores information about prioritized clients
type priorityClientPool struct {
lock sync.Mutex
child clientPool
ps *peerSet
clients map[enode.ID]priorityClientInfo
totalCap, totalCapAnnounced uint64
totalConnectedCap, freeClientCap uint64
maxPeers, priorityCount int
logger *csvlogger.Logger
logTotalPriConn *csvlogger.Channel
subs tcSubs
updateSchedule []scheduledUpdate
scheduleCounter uint64
}
// scheduledUpdate represents a delayed total capacity update
type scheduledUpdate struct {
time mclock.AbsTime
totalCap, id uint64
}
// priorityClientInfo entries exist for all prioritized clients and currently connected non-priority clients
type priorityClientInfo struct {
cap uint64 // zero for non-priority clients
connected bool
peer *peer
}
// newPriorityClientPool creates a new priority client pool
func newPriorityClientPool(freeClientCap uint64, ps *peerSet, child clientPool, metricsLogger, eventLogger *csvlogger.Logger) *priorityClientPool {
return &priorityClientPool{
clients: make(map[enode.ID]priorityClientInfo),
freeClientCap: freeClientCap,
ps: ps,
child: child,
logger: eventLogger,
logTotalPriConn: metricsLogger.NewChannel("totalPriConn", 0),
}
}
// registerPeer is called when a new client is connected. If the client has no
// priority assigned then it is passed to the child pool which may either keep it
// or disconnect it.
//
// Note: priorityClientPool also stores a record about free clients while they are
// connected in order to be able to assign priority to them later.
func (v *priorityClientPool) registerPeer(p *peer) {
v.lock.Lock()
defer v.lock.Unlock()
id := p.ID()
c := v.clients[id]
v.logger.Event(fmt.Sprintf("priorityClientPool: registerPeer cap=%d connected=%v, %x", c.cap, c.connected, id.Bytes()))
if c.connected {
return
}
if c.cap == 0 && v.child != nil {
v.child.registerPeer(p)
}
if c.cap != 0 && v.totalConnectedCap+c.cap > v.totalCap {
v.logger.Event(fmt.Sprintf("priorityClientPool: rejected, %x", id.Bytes()))
go v.ps.Unregister(p.id)
return
}
c.connected = true
c.peer = p
v.clients[id] = c
if c.cap != 0 {
v.priorityCount++
v.totalConnectedCap += c.cap
v.logger.Event(fmt.Sprintf("priorityClientPool: accepted with %d capacity, %x", c.cap, id.Bytes()))
v.logTotalPriConn.Update(float64(v.totalConnectedCap))
if v.child != nil {
v.child.setLimits(v.maxPeers-v.priorityCount, v.totalCap-v.totalConnectedCap)
}
p.updateCapacity(c.cap)
}
}
// unregisterPeer is called when a client is disconnected. If the client has no
// priority assigned then it is also removed from the child pool.
func (v *priorityClientPool) unregisterPeer(p *peer) {
v.lock.Lock()
defer v.lock.Unlock()
id := p.ID()
c := v.clients[id]
v.logger.Event(fmt.Sprintf("priorityClientPool: unregisterPeer cap=%d connected=%v, %x", c.cap, c.connected, id.Bytes()))
if !c.connected {
return
}
if c.cap != 0 {
c.connected = false
v.clients[id] = c
v.priorityCount--
v.totalConnectedCap -= c.cap
v.logTotalPriConn.Update(float64(v.totalConnectedCap))
if v.child != nil {
v.child.setLimits(v.maxPeers-v.priorityCount, v.totalCap-v.totalConnectedCap)
}
} else {
if v.child != nil {
v.child.unregisterPeer(p)
}
delete(v.clients, id)
}
}
// setLimits updates the allowed peer count and total capacity of the priority
// client pool. Since the free client pool is a child of the priority pool, the
// remaining peer count and capacity are assigned to the free pool by calling its
// own setLimits function.
//
// Note: a decreasing change of the total capacity is applied with a delay.
func (v *priorityClientPool) setLimits(count int, totalCap uint64) {
v.lock.Lock()
defer v.lock.Unlock()
v.totalCapAnnounced = totalCap
if totalCap > v.totalCap {
v.setLimitsNow(count, totalCap)
v.subs.send(totalCap, false)
return
}
v.setLimitsNow(count, v.totalCap)
if totalCap < v.totalCap {
v.subs.send(totalCap, totalCap < v.totalConnectedCap)
for i, s := range v.updateSchedule {
if totalCap >= s.totalCap {
s.totalCap = totalCap
v.updateSchedule = v.updateSchedule[:i+1]
return
}
}
v.updateSchedule = append(v.updateSchedule, scheduledUpdate{time: mclock.Now() + mclock.AbsTime(dropCapacityDelay), totalCap: totalCap})
if len(v.updateSchedule) == 1 {
v.scheduleCounter++
id := v.scheduleCounter
v.updateSchedule[0].id = id
time.AfterFunc(dropCapacityDelay, func() { v.checkUpdate(id) })
}
} else {
v.updateSchedule = nil
}
}
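As a worked illustration of the delayed decrease described in the note above (invented variable name and numbers, not part of the diff): subscribers are notified of a drop immediately, while the lower limit is only enforced after dropCapacityDelay.
// Suppose the pool currently allows 100 capacity units for 50 peers.
pool.setLimits(50, 100)
// Dropping to 80 notifies subscribers right away, but setLimitsNow(50, 80)
// only runs about dropCapacityDelay (one second) later, giving the operator
// time to lower or disconnect priority clients before the pool does it.
pool.setLimits(50, 80)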
// checkUpdate performs the next scheduled update if possible and schedules
// the one after that
func (v *priorityClientPool) checkUpdate(id uint64) {
v.lock.Lock()
defer v.lock.Unlock()
if len(v.updateSchedule) == 0 || v.updateSchedule[0].id != id {
return
}
v.setLimitsNow(v.maxPeers, v.updateSchedule[0].totalCap)
v.updateSchedule = v.updateSchedule[1:]
if len(v.updateSchedule) != 0 {
v.scheduleCounter++
id := v.scheduleCounter
v.updateSchedule[0].id = id
dt := time.Duration(v.updateSchedule[0].time - mclock.Now())
time.AfterFunc(dt, func() { v.checkUpdate(id) })
}
}
// setLimits updates the allowed peer count and total capacity immediately
func (v *priorityClientPool) setLimitsNow(count int, totalCap uint64) {
if v.priorityCount > count || v.totalConnectedCap > totalCap {
for id, c := range v.clients {
if c.connected {
v.logger.Event(fmt.Sprintf("priorityClientPool: setLimitsNow kicked out, %x", id.Bytes()))
c.connected = false
v.totalConnectedCap -= c.cap
v.logTotalPriConn.Update(float64(v.totalConnectedCap))
v.priorityCount--
v.clients[id] = c
go v.ps.Unregister(c.peer.id)
if v.priorityCount <= count && v.totalConnectedCap <= totalCap {
break
}
}
}
}
v.maxPeers = count
v.totalCap = totalCap
if v.child != nil {
v.child.setLimits(v.maxPeers-v.priorityCount, v.totalCap-v.totalConnectedCap)
}
}
// totalCapacity queries total available capacity for all clients
func (v *priorityClientPool) totalCapacity() uint64 {
v.lock.Lock()
defer v.lock.Unlock()
return v.totalCapAnnounced
}
// subscribeTotalCapacity subscribes to changed total capacity events
func (v *priorityClientPool) subscribeTotalCapacity(sub *tcSubscription) {
v.lock.Lock()
defer v.lock.Unlock()
v.subs[sub] = struct{}{}
}
// setClientCapacity sets the priority capacity assigned to a given client
func (v *priorityClientPool) setClientCapacity(id enode.ID, cap uint64) error {
v.lock.Lock()
defer v.lock.Unlock()
c := v.clients[id]
if c.cap == cap {
return nil
}
if c.connected {
if v.totalConnectedCap+cap > v.totalCap+c.cap {
return ErrTotalCap
}
if c.cap == 0 {
if v.child != nil {
v.child.unregisterPeer(c.peer)
}
v.priorityCount++
}
if cap == 0 {
v.priorityCount--
}
v.totalConnectedCap += cap - c.cap
v.logTotalPriConn.Update(float64(v.totalConnectedCap))
if v.child != nil {
v.child.setLimits(v.maxPeers-v.priorityCount, v.totalCap-v.totalConnectedCap)
}
if cap == 0 {
if v.child != nil {
v.child.registerPeer(c.peer)
}
c.peer.updateCapacity(v.freeClientCap)
} else {
c.peer.updateCapacity(cap)
}
}
if cap != 0 || c.connected {
c.cap = cap
v.clients[id] = c
} else {
delete(v.clients, id)
}
if c.connected {
v.logger.Event(fmt.Sprintf("priorityClientPool: changed capacity to %d, %x", cap, id.Bytes()))
}
return nil
}
// Benchmark runs a request performance benchmark with a given set of measurement setups
// in multiple passes specified by passCount. The measurement time for each setup in each
// pass is specified in milliseconds by length.
//
// Note: measurement time is adjusted for each pass depending on the previous ones.
// Therefore a controlled total measurement time is achievable in multiple passes.
func (api *PrivateLightServerAPI) Benchmark(setups []map[string]interface{}, passCount, length int) ([]map[string]interface{}, error) {
benchmarks := make([]requestBenchmark, len(setups))
for i, setup := range setups {
if t, ok := setup["type"].(string); ok {
getInt := func(field string, def int) int {
if value, ok := setup[field].(float64); ok {
return int(value)
}
return def
}
getBool := func(field string, def bool) bool {
if value, ok := setup[field].(bool); ok {
return value
}
return def
}
switch t {
case "header":
benchmarks[i] = &benchmarkBlockHeaders{
amount: getInt("amount", 1),
skip: getInt("skip", 1),
byHash: getBool("byHash", false),
reverse: getBool("reverse", false),
}
case "body":
benchmarks[i] = &benchmarkBodiesOrReceipts{receipts: false}
case "receipts":
benchmarks[i] = &benchmarkBodiesOrReceipts{receipts: true}
case "proof":
benchmarks[i] = &benchmarkProofsOrCode{code: false}
case "code":
benchmarks[i] = &benchmarkProofsOrCode{code: true}
case "cht":
benchmarks[i] = &benchmarkHelperTrie{
bloom: false,
reqCount: getInt("amount", 1),
}
case "bloom":
benchmarks[i] = &benchmarkHelperTrie{
bloom: true,
reqCount: getInt("amount", 1),
}
case "txSend":
benchmarks[i] = &benchmarkTxSend{}
case "txStatus":
benchmarks[i] = &benchmarkTxStatus{}
default:
return nil, ErrUnknownBenchmarkType
}
} else {
return nil, ErrUnknownBenchmarkType
}
}
rs := api.server.protocolManager.runBenchmark(benchmarks, passCount, time.Millisecond*time.Duration(length))
result := make([]map[string]interface{}, len(setups))
for i, r := range rs {
res := make(map[string]interface{})
if r.err == nil {
res["totalCount"] = r.totalCount
res["avgTime"] = r.avgTime
res["maxInSize"] = r.maxInSize
res["maxOutSize"] = r.maxOutSize
} else {
res["error"] = r.err.Error()
}
result[i] = res
}
return result, nil
}
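For reference, the Benchmark API removed here (exposed under the les namespace) takes its measurement setups as loosely typed maps keyed by "type", with numeric fields arriving as float64 because they come from JSON. A hedged sketch of an invocation, with invented values and not part of the diff:
// Hypothetical invocation of the removed Benchmark API; the values are invented.
setups := []map[string]interface{}{
	{"type": "header", "amount": float64(10), "skip": float64(1), "reverse": true},
	{"type": "txStatus"},
}
// Three passes, 500 ms of measurement time per setup and pass.
results, err := api.Benchmark(setups, 3, 500)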
// PrivateLightAPI provides an API to access the LES light server or light client. // PrivateLightAPI provides an API to access the LES light server or light client.
type PrivateLightAPI struct { type PrivateLightAPI struct {
backend *lesCommons backend *lesCommons
@ -498,7 +52,7 @@ func (api *PrivateLightAPI) LatestCheckpoint() ([4]string, error) {
var res [4]string var res [4]string
cp := api.backend.latestLocalCheckpoint() cp := api.backend.latestLocalCheckpoint()
if cp.Empty() { if cp.Empty() {
return res, ErrNoCheckpoint return res, errNoCheckpoint
} }
res[0] = hexutil.EncodeUint64(cp.SectionIndex) res[0] = hexutil.EncodeUint64(cp.SectionIndex)
res[1], res[2], res[3] = cp.SectionHead.Hex(), cp.CHTRoot.Hex(), cp.BloomRoot.Hex() res[1], res[2], res[3] = cp.SectionHead.Hex(), cp.CHTRoot.Hex(), cp.BloomRoot.Hex()
@ -515,7 +69,7 @@ func (api *PrivateLightAPI) GetCheckpoint(index uint64) ([3]string, error) {
var res [3]string var res [3]string
cp := api.backend.getLocalCheckpoint(index) cp := api.backend.getLocalCheckpoint(index)
if cp.Empty() { if cp.Empty() {
return res, ErrNoCheckpoint return res, errNoCheckpoint
} }
res[0], res[1], res[2] = cp.SectionHead.Hex(), cp.CHTRoot.Hex(), cp.BloomRoot.Hex() res[0], res[1], res[2] = cp.SectionHead.Hex(), cp.CHTRoot.Hex(), cp.BloomRoot.Hex()
return res, nil return res, nil
@ -524,7 +78,7 @@ func (api *PrivateLightAPI) GetCheckpoint(index uint64) ([3]string, error) {
// GetCheckpointContractAddress returns the contract address in hex format. // GetCheckpointContractAddress returns the contract address in hex format.
func (api *PrivateLightAPI) GetCheckpointContractAddress() (string, error) { func (api *PrivateLightAPI) GetCheckpointContractAddress() (string, error) {
if api.reg == nil { if api.reg == nil {
return "", ErrNotActivated return "", errNotActivated
} }
return api.reg.config.Address.Hex(), nil return api.reg.config.Address.Hex(), nil
} }


@ -18,7 +18,6 @@ package les
import ( import (
"encoding/binary" "encoding/binary"
"fmt"
"math" "math"
"sync" "sync"
"sync/atomic" "sync/atomic"
@ -27,7 +26,6 @@ import (
"github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/les/csvlogger"
"github.com/ethereum/go-ethereum/les/flowcontrol" "github.com/ethereum/go-ethereum/les/flowcontrol"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
) )
@ -96,40 +94,50 @@ const (
// as the number of cost units per nanosecond of serving time in a single thread. // as the number of cost units per nanosecond of serving time in a single thread.
// It is based on statistics collected during serving requests in high-load periods // It is based on statistics collected during serving requests in high-load periods
// and practically acts as a one-dimension request price scaling factor over the // and practically acts as a one-dimension request price scaling factor over the
// pre-defined cost estimate table. Instead of scaling the cost values, the real // pre-defined cost estimate table.
// value of cost units is changed by applying the factor to the serving times. This //
// is more convenient because the changes in the cost factor can be applied immediately // The reason for dynamically maintaining the global factor on the server side is:
// without always notifying the clients about the changed cost tables. // the estimated time cost of a request is fixed (hardcoded), but the configuration
// of the machine running the server differs from node to node. Therefore, the request
// serving time varies greatly between machines, and even on the same machine it
// varies with the request pressure.
//
// In order to limit resources more effectively, we apply the global factor to the
// serving time so that the result is as close as possible to the estimated time cost,
// no matter whether the server is slow or fast. We also scale totalRecharge with the
// global factor so that a fast server can serve more requests than estimated and a
// slow server can reduce the request pressure.
//
// Instead of scaling the cost values, the real value of cost units is changed by
// applying the factor to the serving times. This is more convenient because the
// changes in the cost factor can be applied immediately without always notifying
// the clients about the changed cost tables.
type costTracker struct { type costTracker struct {
db ethdb.Database db ethdb.Database
stopCh chan chan struct{} stopCh chan chan struct{}
inSizeFactor, outSizeFactor float64 inSizeFactor float64
gf, utilTarget float64 outSizeFactor float64
minBufLimit uint64 factor float64
utilTarget float64
minBufLimit uint64
gfUpdateCh chan gfUpdate
gfLock sync.RWMutex gfLock sync.RWMutex
reqInfoCh chan reqInfo
totalRechargeCh chan uint64 totalRechargeCh chan uint64
stats map[uint64][]uint64 stats map[uint64][]uint64 // Used for testing purposes.
logger *csvlogger.Logger
logRecentTime, logRecentAvg, logTotalRecharge, logRelCost *csvlogger.Channel
} }
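To make the comment above concrete, a minimal self-contained sketch (invented numbers, not part of the diff) of how the global factor turns measured serving time into cost units and scales the recharge budget in the same direction:
package main

import "fmt"

func main() {
	// Invented numbers for illustration only.
	utilTarget := 3000000.0 // targeted cost units of serving time per second
	factor := 1.8           // current global cost factor of this server

	servingTime := 250000.0              // measured serving time of one request, in ns
	realCost := servingTime * factor     // serving time expressed in cost units
	totalRecharge := utilTarget * factor // recharge budget scaled by the same factor

	fmt.Println("real cost:", realCost, "total recharge:", totalRecharge)
}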
// newCostTracker creates a cost tracker and loads the cost factor statistics from the database. // newCostTracker creates a cost tracker and loads the cost factor statistics from the database.
// It also returns the minimum capacity that can be assigned to any peer. // It also returns the minimum capacity that can be assigned to any peer.
func newCostTracker(db ethdb.Database, config *eth.Config, logger *csvlogger.Logger) (*costTracker, uint64) { func newCostTracker(db ethdb.Database, config *eth.Config) (*costTracker, uint64) {
utilTarget := float64(config.LightServ) * flowcontrol.FixedPointMultiplier / 100 utilTarget := float64(config.LightServ) * flowcontrol.FixedPointMultiplier / 100
ct := &costTracker{ ct := &costTracker{
db: db, db: db,
stopCh: make(chan chan struct{}), stopCh: make(chan chan struct{}),
utilTarget: utilTarget, reqInfoCh: make(chan reqInfo, 100),
logger: logger, utilTarget: utilTarget,
logRelCost: logger.NewMinMaxChannel("relativeCost", true),
logRecentTime: logger.NewMinMaxChannel("recentTime", true),
logRecentAvg: logger.NewMinMaxChannel("recentAvg", true),
logTotalRecharge: logger.NewChannel("totalRecharge", 0.01),
} }
if config.LightBandwidthIn > 0 { if config.LightBandwidthIn > 0 {
ct.inSizeFactor = utilTarget / float64(config.LightBandwidthIn) ct.inSizeFactor = utilTarget / float64(config.LightBandwidthIn)
@ -204,8 +212,15 @@ func (ct *costTracker) makeCostList(globalFactor float64) RequestCostList {
return list return list
} }
type gfUpdate struct { // reqInfo contains the estimated time cost and the actual request serving time
avgTimeCost, servingTime float64 // which acts as a feed source to update the factor maintained by costTracker.
type reqInfo struct {
// avgTimeCost is the estimated time cost corresponding to maxCostTable.
avgTimeCost float64
// servingTime is the CPU time corresponding to the actual processing of
// the request.
servingTime float64
} }
// gfLoop starts an event loop which updates the global cost factor which is // gfLoop starts an event loop which updates the global cost factor which is
@ -218,43 +233,48 @@ type gfUpdate struct {
// total allowed serving time per second but nominated in cost units, should // total allowed serving time per second but nominated in cost units, should
// also be scaled with the cost factor and is also updated by this loop. // also be scaled with the cost factor and is also updated by this loop.
func (ct *costTracker) gfLoop() { func (ct *costTracker) gfLoop() {
var gfLog, recentTime, recentAvg float64 var (
lastUpdate := mclock.Now() factor, totalRecharge float64
expUpdate := lastUpdate gfLog, recentTime, recentAvg float64
lastUpdate, expUpdate = mclock.Now(), mclock.Now()
)
// Load historical cost factor statistics from the database.
data, _ := ct.db.Get([]byte(gfDbKey)) data, _ := ct.db.Get([]byte(gfDbKey))
if len(data) == 8 { if len(data) == 8 {
gfLog = math.Float64frombits(binary.BigEndian.Uint64(data[:])) gfLog = math.Float64frombits(binary.BigEndian.Uint64(data[:]))
} }
gf := math.Exp(gfLog) ct.factor = math.Exp(gfLog)
ct.gf = gf factor, totalRecharge = ct.factor, ct.utilTarget*ct.factor
totalRecharge := ct.utilTarget * gf
ct.gfUpdateCh = make(chan gfUpdate, 100) // In order to collect factor statistics under high request pressure only,
threshold := gfUsageThreshold * float64(gfUsageTC) * ct.utilTarget / 1000000 // we only adjust the factor when the recent factor usage is above the threshold.
threshold := gfUsageThreshold * float64(gfUsageTC) * ct.utilTarget / flowcontrol.FixedPointMultiplier
go func() { go func() {
saveCostFactor := func() { saveCostFactor := func() {
var data [8]byte var data [8]byte
binary.BigEndian.PutUint64(data[:], math.Float64bits(gfLog)) binary.BigEndian.PutUint64(data[:], math.Float64bits(gfLog))
ct.db.Put([]byte(gfDbKey), data[:]) ct.db.Put([]byte(gfDbKey), data[:])
log.Debug("global cost factor saved", "value", gf) log.Debug("global cost factor saved", "value", factor)
} }
saveTicker := time.NewTicker(time.Minute * 10) saveTicker := time.NewTicker(time.Minute * 10)
for { for {
select { select {
case r := <-ct.gfUpdateCh: case r := <-ct.reqInfoCh:
requestServedMeter.Mark(int64(r.servingTime))
requestEstimatedMeter.Mark(int64(r.avgTimeCost / factor))
requestServedTimer.Update(time.Duration(r.servingTime))
relativeCostHistogram.Update(int64(r.avgTimeCost / factor / r.servingTime))
now := mclock.Now() now := mclock.Now()
if ct.logRelCost != nil && r.avgTimeCost > 1e-20 {
ct.logRelCost.Update(r.servingTime * gf / r.avgTimeCost)
}
if r.servingTime > 1000000000 {
ct.logger.Event(fmt.Sprintf("Very long servingTime = %f avgTimeCost = %f costFactor = %f", r.servingTime, r.avgTimeCost, gf))
}
dt := float64(now - expUpdate) dt := float64(now - expUpdate)
expUpdate = now expUpdate = now
exp := math.Exp(-dt / float64(gfUsageTC)) exp := math.Exp(-dt / float64(gfUsageTC))
// calculate gf correction until now, based on previous values
// calculate factor correction until now, based on previous values
var gfCorr float64 var gfCorr float64
max := recentTime max := recentTime
if recentAvg > max { if recentAvg > max {
@ -268,27 +288,28 @@ func (ct *costTracker) gfLoop() {
} else { } else {
gfCorr = math.Log(max/threshold) * float64(gfUsageTC) gfCorr = math.Log(max/threshold) * float64(gfUsageTC)
} }
// calculate log(gf) correction with the right direction and time constant // calculate log(factor) correction with the right direction and time constant
if recentTime > recentAvg { if recentTime > recentAvg {
// drop gf if actual serving times are larger than average estimates // drop factor if actual serving times are larger than average estimates
gfCorr /= -float64(gfDropTC) gfCorr /= -float64(gfDropTC)
} else { } else {
// raise gf if actual serving times are smaller than average estimates // raise factor if actual serving times are smaller than average estimates
gfCorr /= float64(gfRaiseTC) gfCorr /= float64(gfRaiseTC)
} }
} }
// update recent cost values with current request // update recent cost values with current request
recentTime = recentTime*exp + r.servingTime recentTime = recentTime*exp + r.servingTime
recentAvg = recentAvg*exp + r.avgTimeCost/gf recentAvg = recentAvg*exp + r.avgTimeCost/factor
if gfCorr != 0 { if gfCorr != 0 {
// Apply the correction to factor
gfLog += gfCorr gfLog += gfCorr
gf = math.Exp(gfLog) factor = math.Exp(gfLog)
// Notify outside modules of the new factor and totalRecharge.
if time.Duration(now-lastUpdate) > time.Second { if time.Duration(now-lastUpdate) > time.Second {
totalRecharge = ct.utilTarget * gf totalRecharge, lastUpdate = ct.utilTarget*factor, now
lastUpdate = now
ct.gfLock.Lock() ct.gfLock.Lock()
ct.gf = gf ct.factor = factor
ch := ct.totalRechargeCh ch := ct.totalRechargeCh
ct.gfLock.Unlock() ct.gfLock.Unlock()
if ch != nil { if ch != nil {
@ -297,12 +318,12 @@ func (ct *costTracker) gfLoop() {
default: default:
} }
} }
log.Debug("global cost factor updated", "gf", gf) log.Debug("global cost factor updated", "factor", factor)
} }
} }
ct.logRecentTime.Update(recentTime) recentServedGauge.Update(int64(recentTime))
ct.logRecentAvg.Update(recentAvg) recentEstimatedGauge.Update(int64(recentAvg))
ct.logTotalRecharge.Update(totalRecharge) totalRechargeGauge.Update(int64(totalRecharge))
case <-saveTicker.C: case <-saveTicker.C:
saveCostFactor() saveCostFactor()
@ -321,7 +342,7 @@ func (ct *costTracker) globalFactor() float64 {
ct.gfLock.RLock() ct.gfLock.RLock()
defer ct.gfLock.RUnlock() defer ct.gfLock.RUnlock()
return ct.gf return ct.factor
} }
// totalRecharge returns the current total recharge parameter which is used by // totalRecharge returns the current total recharge parameter which is used by
@ -330,7 +351,7 @@ func (ct *costTracker) totalRecharge() uint64 {
ct.gfLock.RLock() ct.gfLock.RLock()
defer ct.gfLock.RUnlock() defer ct.gfLock.RUnlock()
return uint64(ct.gf * ct.utilTarget) return uint64(ct.factor * ct.utilTarget)
} }
// subscribeTotalRecharge returns all future updates to the total recharge value // subscribeTotalRecharge returns all future updates to the total recharge value
@ -340,7 +361,7 @@ func (ct *costTracker) subscribeTotalRecharge(ch chan uint64) uint64 {
defer ct.gfLock.Unlock() defer ct.gfLock.Unlock()
ct.totalRechargeCh = ch ct.totalRechargeCh = ch
return uint64(ct.gf * ct.utilTarget) return uint64(ct.factor * ct.utilTarget)
} }
// updateStats updates the global cost factor and (if enabled) the real cost vs. // updateStats updates the global cost factor and (if enabled) the real cost vs.
@ -349,7 +370,7 @@ func (ct *costTracker) updateStats(code, amount, servingTime, realCost uint64) {
avg := reqAvgTimeCost[code] avg := reqAvgTimeCost[code]
avgTimeCost := avg.baseCost + amount*avg.reqCost avgTimeCost := avg.baseCost + amount*avg.reqCost
select { select {
case ct.gfUpdateCh <- gfUpdate{float64(avgTimeCost), float64(servingTime)}: case ct.reqInfoCh <- reqInfo{float64(avgTimeCost), float64(servingTime)}:
default: default:
} }
if makeCostStats { if makeCostStats {


@ -1,227 +0,0 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package csvlogger
import (
"fmt"
"os"
"sync"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/log"
)
// Logger is a metrics/events logger that writes logged values and events into a comma-separated file
type Logger struct {
file *os.File
started mclock.AbsTime
channels []*Channel
period time.Duration
stopCh, stopped chan struct{}
storeCh chan string
eventHeader string
}
// NewLogger creates a new Logger
func NewLogger(fileName string, updatePeriod time.Duration, eventHeader string) *Logger {
if fileName == "" {
return nil
}
f, err := os.Create(fileName)
if err != nil {
log.Error("Error creating log file", "name", fileName, "error", err)
return nil
}
return &Logger{
file: f,
period: updatePeriod,
stopCh: make(chan struct{}),
storeCh: make(chan string, 1),
eventHeader: eventHeader,
}
}
// NewChannel creates a new value logger channel that writes values in a single
// column. If the relative change of the value is bigger than the given threshold
// then a new line is added immediately (threshold can also be 0).
func (l *Logger) NewChannel(name string, threshold float64) *Channel {
if l == nil {
return nil
}
c := &Channel{
logger: l,
name: name,
threshold: threshold,
}
l.channels = append(l.channels, c)
return c
}
// NewMinMaxChannel creates a new value logger channel that writes the minimum and
// maximum of the tracked value in two columns. It never triggers adding a new line.
// If zeroDefault is true then 0 is written to both min and max columns if no update
// was given during the last period. If it is false then the last update will appear
// in both columns.
func (l *Logger) NewMinMaxChannel(name string, zeroDefault bool) *Channel {
if l == nil {
return nil
}
c := &Channel{
logger: l,
name: name,
minmax: true,
mmZeroDefault: zeroDefault,
}
l.channels = append(l.channels, c)
return c
}
func (l *Logger) store(event string) {
s := fmt.Sprintf("%g", float64(mclock.Now()-l.started)/1000000000)
for _, ch := range l.channels {
s += ", " + ch.store()
}
if event != "" {
s += ", " + event
}
l.file.WriteString(s + "\n")
}
// Start writes the header line and starts the logger
func (l *Logger) Start() {
if l == nil {
return
}
l.started = mclock.Now()
s := "Time"
for _, ch := range l.channels {
s += ", " + ch.header()
}
if l.eventHeader != "" {
s += ", " + l.eventHeader
}
l.file.WriteString(s + "\n")
go func() {
timer := time.NewTimer(l.period)
for {
select {
case <-timer.C:
l.store("")
timer.Reset(l.period)
case event := <-l.storeCh:
l.store(event)
if !timer.Stop() {
<-timer.C
}
timer.Reset(l.period)
case <-l.stopCh:
close(l.stopped)
return
}
}
}()
}
// Stop stops the logger and closes the file
func (l *Logger) Stop() {
if l == nil {
return
}
l.stopped = make(chan struct{})
close(l.stopCh)
<-l.stopped
l.file.Close()
}
// Event immediately adds a new line and adds the given event string in the last column
func (l *Logger) Event(event string) {
if l == nil {
return
}
select {
case l.storeCh <- event:
case <-l.stopCh:
}
}
// Channel represents a logger channel tracking a single value
type Channel struct {
logger *Logger
lock sync.Mutex
name string
threshold, storeMin, storeMax, lastValue, min, max float64
minmax, mmSet, mmZeroDefault bool
}
// Update updates the tracked value
func (lc *Channel) Update(value float64) {
if lc == nil {
return
}
lc.lock.Lock()
defer lc.lock.Unlock()
lc.lastValue = value
if lc.minmax {
if value > lc.max || !lc.mmSet {
lc.max = value
}
if value < lc.min || !lc.mmSet {
lc.min = value
}
lc.mmSet = true
} else {
if value < lc.storeMin || value > lc.storeMax {
select {
case lc.logger.storeCh <- "":
default:
}
}
}
}
func (lc *Channel) store() (s string) {
lc.lock.Lock()
defer lc.lock.Unlock()
if lc.minmax {
s = fmt.Sprintf("%g, %g", lc.min, lc.max)
lc.mmSet = false
if lc.mmZeroDefault {
lc.min = 0
} else {
lc.min = lc.lastValue
}
lc.max = lc.min
} else {
s = fmt.Sprintf("%g", lc.lastValue)
lc.storeMin = lc.lastValue * (1 - lc.threshold)
lc.storeMax = lc.lastValue * (1 + lc.threshold)
if lc.lastValue < 0 {
lc.storeMin, lc.storeMax = lc.storeMax, lc.storeMin
}
}
return
}
func (lc *Channel) header() string {
if lc.minmax {
return lc.name + " (min), " + lc.name + " (max)"
}
return lc.name
}
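For context, the deleted csvlogger package was typically wired up roughly as follows (a hedged sketch reconstructed from the code above; the file name, period and values are invented):
logger := csvlogger.NewLogger("les-metrics.csv", 10*time.Second, "event, peerId")
totalCap := logger.NewChannel("totalCapacity", 0.01)  // single-column value channel
recent := logger.NewMinMaxChannel("recentTime", true) // min/max column pair
logger.Start()
totalCap.Update(123456)
recent.Update(42)
logger.Event("freeClientPool: accepted, 0xabcd")
logger.Stop()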


@ -26,7 +26,6 @@ import (
"github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/common/prque" "github.com/ethereum/go-ethereum/common/prque"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/les/csvlogger"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
) )
@ -53,8 +52,7 @@ type freeClientPool struct {
connectedLimit, totalLimit int connectedLimit, totalLimit int
freeClientCap uint64 freeClientCap uint64
logger *csvlogger.Logger connectedCap uint64
logTotalFreeConn *csvlogger.Channel
addressMap map[string]*freeClientPoolEntry addressMap map[string]*freeClientPoolEntry
connPool, disconnPool *prque.Prque connPool, disconnPool *prque.Prque
@ -69,18 +67,16 @@ const (
) )
// newFreeClientPool creates a new free client pool // newFreeClientPool creates a new free client pool
func newFreeClientPool(db ethdb.Database, freeClientCap uint64, totalLimit int, clock mclock.Clock, removePeer func(string), metricsLogger, eventLogger *csvlogger.Logger) *freeClientPool { func newFreeClientPool(db ethdb.Database, freeClientCap uint64, totalLimit int, clock mclock.Clock, removePeer func(string)) *freeClientPool {
pool := &freeClientPool{ pool := &freeClientPool{
db: db, db: db,
clock: clock, clock: clock,
addressMap: make(map[string]*freeClientPoolEntry), addressMap: make(map[string]*freeClientPoolEntry),
connPool: prque.New(poolSetIndex), connPool: prque.New(poolSetIndex),
disconnPool: prque.New(poolSetIndex), disconnPool: prque.New(poolSetIndex),
freeClientCap: freeClientCap, freeClientCap: freeClientCap,
totalLimit: totalLimit, totalLimit: totalLimit,
logger: eventLogger, removePeer: removePeer,
logTotalFreeConn: metricsLogger.NewChannel("totalFreeConn", 0),
removePeer: removePeer,
} }
pool.loadFromDb() pool.loadFromDb()
return pool return pool
@ -126,10 +122,7 @@ func (f *freeClientPool) connect(address, id string) bool {
if f.closed { if f.closed {
return false return false
} }
f.logger.Event("freeClientPool: connecting from " + address + ", " + id)
if f.connectedLimit == 0 { if f.connectedLimit == 0 {
f.logger.Event("freeClientPool: rejected, " + id)
log.Debug("Client rejected", "address", address) log.Debug("Client rejected", "address", address)
return false return false
} }
@ -141,7 +134,6 @@ func (f *freeClientPool) connect(address, id string) bool {
f.addressMap[address] = e f.addressMap[address] = e
} else { } else {
if e.connected { if e.connected {
f.logger.Event("freeClientPool: already connected, " + id)
log.Debug("Client already connected", "address", address) log.Debug("Client already connected", "address", address)
return false return false
} }
@ -154,12 +146,13 @@ func (f *freeClientPool) connect(address, id string) bool {
if e.linUsage+int64(connectedBias)-i.linUsage < 0 { if e.linUsage+int64(connectedBias)-i.linUsage < 0 {
// kick it out and accept the new client // kick it out and accept the new client
f.dropClient(i, now) f.dropClient(i, now)
f.logger.Event("freeClientPool: kicked out, " + i.id) clientKickedMeter.Mark(1)
f.connectedCap -= f.freeClientCap
} else { } else {
// keep the old client and reject the new one // keep the old client and reject the new one
f.connPool.Push(i, i.linUsage) f.connPool.Push(i, i.linUsage)
f.logger.Event("freeClientPool: rejected, " + id)
log.Debug("Client rejected", "address", address) log.Debug("Client rejected", "address", address)
clientRejectedMeter.Mark(1)
return false return false
} }
} }
@ -167,11 +160,12 @@ func (f *freeClientPool) connect(address, id string) bool {
e.connected = true e.connected = true
e.id = id e.id = id
f.connPool.Push(e, e.linUsage) f.connPool.Push(e, e.linUsage)
f.logTotalFreeConn.Update(float64(uint64(f.connPool.Size()) * f.freeClientCap))
if f.connPool.Size()+f.disconnPool.Size() > f.totalLimit { if f.connPool.Size()+f.disconnPool.Size() > f.totalLimit {
f.disconnPool.Pop() f.disconnPool.Pop()
} }
f.logger.Event("freeClientPool: accepted, " + id) f.connectedCap += f.freeClientCap
totalConnectedGauge.Update(int64(f.connectedCap))
clientConnectedMeter.Mark(1)
log.Debug("Client accepted", "address", address) log.Debug("Client accepted", "address", address)
return true return true
} }
@ -203,13 +197,12 @@ func (f *freeClientPool) disconnect(address string) {
log.Debug("Client already disconnected", "address", address) log.Debug("Client already disconnected", "address", address)
return return
} }
f.connPool.Remove(e.index) f.connPool.Remove(e.index)
f.logTotalFreeConn.Update(float64(uint64(f.connPool.Size()) * f.freeClientCap))
f.calcLogUsage(e, now) f.calcLogUsage(e, now)
e.connected = false e.connected = false
f.disconnPool.Push(e, -e.logUsage) f.disconnPool.Push(e, -e.logUsage)
f.logger.Event("freeClientPool: disconnected, " + e.id) f.connectedCap -= f.freeClientCap
totalConnectedGauge.Update(int64(f.connectedCap))
log.Debug("Client disconnected", "address", address) log.Debug("Client disconnected", "address", address)
} }
@ -227,15 +220,15 @@ func (f *freeClientPool) setLimits(count int, totalCap uint64) {
for f.connPool.Size() > f.connectedLimit { for f.connPool.Size() > f.connectedLimit {
i := f.connPool.PopItem().(*freeClientPoolEntry) i := f.connPool.PopItem().(*freeClientPoolEntry)
f.dropClient(i, now) f.dropClient(i, now)
f.logger.Event("freeClientPool: setLimits kicked out, " + i.id) f.connectedCap -= f.freeClientCap
} }
totalConnectedGauge.Update(int64(f.connectedCap))
} }
// dropClient disconnects a client and also moves it from the connected to the // dropClient disconnects a client and also moves it from the connected to the
// disconnected pool // disconnected pool
func (f *freeClientPool) dropClient(i *freeClientPoolEntry, now mclock.AbsTime) { func (f *freeClientPool) dropClient(i *freeClientPoolEntry, now mclock.AbsTime) {
f.connPool.Remove(i.index) f.connPool.Remove(i.index)
f.logTotalFreeConn.Update(float64(uint64(f.connPool.Size()) * f.freeClientCap))
f.calcLogUsage(i, now) f.calcLogUsage(i, now)
i.connected = false i.connected = false
f.disconnPool.Push(i, -i.logUsage) f.disconnPool.Push(i, -i.logUsage)


@ -61,7 +61,7 @@ func testFreeClientPool(t *testing.T, connLimit, clientCount int) {
} }
disconnCh <- i disconnCh <- i
} }
pool = newFreeClientPool(db, 1, 10000, &clock, disconnFn, nil, nil) pool = newFreeClientPool(db, 1, 10000, &clock, disconnFn)
) )
pool.setLimits(connLimit, uint64(connLimit)) pool.setLimits(connLimit, uint64(connLimit))
@ -130,7 +130,7 @@ func testFreeClientPool(t *testing.T, connLimit, clientCount int) {
// close and restart pool // close and restart pool
pool.stop() pool.stop()
pool = newFreeClientPool(db, 1, 10000, &clock, disconnFn, nil, nil) pool = newFreeClientPool(db, 1, 10000, &clock, disconnFn)
pool.setLimits(connLimit, uint64(connLimit)) pool.setLimits(connLimit, uint64(connLimit))
// try connecting all known peers (connLimit should be filled up) // try connecting all known peers (connLimit should be filled up)


@ -35,7 +35,6 @@ import (
"github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/les/csvlogger"
"github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
@ -124,7 +123,6 @@ type ProtocolManager struct {
wg *sync.WaitGroup wg *sync.WaitGroup
eventMux *event.TypeMux eventMux *event.TypeMux
logger *csvlogger.Logger
// Callbacks // Callbacks
synced func() bool synced func() bool
@ -262,11 +260,12 @@ func (pm *ProtocolManager) handle(p *peer) error {
// Ignore maxPeers if this is a trusted peer // Ignore maxPeers if this is a trusted peer
// In server mode we try to check into the client pool after handshake // In server mode we try to check into the client pool after handshake
if pm.client && pm.peers.Len() >= pm.maxPeers && !p.Peer.Info().Network.Trusted { if pm.client && pm.peers.Len() >= pm.maxPeers && !p.Peer.Info().Network.Trusted {
pm.logger.Event("Rejected (too many peers), " + p.id) clientRejectedMeter.Mark(1)
return p2p.DiscTooManyPeers return p2p.DiscTooManyPeers
} }
// Reject light clients if server is not synced. // Reject light clients if server is not synced.
if !pm.client && !pm.synced() { if !pm.client && !pm.synced() {
clientRejectedMeter.Mark(1)
return p2p.DiscRequested return p2p.DiscRequested
} }
p.Log().Debug("Light Ethereum peer connected", "name", p.Name()) p.Log().Debug("Light Ethereum peer connected", "name", p.Name())
@ -281,7 +280,7 @@ func (pm *ProtocolManager) handle(p *peer) error {
) )
if err := p.Handshake(td, hash, number, genesis.Hash(), pm.server); err != nil { if err := p.Handshake(td, hash, number, genesis.Hash(), pm.server); err != nil {
p.Log().Debug("Light Ethereum handshake failed", "err", err) p.Log().Debug("Light Ethereum handshake failed", "err", err)
pm.logger.Event("Handshake error: " + err.Error() + ", " + p.id) clientErrorMeter.Mark(1)
return err return err
} }
if p.fcClient != nil { if p.fcClient != nil {
@ -294,14 +293,14 @@ func (pm *ProtocolManager) handle(p *peer) error {
// Register the peer locally // Register the peer locally
if err := pm.peers.Register(p); err != nil { if err := pm.peers.Register(p); err != nil {
clientErrorMeter.Mark(1)
p.Log().Error("Light Ethereum peer registration failed", "err", err) p.Log().Error("Light Ethereum peer registration failed", "err", err)
pm.logger.Event("Peer registration error: " + err.Error() + ", " + p.id)
return err return err
} }
pm.logger.Event("Connection established, " + p.id) connectedAt := time.Now()
defer func() { defer func() {
pm.logger.Event("Closed connection, " + p.id)
pm.removePeer(p.id) pm.removePeer(p.id)
connectionTimer.UpdateSince(connectedAt)
}() }()
// Register the peer in the downloader. If the downloader considers it banned, we disconnect // Register the peer in the downloader. If the downloader considers it banned, we disconnect
@ -317,11 +316,9 @@ func (pm *ProtocolManager) handle(p *peer) error {
pm.serverPool.registered(p.poolEntry) pm.serverPool.registered(p.poolEntry)
} }
} }
// main loop. handle incoming messages. // main loop. handle incoming messages.
for { for {
if err := pm.handleMsg(p); err != nil { if err := pm.handleMsg(p); err != nil {
pm.logger.Event("Message handling error: " + err.Error() + ", " + p.id)
p.Log().Debug("Light Ethereum message handling failed", "err", err) p.Log().Debug("Light Ethereum message handling failed", "err", err)
if p.fcServer != nil { if p.fcServer != nil {
p.fcServer.DumpLogs() p.fcServer.DumpLogs()


@ -231,7 +231,7 @@ func newTestProtocolManager(lightSync bool, blocks int, odr *LesOdr, indexers []
if !lightSync { if !lightSync {
srv := &LesServer{lesCommons: lesCommons{protocolManager: pm, chainDb: db}} srv := &LesServer{lesCommons: lesCommons{protocolManager: pm, chainDb: db}}
pm.server = srv pm.server = srv
pm.servingQueue = newServingQueue(int64(time.Millisecond*10), 1, nil) pm.servingQueue = newServingQueue(int64(time.Millisecond*10), 1)
pm.servingQueue.setThreads(4) pm.servingQueue.setThreads(4)
srv.defParams = flowcontrol.ServerParams{ srv.defParams = flowcontrol.ServerParams{


@ -22,46 +22,31 @@ import (
) )
var ( var (
/* propTxnInPacketsMeter = metrics.NewMeter("eth/prop/txns/in/packets")
propTxnInTrafficMeter = metrics.NewMeter("eth/prop/txns/in/traffic")
propTxnOutPacketsMeter = metrics.NewMeter("eth/prop/txns/out/packets")
propTxnOutTrafficMeter = metrics.NewMeter("eth/prop/txns/out/traffic")
propHashInPacketsMeter = metrics.NewMeter("eth/prop/hashes/in/packets")
propHashInTrafficMeter = metrics.NewMeter("eth/prop/hashes/in/traffic")
propHashOutPacketsMeter = metrics.NewMeter("eth/prop/hashes/out/packets")
propHashOutTrafficMeter = metrics.NewMeter("eth/prop/hashes/out/traffic")
propBlockInPacketsMeter = metrics.NewMeter("eth/prop/blocks/in/packets")
propBlockInTrafficMeter = metrics.NewMeter("eth/prop/blocks/in/traffic")
propBlockOutPacketsMeter = metrics.NewMeter("eth/prop/blocks/out/packets")
propBlockOutTrafficMeter = metrics.NewMeter("eth/prop/blocks/out/traffic")
reqHashInPacketsMeter = metrics.NewMeter("eth/req/hashes/in/packets")
reqHashInTrafficMeter = metrics.NewMeter("eth/req/hashes/in/traffic")
reqHashOutPacketsMeter = metrics.NewMeter("eth/req/hashes/out/packets")
reqHashOutTrafficMeter = metrics.NewMeter("eth/req/hashes/out/traffic")
reqBlockInPacketsMeter = metrics.NewMeter("eth/req/blocks/in/packets")
reqBlockInTrafficMeter = metrics.NewMeter("eth/req/blocks/in/traffic")
reqBlockOutPacketsMeter = metrics.NewMeter("eth/req/blocks/out/packets")
reqBlockOutTrafficMeter = metrics.NewMeter("eth/req/blocks/out/traffic")
reqHeaderInPacketsMeter = metrics.NewMeter("eth/req/headers/in/packets")
reqHeaderInTrafficMeter = metrics.NewMeter("eth/req/headers/in/traffic")
reqHeaderOutPacketsMeter = metrics.NewMeter("eth/req/headers/out/packets")
reqHeaderOutTrafficMeter = metrics.NewMeter("eth/req/headers/out/traffic")
reqBodyInPacketsMeter = metrics.NewMeter("eth/req/bodies/in/packets")
reqBodyInTrafficMeter = metrics.NewMeter("eth/req/bodies/in/traffic")
reqBodyOutPacketsMeter = metrics.NewMeter("eth/req/bodies/out/packets")
reqBodyOutTrafficMeter = metrics.NewMeter("eth/req/bodies/out/traffic")
reqStateInPacketsMeter = metrics.NewMeter("eth/req/states/in/packets")
reqStateInTrafficMeter = metrics.NewMeter("eth/req/states/in/traffic")
reqStateOutPacketsMeter = metrics.NewMeter("eth/req/states/out/packets")
reqStateOutTrafficMeter = metrics.NewMeter("eth/req/states/out/traffic")
reqReceiptInPacketsMeter = metrics.NewMeter("eth/req/receipts/in/packets")
reqReceiptInTrafficMeter = metrics.NewMeter("eth/req/receipts/in/traffic")
reqReceiptOutPacketsMeter = metrics.NewMeter("eth/req/receipts/out/packets")
reqReceiptOutTrafficMeter = metrics.NewMeter("eth/req/receipts/out/traffic")*/
miscInPacketsMeter = metrics.NewRegisteredMeter("les/misc/in/packets", nil) miscInPacketsMeter = metrics.NewRegisteredMeter("les/misc/in/packets", nil)
miscInTrafficMeter = metrics.NewRegisteredMeter("les/misc/in/traffic", nil) miscInTrafficMeter = metrics.NewRegisteredMeter("les/misc/in/traffic", nil)
miscOutPacketsMeter = metrics.NewRegisteredMeter("les/misc/out/packets", nil) miscOutPacketsMeter = metrics.NewRegisteredMeter("les/misc/out/packets", nil)
miscOutTrafficMeter = metrics.NewRegisteredMeter("les/misc/out/traffic", nil) miscOutTrafficMeter = metrics.NewRegisteredMeter("les/misc/out/traffic", nil)
connectionTimer = metrics.NewRegisteredTimer("les/connectionTime", nil)
totalConnectedGauge = metrics.NewRegisteredGauge("les/server/totalConnected", nil)
totalCapacityGauge = metrics.NewRegisteredGauge("les/server/totalCapacity", nil)
totalRechargeGauge = metrics.NewRegisteredGauge("les/server/totalRecharge", nil)
blockProcessingTimer = metrics.NewRegisteredTimer("les/server/blockProcessingTime", nil)
requestServedTimer = metrics.NewRegisteredTimer("les/server/requestServed", nil)
requestServedMeter = metrics.NewRegisteredMeter("les/server/totalRequestServed", nil)
requestEstimatedMeter = metrics.NewRegisteredMeter("les/server/totalRequestEstimated", nil)
relativeCostHistogram = metrics.NewRegisteredHistogram("les/server/relativeCost", nil, metrics.NewExpDecaySample(1028, 0.015))
recentServedGauge = metrics.NewRegisteredGauge("les/server/recentRequestServed", nil)
recentEstimatedGauge = metrics.NewRegisteredGauge("les/server/recentRequestEstimated", nil)
sqServedGauge = metrics.NewRegisteredGauge("les/server/servingQueue/served", nil)
sqQueuedGauge = metrics.NewRegisteredGauge("les/server/servingQueue/queued", nil)
clientConnectedMeter = metrics.NewRegisteredMeter("les/server/clientEvent/connected", nil)
clientRejectedMeter = metrics.NewRegisteredMeter("les/server/clientEvent/rejected", nil)
clientKickedMeter = metrics.NewRegisteredMeter("les/server/clientEvent/kicked", nil)
// clientDisconnectedMeter = metrics.NewRegisteredMeter("les/server/clientEvent/disconnected", nil)
clientFreezeMeter = metrics.NewRegisteredMeter("les/server/clientEvent/freeze", nil)
clientErrorMeter = metrics.NewRegisteredMeter("les/server/clientEvent/error", nil)
) )
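The CSV channels are replaced by regular go-ethereum metrics registered in the default registry; as a hedged sketch (not part of the diff), they can be enumerated like any other metric:
// Illustration only: list the LES metrics registered above.
metrics.DefaultRegistry.Each(func(name string, metric interface{}) {
	if strings.HasPrefix(name, "les/") {
		log.Info("LES metric registered", "name", name)
	}
})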
// meteredMsgReadWriter is a wrapper around a p2p.MsgReadWriter, capable of // meteredMsgReadWriter is a wrapper around a p2p.MsgReadWriter, capable of


@ -28,7 +28,6 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/les/csvlogger"
"github.com/ethereum/go-ethereum/les/flowcontrol" "github.com/ethereum/go-ethereum/les/flowcontrol"
"github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
@ -40,15 +39,6 @@ import (
const bufLimitRatio = 6000 // fixed bufLimit/MRR ratio const bufLimitRatio = 6000 // fixed bufLimit/MRR ratio
const (
logFileName = "" // csv log file name (disabled if empty)
logClientPoolMetrics = true // log client pool metrics
logClientPoolEvents = false // detailed client pool event logging
logRequestServing = true // log request serving metrics and events
logBlockProcEvents = true // log block processing events
logProtocolHandler = true // log protocol handler events
)
type LesServer struct { type LesServer struct {
lesCommons lesCommons
@ -62,26 +52,15 @@ type LesServer struct {
privateKey *ecdsa.PrivateKey privateKey *ecdsa.PrivateKey
quitSync chan struct{} quitSync chan struct{}
onlyAnnounce bool onlyAnnounce bool
csvLogger *csvlogger.Logger
logTotalCap *csvlogger.Channel
thcNormal, thcBlockProcessing int // serving thread count for normal operation and block processing mode thcNormal, thcBlockProcessing int // serving thread count for normal operation and block processing mode
maxPeers int maxPeers int
minCapacity, freeClientCap uint64 minCapacity, freeClientCap uint64
freeClientPool *freeClientPool freeClientPool *freeClientPool
priorityClientPool *priorityClientPool
} }
func NewLesServer(e *eth.Ethereum, config *eth.Config) (*LesServer, error) { func NewLesServer(e *eth.Ethereum, config *eth.Config) (*LesServer, error) {
var csvLogger *csvlogger.Logger
if logFileName != "" {
csvLogger = csvlogger.NewLogger(logFileName, time.Second*10, "event, peerId")
}
requestLogger := csvLogger
if !logRequestServing {
requestLogger = nil
}
lesTopics := make([]discv5.Topic, len(AdvertiseProtocolVersions)) lesTopics := make([]discv5.Topic, len(AdvertiseProtocolVersions))
for i, pv := range AdvertiseProtocolVersions { for i, pv := range AdvertiseProtocolVersions {
lesTopics[i] = lesTopic(e.BlockChain().Genesis().Hash(), pv) lesTopics[i] = lesTopic(e.BlockChain().Genesis().Hash(), pv)
@ -99,10 +78,8 @@ func NewLesServer(e *eth.Ethereum, config *eth.Config) (*LesServer, error) {
quitSync: quitSync, quitSync: quitSync,
lesTopics: lesTopics, lesTopics: lesTopics,
onlyAnnounce: config.OnlyAnnounce, onlyAnnounce: config.OnlyAnnounce,
csvLogger: csvLogger,
logTotalCap: requestLogger.NewChannel("totalCapacity", 0.01),
} }
srv.costTracker, srv.minCapacity = newCostTracker(e.ChainDb(), config, requestLogger) srv.costTracker, srv.minCapacity = newCostTracker(e.ChainDb(), config)
logger := log.New() logger := log.New()
srv.thcNormal = config.LightServ * 4 / 100 srv.thcNormal = config.LightServ * 4 / 100
@ -131,10 +108,7 @@ func NewLesServer(e *eth.Ethereum, config *eth.Config) (*LesServer, error) {
return nil, err return nil, err
} }
srv.protocolManager = pm srv.protocolManager = pm
if logProtocolHandler { pm.servingQueue = newServingQueue(int64(time.Millisecond*10), float64(config.LightServ)/100)
pm.logger = csvLogger
}
pm.servingQueue = newServingQueue(int64(time.Millisecond*10), float64(config.LightServ)/100, requestLogger)
pm.server = srv pm.server = srv
return srv, nil return srv, nil
@ -142,12 +116,6 @@ func NewLesServer(e *eth.Ethereum, config *eth.Config) (*LesServer, error) {
func (s *LesServer) APIs() []rpc.API { func (s *LesServer) APIs() []rpc.API {
return []rpc.API{ return []rpc.API{
{
Namespace: "les",
Version: "1.0",
Service: NewPrivateLightServerAPI(s),
Public: false,
},
{ {
Namespace: "les", Namespace: "les",
Version: "1.0", Version: "1.0",
@ -163,11 +131,10 @@ func (s *LesServer) APIs() []rpc.API {
func (s *LesServer) startEventLoop() { func (s *LesServer) startEventLoop() {
s.protocolManager.wg.Add(1) s.protocolManager.wg.Add(1)
blockProcLogger := s.csvLogger var (
if !logBlockProcEvents { processing, procLast bool
blockProcLogger = nil procStarted time.Time
} )
var processing, procLast bool
blockProcFeed := make(chan bool, 100) blockProcFeed := make(chan bool, 100)
s.protocolManager.blockchain.(*core.BlockChain).SubscribeBlockProcessingEvent(blockProcFeed) s.protocolManager.blockchain.(*core.BlockChain).SubscribeBlockProcessingEvent(blockProcFeed)
totalRechargeCh := make(chan uint64, 100) totalRechargeCh := make(chan uint64, 100)
@ -176,13 +143,13 @@ func (s *LesServer) startEventLoop() {
updateRecharge := func() { updateRecharge := func() {
if processing { if processing {
if !procLast { if !procLast {
blockProcLogger.Event("block processing started") procStarted = time.Now()
} }
s.protocolManager.servingQueue.setThreads(s.thcBlockProcessing) s.protocolManager.servingQueue.setThreads(s.thcBlockProcessing)
s.fcManager.SetRechargeCurve(flowcontrol.PieceWiseLinear{{0, 0}, {totalRecharge, totalRecharge}}) s.fcManager.SetRechargeCurve(flowcontrol.PieceWiseLinear{{0, 0}, {totalRecharge, totalRecharge}})
} else { } else {
if procLast { if procLast {
blockProcLogger.Event("block processing finished") blockProcessingTimer.UpdateSince(procStarted)
} }
s.protocolManager.servingQueue.setThreads(s.thcNormal) s.protocolManager.servingQueue.setThreads(s.thcNormal)
s.fcManager.SetRechargeCurve(flowcontrol.PieceWiseLinear{{0, 0}, {totalRecharge / 16, totalRecharge / 2}, {totalRecharge / 2, totalRecharge / 2}, {totalRecharge, totalRecharge}}) s.fcManager.SetRechargeCurve(flowcontrol.PieceWiseLinear{{0, 0}, {totalRecharge / 16, totalRecharge / 2}, {totalRecharge / 2, totalRecharge / 2}, {totalRecharge, totalRecharge}})
@ -191,7 +158,7 @@ func (s *LesServer) startEventLoop() {
} }
updateRecharge() updateRecharge()
totalCapacity := s.fcManager.SubscribeTotalCapacity(totalCapacityCh) totalCapacity := s.fcManager.SubscribeTotalCapacity(totalCapacityCh)
s.priorityClientPool.setLimits(s.maxPeers, totalCapacity) s.freeClientPool.setLimits(s.maxPeers, totalCapacity)
var maxFreePeers uint64 var maxFreePeers uint64
go func() { go func() {
@ -202,13 +169,13 @@ func (s *LesServer) startEventLoop() {
case totalRecharge = <-totalRechargeCh: case totalRecharge = <-totalRechargeCh:
updateRecharge() updateRecharge()
case totalCapacity = <-totalCapacityCh: case totalCapacity = <-totalCapacityCh:
s.logTotalCap.Update(float64(totalCapacity)) totalCapacityGauge.Update(int64(totalCapacity))
newFreePeers := totalCapacity / s.freeClientCap newFreePeers := totalCapacity / s.freeClientCap
if newFreePeers < maxFreePeers && newFreePeers < uint64(s.maxPeers) { if newFreePeers < maxFreePeers && newFreePeers < uint64(s.maxPeers) {
log.Warn("Reduced total capacity", "maxFreePeers", newFreePeers) log.Warn("Reduced total capacity", "maxFreePeers", newFreePeers)
} }
maxFreePeers = newFreePeers maxFreePeers = newFreePeers
s.priorityClientPool.setLimits(s.maxPeers, totalCapacity) s.freeClientPool.setLimits(s.maxPeers, totalCapacity)
case <-s.protocolManager.quitSync: case <-s.protocolManager.quitSync:
s.protocolManager.wg.Done() s.protocolManager.wg.Done()
return return
@ -243,19 +210,9 @@ func (s *LesServer) Start(srvr *p2p.Server) {
maxCapacity = totalRecharge maxCapacity = totalRecharge
} }
s.fcManager.SetCapacityLimits(s.freeClientCap, maxCapacity, s.freeClientCap*2) s.fcManager.SetCapacityLimits(s.freeClientCap, maxCapacity, s.freeClientCap*2)
poolMetricsLogger := s.csvLogger s.freeClientPool = newFreeClientPool(s.chainDb, s.freeClientCap, 10000, mclock.System{}, func(id string) { go s.protocolManager.removePeer(id) })
if !logClientPoolMetrics { s.protocolManager.peers.notify(s.freeClientPool)
poolMetricsLogger = nil
}
poolEventLogger := s.csvLogger
if !logClientPoolEvents {
poolEventLogger = nil
}
s.freeClientPool = newFreeClientPool(s.chainDb, s.freeClientCap, 10000, mclock.System{}, func(id string) { go s.protocolManager.removePeer(id) }, poolMetricsLogger, poolEventLogger)
s.priorityClientPool = newPriorityClientPool(s.freeClientCap, s.protocolManager.peers, s.freeClientPool, poolMetricsLogger, poolEventLogger)
s.protocolManager.peers.notify(s.priorityClientPool)
s.csvLogger.Start()
s.startEventLoop() s.startEventLoop()
s.protocolManager.Start(s.config.LightPeers) s.protocolManager.Start(s.config.LightPeers)
if srvr.DiscV5 != nil { if srvr.DiscV5 != nil {
@ -296,7 +253,6 @@ func (s *LesServer) Stop() {
s.freeClientPool.stop() s.freeClientPool.stop()
s.costTracker.stop() s.costTracker.stop()
s.protocolManager.Stop() s.protocolManager.Stop()
s.csvLogger.Stop()
} }
// todo(rjl493456442) separate client and server implementation. // todo(rjl493456442) separate client and server implementation.


@ -17,14 +17,12 @@
package les package les
import ( import (
"fmt"
"sort" "sort"
"sync" "sync"
"sync/atomic" "sync/atomic"
"github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/common/prque" "github.com/ethereum/go-ethereum/common/prque"
"github.com/ethereum/go-ethereum/les/csvlogger"
) )
// servingQueue allows running tasks in a limited number of threads and puts the // servingQueue allows running tasks in a limited number of threads and puts the
@ -44,10 +42,6 @@ type servingQueue struct {
queue *prque.Prque // priority queue for waiting or suspended tasks queue *prque.Prque // priority queue for waiting or suspended tasks
best *servingTask // the highest priority task (not included in the queue) best *servingTask // the highest priority task (not included in the queue)
suspendBias int64 // priority bias against suspending an already running task suspendBias int64 // priority bias against suspending an already running task
logger *csvlogger.Logger
logRecentTime *csvlogger.Channel
logQueuedTime *csvlogger.Channel
} }
// servingTask represents a request serving task. Tasks can be implemented to // servingTask represents a request serving task. Tasks can be implemented to
@ -127,7 +121,7 @@ func (t *servingTask) waitOrStop() bool {
} }
// newServingQueue returns a new servingQueue // newServingQueue returns a new servingQueue
func newServingQueue(suspendBias int64, utilTarget float64, logger *csvlogger.Logger) *servingQueue { func newServingQueue(suspendBias int64, utilTarget float64) *servingQueue {
sq := &servingQueue{ sq := &servingQueue{
queue: prque.New(nil), queue: prque.New(nil),
suspendBias: suspendBias, suspendBias: suspendBias,
@ -140,9 +134,6 @@ func newServingQueue(suspendBias int64, utilTarget float64, logger *csvlogger.Lo
burstDropLimit: uint64(utilTarget * bufLimitRatio * 1000000), burstDropLimit: uint64(utilTarget * bufLimitRatio * 1000000),
burstDecRate: utilTarget, burstDecRate: utilTarget,
lastUpdate: mclock.Now(), lastUpdate: mclock.Now(),
logger: logger,
logRecentTime: logger.NewMinMaxChannel("recentTime", false),
logQueuedTime: logger.NewMinMaxChannel("queuedTime", false),
} }
sq.wg.Add(2) sq.wg.Add(2)
go sq.queueLoop() go sq.queueLoop()
@ -246,16 +237,13 @@ func (sq *servingQueue) freezePeers() {
} }
sort.Sort(peerList) sort.Sort(peerList)
drop := true drop := true
sq.logger.Event("freezing peers")
for _, tasks := range peerList { for _, tasks := range peerList {
if drop { if drop {
tasks.peer.freezeClient() tasks.peer.freezeClient()
tasks.peer.fcClient.Freeze() tasks.peer.fcClient.Freeze()
sq.queuedTime -= tasks.sumTime sq.queuedTime -= tasks.sumTime
if sq.logQueuedTime != nil { sqQueuedGauge.Update(int64(sq.queuedTime))
sq.logQueuedTime.Update(float64(sq.queuedTime) / 1000) clientFreezeMeter.Mark(1)
}
sq.logger.Event(fmt.Sprintf("frozen peer sumTime=%d, %v", tasks.sumTime, tasks.peer.id))
drop = sq.recentTime+sq.queuedTime > sq.burstDropLimit drop = sq.recentTime+sq.queuedTime > sq.burstDropLimit
for _, task := range tasks.list { for _, task := range tasks.list {
task.tokenCh <- nil task.tokenCh <- nil
@ -299,10 +287,8 @@ func (sq *servingQueue) addTask(task *servingTask) {
} }
sq.updateRecentTime() sq.updateRecentTime()
sq.queuedTime += task.expTime sq.queuedTime += task.expTime
if sq.logQueuedTime != nil { sqServedGauge.Update(int64(sq.recentTime))
sq.logRecentTime.Update(float64(sq.recentTime) / 1000) sqQueuedGauge.Update(int64(sq.queuedTime))
sq.logQueuedTime.Update(float64(sq.queuedTime) / 1000)
}
if sq.recentTime+sq.queuedTime > sq.burstLimit { if sq.recentTime+sq.queuedTime > sq.burstLimit {
sq.freezePeers() sq.freezePeers()
} }
@ -322,10 +308,8 @@ func (sq *servingQueue) queueLoop() {
sq.updateRecentTime() sq.updateRecentTime()
sq.queuedTime -= expTime sq.queuedTime -= expTime
sq.recentTime += expTime sq.recentTime += expTime
if sq.logQueuedTime != nil { sqServedGauge.Update(int64(sq.recentTime))
sq.logRecentTime.Update(float64(sq.recentTime) / 1000) sqQueuedGauge.Update(int64(sq.queuedTime))
sq.logQueuedTime.Update(float64(sq.queuedTime) / 1000)
}
if sq.queue.Size() == 0 { if sq.queue.Size() == 0 {
sq.best = nil sq.best = nil
} else { } else {