// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package les

import (
	"errors"
	"fmt"
	"math/big"
	"math/rand"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth"
	"github.com/ethereum/go-ethereum/les/flowcontrol"
	"github.com/ethereum/go-ethereum/light"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
)

var (
	errClosed            = errors.New("peer set is closed")
	errAlreadyRegistered = errors.New("peer is already registered")
	errNotRegistered     = errors.New("peer is not registered")
)

const (
	maxRequestErrors  = 20 // number of invalid requests tolerated (makes the protocol less brittle but still avoids spam)
	maxResponseErrors = 50 // number of invalid responses tolerated (makes the protocol less brittle but still avoids spam)
)

// capacity limitation for parameter updates
const (
	allowedUpdateBytes = 100000                // initial/maximum allowed update size
	allowedUpdateRate  = time.Millisecond * 10 // time constant for recharging one byte of allowance
)

const (
	freezeTimeBase    = time.Millisecond * 700 // fixed component of client freeze time
	freezeTimeRandom  = time.Millisecond * 600 // random component of client freeze time
	freezeCheckPeriod = time.Millisecond * 100 // buffer value recheck period after initial freeze time has elapsed
)

// if the total encoded size of a sent transaction batch is over txSizeCostLimit
// per transaction then the request cost is calculated as proportional to the
// encoded size instead of the transaction count
const txSizeCostLimit = 0x4000

const (
	announceTypeNone = iota
	announceTypeSimple
	announceTypeSigned
)

type peer struct {
	*p2p.Peer
	rw p2p.MsgReadWriter

	version int    // Protocol version negotiated
	network uint64 // Network ID being on

	announceType uint64

	// Checkpoint relative fields
	checkpoint       params.TrustedCheckpoint
	checkpointNumber uint64

	id string

	headInfo *announceData
	lock     sync.RWMutex

	sendQueue *execQueue

	errCh chan error
	// responseLock ensures that responses are queued in the same order as
	// RequestProcessed is called
	responseLock  sync.Mutex
	responseCount uint64
	invalidCount  uint32

	poolEntry      *poolEntry
	hasBlock       func(common.Hash, uint64, bool) bool
	responseErrors int
	updateCounter  uint64
	updateTime     mclock.AbsTime
	frozen         uint32 // 1 if client is in frozen state

	fcClient *flowcontrol.ClientNode // nil if the peer is server only
	fcServer *flowcontrol.ServerNode // nil if the peer is client only
	fcParams flowcontrol.ServerParams
	fcCosts  requestCostTable

	isTrusted               bool
	isOnlyAnnounce          bool
	chainSince, chainRecent uint64
	stateSince, stateRecent uint64
}
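
// newPeer creates a LES peer wrapper around the given devp2p peer, recording the
// negotiated protocol version, network ID and trust status.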
func newPeer(version int, network uint64, isTrusted bool, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
	return &peer{
		Peer:      p,
		rw:        rw,
		version:   version,
		network:   network,
		id:        fmt.Sprintf("%x", p.ID().Bytes()),
		isTrusted: isTrusted,
		errCh:     make(chan error, 1),
	}
}

// rejectUpdate returns true if a parameter update has to be rejected because
// the size and/or rate of updates exceed the capacity limitation
func (p *peer) rejectUpdate(size uint64) bool {
	now := mclock.Now()
	if p.updateCounter == 0 {
		p.updateTime = now
	} else {
		dt := now - p.updateTime
		r := uint64(dt / mclock.AbsTime(allowedUpdateRate))
		if p.updateCounter > r {
			p.updateCounter -= r
			p.updateTime += mclock.AbsTime(allowedUpdateRate * time.Duration(r))
		} else {
			p.updateCounter = 0
			p.updateTime = now
		}
	}
	p.updateCounter += size
	return p.updateCounter > allowedUpdateBytes
}

// freezeClient temporarily puts the client in a frozen state which means all
// unprocessed and subsequent requests are dropped. Unfreezing happens automatically
// after a short time if the client's buffer value is at least in the slightly positive
// region. The client is also notified about being frozen/unfrozen with a Stop/Resume
// message.
func (p *peer) freezeClient() {
	if p.version < lpv3 {
		// if Stop/Resume is not supported then just drop the peer after setting
		// its frozen status permanently
		atomic.StoreUint32(&p.frozen, 1)
		p.Peer.Disconnect(p2p.DiscUselessPeer)
		return
	}
	if atomic.SwapUint32(&p.frozen, 1) == 0 {
		go func() {
			p.SendStop()
			time.Sleep(freezeTimeBase + time.Duration(rand.Int63n(int64(freezeTimeRandom))))
			for {
				bufValue, bufLimit := p.fcClient.BufferStatus()
				if bufLimit == 0 {
					return
				}
				if bufValue <= bufLimit/8 {
					time.Sleep(freezeCheckPeriod)
				} else {
					atomic.StoreUint32(&p.frozen, 0)
					p.SendResume(bufValue)
					break
				}
			}
		}()
	}
}

// freezeServer processes Stop/Resume messages from the given server
func (p *peer) freezeServer(frozen bool) {
	var f uint32
	if frozen {
		f = 1
	}
	if atomic.SwapUint32(&p.frozen, f) != f && frozen {
		p.sendQueue.clear()
	}
}

// isFrozen returns true if the client is frozen or the server has put our
// client in frozen state
func (p *peer) isFrozen() bool {
	return atomic.LoadUint32(&p.frozen) != 0
}
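
// canQueue reports whether a new message can be queued for sending, i.e. the
// send queue accepts further entries and the peer is not currently frozen.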
func (p *peer) canQueue() bool {
	return p.sendQueue.canQueue() && !p.isFrozen()
}
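
// queueSend schedules the given send function on the peer's ordered send queue.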
func (p *peer) queueSend(f func()) {
	p.sendQueue.queue(f)
}

// Info gathers and returns a collection of metadata known about a peer.
func (p *peer) Info() *eth.PeerInfo {
	return &eth.PeerInfo{
		Version:    p.version,
		Difficulty: p.Td(),
		Head:       fmt.Sprintf("%x", p.Head()),
	}
}

// Head retrieves a copy of the current head (most recent) hash of the peer.
func (p *peer) Head() (hash common.Hash) {
	p.lock.RLock()
	defer p.lock.RUnlock()

	copy(hash[:], p.headInfo.Hash[:])
	return hash
}
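
// HeadAndTd retrieves the current head hash and total difficulty of the peer.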
func (p *peer) HeadAndTd() (hash common.Hash, td *big.Int) {
	p.lock.RLock()
	defer p.lock.RUnlock()

	copy(hash[:], p.headInfo.Hash[:])
	return hash, p.headInfo.Td
}
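
// headBlockInfo returns the hash, number and total difficulty of the peer's
// current head block.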
func (p *peer) headBlockInfo() blockInfo {
	p.lock.RLock()
	defer p.lock.RUnlock()

	return blockInfo{Hash: p.headInfo.Hash, Number: p.headInfo.Number, Td: p.headInfo.Td}
}

// Td retrieves the current total difficulty of a peer.
func (p *peer) Td() *big.Int {
	p.lock.RLock()
	defer p.lock.RUnlock()

	return new(big.Int).Set(p.headInfo.Td)
}

// waitBefore implements distPeer interface
func (p *peer) waitBefore(maxCost uint64) (time.Duration, float64) {
	return p.fcServer.CanSend(maxCost)
}

// updateCapacity updates the request serving capacity assigned to a given client
// and also sends an announcement about the updated flow control parameters
func (p *peer) updateCapacity(cap uint64) {
	p.responseLock.Lock()
	defer p.responseLock.Unlock()

	p.fcParams = flowcontrol.ServerParams{MinRecharge: cap, BufLimit: cap * bufLimitRatio}
	p.fcClient.UpdateParams(p.fcParams)
	var kvList keyValueList
	kvList = kvList.add("flowControl/MRR", cap)
	kvList = kvList.add("flowControl/BL", cap*bufLimitRatio)
	p.queueSend(func() { p.SendAnnounce(announceData{Update: kvList}) })
}
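
// sendRequest sends a request with the given message code and request ID; the
// cost argument is carried for the callers' flow control accounting and is not
// part of the encoded message. The Request* helpers below wrap this function.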
func sendRequest(w p2p.MsgWriter, msgcode, reqID, cost uint64, data interface{}) error {
	type req struct {
		ReqID uint64
		Data  interface{}
	}
	return p2p.Send(w, msgcode, req{reqID, data})
}

// reply struct represents a reply with the actual data already RLP encoded and
// only the bv (buffer value) missing. This allows the serving mechanism to
// calculate the bv value which depends on the data size before sending the reply.
type reply struct {
	w              p2p.MsgWriter
	msgcode, reqID uint64
	data           rlp.RawValue
}

// send sends the reply with the calculated buffer value
func (r *reply) send(bv uint64) error {
	type resp struct {
		ReqID, BV uint64
		Data      rlp.RawValue
	}
	return p2p.Send(r.w, r.msgcode, resp{r.reqID, bv, r.data})
}

// size returns the RLP encoded size of the message data
func (r *reply) size() uint32 {
	return uint32(len(r.data))
}
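
// GetRequestCost returns the estimated flow control cost of a request with the
// given message code and item amount, capped at the server's buffer limit.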
func (p *peer) GetRequestCost(msgcode uint64, amount int) uint64 {
	p.lock.RLock()
	defer p.lock.RUnlock()

	costs := p.fcCosts[msgcode]
	if costs == nil {
		return 0
	}
	cost := costs.baseCost + costs.reqCost*uint64(amount)
	if cost > p.fcParams.BufLimit {
		cost = p.fcParams.BufLimit
	}
	return cost
}
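
// GetTxRelayCost returns the estimated flow control cost of relaying the given
// number of transactions with the given total encoded size; once the encoded
// size exceeds txSizeCostLimit per transaction, the size based cost dominates.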
func (p *peer) GetTxRelayCost(amount, size int) uint64 {
	p.lock.RLock()
	defer p.lock.RUnlock()

	costs := p.fcCosts[SendTxV2Msg]
	if costs == nil {
		return 0
	}
	cost := costs.baseCost + costs.reqCost*uint64(amount)
	sizeCost := costs.baseCost + costs.reqCost*uint64(size)/txSizeCostLimit
	if sizeCost > cost {
		cost = sizeCost
	}

	if cost > p.fcParams.BufLimit {
		cost = p.fcParams.BufLimit
	}
	return cost
}

// HasBlock checks if the peer has a given block
func (p *peer) HasBlock(hash common.Hash, number uint64, hasState bool) bool {
	var head, since, recent uint64
	p.lock.RLock()
	if p.headInfo != nil {
		head = p.headInfo.Number
	}
	if hasState {
		since = p.stateSince
		recent = p.stateRecent
	} else {
		since = p.chainSince
		recent = p.chainRecent
	}
	hasBlock := p.hasBlock
	p.lock.RUnlock()
	return head >= number && number >= since && (recent == 0 || number+recent+4 > head) && hasBlock != nil && hasBlock(hash, number, hasState)
}

// SendAnnounce announces the availability of a number of blocks through
// a hash notification.
func (p *peer) SendAnnounce(request announceData) error {
	return p2p.Send(p.rw, AnnounceMsg, request)
}

// SendStop notifies the client about being in frozen state
func (p *peer) SendStop() error {
	return p2p.Send(p.rw, StopMsg, struct{}{})
}

// SendResume notifies the client about getting out of frozen state
func (p *peer) SendResume(bv uint64) error {
	return p2p.Send(p.rw, ResumeMsg, bv)
}

// ReplyBlockHeaders creates a reply with a batch of block headers
func (p *peer) ReplyBlockHeaders(reqID uint64, headers []*types.Header) *reply {
	data, _ := rlp.EncodeToBytes(headers)
	return &reply{p.rw, BlockHeadersMsg, reqID, data}
}

// ReplyBlockBodiesRLP creates a reply with a batch of block contents from
// an already RLP encoded format.
func (p *peer) ReplyBlockBodiesRLP(reqID uint64, bodies []rlp.RawValue) *reply {
	data, _ := rlp.EncodeToBytes(bodies)
	return &reply{p.rw, BlockBodiesMsg, reqID, data}
}

// ReplyCode creates a reply with a batch of contract code blobs, corresponding to the
// hashes requested.
func (p *peer) ReplyCode(reqID uint64, codes [][]byte) *reply {
	data, _ := rlp.EncodeToBytes(codes)
	return &reply{p.rw, CodeMsg, reqID, data}
}

// ReplyReceiptsRLP creates a reply with a batch of transaction receipts, corresponding to the
// ones requested from an already RLP encoded format.
func (p *peer) ReplyReceiptsRLP(reqID uint64, receipts []rlp.RawValue) *reply {
	data, _ := rlp.EncodeToBytes(receipts)
	return &reply{p.rw, ReceiptsMsg, reqID, data}
}

// ReplyProofsV2 creates a reply with a batch of merkle proofs, corresponding to the ones requested.
func (p *peer) ReplyProofsV2(reqID uint64, proofs light.NodeList) *reply {
	data, _ := rlp.EncodeToBytes(proofs)
	return &reply{p.rw, ProofsV2Msg, reqID, data}
}

// ReplyHelperTrieProofs creates a reply with a batch of HelperTrie proofs, corresponding to the ones requested.
func (p *peer) ReplyHelperTrieProofs(reqID uint64, resp HelperTrieResps) *reply {
	data, _ := rlp.EncodeToBytes(resp)
	return &reply{p.rw, HelperTrieProofsMsg, reqID, data}
}

// ReplyTxStatus creates a reply with a batch of transaction status records, corresponding to the ones requested.
func (p *peer) ReplyTxStatus(reqID uint64, stats []light.TxStatus) *reply {
	data, _ := rlp.EncodeToBytes(stats)
	return &reply{p.rw, TxStatusMsg, reqID, data}
}

// RequestHeadersByHash fetches a batch of blocks' headers corresponding to the
// specified header query, based on the hash of an origin block.
func (p *peer) RequestHeadersByHash(reqID, cost uint64, origin common.Hash, amount int, skip int, reverse bool) error {
	p.Log().Debug("Fetching batch of headers", "count", amount, "fromhash", origin, "skip", skip, "reverse", reverse)
	return sendRequest(p.rw, GetBlockHeadersMsg, reqID, cost, &getBlockHeadersData{Origin: hashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
}

// RequestHeadersByNumber fetches a batch of blocks' headers corresponding to the
// specified header query, based on the number of an origin block.
func (p *peer) RequestHeadersByNumber(reqID, cost, origin uint64, amount int, skip int, reverse bool) error {
	p.Log().Debug("Fetching batch of headers", "count", amount, "fromnum", origin, "skip", skip, "reverse", reverse)
	return sendRequest(p.rw, GetBlockHeadersMsg, reqID, cost, &getBlockHeadersData{Origin: hashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
}

// RequestBodies fetches a batch of blocks' bodies corresponding to the hashes
// specified.
func (p *peer) RequestBodies(reqID, cost uint64, hashes []common.Hash) error {
	p.Log().Debug("Fetching batch of block bodies", "count", len(hashes))
	return sendRequest(p.rw, GetBlockBodiesMsg, reqID, cost, hashes)
}

// RequestCode fetches a batch of arbitrary data from a node's known state
// data, corresponding to the specified hashes.
func (p *peer) RequestCode(reqID, cost uint64, reqs []CodeReq) error {
	p.Log().Debug("Fetching batch of codes", "count", len(reqs))
	return sendRequest(p.rw, GetCodeMsg, reqID, cost, reqs)
}

// RequestReceipts fetches a batch of transaction receipts from a remote node.
func (p *peer) RequestReceipts(reqID, cost uint64, hashes []common.Hash) error {
	p.Log().Debug("Fetching batch of receipts", "count", len(hashes))
	return sendRequest(p.rw, GetReceiptsMsg, reqID, cost, hashes)
}

// RequestProofs fetches a batch of merkle proofs from a remote node.
func (p *peer) RequestProofs(reqID, cost uint64, reqs []ProofReq) error {
	p.Log().Debug("Fetching batch of proofs", "count", len(reqs))
	return sendRequest(p.rw, GetProofsV2Msg, reqID, cost, reqs)
}

// RequestHelperTrieProofs fetches a batch of HelperTrie merkle proofs from a remote node.
func (p *peer) RequestHelperTrieProofs(reqID, cost uint64, reqs []HelperTrieReq) error {
	p.Log().Debug("Fetching batch of HelperTrie proofs", "count", len(reqs))
	return sendRequest(p.rw, GetHelperTrieProofsMsg, reqID, cost, reqs)
}

// RequestTxStatus fetches a batch of transaction status records from a remote node.
func (p *peer) RequestTxStatus(reqID, cost uint64, txHashes []common.Hash) error {
	p.Log().Debug("Requesting transaction status", "count", len(txHashes))
	return sendRequest(p.rw, GetTxStatusMsg, reqID, cost, txHashes)
}

// SendTxs sends a batch of transactions to be added to the remote transaction pool.
func (p *peer) SendTxs(reqID, cost uint64, txs rlp.RawValue) error {
	p.Log().Debug("Sending batch of transactions", "size", len(txs))
	return sendRequest(p.rw, SendTxV2Msg, reqID, cost, txs)
}
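
// keyValueEntry, keyValueList and keyValueMap represent the RLP encoded
// key/value pairs exchanged in the handshake and in announcement updates.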
type keyValueEntry struct {
	Key   string
	Value rlp.RawValue
}
type keyValueList []keyValueEntry
type keyValueMap map[string]rlp.RawValue
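
// add appends a key/value pair to the list, RLP encoding the value (a nil value
// is encoded as uint64(0)).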
func (l keyValueList) add(key string, val interface{}) keyValueList {
	var entry keyValueEntry
	entry.Key = key
	if val == nil {
		val = uint64(0)
	}
	enc, err := rlp.EncodeToBytes(val)
	if err == nil {
		entry.Value = enc
	}
	return append(l, entry)
}
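
// decode converts the list into a lookup map and also returns an approximate
// encoded size, which is used to rate limit parameter updates.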
func (l keyValueList) decode() (keyValueMap, uint64) {
	m := make(keyValueMap)
	var size uint64
	for _, entry := range l {
		m[entry.Key] = entry.Value
		size += uint64(len(entry.Key)) + uint64(len(entry.Value)) + 8
	}
	return m, size
}
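
// get decodes the value belonging to the given key into val, returning an
// ErrMissingKey error if the key is absent; decoding is skipped if val is nil.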
func (m keyValueMap) get(key string, val interface{}) error {
	enc, ok := m[key]
	if !ok {
		return errResp(ErrMissingKey, "%s", key)
	}
	if val == nil {
		return nil
	}
	return rlp.DecodeBytes(enc, val)
}
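
// sendReceiveHandshake sends our handshake key/value list to the peer and
// retrieves the remote side's list, enforcing the status message code and size limit.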
func (p *peer) sendReceiveHandshake(sendList keyValueList) (keyValueList, error) {
	// Send out our own handshake in a new thread
	errc := make(chan error, 1)
	go func() {
		errc <- p2p.Send(p.rw, StatusMsg, sendList)
	}()
	// In the meantime retrieve the remote status message
	msg, err := p.rw.ReadMsg()
	if err != nil {
		return nil, err
	}
	if msg.Code != StatusMsg {
		return nil, errResp(ErrNoStatusMsg, "first msg has code %x (!= %x)", msg.Code, StatusMsg)
	}
	if msg.Size > ProtocolMaxMsgSize {
		return nil, errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
	}
	// Decode the handshake
	var recvList keyValueList
	if err := msg.Decode(&recvList); err != nil {
		return nil, errResp(ErrDecode, "msg %v: %v", msg, err)
	}
	if err := <-errc; err != nil {
		return nil, err
	}
	return recvList, nil
}

// Handshake executes the les protocol handshake, negotiating version number,
// network IDs, difficulties, head and genesis blocks.
func (p *peer) Handshake(td *big.Int, head common.Hash, headNum uint64, genesis common.Hash, server *LesServer) error {
	p.lock.Lock()
	defer p.lock.Unlock()

	var send keyValueList
	send = send.add("protocolVersion", uint64(p.version))
	send = send.add("networkId", p.network)
	send = send.add("headTd", td)
	send = send.add("headHash", head)
	send = send.add("headNum", headNum)
	send = send.add("genesisHash", genesis)
	if server != nil {
		if !server.onlyAnnounce {
			send = send.add("serveHeaders", nil)
			send = send.add("serveChainSince", uint64(0))
			send = send.add("serveStateSince", uint64(0))

			// If the local ethereum node is running in archive mode, advertise that we have
			// all historical state data. Otherwise only recent state is available.
			stateRecent := uint64(core.TriesInMemory - 4)
			if server.archiveMode {
				stateRecent = 0
			}
			send = send.add("serveRecentState", stateRecent)
			send = send.add("txRelay", nil)
		}
		send = send.add("flowControl/BL", server.defParams.BufLimit)
		send = send.add("flowControl/MRR", server.defParams.MinRecharge)
		var costList RequestCostList
		if server.costTracker != nil {
			costList = server.costTracker.makeCostList(server.costTracker.globalFactor())
		} else {
			costList = testCostList(server.testCost)
		}
		send = send.add("flowControl/MRC", costList)
		p.fcCosts = costList.decode(ProtocolLengths[uint(p.version)])
		p.fcParams = server.defParams

		if server.protocolManager != nil && server.protocolManager.reg != nil && server.protocolManager.reg.isRunning() {
			cp, height := server.protocolManager.reg.stableCheckpoint()
			if cp != nil {
				send = send.add("checkpoint/value", cp)
				send = send.add("checkpoint/registerHeight", height)
			}
		}
	} else {
		// on client node
		p.announceType = announceTypeSimple
		if p.isTrusted {
			p.announceType = announceTypeSigned
		}
		send = send.add("announceType", p.announceType)
	}

	recvList, err := p.sendReceiveHandshake(send)
	if err != nil {
		return err
	}
	recv, size := recvList.decode()
	if p.rejectUpdate(size) {
		return errResp(ErrRequestRejected, "")
	}

	var rGenesis, rHash common.Hash
	var rVersion, rNetwork, rNum uint64
	var rTd *big.Int

	if err := recv.get("protocolVersion", &rVersion); err != nil {
		return err
	}
	if err := recv.get("networkId", &rNetwork); err != nil {
		return err
	}
	if err := recv.get("headTd", &rTd); err != nil {
		return err
	}
	if err := recv.get("headHash", &rHash); err != nil {
		return err
	}
	if err := recv.get("headNum", &rNum); err != nil {
		return err
	}
	if err := recv.get("genesisHash", &rGenesis); err != nil {
		return err
	}

	if rGenesis != genesis {
		return errResp(ErrGenesisBlockMismatch, "%x (!= %x)", rGenesis[:8], genesis[:8])
	}
	if rNetwork != p.network {
		return errResp(ErrNetworkIdMismatch, "%d (!= %d)", rNetwork, p.network)
	}
	if int(rVersion) != p.version {
		return errResp(ErrProtocolVersionMismatch, "%d (!= %d)", rVersion, p.version)
	}

	if server != nil {
		// until we have a proper peer connectivity API, allow LES connection to other servers
		/*if recv.get("serveStateSince", nil) == nil {
			return errResp(ErrUselessPeer, "wanted client, got server")
		}*/
		if recv.get("announceType", &p.announceType) != nil {
			// set default announceType on server side
			p.announceType = announceTypeSimple
		}
		p.fcClient = flowcontrol.NewClientNode(server.fcManager, server.defParams)
	} else {
		// mark the peer as an OnlyAnnounce server if the "serveHeaders", "serveChainSince",
		// "serveStateSince" or "txRelay" fields don't exist
		if recv.get("serveChainSince", &p.chainSince) != nil {
			p.isOnlyAnnounce = true
		}
		if recv.get("serveRecentChain", &p.chainRecent) != nil {
			p.chainRecent = 0
		}
		if recv.get("serveStateSince", &p.stateSince) != nil {
			p.isOnlyAnnounce = true
		}
		if recv.get("serveRecentState", &p.stateRecent) != nil {
			p.stateRecent = 0
		}
		if recv.get("txRelay", nil) != nil {
			p.isOnlyAnnounce = true
		}

		if p.isOnlyAnnounce && !p.isTrusted {
			return errResp(ErrUselessPeer, "peer cannot serve requests")
		}

		var sParams flowcontrol.ServerParams
		if err := recv.get("flowControl/BL", &sParams.BufLimit); err != nil {
			return err
		}
		if err := recv.get("flowControl/MRR", &sParams.MinRecharge); err != nil {
			return err
		}
		var MRC RequestCostList
		if err := recv.get("flowControl/MRC", &MRC); err != nil {
			return err
		}
		p.fcParams = sParams
		p.fcServer = flowcontrol.NewServerNode(sParams, &mclock.System{})
		p.fcCosts = MRC.decode(ProtocolLengths[uint(p.version)])

		recv.get("checkpoint/value", &p.checkpoint)
		recv.get("checkpoint/registerHeight", &p.checkpointNumber)

		if !p.isOnlyAnnounce {
			for msgCode := range reqAvgTimeCost {
				if p.fcCosts[msgCode] == nil {
					return errResp(ErrUselessPeer, "peer does not support message %d", msgCode)
				}
			}
		}
	}
	p.headInfo = &announceData{Td: rTd, Hash: rHash, Number: rNum}
	return nil
}

// updateFlowControl updates the flow control parameters belonging to the server
// node if the announced key/value set contains relevant fields
func (p *peer) updateFlowControl(update keyValueMap) {
	if p.fcServer == nil {
		return
	}
	params := p.fcParams
	updateParams := false
	if update.get("flowControl/BL", &params.BufLimit) == nil {
		updateParams = true
	}
	if update.get("flowControl/MRR", &params.MinRecharge) == nil {
		updateParams = true
	}
	if updateParams {
		p.fcParams = params
		p.fcServer.UpdateParams(params)
	}
	var MRC RequestCostList
	if update.get("flowControl/MRC", &MRC) == nil {
		costUpdate := MRC.decode(ProtocolLengths[uint(p.version)])
		for code, cost := range costUpdate {
			p.fcCosts[code] = cost
		}
	}
}

// String implements fmt.Stringer.
func (p *peer) String() string {
	return fmt.Sprintf("Peer %s [%s]", p.id,
		fmt.Sprintf("les/%d", p.version),
	)
}

// peerSetNotify is a callback interface to notify services about added or
// removed peers
type peerSetNotify interface {
	registerPeer(*peer)
	unregisterPeer(*peer)
}

// peerSet represents the collection of active peers currently participating in
// the Light Ethereum sub-protocol.
type peerSet struct {
	peers      map[string]*peer
	lock       sync.RWMutex
	notifyList []peerSetNotify
	closed     bool
}

// newPeerSet creates a new peer set to track the active participants.
func newPeerSet() *peerSet {
	return &peerSet{
		peers: make(map[string]*peer),
	}
}

// notify adds a service to be notified about added or removed peers
func (ps *peerSet) notify(n peerSetNotify) {
	ps.lock.Lock()
	ps.notifyList = append(ps.notifyList, n)
	peers := make([]*peer, 0, len(ps.peers))
	for _, p := range ps.peers {
		peers = append(peers, p)
	}
	ps.lock.Unlock()

	for _, p := range peers {
		n.registerPeer(p)
	}
}

// Register injects a new peer into the working set, or returns an error if the
// peer is already known.
func (ps *peerSet) Register(p *peer) error {
	ps.lock.Lock()
	if ps.closed {
		ps.lock.Unlock()
		return errClosed
	}
	if _, ok := ps.peers[p.id]; ok {
		ps.lock.Unlock()
		return errAlreadyRegistered
	}
	ps.peers[p.id] = p
	p.sendQueue = newExecQueue(100)
	peers := make([]peerSetNotify, len(ps.notifyList))
	copy(peers, ps.notifyList)
	ps.lock.Unlock()

	for _, n := range peers {
		n.registerPeer(p)
	}
	return nil
}

// Unregister removes a remote peer from the active set, disabling any further
// actions to/from that particular entity. It also initiates disconnection at the networking layer.
func (ps *peerSet) Unregister(id string) error {
	ps.lock.Lock()
	if p, ok := ps.peers[id]; !ok {
		ps.lock.Unlock()
		return errNotRegistered
	} else {
		delete(ps.peers, id)
		peers := make([]peerSetNotify, len(ps.notifyList))
		copy(peers, ps.notifyList)
		ps.lock.Unlock()

		for _, n := range peers {
			n.unregisterPeer(p)
		}

		p.sendQueue.quit()
		p.Peer.Disconnect(p2p.DiscUselessPeer)

		return nil
	}
}

// AllPeerIDs returns a list of all registered peer IDs
func (ps *peerSet) AllPeerIDs() []string {
	ps.lock.RLock()
	defer ps.lock.RUnlock()

	res := make([]string, len(ps.peers))
	idx := 0
	for id := range ps.peers {
		res[idx] = id
		idx++
	}
	return res
}

// Peer retrieves the registered peer with the given id.
func (ps *peerSet) Peer(id string) *peer {
	ps.lock.RLock()
	defer ps.lock.RUnlock()

	return ps.peers[id]
}

// Len returns the current number of peers in the set.
func (ps *peerSet) Len() int {
	ps.lock.RLock()
	defer ps.lock.RUnlock()

	return len(ps.peers)
}

// BestPeer retrieves the known peer with the currently highest total difficulty.
func (ps *peerSet) BestPeer() *peer {
	ps.lock.RLock()
	defer ps.lock.RUnlock()

	var (
		bestPeer *peer
		bestTd   *big.Int
	)
	for _, p := range ps.peers {
		if td := p.Td(); bestPeer == nil || td.Cmp(bestTd) > 0 {
			bestPeer, bestTd = p, td
		}
	}
	return bestPeer
}

// AllPeers returns all peers in a list
func (ps *peerSet) AllPeers() []*peer {
	ps.lock.RLock()
	defer ps.lock.RUnlock()

	list := make([]*peer, len(ps.peers))
	i := 0
	for _, peer := range ps.peers {
		list[i] = peer
		i++
	}
	return list
}

// Close disconnects all peers.
// No new peers can be registered after Close has returned.
func (ps *peerSet) Close() {
	ps.lock.Lock()
	defer ps.lock.Unlock()

	for _, p := range ps.peers {
		p.Disconnect(p2p.DiscQuitting)
	}
	ps.closed = true
}