Merge pull request #3607 from zsfelfoldi/light-fix2
les: fix private net issues, enable adding peers manually again
commit 9c45b4462c
@@ -654,6 +654,10 @@ func MakeNode(ctx *cli.Context, name, gitCommit string) *node.Node {
 		vsn += "-" + gitCommit[:8]
 	}
 
+	// if we're running a light client or server, force enable the v5 peer discovery unless it is explicitly disabled with --nodiscover
+	// note that explicitly specifying --v5disc overrides --nodiscover, in which case the later only disables v4 discovery
+	forceV5Discovery := (ctx.GlobalBool(LightModeFlag.Name) || ctx.GlobalInt(LightServFlag.Name) > 0) && !ctx.GlobalBool(NoDiscoverFlag.Name)
+
 	config := &node.Config{
 		DataDir:          MakeDataDir(ctx),
 		KeyStoreDir:      ctx.GlobalString(KeyStoreDirFlag.Name),
@@ -662,8 +666,8 @@ func MakeNode(ctx *cli.Context, name, gitCommit string) *node.Node {
 		Name:             name,
 		Version:          vsn,
 		UserIdent:        makeNodeUserIdent(ctx),
-		NoDiscovery:      ctx.GlobalBool(NoDiscoverFlag.Name) || ctx.GlobalBool(LightModeFlag.Name),
-		DiscoveryV5:      ctx.GlobalBool(DiscoveryV5Flag.Name) || ctx.GlobalBool(LightModeFlag.Name) || ctx.GlobalInt(LightServFlag.Name) > 0,
+		NoDiscovery:      ctx.GlobalBool(NoDiscoverFlag.Name) || ctx.GlobalBool(LightModeFlag.Name), // always disable v4 discovery in light client mode
+		DiscoveryV5:      ctx.GlobalBool(DiscoveryV5Flag.Name) || forceV5Discovery,
 		DiscoveryV5Addr:  MakeDiscoveryV5Address(ctx),
 		BootstrapNodes:   MakeBootstrapNodes(ctx),
 		BootstrapNodesV5: MakeBootstrapNodesV5(ctx),
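The two added comment lines encode a small piece of flag logic that is easy to misread. Below is a standalone sketch, illustration only and not part of the commit, where plain boolean fields stand in for the ctx.GlobalBool/GlobalInt lookups in MakeNode, showing how the v4/v5 discovery settings resolve under the new code:

// Illustration only: how the discovery flags combine under the new logic.
// The boolean fields are stand-ins for the cli flag lookups in MakeNode.
package main

import "fmt"

func main() {
	type flags struct{ light, lightServ, noDiscover, v5disc bool }
	for _, f := range []flags{
		{light: true},                                 // light client, defaults
		{light: true, noDiscover: true},               // light client, --nodiscover
		{light: true, noDiscover: true, v5disc: true}, // --v5disc re-enables v5 despite --nodiscover
		{lightServ: true},                             // LES server
	} {
		forceV5 := (f.light || f.lightServ) && !f.noDiscover
		noV4 := f.noDiscover || f.light // v4 discovery always off in light client mode
		v5 := f.v5disc || forceV5
		fmt.Printf("%+v -> v4 discovery: %v, v5 discovery: %v\n", f, !noV4, v5)
	}
}

Running it prints one line per flag combination: --nodiscover still turns v5 off for light nodes unless --v5disc is given explicitly, in which case v5 comes back on while v4 stays disabled.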
@@ -105,7 +105,6 @@ type Config struct {
 
 type LesServer interface {
 	Start(srvr *p2p.Server)
-	Synced()
 	Stop()
 	Protocols() []p2p.Protocol
 }
@@ -173,7 +173,7 @@ func NewProtocolManager(config *params.ChainConfig, fastSync bool, networkId int
 		return blockchain.CurrentBlock().NumberU64()
 	}
 	inserter := func(blocks types.Blocks) (int, error) {
-		manager.setSynced() // Mark initial sync done on any fetcher import
+		atomic.StoreUint32(&manager.synced, 1) // Mark initial sync done on any fetcher import
 		return manager.insertChain(blocks)
 	}
 	manager.fetcher = fetcher.New(blockchain.GetBlockByHash, validator, manager.BroadcastBlock, heighter, inserter, manager.removePeer)
@@ -181,7 +181,7 @@ func (pm *ProtocolManager) synchronise(peer *peer) {
 	if err := pm.downloader.Synchronise(peer.id, pHead, pTd, mode); err != nil {
 		return
 	}
-	pm.setSynced() // Mark initial sync done
+	atomic.StoreUint32(&pm.synced, 1) // Mark initial sync done
 
 	// If fast sync was enabled, and we synced up, disable it
 	if atomic.LoadUint32(&pm.fastSync) == 1 {
@@ -192,10 +192,3 @@ func (pm *ProtocolManager) synchronise(peer *peer) {
 		}
 	}
 }
-
-// setSynced sets the synced flag and notifies the light server if present
-func (pm *ProtocolManager) setSynced() {
-	if atomic.SwapUint32(&pm.synced, 1) == 0 && pm.lesServer != nil {
-		pm.lesServer.Synced()
-	}
-}
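With the Synced hook gone from the LesServer interface, the protocol manager no longer needs the setSynced helper and simply flips its synced flag atomically. A minimal, self-contained sketch of the difference (the markSynced name and type are hypothetical, not the commit's code):

// Sketch of the flag handling that replaces the old setSynced helper.
package main

import (
	"fmt"
	"sync/atomic"
)

type protocolManager struct {
	synced uint32 // 0 = still syncing, 1 = initial sync done
}

func (pm *protocolManager) markSynced() {
	// StoreUint32 is enough once nothing needs to observe the 0 -> 1 transition;
	// the removed code used SwapUint32 so it could notify the LES server exactly once.
	atomic.StoreUint32(&pm.synced, 1)
}

func main() {
	pm := &protocolManager{}
	pm.markSynced()
	fmt.Println("synced:", atomic.LoadUint32(&pm.synced) == 1)
}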
@@ -160,9 +160,6 @@ func NewProtocolManager(chainConfig *params.ChainConfig, lightSync bool, network
 		if manager.serverPool != nil {
 			addr := p.RemoteAddr().(*net.TCPAddr)
 			entry = manager.serverPool.connect(peer, addr.IP, uint16(addr.Port))
-			if entry == nil {
-				return fmt.Errorf("unwanted connection")
-			}
 		}
 		peer.poolEntry = entry
 		select {
@@ -42,9 +42,7 @@ type LesServer struct {
 	fcManager       *flowcontrol.ClientManager // nil if our node is client only
 	fcCostStats     *requestCostStats
 	defParams       *flowcontrol.ServerParams
-	srvr            *p2p.Server
-	synced, stopped bool
-	lock            sync.Mutex
+	stopped         bool
 }
 
 func NewLesServer(eth *eth.Ethereum, config *eth.Config) (*LesServer, error) {
@@ -70,35 +68,13 @@ func (s *LesServer) Protocols() []p2p.Protocol {
 	return s.protocolManager.SubProtocols
 }
 
-// Start only starts the actual service if the ETH protocol has already been synced,
-// otherwise it will be started by Synced()
+// Start starts the LES server
 func (s *LesServer) Start(srvr *p2p.Server) {
-	s.lock.Lock()
-	defer s.lock.Unlock()
-
-	s.srvr = srvr
-	if s.synced {
-		s.protocolManager.Start(s.srvr)
-	}
-}
-
-// Synced notifies the server that the ETH protocol has been synced and LES service can be started
-func (s *LesServer) Synced() {
-	s.lock.Lock()
-	defer s.lock.Unlock()
-
-	s.synced = true
-	if s.srvr != nil && !s.stopped {
-		s.protocolManager.Start(s.srvr)
-	}
+	s.protocolManager.Start(srvr)
 }
 
 // Stop stops the LES service
 func (s *LesServer) Stop() {
-	s.lock.Lock()
-	defer s.lock.Unlock()
-
 	s.stopped = true
 	s.fcCostStats.store()
 	s.fcManager.Stop()
 	go func() {
@@ -160,10 +160,10 @@ func (pool *serverPool) connect(p *peer, ip net.IP, port uint16) *poolEntry {
 	defer pool.lock.Unlock()
 	entry := pool.entries[p.ID()]
 	if entry == nil {
-		return nil
+		entry = pool.findOrNewNode(p.ID(), ip, port)
 	}
 	glog.V(logger.Debug).Infof("connecting to %v, state: %v", p.id, entry.state)
-	if entry.state != psDialed {
+	if entry.state == psConnected || entry.state == psRegistered {
 		return nil
 	}
 	pool.connWg.Add(1)
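This reworked connect is what "enable adding peers manually again" in the commit title refers to: a peer the discovery pool never produced (for example one added by hand through admin_addPeer) now gets a pool entry created on the fly instead of being rejected, and only an entry that is already connected or registered is turned away. A simplified, hypothetical sketch of that acceptance rule (names and types invented for illustration):

// Hypothetical, simplified sketch of the new acceptance rule in serverPool.connect.
package main

import "fmt"

type peerState int

const (
	psNotConnected peerState = iota
	psDialed
	psConnected
	psRegistered
)

type entry struct{ state peerState }

// accept mirrors the reworked check: unknown peers (entry == nil in the real code)
// are given a fresh entry, and only already connected or registered peers are refused.
func accept(known *entry) (*entry, bool) {
	if known == nil {
		known = &entry{state: psNotConnected} // stands in for pool.findOrNewNode(...)
	}
	if known.state == psConnected || known.state == psRegistered {
		return nil, false
	}
	return known, true
}

func main() {
	if _, ok := accept(nil); ok {
		fmt.Println("manually added peer accepted")
	}
	if _, ok := accept(&entry{state: psRegistered}); !ok {
		fmt.Println("already registered peer refused")
	}
}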
@@ -250,11 +250,17 @@ type poolStatAdjust struct {
 
 // adjustBlockDelay adjusts the block announce delay statistics of a node
 func (pool *serverPool) adjustBlockDelay(entry *poolEntry, time time.Duration) {
+	if entry == nil {
+		return
+	}
 	pool.adjustStats <- poolStatAdjust{pseBlockDelay, entry, time}
 }
 
 // adjustResponseTime adjusts the request response time statistics of a node
 func (pool *serverPool) adjustResponseTime(entry *poolEntry, time time.Duration, timeout bool) {
+	if entry == nil {
+		return
+	}
 	if timeout {
 		pool.adjustStats <- poolStatAdjust{pseResponseTimeout, entry, time}
 	} else {
@@ -342,7 +348,9 @@ func (pool *serverPool) selectPeerWait(reqID uint64, canSend func(*peer) (bool,
 func (pool *serverPool) eventLoop() {
 	lookupCnt := 0
 	var convTime mclock.AbsTime
-	pool.discSetPeriod <- time.Millisecond * 100
+	if pool.discSetPeriod != nil {
+		pool.discSetPeriod <- time.Millisecond * 100
+	}
 	for {
 		select {
 		case entry := <-pool.timeout:
@@ -375,39 +383,7 @@ func (pool *serverPool) eventLoop() {
 
 		case node := <-pool.discNodes:
 			pool.lock.Lock()
-			now := mclock.Now()
-			id := discover.NodeID(node.ID)
-			entry := pool.entries[id]
-			if entry == nil {
-				glog.V(logger.Debug).Infof("discovered %v", node.String())
-				entry = &poolEntry{
-					id:         id,
-					addr:       make(map[string]*poolEntryAddress),
-					addrSelect: *newWeightedRandomSelect(),
-					shortRetry: shortRetryCnt,
-				}
-				pool.entries[id] = entry
-				// initialize previously unknown peers with good statistics to give a chance to prove themselves
-				entry.connectStats.add(1, initStatsWeight)
-				entry.delayStats.add(0, initStatsWeight)
-				entry.responseStats.add(0, initStatsWeight)
-				entry.timeoutStats.add(0, initStatsWeight)
-			}
-			entry.lastDiscovered = now
-			addr := &poolEntryAddress{
-				ip:   node.IP,
-				port: node.TCP,
-			}
-			if a, ok := entry.addr[addr.strKey()]; ok {
-				addr = a
-			} else {
-				entry.addr[addr.strKey()] = addr
-			}
-			addr.lastSeen = now
-			entry.addrSelect.update(addr)
-			if !entry.known {
-				pool.newQueue.setLatest(entry)
-			}
+			entry := pool.findOrNewNode(discover.NodeID(node.ID), node.IP, node.TCP)
 			pool.updateCheckDial(entry)
 			pool.lock.Unlock()
 
@@ -419,12 +395,16 @@ func (pool *serverPool) eventLoop() {
 			lookupCnt++
 			if pool.fastDiscover && (lookupCnt == 50 || time.Duration(mclock.Now()-convTime) > time.Minute) {
 				pool.fastDiscover = false
-				pool.discSetPeriod <- time.Minute
+				if pool.discSetPeriod != nil {
+					pool.discSetPeriod <- time.Minute
+				}
 			}
 		}
 
 		case <-pool.quit:
-			close(pool.discSetPeriod)
+			if pool.discSetPeriod != nil {
+				close(pool.discSetPeriod)
+			}
 			pool.connWg.Wait()
 			pool.saveNodes()
 			pool.wg.Done()
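The discSetPeriod guards in this hunk and the one above protect a channel that may never have been created when discovery is unavailable, which is the private-network case the commit title refers to. In Go, a send on a nil channel blocks forever and closing a nil channel panics, so the event loop has to skip those operations explicitly. A small demonstration of the behaviour the guards avoid (illustration only, not part of the commit):

// Demonstration of the nil-channel behaviour the new guards protect against.
package main

import (
	"fmt"
	"time"
)

func main() {
	var ch chan time.Duration // nil: discovery disabled, channel never created

	// A send on a nil channel blocks forever; without the guard the event loop
	// would stall on networks that run with discovery turned off.
	select {
	case ch <- time.Minute:
		fmt.Println("sent (never happens on a nil channel)")
	default:
		fmt.Println("skipped send: channel is nil")
	}

	// close(ch) on a nil channel panics, so it is guarded the same way.
	if ch != nil {
		close(ch)
	} else {
		fmt.Println("skipped close: channel is nil")
	}
}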
@@ -434,6 +414,42 @@ func (pool *serverPool) eventLoop() {
 	}
 }
 
+func (pool *serverPool) findOrNewNode(id discover.NodeID, ip net.IP, port uint16) *poolEntry {
+	now := mclock.Now()
+	entry := pool.entries[id]
+	if entry == nil {
+		glog.V(logger.Debug).Infof("discovered %v", id.String())
+		entry = &poolEntry{
+			id:         id,
+			addr:       make(map[string]*poolEntryAddress),
+			addrSelect: *newWeightedRandomSelect(),
+			shortRetry: shortRetryCnt,
+		}
+		pool.entries[id] = entry
+		// initialize previously unknown peers with good statistics to give a chance to prove themselves
+		entry.connectStats.add(1, initStatsWeight)
+		entry.delayStats.add(0, initStatsWeight)
+		entry.responseStats.add(0, initStatsWeight)
+		entry.timeoutStats.add(0, initStatsWeight)
+	}
+	entry.lastDiscovered = now
+	addr := &poolEntryAddress{
+		ip:   ip,
+		port: port,
+	}
+	if a, ok := entry.addr[addr.strKey()]; ok {
+		addr = a
+	} else {
+		entry.addr[addr.strKey()] = addr
+	}
+	addr.lastSeen = now
+	entry.addrSelect.update(addr)
+	if !entry.known {
+		pool.newQueue.setLatest(entry)
+	}
+	return entry
+}
+
 // loadNodes loads known nodes and their statistics from the database
 func (pool *serverPool) loadNodes() {
 	enc, err := pool.db.Get(pool.dbKey)