core/forkid: implement the forkid EIP, announce via ENR (#19738)
* eth: chain config (genesis + fork) ENR entry
* core/forkid, eth: protocol independent fork ID, update to CRC32 spec
* core/forkid, eth: make forkid a struct, next uint64, enr struct, RLP
* core/forkid: change forkhash rlp encoding from int to [4]byte
* eth: fixup eth entry a bit and update it every block
* eth: fix lint
* eth: fix crash in ethclient tests
parent cc0f0e27a6
commit 983f92368b
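The commit message above notes that the fork hash RLP encoding changed from an integer to [4]byte. As a quick orientation before the diff, the sketch below shows what that wire encoding looks like. The forkID struct here is a hypothetical standalone mirror of the forkid.ID type added in core/forkid/forkid.go further down, and the values are the ones used by the new TestEncoding test; this is illustrative only, not part of the commit.

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/rlp"
)

// forkID mirrors the shape of the new core/forkid.ID type, for illustration only.
type forkID struct {
    Hash [4]byte // CRC32 of the genesis hash and passed fork block numbers
    Next uint64  // next upcoming fork block, or 0 if none is known
}

func main() {
    id := forkID{Hash: [4]byte{0xde, 0xad, 0xbe, 0xef}, Next: 0xBADDCAFE}
    enc, err := rlp.EncodeToBytes(id)
    if err != nil {
        panic(err)
    }
    fmt.Printf("%x\n", enc) // ca84deadbeef84baddcafe, matching the TestEncoding vector below
}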
@@ -2146,7 +2146,7 @@ func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
     return bc.hc.GetHeaderByNumber(number)
 }
 
-// Config retrieves the blockchain's chain configuration.
+// Config retrieves the chain's fork configuration.
 func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig }
 
 // Engine retrieves the blockchain's consensus engine.
core/forkid/forkid.go (new file, 236 lines)
@@ -0,0 +1,236 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package forkid implements EIP-2124 (https://eips.ethereum.org/EIPS/eip-2124).
package forkid

import (
    "encoding/binary"
    "errors"
    "hash/crc32"
    "math"
    "math/big"
    "reflect"
    "strings"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/params"
)

var (
    // ErrRemoteStale is returned by the validator if a remote fork checksum is a
    // subset of our already applied forks, but the announced next fork block is
    // not on our already passed chain.
    ErrRemoteStale = errors.New("remote needs update")

    // ErrLocalIncompatibleOrStale is returned by the validator if a remote fork
    // checksum does not match any local checksum variation, signalling that the
    // two chains have diverged in the past at some point (possibly at genesis).
    ErrLocalIncompatibleOrStale = errors.New("local incompatible or needs update")
)

// ID is a fork identifier as defined by EIP-2124.
type ID struct {
    Hash [4]byte // CRC32 checksum of the genesis block and passed fork block numbers
    Next uint64  // Block number of the next upcoming fork, or 0 if no forks are known
}

// NewID calculates the Ethereum fork ID from the chain config and head.
func NewID(chain *core.BlockChain) ID {
    return newID(
        chain.Config(),
        chain.Genesis().Hash(),
        chain.CurrentHeader().Number.Uint64(),
    )
}

// newID is the internal version of NewID, which takes extracted values as its
// arguments instead of a chain. The reason is to allow testing the IDs without
// having to simulate an entire blockchain.
func newID(config *params.ChainConfig, genesis common.Hash, head uint64) ID {
    // Calculate the starting checksum from the genesis hash
    hash := crc32.ChecksumIEEE(genesis[:])

    // Calculate the current fork checksum and the next fork block
    var next uint64
    for _, fork := range gatherForks(config) {
        if fork <= head {
            // Fork already passed, checksum the previous hash and the fork number
            hash = checksumUpdate(hash, fork)
            continue
        }
        next = fork
        break
    }
    return ID{Hash: checksumToBytes(hash), Next: next}
}

// NewFilter creates a filter that returns if a fork ID should be rejected or not
// based on the local chain's status.
func NewFilter(chain *core.BlockChain) func(id ID) error {
    return newFilter(
        chain.Config(),
        chain.Genesis().Hash(),
        func() uint64 {
            return chain.CurrentHeader().Number.Uint64()
        },
    )
}

// newFilter is the internal version of NewFilter, taking closures as its arguments
// instead of a chain. The reason is to allow testing it without having to simulate
// an entire blockchain.
func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() uint64) func(id ID) error {
    // Calculate all the valid fork hash and fork next combos
    var (
        forks = gatherForks(config)
        sums  = make([][4]byte, len(forks)+1) // 0th is the genesis
    )
    hash := crc32.ChecksumIEEE(genesis[:])
    sums[0] = checksumToBytes(hash)
    for i, fork := range forks {
        hash = checksumUpdate(hash, fork)
        sums[i+1] = checksumToBytes(hash)
    }
    // Add two sentries to simplify the fork checks and don't require special
    // casing the last one.
    forks = append(forks, math.MaxUint64) // Last fork will never be passed

    // Create a validator that will filter out incompatible chains
    return func(id ID) error {
        // Run the fork checksum validation ruleset:
        //   1. If local and remote FORK_CSUM matches, connect.
        //        The two nodes are in the same fork state currently. They might know
        //        of differing future forks, but that's not relevant until the fork
        //        triggers (might be postponed, nodes might be updated to match).
        //   2. If the remote FORK_CSUM is a subset of the local past forks and the
        //      remote FORK_NEXT matches with the locally following fork block number,
        //      connect.
        //        Remote node is currently syncing. It might eventually diverge from
        //        us, but at this current point in time we don't have enough information.
        //   3. If the remote FORK_CSUM is a superset of the local past forks and can
        //      be completed with locally known future forks, connect.
        //        Local node is currently syncing. It might eventually diverge from
        //        the remote, but at this current point in time we don't have enough
        //        information.
        //   4. Reject in all other cases.
        head := headfn()
        for i, fork := range forks {
            // If our head is beyond this fork, continue to the next (we have a dummy
            // fork of maxuint64 as the last item to always fail this check eventually).
            if head > fork {
                continue
            }
            // Found the first unpassed fork block, check if our current state matches
            // the remote checksum (rule #1).
            if sums[i] == id.Hash {
                // Yay, fork checksum matched, ignore any upcoming fork
                return nil
            }
            // The local and remote nodes are in different forks currently, check if the
            // remote checksum is a subset of our local forks (rule #2).
            for j := 0; j < i; j++ {
                if sums[j] == id.Hash {
                    // Remote checksum is a subset, validate based on the announced next fork
                    if forks[j] != id.Next {
                        return ErrRemoteStale
                    }
                    return nil
                }
            }
            // Remote chain is not a subset of our local one, check if it's a superset by
            // any chance, signalling that we're simply out of sync (rule #3).
            for j := i + 1; j < len(sums); j++ {
                if sums[j] == id.Hash {
                    // Yay, remote checksum is a superset, ignore upcoming forks
                    return nil
                }
            }
            // No exact, subset or superset match. We are on differing chains, reject.
            return ErrLocalIncompatibleOrStale
        }
        log.Error("Impossible fork ID validation", "id", id)
        return nil // Something's very wrong, accept rather than reject
    }
}

// checksum calculates the IEEE CRC32 checksum of a block number.
func checksum(fork uint64) uint32 {
    var blob [8]byte
    binary.BigEndian.PutUint64(blob[:], fork)
    return crc32.ChecksumIEEE(blob[:])
}

// checksumUpdate calculates the next IEEE CRC32 checksum based on the previous
// one and a fork block number (equivalent to CRC32(original-blob || fork)).
func checksumUpdate(hash uint32, fork uint64) uint32 {
    var blob [8]byte
    binary.BigEndian.PutUint64(blob[:], fork)
    return crc32.Update(hash, crc32.IEEETable, blob[:])
}

// checksumToBytes converts a uint32 checksum into a [4]byte array.
func checksumToBytes(hash uint32) [4]byte {
    var blob [4]byte
    binary.BigEndian.PutUint32(blob[:], hash)
    return blob
}

// gatherForks gathers all the known forks and creates a sorted list out of them.
func gatherForks(config *params.ChainConfig) []uint64 {
    // Gather all the fork block numbers via reflection
    kind := reflect.TypeOf(params.ChainConfig{})
    conf := reflect.ValueOf(config).Elem()

    var forks []uint64
    for i := 0; i < kind.NumField(); i++ {
        // Fetch the next field and skip non-fork rules
        field := kind.Field(i)
        if !strings.HasSuffix(field.Name, "Block") {
            continue
        }
        if field.Type != reflect.TypeOf(new(big.Int)) {
            continue
        }
        // Extract the fork rule block number and aggregate it
        rule := conf.Field(i).Interface().(*big.Int)
        if rule != nil {
            forks = append(forks, rule.Uint64())
        }
    }
    // Sort the fork block numbers to permit chronological XOR
    for i := 0; i < len(forks); i++ {
        for j := i + 1; j < len(forks); j++ {
            if forks[i] > forks[j] {
                forks[i], forks[j] = forks[j], forks[i]
            }
        }
    }
    // Deduplicate block numbers applying multiple forks
    for i := 1; i < len(forks); i++ {
        if forks[i] == forks[i-1] {
            forks = append(forks[:i], forks[i+1:]...)
            i--
        }
    }
    // Skip any forks in block 0, that's the genesis ruleset
    if len(forks) > 0 && forks[0] == 0 {
        forks = forks[1:]
    }
    return forks
}
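For context on how the package above is meant to be consumed: NewID derives the local fork ID from a live chain and NewFilter builds a validator for remote IDs. The helper below is a hedged sketch only; the chain value and the validatePeer wrapper are assumptions for illustration and are not part of this commit (here the eth protocol only advertises the ID via ENR, it does not yet exchange it in the handshake).

package example // hypothetical consumer of core/forkid, not part of this commit

import (
    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/core/forkid"
)

// validatePeer derives the local fork ID and checks a remote one against it.
func validatePeer(chain *core.BlockChain, remoteID forkid.ID) error {
    localID := forkid.NewID(chain) // CRC32 of genesis + passed forks, plus the next fork block
    _ = localID                    // would be advertised to peers in a real exchange

    filter := forkid.NewFilter(chain) // captures config and genesis, re-reads the head on every call
    return filter(remoteID)           // nil, ErrRemoteStale or ErrLocalIncompatibleOrStale
}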
core/forkid/forkid_test.go (new file, 205 lines)
@@ -0,0 +1,205 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package forkid

import (
    "bytes"
    "math"
    "testing"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/params"
    "github.com/ethereum/go-ethereum/rlp"
)

// TestCreation tests that different genesis and fork rule combinations result in
// the correct fork ID.
func TestCreation(t *testing.T) {
    type testcase struct {
        head uint64
        want ID
    }
    tests := []struct {
        config  *params.ChainConfig
        genesis common.Hash
        cases   []testcase
    }{
        // Mainnet test cases
        {
            params.MainnetChainConfig,
            params.MainnetGenesisHash,
            []testcase{
                {0, ID{Hash: checksumToBytes(0xfc64ec04), Next: 1150000}},       // Unsynced
                {1149999, ID{Hash: checksumToBytes(0xfc64ec04), Next: 1150000}}, // Last Frontier block
                {1150000, ID{Hash: checksumToBytes(0x97c2c34c), Next: 1920000}}, // First Homestead block
                {1919999, ID{Hash: checksumToBytes(0x97c2c34c), Next: 1920000}}, // Last Homestead block
                {1920000, ID{Hash: checksumToBytes(0x91d1f948), Next: 2463000}}, // First DAO block
                {2462999, ID{Hash: checksumToBytes(0x91d1f948), Next: 2463000}}, // Last DAO block
                {2463000, ID{Hash: checksumToBytes(0x7a64da13), Next: 2675000}}, // First Tangerine block
                {2674999, ID{Hash: checksumToBytes(0x7a64da13), Next: 2675000}}, // Last Tangerine block
                {2675000, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}}, // First Spurious block
                {4369999, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}}, // Last Spurious block
                {4370000, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}}, // First Byzantium block
                {7279999, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}}, // Last Byzantium block
                {7280000, ID{Hash: checksumToBytes(0x668db0af), Next: 0}},       // First and last Constantinople, first Petersburg block
                {7987396, ID{Hash: checksumToBytes(0x668db0af), Next: 0}},       // Today Petersburg block
            },
        },
        // Ropsten test cases
        {
            params.TestnetChainConfig,
            params.TestnetGenesisHash,
            []testcase{
                {0, ID{Hash: checksumToBytes(0x30c7ddbc), Next: 10}},            // Unsynced, last Frontier, Homestead and first Tangerine block
                {9, ID{Hash: checksumToBytes(0x30c7ddbc), Next: 10}},            // Last Tangerine block
                {10, ID{Hash: checksumToBytes(0x63760190), Next: 1700000}},      // First Spurious block
                {1699999, ID{Hash: checksumToBytes(0x63760190), Next: 1700000}}, // Last Spurious block
                {1700000, ID{Hash: checksumToBytes(0x3ea159c7), Next: 4230000}}, // First Byzantium block
                {4229999, ID{Hash: checksumToBytes(0x3ea159c7), Next: 4230000}}, // Last Byzantium block
                {4230000, ID{Hash: checksumToBytes(0x97b544f3), Next: 4939394}}, // First Constantinople block
                {4939393, ID{Hash: checksumToBytes(0x97b544f3), Next: 4939394}}, // Last Constantinople block
                {4939394, ID{Hash: checksumToBytes(0xd6e2149b), Next: 0}},       // First Petersburg block
                {5822692, ID{Hash: checksumToBytes(0xd6e2149b), Next: 0}},       // Today Petersburg block
            },
        },
        // Rinkeby test cases
        {
            params.RinkebyChainConfig,
            params.RinkebyGenesisHash,
            []testcase{
                {0, ID{Hash: checksumToBytes(0x3b8e0691), Next: 1}},             // Unsynced, last Frontier block
                {1, ID{Hash: checksumToBytes(0x60949295), Next: 2}},             // First and last Homestead block
                {2, ID{Hash: checksumToBytes(0x8bde40dd), Next: 3}},             // First and last Tangerine block
                {3, ID{Hash: checksumToBytes(0xcb3a64bb), Next: 1035301}},       // First Spurious block
                {1035300, ID{Hash: checksumToBytes(0xcb3a64bb), Next: 1035301}}, // Last Spurious block
                {1035301, ID{Hash: checksumToBytes(0x8d748b57), Next: 3660663}}, // First Byzantium block
                {3660662, ID{Hash: checksumToBytes(0x8d748b57), Next: 3660663}}, // Last Byzantium block
                {3660663, ID{Hash: checksumToBytes(0xe49cab14), Next: 4321234}}, // First Constantinople block
                {4321233, ID{Hash: checksumToBytes(0xe49cab14), Next: 4321234}}, // Last Constantinople block
                {4321234, ID{Hash: checksumToBytes(0xafec6b27), Next: 0}},       // First Petersburg block
                {4586649, ID{Hash: checksumToBytes(0xafec6b27), Next: 0}},       // Today Petersburg block
            },
        },
        // Goerli test cases
        {
            params.GoerliChainConfig,
            params.GoerliGenesisHash,
            []testcase{
                {0, ID{Hash: checksumToBytes(0xa3f5ab08), Next: 0}},      // Unsynced, last Frontier, Homestead, Tangerine, Spurious, Byzantium, Constantinople and first Petersburg block
                {795329, ID{Hash: checksumToBytes(0xa3f5ab08), Next: 0}}, // Today Petersburg block
            },
        },
    }
    for i, tt := range tests {
        for j, ttt := range tt.cases {
            if have := newID(tt.config, tt.genesis, ttt.head); have != ttt.want {
                t.Errorf("test %d, case %d: fork ID mismatch: have %x, want %x", i, j, have, ttt.want)
            }
        }
    }
}

// TestValidation tests that a local peer correctly validates and accepts a remote
// fork ID.
func TestValidation(t *testing.T) {
    tests := []struct {
        head uint64
        id   ID
        err  error
    }{
        // Local is mainnet Petersburg, remote announces the same. No future fork is announced.
        {7987396, ID{Hash: checksumToBytes(0x668db0af), Next: 0}, nil},

        // Local is mainnet Petersburg, remote announces the same. Remote also announces a next fork
        // at block 0xffffffff, but that is uncertain.
        {7987396, ID{Hash: checksumToBytes(0x668db0af), Next: math.MaxUint64}, nil},

        // Local is mainnet currently in Byzantium only (so it's aware of Petersburg), remote announces
        // also Byzantium, but it's not yet aware of Petersburg (e.g. non updated node before the fork).
        // In this case we don't know if Petersburg passed yet or not.
        {7279999, ID{Hash: checksumToBytes(0xa00bc324), Next: 0}, nil},

        // Local is mainnet currently in Byzantium only (so it's aware of Petersburg), remote announces
        // also Byzantium, and it's also aware of Petersburg (e.g. updated node before the fork). We
        // don't know if Petersburg passed yet (will pass) or not.
        {7279999, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}, nil},

        // Local is mainnet currently in Byzantium only (so it's aware of Petersburg), remote announces
        // also Byzantium, and it's also aware of some random fork (e.g. misconfigured Petersburg). As
        // neither fork has passed at either node, they may mismatch, but we still connect for now.
        {7279999, ID{Hash: checksumToBytes(0xa00bc324), Next: math.MaxUint64}, nil},

        // Local is mainnet Petersburg, remote announces Byzantium + knowledge about Petersburg. Remote
        // is simply out of sync, accept.
        {7987396, ID{Hash: checksumToBytes(0x668db0af), Next: 7280000}, nil},

        // Local is mainnet Petersburg, remote announces Spurious + knowledge about Byzantium. Remote
        // is definitely out of sync. It may or may not need the Petersburg update, we don't know yet.
        {7987396, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}, nil},

        // Local is mainnet Byzantium, remote announces Petersburg. Local is out of sync, accept.
        {7279999, ID{Hash: checksumToBytes(0x668db0af), Next: 0}, nil},

        // Local is mainnet Spurious, remote announces Byzantium, but is not aware of Petersburg. Local
        // out of sync. Local also knows about a future fork, but that is uncertain yet.
        {4369999, ID{Hash: checksumToBytes(0xa00bc324), Next: 0}, nil},

        // Local is mainnet Petersburg, remote announces Byzantium but is not aware of further forks.
        // Remote needs software update.
        {7987396, ID{Hash: checksumToBytes(0xa00bc324), Next: 0}, ErrRemoteStale},

        // Local is mainnet Petersburg, and isn't aware of more forks. Remote announces Petersburg +
        // 0xffffffff. Local needs software update, reject.
        {7987396, ID{Hash: checksumToBytes(0x5cddc0e1), Next: 0}, ErrLocalIncompatibleOrStale},

        // Local is mainnet Byzantium, and is aware of Petersburg. Remote announces Petersburg +
        // 0xffffffff. Local needs software update, reject.
        {7279999, ID{Hash: checksumToBytes(0x5cddc0e1), Next: 0}, ErrLocalIncompatibleOrStale},

        // Local is mainnet Petersburg, remote is Rinkeby Petersburg.
        {7987396, ID{Hash: checksumToBytes(0xafec6b27), Next: 0}, ErrLocalIncompatibleOrStale},
    }
    for i, tt := range tests {
        filter := newFilter(params.MainnetChainConfig, params.MainnetGenesisHash, func() uint64 { return tt.head })
        if err := filter(tt.id); err != tt.err {
            t.Errorf("test %d: validation error mismatch: have %v, want %v", i, err, tt.err)
        }
    }
}

// Tests that IDs are properly RLP encoded (specifically important because we
// use uint32 to store the hash, but we need to encode it as [4]byte).
func TestEncoding(t *testing.T) {
    tests := []struct {
        id   ID
        want []byte
    }{
        {ID{Hash: checksumToBytes(0), Next: 0}, common.Hex2Bytes("c6840000000080")},
        {ID{Hash: checksumToBytes(0xdeadbeef), Next: 0xBADDCAFE}, common.Hex2Bytes("ca84deadbeef84baddcafe")},
        {ID{Hash: checksumToBytes(math.MaxUint32), Next: math.MaxUint64}, common.Hex2Bytes("ce84ffffffff88ffffffffffffffff")},
    }
    for i, tt := range tests {
        have, err := rlp.EncodeToBytes(tt.id)
        if err != nil {
            t.Errorf("test %d: failed to encode forkid: %v", i, err)
            continue
        }
        if !bytes.Equal(have, tt.want) {
            t.Errorf("test %d: RLP mismatch: have %x, want %x", i, have, tt.want)
        }
    }
}
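The hard-coded checksums in the tests above (0xfc64ec04, 0x97c2c34c, ...) are nothing more than CRC32 sums chained over the genesis hash and the passed fork block numbers, exactly as checksumUpdate's comment states: crc32.Update over the running sum equals CRC32 of the concatenated blob. A minimal sketch of that property, with placeholder inputs rather than real chain data:

package main

import (
    "encoding/binary"
    "fmt"
    "hash/crc32"
)

func main() {
    genesis := make([]byte, 32) // placeholder genesis hash, all zeroes
    var fork [8]byte
    binary.BigEndian.PutUint64(fork[:], 1150000) // placeholder fork block number

    // Incremental: start from the genesis checksum, fold in the fork block.
    chained := crc32.Update(crc32.ChecksumIEEE(genesis), crc32.IEEETable, fork[:])

    // One-shot: checksum the concatenation directly.
    direct := crc32.ChecksumIEEE(append(genesis, fork[:]...))

    fmt.Println(chained == direct) // true
}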
@@ -47,6 +47,7 @@ import (
     "github.com/ethereum/go-ethereum/miner"
     "github.com/ethereum/go-ethereum/node"
     "github.com/ethereum/go-ethereum/p2p"
+    "github.com/ethereum/go-ethereum/p2p/enr"
     "github.com/ethereum/go-ethereum/params"
     "github.com/ethereum/go-ethereum/rlp"
     "github.com/ethereum/go-ethereum/rpc"
@@ -66,7 +67,9 @@ type Ethereum struct {
     config *Config
 
     // Channel for shutting down the service
-    shutdownChan chan bool // Channel for shutting down the Ethereum
+    shutdownChan chan bool
+
+    server *p2p.Server
 
     // Handlers
     txPool *core.TxPool
@@ -496,7 +499,7 @@ func (s *Ethereum) EventMux() *event.TypeMux { return s.eventMux }
 func (s *Ethereum) Engine() consensus.Engine { return s.engine }
 func (s *Ethereum) ChainDb() ethdb.Database { return s.chainDb }
 func (s *Ethereum) IsListening() bool { return true } // Always listening
-func (s *Ethereum) EthVersion() int { return int(s.protocolManager.SubProtocols[0].Version) }
+func (s *Ethereum) EthVersion() int { return int(ProtocolVersions[0]) }
 func (s *Ethereum) NetVersion() uint64 { return s.networkID }
 func (s *Ethereum) Downloader() *downloader.Downloader { return s.protocolManager.downloader }
 func (s *Ethereum) Synced() bool { return atomic.LoadUint32(&s.protocolManager.acceptTxs) == 1 }
@@ -505,15 +508,22 @@ func (s *Ethereum) ArchiveMode() bool { return s.config.NoPruni
 // Protocols implements node.Service, returning all the currently configured
 // network protocols to start.
 func (s *Ethereum) Protocols() []p2p.Protocol {
-    if s.lesServer == nil {
-        return s.protocolManager.SubProtocols
+    protos := make([]p2p.Protocol, len(ProtocolVersions))
+    for i, vsn := range ProtocolVersions {
+        protos[i] = s.protocolManager.makeProtocol(vsn)
+        protos[i].Attributes = []enr.Entry{s.currentEthEntry()}
     }
-    return append(s.protocolManager.SubProtocols, s.lesServer.Protocols()...)
+    if s.lesServer != nil {
+        protos = append(protos, s.lesServer.Protocols()...)
+    }
+    return protos
 }
 
 // Start implements node.Service, starting all internal goroutines needed by the
 // Ethereum protocol implementation.
 func (s *Ethereum) Start(srvr *p2p.Server) error {
+    s.startEthEntryUpdate(srvr.LocalNode())
+
     // Start the bloom bits servicing goroutines
     s.startBloomHandlers(params.BloomBitsBlocks)
eth/enr_entry.go (new file, 61 lines)
@@ -0,0 +1,61 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package eth

import (
    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/core/forkid"
    "github.com/ethereum/go-ethereum/p2p/enode"
    "github.com/ethereum/go-ethereum/rlp"
)

// ethEntry is the "eth" ENR entry which advertises eth protocol
// on the discovery network.
type ethEntry struct {
    ForkID forkid.ID // Fork identifier per EIP-2124

    // Ignore additional fields (for forward compatibility).
    Rest []rlp.RawValue `rlp:"tail"`
}

// ENRKey implements enr.Entry.
func (e ethEntry) ENRKey() string {
    return "eth"
}

func (eth *Ethereum) startEthEntryUpdate(ln *enode.LocalNode) {
    var newHead = make(chan core.ChainHeadEvent, 10)
    sub := eth.blockchain.SubscribeChainHeadEvent(newHead)

    go func() {
        defer sub.Unsubscribe()
        for {
            select {
            case <-newHead:
                ln.Set(eth.currentEthEntry())
            case <-sub.Err():
                // Would be nice to sync with eth.Stop, but there is no
                // good way to do that.
                return
            }
        }
    }()
}

func (eth *Ethereum) currentEthEntry() *ethEntry {
    return &ethEntry{ForkID: forkid.NewID(eth.blockchain)}
}
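The entry above is only ever written into the local node record; for completeness, a peer that obtains such a record could read it back through (*enode.Node).Load. The snippet below is a hedged sketch under that assumption: ethEntry itself is unexported, so an outside consumer would declare an equivalent struct with the same "eth" key, as done here; the ethENREntry and forkIDFromNode names are hypothetical.

package example // hypothetical consumer, not part of this commit

import (
    "github.com/ethereum/go-ethereum/core/forkid"
    "github.com/ethereum/go-ethereum/p2p/enode"
    "github.com/ethereum/go-ethereum/rlp"
)

// ethENREntry mirrors the unexported ethEntry type defined above.
type ethENREntry struct {
    ForkID forkid.ID
    Rest   []rlp.RawValue `rlp:"tail"`
}

// ENRKey makes the struct satisfy enr.Entry under the same "eth" key.
func (ethENREntry) ENRKey() string { return "eth" }

// forkIDFromNode extracts the announced fork ID from a discovered node record.
func forkIDFromNode(n *enode.Node) (forkid.ID, bool) {
    var entry ethENREntry
    if err := n.Load(&entry); err != nil {
        return forkid.ID{}, false // no usable "eth" entry in the record
    }
    return entry.ForkID, true
}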
@@ -58,10 +58,6 @@ var (
     syncChallengeTimeout = 15 * time.Second // Time allowance for a node to reply to the sync progress challenge
 )
 
-// errIncompatibleConfig is returned if the requested protocols and configs are
-// not compatible (low protocol version restrictions and high requirements).
-var errIncompatibleConfig = errors.New("incompatible configuration")
-
 func errResp(code errCode, format string, v ...interface{}) error {
     return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...))
 }
@@ -75,17 +71,14 @@ type ProtocolManager struct {
     checkpointNumber uint64      // Block number for the sync progress validator to cross reference
     checkpointHash   common.Hash // Block hash for the sync progress validator to cross reference
 
     txpool     txPool
     blockchain *core.BlockChain
-    chainconfig *params.ChainConfig
-    maxPeers    int
+    maxPeers   int
 
     downloader *downloader.Downloader
     fetcher    *fetcher.Fetcher
     peers      *peerSet
 
-    SubProtocols []p2p.Protocol
-
     eventMux *event.TypeMux
     txsCh    chan core.NewTxsEvent
     txsSub   event.Subscription
@@ -113,7 +106,6 @@ func NewProtocolManager(config *params.ChainConfig, checkpoint *params.TrustedCh
         eventMux:    mux,
         txpool:      txpool,
         blockchain:  blockchain,
-        chainconfig: config,
         peers:       newPeerSet(),
         whitelist:   whitelist,
         newPeerCh:   make(chan *peer),
@@ -149,45 +141,7 @@ func NewProtocolManager(config *params.ChainConfig, checkpoint *params.TrustedCh
         manager.checkpointNumber = (checkpoint.SectionIndex+1)*params.CHTFrequency - 1
         manager.checkpointHash = checkpoint.SectionHead
     }
-    // Initiate a sub-protocol for every implemented version we can handle
-    manager.SubProtocols = make([]p2p.Protocol, 0, len(ProtocolVersions))
-    for i, version := range ProtocolVersions {
-        // Skip protocol version if incompatible with the mode of operation
-        // TODO(karalabe): hard-drop eth/62 from the code base
-        if atomic.LoadUint32(&manager.fastSync) == 1 && version < eth63 {
-            continue
-        }
-        // Compatible; initialise the sub-protocol
-        version := version // Closure for the run
-        manager.SubProtocols = append(manager.SubProtocols, p2p.Protocol{
-            Name:    ProtocolName,
-            Version: version,
-            Length:  ProtocolLengths[i],
-            Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
-                peer := manager.newPeer(int(version), p, rw)
-                select {
-                case manager.newPeerCh <- peer:
-                    manager.wg.Add(1)
-                    defer manager.wg.Done()
-                    return manager.handle(peer)
-                case <-manager.quitSync:
-                    return p2p.DiscQuitting
-                }
-            },
-            NodeInfo: func() interface{} {
-                return manager.NodeInfo()
-            },
-            PeerInfo: func(id enode.ID) interface{} {
-                if p := manager.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil {
-                    return p.Info()
-                }
-                return nil
-            },
-        })
-    }
-    if len(manager.SubProtocols) == 0 {
-        return nil, errIncompatibleConfig
-    }
     // Construct the downloader (long sync) and its backing state bloom if fast
     // sync is requested. The downloader is responsible for deallocating the state
     // bloom when it's done.
@@ -235,6 +189,39 @@ func NewProtocolManager(config *params.ChainConfig, checkpoint *params.TrustedCh
     return manager, nil
 }
 
+func (pm *ProtocolManager) makeProtocol(version uint) p2p.Protocol {
+    length, ok := protocolLengths[version]
+    if !ok {
+        panic("makeProtocol for unknown version")
+    }
+
+    return p2p.Protocol{
+        Name:    protocolName,
+        Version: version,
+        Length:  length,
+        Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
+            peer := pm.newPeer(int(version), p, rw)
+            select {
+            case pm.newPeerCh <- peer:
+                pm.wg.Add(1)
+                defer pm.wg.Done()
+                return pm.handle(peer)
+            case <-pm.quitSync:
+                return p2p.DiscQuitting
+            }
+        },
+        NodeInfo: func() interface{} {
+            return pm.NodeInfo()
+        },
+        PeerInfo: func(id enode.ID) interface{} {
+            if p := pm.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil {
+                return p.Info()
+            }
+            return nil
+        },
+    }
+}
+
 func (pm *ProtocolManager) removePeer(id string) {
     // Short circuit if the peer was already removed
     peer := pm.peers.Peer(id)
@@ -381,8 +368,8 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
     if err != nil {
         return err
     }
-    if msg.Size > ProtocolMaxMsgSize {
-        return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
+    if msg.Size > protocolMaxMsgSize {
+        return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, protocolMaxMsgSize)
     }
     defer msg.Discard()
 
@@ -38,35 +38,6 @@ import (
     "github.com/ethereum/go-ethereum/params"
 )
 
-// Tests that protocol versions and modes of operations are matched up properly.
-func TestProtocolCompatibility(t *testing.T) {
-    // Define the compatibility chart
-    tests := []struct {
-        version    uint
-        mode       downloader.SyncMode
-        compatible bool
-    }{
-        {61, downloader.FullSync, true}, {62, downloader.FullSync, true}, {63, downloader.FullSync, true},
-        {61, downloader.FastSync, false}, {62, downloader.FastSync, false}, {63, downloader.FastSync, true},
-    }
-    // Make sure anything we screw up is restored
-    backup := ProtocolVersions
-    defer func() { ProtocolVersions = backup }()
-
-    // Try all available compatibility configs and check for errors
-    for i, tt := range tests {
-        ProtocolVersions = []uint{tt.version}
-
-        pm, _, err := newTestProtocolManager(tt.mode, 0, nil, nil)
-        if pm != nil {
-            defer pm.Stop()
-        }
-        if (err == nil && !tt.compatible) || (err != nil && tt.compatible) {
-            t.Errorf("test %d: compatibility mismatch: have error %v, want compatibility %v", i, err, tt.compatible)
-        }
-    }
-}
-
 // Tests that block headers can be retrieved from a remote chain based on user queries.
 func TestGetBlockHeaders62(t *testing.T) { testGetBlockHeaders(t, 62) }
 func TestGetBlockHeaders63(t *testing.T) { testGetBlockHeaders(t, 63) }
@@ -394,8 +394,8 @@ func (p *peer) readStatus(network uint64, status *statusData, genesis common.Has
     if msg.Code != StatusMsg {
         return errResp(ErrNoStatusMsg, "first msg has code %x (!= %x)", msg.Code, StatusMsg)
     }
-    if msg.Size > ProtocolMaxMsgSize {
-        return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
+    if msg.Size > protocolMaxMsgSize {
+        return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, protocolMaxMsgSize)
     }
     // Decode the handshake and make sure everything matches
     if err := msg.Decode(&status); err != nil {
@@ -34,16 +34,16 @@ const (
     eth63 = 63
 )
 
-// ProtocolName is the official short name of the protocol used during capability negotiation.
-var ProtocolName = "eth"
+// protocolName is the official short name of the protocol used during capability negotiation.
+const protocolName = "eth"
 
 // ProtocolVersions are the supported versions of the eth protocol (first is primary).
-var ProtocolVersions = []uint{eth63, eth62}
+var ProtocolVersions = []uint{eth63}
 
-// ProtocolLengths are the number of implemented messages corresponding to different protocol versions.
-var ProtocolLengths = []uint64{17, 8}
+// protocolLengths are the number of implemented messages corresponding to different protocol versions.
+var protocolLengths = map[uint]uint64{eth63: 17, eth62: 8}
 
-const ProtocolMaxMsgSize = 10 * 1024 * 1024 // Maximum cap on the size of a protocol message
+const protocolMaxMsgSize = 10 * 1024 * 1024 // Maximum cap on the size of a protocol message
 
 // eth protocol message codes
 const (
@@ -70,7 +70,7 @@ func TestGetSetIPv6(t *testing.T) {
     assert.Equal(t, ip, ip2)
 }
 
-// TestGetSetDiscPort tests encoding/decoding and setting/getting of the DiscPort key.
+// TestGetSetUDP tests encoding/decoding and setting/getting of the UDP key.
 func TestGetSetUDP(t *testing.T) {
     port := UDP(30309)
     var r Record
@@ -286,6 +286,11 @@ func (c *conn) set(f connFlag, val bool) {
     }
 }
 
+// LocalNode returns the local node record.
+func (srv *Server) LocalNode() *enode.LocalNode {
+    return srv.localnode
+}
+
 // Peers returns all connected peers.
 func (srv *Server) Peers() []*Peer {
     var ps []*Peer