Merge branch 'master' into max-stream-peer-servers

Janos Guljas 2018-09-25 16:57:31 +02:00
commit 24349144b6
221 changed files with 4273 additions and 3680 deletions


@ -1,16 +1,40 @@
# Contributing
Thank you for considering helping out with the source code! We welcome
contributions from anyone on the internet, and are grateful for even the
smallest of fixes!
If you'd like to contribute to go-ethereum, please fork, fix, commit and send a
pull request for the maintainers to review and merge into the main code base. If
you wish to submit more complex changes though, please check up with the core
devs first on [our gitter channel](https://gitter.im/ethereum/go-ethereum) to
ensure those changes are in line with the general philosophy of the project
and/or get some early feedback which can make both your efforts much lighter as
well as our review and merge procedures quick and simple.
## Coding guidelines
Please make sure your contributions adhere to our coding guidelines:
* Code must adhere to the official Go
[formatting](https://golang.org/doc/effective_go.html#formatting) guidelines
(i.e. uses [gofmt](https://golang.org/cmd/gofmt/)).
* Code must be documented adhering to the official Go
[commentary](https://golang.org/doc/effective_go.html#commentary) guidelines.
* Pull requests need to be based on and opened against the `master` branch.
* Commit messages should be prefixed with the package(s) they modify.
* E.g. "eth, rpc: make trace configs optional"
## Can I have feature X
Before you do a feature request please check and make sure that it isn't possible
through some other means. The JavaScript enabled console is a powerful feature
in the right hands. Please check our [Wiki page](https://github.com/ethereum/go-ethereum/wiki) for more info
Before you submit a feature request, please check and make sure that it isn't
possible through some other means. The JavaScript-enabled console is a powerful
feature in the right hands. Please check our
[Wiki page](https://github.com/ethereum/go-ethereum/wiki) for more info
and help.
## Contributing
## Configuration, dependencies, and tests
If you'd like to contribute to go-ethereum please fork, fix, commit and
send a pull request. Commits which do not comply with the coding standards
are ignored (use gofmt!).
See [Developers' Guide](https://github.com/ethereum/go-ethereum/wiki/Developers'-Guide)
for more details on configuring your environment, testing, and
dependency management.
Please see the [Developers' Guide](https://github.com/ethereum/go-ethereum/wiki/Developers'-Guide)
for more details on configuring your environment, managing project dependencies
and testing procedures.


@ -14,7 +14,6 @@ matrix:
- go run build/ci.go install
- go run build/ci.go test -coverage $TEST_PACKAGES
# These are the latest Go versions.
- os: linux
dist: trusty
sudo: required
@ -26,8 +25,20 @@ matrix:
- go run build/ci.go install
- go run build/ci.go test -coverage $TEST_PACKAGES
# These are the latest Go versions.
- os: linux
dist: trusty
sudo: required
go: 1.11.x
script:
- sudo modprobe fuse
- sudo chmod 666 /dev/fuse
- sudo chown root:$USER /etc/fuse.conf
- go run build/ci.go install
- go run build/ci.go test -coverage $TEST_PACKAGES
- os: osx
go: 1.10.x
go: 1.11.x
script:
- unset -f cd # workaround for https://github.com/travis-ci/travis-ci/issues/8703
- go run build/ci.go install
@ -36,7 +47,7 @@ matrix:
# This builder only tests code linters on latest version of Go
- os: linux
dist: trusty
go: 1.10.x
go: 1.11.x
env:
- lint
git:
@ -47,7 +58,7 @@ matrix:
# This builder does the Ubuntu PPA upload
- os: linux
dist: trusty
go: 1.10.x
go: 1.11.x
env:
- ubuntu-ppa
git:
@ -66,7 +77,7 @@ matrix:
- os: linux
dist: trusty
sudo: required
go: 1.10.x
go: 1.11.x
env:
- azure-linux
git:
@ -100,7 +111,7 @@ matrix:
dist: trusty
services:
- docker
go: 1.10.x
go: 1.11.x
env:
- azure-linux-mips
git:
@ -144,7 +155,7 @@ matrix:
git:
submodules: false # avoid cloning ethereum/tests
before_install:
- curl https://storage.googleapis.com/golang/go1.10.3.linux-amd64.tar.gz | tar -xz
- curl https://storage.googleapis.com/golang/go1.11.linux-amd64.tar.gz | tar -xz
- export PATH=`pwd`/go/bin:$PATH
- export GOROOT=`pwd`/go
- export GOPATH=$HOME/go
@ -161,7 +172,7 @@ matrix:
# This builder does the OSX Azure, iOS CocoaPods and iOS Azure uploads
- os: osx
go: 1.10.x
go: 1.11.x
env:
- azure-osx
- azure-ios
@ -190,7 +201,7 @@ matrix:
# This builder does the Azure archive purges to avoid accumulating junk
- os: linux
dist: trusty
go: 1.10.x
go: 1.11.x
env:
- azure-purge
git:


@ -1,5 +1,5 @@
# Build Geth in a stock Go builder container
FROM golang:1.10-alpine as builder
FROM golang:1.11-alpine as builder
RUN apk add --no-cache make gcc musl-dev linux-headers


@ -1,5 +1,5 @@
# Build Geth in a stock Go builder container
FROM golang:1.10-alpine as builder
FROM golang:1.11-alpine as builder
RUN apk add --no-cache make gcc musl-dev linux-headers


@ -69,7 +69,7 @@ func NewSimulatedBackend(alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBac
database := ethdb.NewMemDatabase()
genesis := core.Genesis{Config: params.AllEthashProtocolChanges, GasLimit: gasLimit, Alloc: alloc}
genesis.MustCommit(database)
blockchain, _ := core.NewBlockChain(database, nil, genesis.Config, ethash.NewFaker(), vm.Config{})
blockchain, _ := core.NewBlockChain(database, nil, genesis.Config, ethash.NewFaker(), vm.Config{}, nil)
backend := &SimulatedBackend{
database: database,


@ -103,7 +103,12 @@ func NewType(t string) (typ Type, err error) {
return typ, err
}
// parse the type and size of the abi-type.
parsedType := typeRegex.FindAllStringSubmatch(t, -1)[0]
matches := typeRegex.FindAllStringSubmatch(t, -1)
if len(matches) == 0 {
return Type{}, fmt.Errorf("invalid type '%v'", t)
}
parsedType := matches[0]
// varSize is the size of the variable
var varSize int
if len(parsedType[3]) > 0 {


@ -179,26 +179,34 @@ func storeNewKey(ks keyStore, rand io.Reader, auth string) (*Key, accounts.Accou
return key, a, err
}
func writeKeyFile(file string, content []byte) error {
func writeTemporaryKeyFile(file string, content []byte) (string, error) {
// Create the keystore directory with appropriate permissions
// in case it is not present yet.
const dirPerm = 0700
if err := os.MkdirAll(filepath.Dir(file), dirPerm); err != nil {
return err
return "", err
}
// Atomic write: create a temporary hidden file first
// then move it into place. TempFile assigns mode 0600.
f, err := ioutil.TempFile(filepath.Dir(file), "."+filepath.Base(file)+".tmp")
if err != nil {
return err
return "", err
}
if _, err := f.Write(content); err != nil {
f.Close()
os.Remove(f.Name())
return err
return "", err
}
f.Close()
return os.Rename(f.Name(), file)
return f.Name(), nil
}
func writeKeyFile(file string, content []byte) error {
name, err := writeTemporaryKeyFile(file, content)
if err != nil {
return err
}
return os.Rename(name, file)
}
// keyFileName implements the naming convention for keyfiles:


@ -78,7 +78,7 @@ type unlocked struct {
// NewKeyStore creates a keystore for the given directory.
func NewKeyStore(keydir string, scryptN, scryptP int) *KeyStore {
keydir, _ = filepath.Abs(keydir)
ks := &KeyStore{storage: &keyStorePassphrase{keydir, scryptN, scryptP}}
ks := &KeyStore{storage: &keyStorePassphrase{keydir, scryptN, scryptP, false}}
ks.init(keydir)
return ks
}


@ -35,6 +35,7 @@ import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"github.com/ethereum/go-ethereum/common"
@ -72,6 +73,10 @@ type keyStorePassphrase struct {
keysDirPath string
scryptN int
scryptP int
// skipKeyFileVerification disables the security feature which reads back
// and decrypts any newly created keyfile. This should be 'false' in all
// cases except tests -- setting this to 'true' is not recommended.
skipKeyFileVerification bool
}
func (ks keyStorePassphrase) GetKey(addr common.Address, filename, auth string) (*Key, error) {
@ -93,7 +98,7 @@ func (ks keyStorePassphrase) GetKey(addr common.Address, filename, auth string)
// StoreKey generates a key, encrypts with 'auth' and stores in the given directory
func StoreKey(dir, auth string, scryptN, scryptP int) (common.Address, error) {
_, a, err := storeNewKey(&keyStorePassphrase{dir, scryptN, scryptP}, rand.Reader, auth)
_, a, err := storeNewKey(&keyStorePassphrase{dir, scryptN, scryptP, false}, rand.Reader, auth)
return a.Address, err
}
@ -102,7 +107,25 @@ func (ks keyStorePassphrase) StoreKey(filename string, key *Key, auth string) er
if err != nil {
return err
}
return writeKeyFile(filename, keyjson)
// Write into temporary file
tmpName, err := writeTemporaryKeyFile(filename, keyjson)
if err != nil {
return err
}
if !ks.skipKeyFileVerification {
// Verify that we can decrypt the file with the given password.
_, err = ks.GetKey(key.Address, tmpName, auth)
if err != nil {
msg := "An error was encountered when saving and verifying the keystore file. \n" +
"This indicates that the keystore is corrupted. \n" +
"The corrupted file is stored at \n%v\n" +
"Please file a ticket at:\n\n" +
"https://github.com/ethereum/go-ethereum/issues." +
"The error was : %s"
return fmt.Errorf(msg, tmpName, err)
}
}
return os.Rename(tmpName, filename)
}
func (ks keyStorePassphrase) JoinPath(filename string) string {
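For context, a minimal sketch of the public keystore path that now exercises this write-then-verify round trip (the directory and passphrase below are made-up placeholders):

```go
// Sketch only: creating an account via the exported keystore API. With the
// change above, StoreKey re-reads and decrypts the freshly written keyfile
// before renaming it into place (unless skipKeyFileVerification is set,
// which only the tests do).
package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/accounts/keystore"
)

func main() {
	ks := keystore.NewKeyStore("/tmp/keystore-demo", keystore.StandardScryptN, keystore.StandardScryptP)

	acct, err := ks.NewAccount("correct horse battery staple")
	if err != nil {
		// A corrupted or undecryptable keyfile is now reported here rather
		// than silently written to disk.
		log.Fatal(err)
	}
	fmt.Println("new account:", acct.Address.Hex())
}
```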


@ -37,7 +37,7 @@ func tmpKeyStoreIface(t *testing.T, encrypted bool) (dir string, ks keyStore) {
t.Fatal(err)
}
if encrypted {
ks = &keyStorePassphrase{d, veryLightScryptN, veryLightScryptP}
ks = &keyStorePassphrase{d, veryLightScryptN, veryLightScryptP, true}
} else {
ks = &keyStorePlain{d}
}
@ -191,7 +191,7 @@ func TestV1_1(t *testing.T) {
func TestV1_2(t *testing.T) {
t.Parallel()
ks := &keyStorePassphrase{"testdata/v1", LightScryptN, LightScryptP}
ks := &keyStorePassphrase{"testdata/v1", LightScryptN, LightScryptP, true}
addr := common.HexToAddress("cb61d5a9c4896fb9658090b597ef0e7be6f7b67e")
file := "testdata/v1/cb61d5a9c4896fb9658090b597ef0e7be6f7b67e/cb61d5a9c4896fb9658090b597ef0e7be6f7b67e"
k, err := ks.GetKey(addr, file, "g")


@ -23,8 +23,8 @@ environment:
install:
- git submodule update --init
- rmdir C:\go /s /q
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.10.3.windows-%GETH_ARCH%.zip
- 7z x go1.10.3.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.11.windows-%GETH_ARCH%.zip
- 7z x go1.11.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
- go version
- gcc --version


@ -29,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/discv5"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/nat"
"github.com/ethereum/go-ethereum/p2p/netutil"
)
@ -85,7 +86,7 @@ func main() {
}
if *writeAddr {
fmt.Printf("%v\n", discover.PubkeyID(&nodeKey.PublicKey))
fmt.Printf("%v\n", enode.PubkeyToIDV4(&nodeKey.PublicKey))
os.Exit(0)
}


@ -1,6 +1,13 @@
### Changelog for external API
#### 4.0.0
* The external `account_Ecrecover`-method was removed.
* The external `account_Import`-method was removed.
#### 3.0.0
* The external `account_List`-method was changed to not expose `url`, which contained info about the local filesystem. It now returns only a list of addresses.
#### 2.0.0


@ -48,7 +48,7 @@ import (
)
// ExternalAPIVersion -- see extapi_changelog.md
const ExternalAPIVersion = "2.0.0"
const ExternalAPIVersion = "3.0.0"
// InternalAPIVersion -- see intapi_changelog.md
const InternalAPIVersion = "2.0.0"
@ -70,6 +70,10 @@ var (
Value: 4,
Usage: "log level to emit to the screen",
}
advancedMode = cli.BoolFlag{
Name: "advanced",
Usage: "If enabled, issues warnings instead of rejections for suspicious requests. Default off",
}
keystoreFlag = cli.StringFlag{
Name: "keystore",
Value: filepath.Join(node.DefaultDataDir(), "keystore"),
@ -191,6 +195,7 @@ func init() {
ruleFlag,
stdiouiFlag,
testFlag,
advancedMode,
}
app.Action = signer
app.Commands = []cli.Command{initCommand, attestCommand, addCredentialCommand}
@ -384,7 +389,8 @@ func signer(c *cli.Context) error {
c.String(keystoreFlag.Name),
c.Bool(utils.NoUSBFlag.Name),
ui, db,
c.Bool(utils.LightKDFFlag.Name))
c.Bool(utils.LightKDFFlag.Name),
c.Bool(advancedMode.Name))
api = apiImpl


@ -52,7 +52,7 @@ INFO [02-21|12:14:38] Ruleset attestation updated sha256=6c21d17374
At this point, we then start the signer with the rule-file:
```text
#./signer --rules rules.json
#./signer --rules rules.js
INFO [02-21|12:15:18] Using CLI as UI-channel
INFO [02-21|12:15:18] Loaded 4byte db signatures=5509 file=./4byte.json
@ -153,7 +153,7 @@ INFO [02-21|14:36:30] Ruleset attestation updated sha256=2a0cb661da
And start the signer:
```
#./signer --rules rules.js
#./signer --rules rules.js --rpc
INFO [02-21|14:41:56] Using CLI as UI-channel
INFO [02-21|14:41:56] Loaded 4byte db signatures=5509 file=./4byte.json
@ -190,7 +190,7 @@ INFO [02-21|14:42:56] Op rejected
The signer also stores all traffic over the external API in a log file. The last 4 lines show the two requests and their responses:
```text
#tail audit.log -n 4
#tail -n 4 audit.log
t=2018-02-21T14:42:41+0100 lvl=info msg=Sign api=signer type=request metadata="{\"remote\":\"127.0.0.1:49706\",\"local\":\"localhost:8550\",\"scheme\":\"HTTP/1.1\"}" addr="0x694267f14675d7e1b9494fd8d72fefe1755710fa [chksum INVALID]" data=202062617a6f6e6b2062617a2067617a0a
t=2018-02-21T14:42:42+0100 lvl=info msg=Sign api=signer type=response data=93e6161840c3ae1efc26dc68dedab6e8fc233bb3fefa1b4645dbf6609b93dace160572ea4ab33240256bb6d3dadb60dcd9c515d6374d3cf614ee897408d41d541c error=nil
t=2018-02-21T14:42:56+0100 lvl=info msg=Sign api=signer type=request metadata="{\"remote\":\"127.0.0.1:49708\",\"local\":\"localhost:8550\",\"scheme\":\"HTTP/1.1\"}" addr="0x694267f14675d7e1b9494fd8d72fefe1755710fa [chksum INVALID]" data=2020626f6e6b2062617a2067617a0a


@ -86,7 +86,7 @@ func runCmd(ctx *cli.Context) error {
chainConfig *params.ChainConfig
sender = common.BytesToAddress([]byte("sender"))
receiver = common.BytesToAddress([]byte("receiver"))
blockNumber uint64
genesisConfig *core.Genesis
)
if ctx.GlobalBool(MachineFlag.Name) {
tracer = NewJSONLogger(logconfig, os.Stdout)
@ -98,13 +98,14 @@ func runCmd(ctx *cli.Context) error {
}
if ctx.GlobalString(GenesisFlag.Name) != "" {
gen := readGenesis(ctx.GlobalString(GenesisFlag.Name))
genesisConfig = gen
db := ethdb.NewMemDatabase()
genesis := gen.ToBlock(db)
statedb, _ = state.New(genesis.Root(), state.NewDatabase(db))
chainConfig = gen.Config
blockNumber = gen.Number
} else {
statedb, _ = state.New(common.Hash{}, state.NewDatabase(ethdb.NewMemDatabase()))
genesisConfig = new(core.Genesis)
}
if ctx.GlobalString(SenderFlag.Name) != "" {
sender = common.HexToAddress(ctx.GlobalString(SenderFlag.Name))
@ -156,13 +157,19 @@ func runCmd(ctx *cli.Context) error {
}
initialGas := ctx.GlobalUint64(GasFlag.Name)
if genesisConfig.GasLimit != 0 {
initialGas = genesisConfig.GasLimit
}
runtimeConfig := runtime.Config{
Origin: sender,
State: statedb,
GasLimit: initialGas,
GasPrice: utils.GlobalBig(ctx, PriceFlag.Name),
Value: utils.GlobalBig(ctx, ValueFlag.Name),
BlockNumber: new(big.Int).SetUint64(blockNumber),
Difficulty: genesisConfig.Difficulty,
Time: new(big.Int).SetUint64(genesisConfig.Timestamp),
Coinbase: genesisConfig.Coinbase,
BlockNumber: new(big.Int).SetUint64(genesisConfig.Number),
EVMConfig: vm.Config{
Tracer: tracer,
Debug: ctx.GlobalBool(DebugFlag.Name) || ctx.GlobalBool(MachineFlag.Name),


@ -54,8 +54,8 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/discv5"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/nat"
"github.com/ethereum/go-ethereum/params"
"golang.org/x/net/websocket"
@ -199,6 +199,8 @@ type faucet struct {
keystore *keystore.KeyStore // Keystore containing the single signer
account accounts.Account // Account funding user faucet requests
head *types.Header // Current head header of the faucet
balance *big.Int // Current balance of the faucet
nonce uint64 // Current pending nonce of the faucet
price *big.Int // Current gas price to issue funds with
@ -253,9 +255,11 @@ func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network u
return nil, err
}
for _, boot := range enodes {
old, _ := discover.ParseNode(boot.String())
old, err := enode.ParseV4(boot.String())
if err == nil {
stack.Server().AddPeer(old)
}
}
// Attach to the client and retrieve any interesting metadata
api, err := stack.Attach()
if err != nil {
@ -324,33 +328,30 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
nonce uint64
err error
)
for {
// Attempt to retrieve the stats, may error on no faucet connectivity
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
head, err = f.client.HeaderByNumber(ctx, nil)
if err == nil {
balance, err = f.client.BalanceAt(ctx, f.account.Address, head.Number)
if err == nil {
nonce, err = f.client.NonceAt(ctx, f.account.Address, nil)
for head == nil || balance == nil {
// Retrieve the current stats cached by the faucet
f.lock.RLock()
if f.head != nil {
head = types.CopyHeader(f.head)
}
if f.balance != nil {
balance = new(big.Int).Set(f.balance)
}
cancel()
nonce = f.nonce
f.lock.RUnlock()
// If stats retrieval failed, wait a bit and retry
if err != nil {
if err = sendError(conn, errors.New("Faucet offline: "+err.Error())); err != nil {
if head == nil || balance == nil {
// Report the faucet offline until initial stats are ready
if err = sendError(conn, errors.New("Faucet offline")); err != nil {
log.Warn("Failed to send faucet error to client", "err", err)
return
}
time.Sleep(3 * time.Second)
continue
}
// Initial stats reported successfully, proceed with user interaction
break
}
// Send over the initial stats and the latest header
if err = send(conn, map[string]interface{}{
"funds": balance.Div(balance, ether),
"funds": new(big.Int).Div(balance, ether),
"funded": nonce,
"peers": f.stack.Server().PeerCount(),
"requests": f.reqs,
@ -520,6 +521,47 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
}
}
// refresh attempts to retrieve the latest header from the chain and extract the
// associated faucet balance and nonce for connectivity caching.
func (f *faucet) refresh(head *types.Header) error {
// Ensure a state update does not run for too long
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
// If no header was specified, use the current chain head
var err error
if head == nil {
if head, err = f.client.HeaderByNumber(ctx, nil); err != nil {
return err
}
}
// Retrieve the balance, nonce and gas price from the current head
var (
balance *big.Int
nonce uint64
price *big.Int
)
if balance, err = f.client.BalanceAt(ctx, f.account.Address, head.Number); err != nil {
return err
}
if nonce, err = f.client.NonceAt(ctx, f.account.Address, head.Number); err != nil {
return err
}
if price, err = f.client.SuggestGasPrice(ctx); err != nil {
return err
}
// Everything succeeded, update the cached stats and eject old requests
f.lock.Lock()
f.head, f.balance = head, balance
f.price, f.nonce = price, nonce
for len(f.reqs) > 0 && f.reqs[0].Tx.Nonce() < f.nonce {
f.reqs = f.reqs[1:]
}
f.lock.Unlock()
return nil
}
// loop keeps waiting for interesting events and pushes them out to connected
// websockets.
func (f *faucet) loop() {
@ -537,45 +579,27 @@ func (f *faucet) loop() {
go func() {
for head := range update {
// New chain head arrived, query the current stats and stream to clients
var (
balance *big.Int
nonce uint64
price *big.Int
err error
)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
balance, err = f.client.BalanceAt(ctx, f.account.Address, head.Number)
if err == nil {
nonce, err = f.client.NonceAt(ctx, f.account.Address, nil)
if err == nil {
price, err = f.client.SuggestGasPrice(ctx)
timestamp := time.Unix(head.Time.Int64(), 0)
if time.Since(timestamp) > time.Hour {
log.Warn("Skipping faucet refresh, head too old", "number", head.Number, "hash", head.Hash(), "age", common.PrettyAge(timestamp))
continue
}
}
cancel()
// If querying the data failed, try for the next block
if err != nil {
if err := f.refresh(head); err != nil {
log.Warn("Failed to update faucet state", "block", head.Number, "hash", head.Hash(), "err", err)
continue
} else {
log.Info("Updated faucet state", "block", head.Number, "hash", head.Hash(), "balance", balance, "nonce", nonce, "price", price)
}
// Faucet state retrieved, update locally and send to clients
balance = new(big.Int).Div(balance, ether)
f.lock.Lock()
f.price, f.nonce = price, nonce
for len(f.reqs) > 0 && f.reqs[0].Tx.Nonce() < f.nonce {
f.reqs = f.reqs[1:]
}
f.lock.Unlock()
f.lock.RLock()
log.Info("Updated faucet state", "number", head.Number, "hash", head.Hash(), "age", common.PrettyAge(timestamp), "balance", f.balance, "nonce", f.nonce, "price", f.price)
balance := new(big.Int).Div(f.balance, ether)
peers := f.stack.Server().PeerCount()
for _, conn := range f.conns {
if err := send(conn, map[string]interface{}{
"funds": balance,
"funded": f.nonce,
"peers": f.stack.Server().PeerCount(),
"peers": peers,
"requests": f.reqs,
}, time.Second); err != nil {
log.Warn("Failed to send stats to client", "err", err)


@ -340,9 +340,9 @@ func importPreimages(ctx *cli.Context) error {
start := time.Now()
if err := utils.ImportPreimages(diskdb, ctx.Args().First()); err != nil {
utils.Fatalf("Export error: %v\n", err)
utils.Fatalf("Import error: %v\n", err)
}
fmt.Printf("Export done in %v\n", time.Since(start))
fmt.Printf("Import done in %v\n", time.Since(start))
return nil
}


@ -130,6 +130,8 @@ var (
utils.NoCompactionFlag,
utils.GpoBlocksFlag,
utils.GpoPercentileFlag,
utils.EWASMInterpreterFlag,
utils.EVMInterpreterFlag,
configFileFlag,
}


@ -207,6 +207,8 @@ var AppHelpFlagGroups = []flagGroup{
Name: "VIRTUAL MACHINE",
Flags: []cli.Flag{
utils.VMEnableDebugFlag,
utils.EVMInterpreterFlag,
utils.EWASMInterpreterFlag,
},
},
{


@ -47,7 +47,7 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/simulations"
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
"github.com/ethereum/go-ethereum/rpc"
@ -285,7 +285,7 @@ func createNode(ctx *cli.Context) error {
if err != nil {
return err
}
config.ID = discover.PubkeyID(&privKey.PublicKey)
config.ID = enode.PubkeyToIDV4(&privKey.PublicKey)
config.PrivateKey = privKey
}
if services := ctx.String("services"); services != "" {


@ -92,7 +92,7 @@ func (w *wizard) deployDashboard() {
pages = append(pages, page)
}
}
// Promt the user to chose one, enter manually or simply not list this service
// Prompt the user to choose one, enter manually or simply not list this service
defLabel, defChoice := "don't list", len(pages)+2
if len(pages) > 0 {
defLabel, defChoice = pages[0], 1


@ -38,7 +38,7 @@ import (
"github.com/ethereum/go-ethereum/internal/debug"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/swarm"
bzzapi "github.com/ethereum/go-ethereum/swarm/api"
swarmmetrics "github.com/ethereum/go-ethereum/swarm/metrics"
@ -795,10 +795,10 @@ func setSwarmBootstrapNodes(ctx *cli.Context, cfg *node.Config) {
return
}
cfg.P2P.BootstrapNodes = []*discover.Node{}
cfg.P2P.BootstrapNodes = []*enode.Node{}
for _, url := range SwarmBootnodes {
node, err := discover.ParseNode(url)
node, err := enode.ParseV4(url)
if err != nil {
log.Error("Bootstrap URL invalid", "enode", url, "err", err)
}


@ -234,6 +234,7 @@ func existingTestNode(t *testing.T, dir string, bzzaccount string) *testNode {
// start the node
node.Cmd = runSwarm(t,
"--port", p2pPort,
"--nat", "extip:127.0.0.1",
"--nodiscover",
"--datadir", dir,
"--ipcpath", conf.IPCPath,
@ -241,7 +242,7 @@ func existingTestNode(t *testing.T, dir string, bzzaccount string) *testNode {
"--bzzaccount", bzzaccount,
"--bzznetworkid", "321",
"--bzzport", httpPort,
"--verbosity", "6",
"--verbosity", "3",
)
node.Cmd.InputLine(testPassphrase)
defer func() {
@ -284,8 +285,8 @@ func existingTestNode(t *testing.T, dir string, bzzaccount string) *testNode {
if err := node.Client.Call(&nodeInfo, "admin_nodeInfo"); err != nil {
t.Fatal(err)
}
node.Enode = fmt.Sprintf("enode://%s@127.0.0.1:%s", nodeInfo.ID, p2pPort)
node.Enode = nodeInfo.Enode
node.IpcPath = conf.IPCPath
return node
}
@ -309,6 +310,7 @@ func newTestNode(t *testing.T, dir string) *testNode {
// start the node
node.Cmd = runSwarm(t,
"--port", p2pPort,
"--nat", "extip:127.0.0.1",
"--nodiscover",
"--datadir", dir,
"--ipcpath", conf.IPCPath,
@ -316,7 +318,7 @@ func newTestNode(t *testing.T, dir string) *testNode {
"--bzzaccount", account.Address.String(),
"--bzznetworkid", "321",
"--bzzport", httpPort,
"--verbosity", "6",
"--verbosity", "3",
)
node.Cmd.InputLine(testPassphrase)
defer func() {
@ -359,9 +361,8 @@ func newTestNode(t *testing.T, dir string) *testNode {
if err := node.Client.Call(&nodeInfo, "admin_nodeInfo"); err != nil {
t.Fatal(err)
}
node.Enode = fmt.Sprintf("enode://%s@127.0.0.1:%s", nodeInfo.ID, p2pPort)
node.Enode = nodeInfo.Enode
node.IpcPath = conf.IPCPath
return node
}


@ -51,8 +51,8 @@ import (
"github.com/ethereum/go-ethereum/metrics/influxdb"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/discv5"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/nat"
"github.com/ethereum/go-ethereum/p2p/netutil"
"github.com/ethereum/go-ethereum/params"
@ -610,6 +610,17 @@ var (
Usage: "InfluxDB `host` tag attached to all measurements",
Value: "localhost",
}
EWASMInterpreterFlag = cli.StringFlag{
Name: "vm.ewasm",
Usage: "External ewasm configuration (default = built-in interpreter)",
Value: "",
}
EVMInterpreterFlag = cli.StringFlag{
Name: "vm.evm",
Usage: "External EVM configuration (default = built-in interpreter)",
Value: "",
}
)
// MakeDataDir retrieves the currently requested data directory, terminating
@ -681,9 +692,9 @@ func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) {
return // already set, don't apply defaults.
}
cfg.BootstrapNodes = make([]*discover.Node, 0, len(urls))
cfg.BootstrapNodes = make([]*enode.Node, 0, len(urls))
for _, url := range urls {
node, err := discover.ParseNode(url)
node, err := enode.ParseV4(url)
if err != nil {
log.Crit("Bootstrap URL invalid", "enode", url, "err", err)
}
@ -1184,6 +1195,14 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
cfg.EnablePreimageRecording = ctx.GlobalBool(VMEnableDebugFlag.Name)
}
if ctx.GlobalIsSet(EWASMInterpreterFlag.Name) {
cfg.EWASMInterpreter = ctx.GlobalString(EWASMInterpreterFlag.Name)
}
if ctx.GlobalIsSet(EVMInterpreterFlag.Name) {
cfg.EVMInterpreter = ctx.GlobalString(EVMInterpreterFlag.Name)
}
// Override any default configs for hard coded networks.
switch {
case ctx.GlobalBool(TestnetFlag.Name):
@ -1379,7 +1398,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
cache.TrieNodeLimit = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100
}
vmcfg := vm.Config{EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name)}
chain, err = core.NewBlockChain(chainDb, cache, config, engine, vmcfg)
chain, err = core.NewBlockChain(chainDb, cache, config, engine, vmcfg, nil)
if err != nil {
Fatalf("Can't create BlockChain: %v", err)
}


@ -41,7 +41,7 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/nat"
"github.com/ethereum/go-ethereum/whisper/mailserver"
whisper "github.com/ethereum/go-ethereum/whisper/whisperv6"
@ -175,7 +175,7 @@ func initialize() {
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*argVerbosity), log.StreamHandler(os.Stderr, log.TerminalFormat(false))))
done = make(chan struct{})
var peers []*discover.Node
var peers []*enode.Node
var err error
if *generateKey {
@ -203,7 +203,7 @@ func initialize() {
if len(*argEnode) == 0 {
argEnode = scanLineA("Please enter the peer's enode: ")
}
peer := discover.MustParseNode(*argEnode)
peer := enode.MustParseV4(*argEnode)
peers = append(peers, peer)
}
@ -747,11 +747,11 @@ func requestExpiredMessagesLoop() {
}
func extractIDFromEnode(s string) []byte {
n, err := discover.ParseNode(s)
n, err := enode.ParseV4(s)
if err != nil {
utils.Fatalf("Failed to parse enode: %s", err)
}
return n.ID[:]
return n.ID().Bytes()
}
// obfuscateBloom adds 16 random bits to the bloom


@ -38,3 +38,45 @@ func (d PrettyDuration) String() string {
}
return label
}
// PrettyAge is a pretty printed version of a time.Time value that rounds the
// age to its most significant units, days/weeks/years included.
type PrettyAge time.Time
// ageUnits is a list of units the age pretty printing uses.
var ageUnits = []struct {
Size time.Duration
Symbol string
}{
{12 * 30 * 24 * time.Hour, "y"},
{30 * 24 * time.Hour, "mo"},
{7 * 24 * time.Hour, "w"},
{24 * time.Hour, "d"},
{time.Hour, "h"},
{time.Minute, "m"},
{time.Second, "s"},
}
// String implements the Stringer interface, allowing pretty printing of duration
// values rounded to the most significant time unit.
func (t PrettyAge) String() string {
// Calculate the time difference and handle the 0 cornercase
diff := time.Since(time.Time(t))
if diff < time.Second {
return "0"
}
// Accumulate a precision of 3 components before returning
result, prec := "", 0
for _, unit := range ageUnits {
if diff > unit.Size {
result = fmt.Sprintf("%s%d%s", result, diff/unit.Size, unit.Symbol)
diff %= unit.Size
if prec += 1; prec >= 3 {
break
}
}
}
return result
}
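A minimal usage sketch of the new `common.PrettyAge` helper (assuming the usual `github.com/ethereum/go-ethereum/common` import path):

```go
// Sketch only: PrettyAge keeps at most three components of the age and
// prints "0" for anything under a second.
package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	born := time.Now().Add(-(time.Hour + 30*time.Minute + 20*time.Second))
	fmt.Println(common.PrettyAge(born))       // -> "1h30m20s"
	fmt.Println(common.PrettyAge(time.Now())) // -> "0"
}
```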


@ -134,11 +134,6 @@ var (
// errRecentlySigned is returned if a header is signed by an authorized entity
// that already signed a header recently, thus is temporarily not allowed to.
errRecentlySigned = errors.New("recently signed")
// errWaitTransactions is returned if an empty block is attempted to be sealed
// on an instant chain (0 second period). It's important to refuse these as the
// block reward is zero, so an empty block just bloats the chain... fast.
errWaitTransactions = errors.New("waiting for transactions")
)
// SignerFn is a signer callback function to request a hash to be signed by a
@ -615,7 +610,8 @@ func (c *Clique) Seal(chain consensus.ChainReader, block *types.Block, results c
}
// For 0-period chains, refuse to seal empty blocks (no reward but would spin sealing)
if c.config.Period == 0 && len(block.Transactions()) == 0 {
return errWaitTransactions
log.Info("Sealing paused, waiting for transactions")
return nil
}
// Don't hold the signer fields for the entire sealing procedure
c.lock.RLock()


@ -448,7 +448,7 @@ func TestClique(t *testing.T) {
batches[len(batches)-1] = append(batches[len(batches)-1], block)
}
// Pass all the headers through clique and ensure tallying succeeds
chain, err := core.NewBlockChain(db, nil, &config, engine, vm.Config{})
chain, err := core.NewBlockChain(db, nil, &config, engine, vm.Config{}, nil)
if err != nil {
t.Errorf("test %d: failed to create test chain: %v", i, err)
continue


@ -175,7 +175,7 @@ func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) {
// Time the insertion of the new chain.
// State and blocks are stored in the same DB.
chainman, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{})
chainman, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
defer chainman.Stop()
b.ReportAllocs()
b.ResetTimer()
@ -287,7 +287,7 @@ func benchReadChain(b *testing.B, full bool, count uint64) {
if err != nil {
b.Fatalf("error opening database at %v: %v", dir, err)
}
chain, err := NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{})
chain, err := NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil)
if err != nil {
b.Fatalf("error creating chain: %v", err)
}


@ -42,7 +42,7 @@ func TestHeaderVerification(t *testing.T) {
headers[i] = block.Header()
}
// Run the header checker for blocks one-by-one, checking for both valid and invalid nonces
chain, _ := NewBlockChain(testdb, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{})
chain, _ := NewBlockChain(testdb, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil)
defer chain.Stop()
for i := 0; i < len(blocks); i++ {
@ -106,11 +106,11 @@ func testHeaderConcurrentVerification(t *testing.T, threads int) {
var results <-chan error
if valid {
chain, _ := NewBlockChain(testdb, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{})
chain, _ := NewBlockChain(testdb, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil)
_, results = chain.engine.VerifyHeaders(chain, headers, seals)
chain.Stop()
} else {
chain, _ := NewBlockChain(testdb, nil, params.TestChainConfig, ethash.NewFakeFailer(uint64(len(headers)-1)), vm.Config{})
chain, _ := NewBlockChain(testdb, nil, params.TestChainConfig, ethash.NewFakeFailer(uint64(len(headers)-1)), vm.Config{}, nil)
_, results = chain.engine.VerifyHeaders(chain, headers, seals)
chain.Stop()
}
@ -173,7 +173,7 @@ func testHeaderConcurrentAbortion(t *testing.T, threads int) {
defer runtime.GOMAXPROCS(old)
// Start the verifications and immediately abort
chain, _ := NewBlockChain(testdb, nil, params.TestChainConfig, ethash.NewFakeDelayer(time.Millisecond), vm.Config{})
chain, _ := NewBlockChain(testdb, nil, params.TestChainConfig, ethash.NewFakeDelayer(time.Millisecond), vm.Config{}, nil)
defer chain.Stop()
abort, results := chain.engine.VerifyHeaders(chain, headers, seals)


@ -129,12 +129,13 @@ type BlockChain struct {
vmConfig vm.Config
badBlocks *lru.Cache // Bad block cache
shouldPreserve func(*types.Block) bool // Function used to determine whether the given block should be preserved.
}
// NewBlockChain returns a fully initialised block chain using information
// available in the database. It initialises the default Ethereum Validator and
// Processor.
func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config) (*BlockChain, error) {
func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool) (*BlockChain, error) {
if cacheConfig == nil {
cacheConfig = &CacheConfig{
TrieNodeLimit: 256 * 1024 * 1024,
@ -154,6 +155,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
triegc: prque.New(nil),
stateCache: state.NewDatabase(db),
quit: make(chan struct{}),
shouldPreserve: shouldPreserve,
bodyCache: bodyCache,
bodyRLPCache: bodyRLPCache,
blockCache: blockCache,
@ -251,9 +253,9 @@ func (bc *BlockChain) loadLastState() error {
blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())
log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd)
log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd)
log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd)
log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(currentHeader.Time.Int64(), 0)))
log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(currentBlock.Time().Int64(), 0)))
log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(currentFastBlock.Time().Int64(), 0)))
return nil
}
@ -850,13 +852,16 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
}
bc.mu.Unlock()
log.Info("Imported new block receipts",
"count", stats.processed,
"elapsed", common.PrettyDuration(time.Since(start)),
"number", head.Number(),
"hash", head.Hash(),
context := []interface{}{
"count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
"number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(head.Time().Int64(), 0)),
"size", common.StorageSize(bytes),
"ignored", stats.ignored)
}
if stats.ignored > 0 {
context = append(context, []interface{}{"ignored", stats.ignored}...)
}
log.Info("Imported new block receipts", context...)
return 0, nil
}
@ -964,8 +969,17 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
reorg := externTd.Cmp(localTd) > 0
currentBlock = bc.CurrentBlock()
if !reorg && externTd.Cmp(localTd) == 0 {
// Split same-difficulty blocks by number, then at random
reorg = block.NumberU64() < currentBlock.NumberU64() || (block.NumberU64() == currentBlock.NumberU64() && mrand.Float64() < 0.5)
// Split same-difficulty blocks by number, then preferentially select
// the block generated by the local miner as the canonical block.
if block.NumberU64() < currentBlock.NumberU64() {
reorg = true
} else if block.NumberU64() == currentBlock.NumberU64() {
var currentPreserve, blockPreserve bool
if bc.shouldPreserve != nil {
currentPreserve, blockPreserve = bc.shouldPreserve(currentBlock), bc.shouldPreserve(block)
}
reorg = !currentPreserve && (blockPreserve || mrand.Float64() < 0.5)
}
}
if reorg {
// Reorganise the chain if the parent is not the head block
@ -1229,8 +1243,13 @@ func (st *insertStats) report(chain []*types.Block, index int, cache common.Stor
context := []interface{}{
"blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000,
"elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
"number", end.Number(), "hash", end.Hash(), "cache", cache,
"number", end.Number(), "hash", end.Hash(),
}
if timestamp := time.Unix(end.Time().Int64(), 0); time.Since(timestamp) > time.Minute {
context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
}
context = append(context, []interface{}{"cache", cache}...)
if st.queued > 0 {
context = append(context, []interface{}{"queued", st.queued}...)
}
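Every call site touched by this commit passes `nil` for the new `shouldPreserve` argument; below is a hedged sketch of what a non-nil tie-breaker might look like (the etherbase address is a placeholder, and the in-memory setup mirrors the tests above):

```go
// Sketch only: favour locally mined blocks when two chains have equal
// total difficulty, mirroring the new tie-break logic above.
package main

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	db := ethdb.NewMemDatabase()
	new(core.Genesis).MustCommit(db)

	myEtherbase := common.HexToAddress("0x0000000000000000000000000000000000000001") // placeholder
	preserve := func(block *types.Block) bool {
		return block.Coinbase() == myEtherbase
	}

	chain, err := core.NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, preserve)
	if err != nil {
		log.Crit("Can't create BlockChain", "err", err)
	}
	defer chain.Stop()
}
```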


@ -52,7 +52,7 @@ func newCanonical(engine consensus.Engine, n int, full bool) (ethdb.Database, *B
)
// Initialize a fresh chain with only a genesis block
blockchain, _ := NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{})
blockchain, _ := NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil)
// Create and inject the requested chain
if n == 0 {
return db, blockchain, nil
@ -523,7 +523,7 @@ func testReorgBadHashes(t *testing.T, full bool) {
blockchain.Stop()
// Create a new BlockChain and check that it rolled back the state.
ncm, err := NewBlockChain(blockchain.db, nil, blockchain.chainConfig, ethash.NewFaker(), vm.Config{})
ncm, err := NewBlockChain(blockchain.db, nil, blockchain.chainConfig, ethash.NewFaker(), vm.Config{}, nil)
if err != nil {
t.Fatalf("failed to create new chain manager: %v", err)
}
@ -635,7 +635,7 @@ func TestFastVsFullChains(t *testing.T) {
// Import the chain as an archive node for the comparison baseline
archiveDb := ethdb.NewMemDatabase()
gspec.MustCommit(archiveDb)
archive, _ := NewBlockChain(archiveDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{})
archive, _ := NewBlockChain(archiveDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
defer archive.Stop()
if n, err := archive.InsertChain(blocks); err != nil {
@ -644,7 +644,7 @@ func TestFastVsFullChains(t *testing.T) {
// Fast import the chain as a non-archive node to test
fastDb := ethdb.NewMemDatabase()
gspec.MustCommit(fastDb)
fast, _ := NewBlockChain(fastDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{})
fast, _ := NewBlockChain(fastDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
defer fast.Stop()
headers := make([]*types.Header, len(blocks))
@ -722,7 +722,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
archiveDb := ethdb.NewMemDatabase()
gspec.MustCommit(archiveDb)
archive, _ := NewBlockChain(archiveDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{})
archive, _ := NewBlockChain(archiveDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
if n, err := archive.InsertChain(blocks); err != nil {
t.Fatalf("failed to process block %d: %v", n, err)
}
@ -735,7 +735,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
// Import the chain as a non-archive node and ensure all pointers are updated
fastDb := ethdb.NewMemDatabase()
gspec.MustCommit(fastDb)
fast, _ := NewBlockChain(fastDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{})
fast, _ := NewBlockChain(fastDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
defer fast.Stop()
headers := make([]*types.Header, len(blocks))
@ -756,7 +756,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
lightDb := ethdb.NewMemDatabase()
gspec.MustCommit(lightDb)
light, _ := NewBlockChain(lightDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{})
light, _ := NewBlockChain(lightDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
if n, err := light.InsertHeaderChain(headers, 1); err != nil {
t.Fatalf("failed to insert header %d: %v", n, err)
}
@ -825,7 +825,7 @@ func TestChainTxReorgs(t *testing.T) {
}
})
// Import the chain. This runs all block validation rules.
blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{})
blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
if i, err := blockchain.InsertChain(chain); err != nil {
t.Fatalf("failed to insert original chain[%d]: %v", i, err)
}
@ -896,7 +896,7 @@ func TestLogReorgs(t *testing.T) {
signer = types.NewEIP155Signer(gspec.Config.ChainID)
)
blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{})
blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
defer blockchain.Stop()
rmLogsCh := make(chan RemovedLogsEvent)
@ -943,7 +943,7 @@ func TestReorgSideEvent(t *testing.T) {
signer = types.NewEIP155Signer(gspec.Config.ChainID)
)
blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{})
blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
defer blockchain.Stop()
chain, _ := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, 3, func(i int, gen *BlockGen) {})
@ -1072,7 +1072,7 @@ func TestEIP155Transition(t *testing.T) {
genesis = gspec.MustCommit(db)
)
blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{})
blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
defer blockchain.Stop()
blocks, _ := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, 4, func(i int, block *BlockGen) {
@ -1179,7 +1179,7 @@ func TestEIP161AccountRemoval(t *testing.T) {
}
genesis = gspec.MustCommit(db)
)
blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{})
blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
defer blockchain.Stop()
blocks, _ := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, 3, func(i int, block *BlockGen) {
@ -1254,7 +1254,7 @@ func TestBlockchainHeaderchainReorgConsistency(t *testing.T) {
diskdb := ethdb.NewMemDatabase()
new(Genesis).MustCommit(diskdb)
chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{})
chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@ -1298,7 +1298,7 @@ func TestTrieForkGC(t *testing.T) {
diskdb := ethdb.NewMemDatabase()
new(Genesis).MustCommit(diskdb)
chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{})
chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@ -1337,7 +1337,7 @@ func TestLargeReorgTrieGC(t *testing.T) {
diskdb := ethdb.NewMemDatabase()
new(Genesis).MustCommit(diskdb)
chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{})
chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@ -1419,7 +1419,7 @@ func benchmarkLargeNumberOfValueToNonexisting(b *testing.B, numTxs, numBlocks in
diskdb := ethdb.NewMemDatabase()
gspec.MustCommit(diskdb)
chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{})
chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil)
if err != nil {
b.Fatalf("failed to create tester chain: %v", err)
}


@ -177,7 +177,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
genblock := func(i int, parent *types.Block, statedb *state.StateDB) (*types.Block, types.Receipts) {
// TODO(karalabe): This is needed for clique, which depends on multiple blocks.
// It's nonetheless ugly to spin up a blockchain here. Get rid of this somehow.
blockchain, _ := NewBlockChain(db, nil, config, engine, vm.Config{})
blockchain, _ := NewBlockChain(db, nil, config, engine, vm.Config{}, nil)
defer blockchain.Stop()
b := &BlockGen{i: i, parent: parent, chain: blocks, chainReader: blockchain, statedb: statedb, config: config, engine: engine}


@ -79,7 +79,7 @@ func ExampleGenerateChain() {
})
// Import the chain. This runs all block validation rules.
blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{})
blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
defer blockchain.Stop()
if i, err := blockchain.InsertChain(chain); err != nil {


@ -45,7 +45,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
proConf.DAOForkBlock = forkBlock
proConf.DAOForkSupport = true
proBc, _ := NewBlockChain(proDb, nil, &proConf, ethash.NewFaker(), vm.Config{})
proBc, _ := NewBlockChain(proDb, nil, &proConf, ethash.NewFaker(), vm.Config{}, nil)
defer proBc.Stop()
conDb := ethdb.NewMemDatabase()
@ -55,7 +55,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
conConf.DAOForkBlock = forkBlock
conConf.DAOForkSupport = false
conBc, _ := NewBlockChain(conDb, nil, &conConf, ethash.NewFaker(), vm.Config{})
conBc, _ := NewBlockChain(conDb, nil, &conConf, ethash.NewFaker(), vm.Config{}, nil)
defer conBc.Stop()
if _, err := proBc.InsertChain(prefix); err != nil {
@ -69,7 +69,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
// Create a pro-fork block, and try to feed into the no-fork chain
db = ethdb.NewMemDatabase()
gspec.MustCommit(db)
bc, _ := NewBlockChain(db, nil, &conConf, ethash.NewFaker(), vm.Config{})
bc, _ := NewBlockChain(db, nil, &conConf, ethash.NewFaker(), vm.Config{}, nil)
defer bc.Stop()
blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().NumberU64()))
@ -94,7 +94,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
// Create a no-fork block, and try to feed into the pro-fork chain
db = ethdb.NewMemDatabase()
gspec.MustCommit(db)
bc, _ = NewBlockChain(db, nil, &proConf, ethash.NewFaker(), vm.Config{})
bc, _ = NewBlockChain(db, nil, &proConf, ethash.NewFaker(), vm.Config{}, nil)
defer bc.Stop()
blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().NumberU64()))
@ -120,7 +120,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
// Verify that contra-forkers accept pro-fork extra-datas after forking finishes
db = ethdb.NewMemDatabase()
gspec.MustCommit(db)
bc, _ := NewBlockChain(db, nil, &conConf, ethash.NewFaker(), vm.Config{})
bc, _ := NewBlockChain(db, nil, &conConf, ethash.NewFaker(), vm.Config{}, nil)
defer bc.Stop()
blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().NumberU64()))
@ -140,7 +140,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
// Verify that pro-forkers accept contra-fork extra-datas after forking finishes
db = ethdb.NewMemDatabase()
gspec.MustCommit(db)
bc, _ = NewBlockChain(db, nil, &proConf, ethash.NewFaker(), vm.Config{})
bc, _ = NewBlockChain(db, nil, &proConf, ethash.NewFaker(), vm.Config{}, nil)
defer bc.Stop()
blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().NumberU64()))


@ -120,7 +120,7 @@ func TestSetupGenesis(t *testing.T) {
// Advance to block #4, past the homestead transition block of customg.
genesis := oldcustomg.MustCommit(db)
bc, _ := NewBlockChain(db, nil, oldcustomg.Config, ethash.NewFullFaker(), vm.Config{})
bc, _ := NewBlockChain(db, nil, oldcustomg.Config, ethash.NewFullFaker(), vm.Config{}, nil)
defer bc.Stop()
blocks, _ := GenerateChain(oldcustomg.Config, genesis, ethash.NewFaker(), db, 4, nil)


@ -281,8 +281,18 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, writeHeader WhCa
}
// Report some public statistics so the user has a clue what's going on
last := chain[len(chain)-1]
log.Info("Imported new block headers", "count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
"number", last.Number, "hash", last.Hash(), "ignored", stats.ignored)
context := []interface{}{
"count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
"number", last.Number, "hash", last.Hash(),
}
if timestamp := time.Unix(last.Time.Int64(), 0); time.Since(timestamp) > time.Minute {
context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
}
if stats.ignored > 0 {
context = append(context, []interface{}{"ignored", stats.ignored}...)
}
log.Info("Imported new block headers", context...)
return 0, nil
}


@ -77,7 +77,7 @@ type stateObject struct {
trie Trie // storage trie, which becomes non-nil on first access
code Code // contract bytecode, which gets set when code is loaded
cachedStorage Storage // Storage entry cache to avoid duplicate reads
originStorage Storage // Storage cache of original entries to dedup rewrites
dirtyStorage Storage // Storage entries that need to be flushed to disk
// Cache flags.
@ -115,7 +115,7 @@ func newObject(db *StateDB, address common.Address, data Account) *stateObject {
address: address,
addrHash: crypto.Keccak256Hash(address[:]),
data: data,
cachedStorage: make(Storage),
originStorage: make(Storage),
dirtyStorage: make(Storage),
}
}
@ -159,13 +159,25 @@ func (c *stateObject) getTrie(db Database) Trie {
return c.trie
}
// GetState returns a value in account storage.
// GetState retrieves a value from the account storage trie.
func (self *stateObject) GetState(db Database, key common.Hash) common.Hash {
value, exists := self.cachedStorage[key]
if exists {
// If we have a dirty value for this state entry, return it
value, dirty := self.dirtyStorage[key]
if dirty {
return value
}
// Load from DB in case it is missing.
// Otherwise return the entry's original value
return self.GetCommittedState(db, key)
}
// GetCommittedState retrieves a value from the committed account storage trie.
func (self *stateObject) GetCommittedState(db Database, key common.Hash) common.Hash {
// If we have the original value cached, return that
value, cached := self.originStorage[key]
if cached {
return value
}
// Otherwise load the value from the database
enc, err := self.getTrie(db).TryGet(key[:])
if err != nil {
self.setError(err)
@ -178,22 +190,27 @@ func (self *stateObject) GetState(db Database, key common.Hash) common.Hash {
}
value.SetBytes(content)
}
self.cachedStorage[key] = value
self.originStorage[key] = value
return value
}
// SetState updates a value in account storage.
func (self *stateObject) SetState(db Database, key, value common.Hash) {
// If the new value is the same as old, don't set
prev := self.GetState(db, key)
if prev == value {
return
}
// New value is different, update and journal the change
self.db.journal.append(storageChange{
account: &self.address,
key: key,
prevalue: self.GetState(db, key),
prevalue: prev,
})
self.setState(key, value)
}
func (self *stateObject) setState(key, value common.Hash) {
self.cachedStorage[key] = value
self.dirtyStorage[key] = value
}
@ -202,6 +219,13 @@ func (self *stateObject) updateTrie(db Database) Trie {
tr := self.getTrie(db)
for key, value := range self.dirtyStorage {
delete(self.dirtyStorage, key)
// Skip noop changes, persist actual changes
if value == self.originStorage[key] {
continue
}
self.originStorage[key] = value
if (value == common.Hash{}) {
self.setError(tr.TryDelete(key[:]))
continue
@ -279,7 +303,7 @@ func (self *stateObject) deepCopy(db *StateDB) *stateObject {
}
stateObject.code = self.code
stateObject.dirtyStorage = self.dirtyStorage.Copy()
stateObject.cachedStorage = self.dirtyStorage.Copy()
stateObject.originStorage = self.originStorage.Copy()
stateObject.suicided = self.suicided
stateObject.dirtyCode = self.dirtyCode
stateObject.deleted = self.deleted


@ -96,11 +96,15 @@ func (s *StateSuite) TestNull(c *checker.C) {
s.state.CreateAccount(address)
//value := common.FromHex("0x823140710bf13990e4500136726d8b55")
var value common.Hash
s.state.SetState(address, common.Hash{}, value)
s.state.Commit(false)
value = s.state.GetState(address, common.Hash{})
if value != (common.Hash{}) {
c.Errorf("expected empty hash. got %x", value)
if value := s.state.GetState(address, common.Hash{}); value != (common.Hash{}) {
c.Errorf("expected empty current value, got %x", value)
}
if value := s.state.GetCommittedState(address, common.Hash{}); value != (common.Hash{}) {
c.Errorf("expected empty committed value, got %x", value)
}
}
@ -110,20 +114,24 @@ func (s *StateSuite) TestSnapshot(c *checker.C) {
data1 := common.BytesToHash([]byte{42})
data2 := common.BytesToHash([]byte{43})
// snapshot the genesis state
genesis := s.state.Snapshot()
// set initial state object value
s.state.SetState(stateobjaddr, storageaddr, data1)
// get snapshot of current state
snapshot := s.state.Snapshot()
// set new state object value
// set a new state object value, revert it and ensure correct content
s.state.SetState(stateobjaddr, storageaddr, data2)
// restore snapshot
s.state.RevertToSnapshot(snapshot)
// get state storage value
res := s.state.GetState(stateobjaddr, storageaddr)
c.Assert(s.state.GetState(stateobjaddr, storageaddr), checker.DeepEquals, data1)
c.Assert(s.state.GetCommittedState(stateobjaddr, storageaddr), checker.DeepEquals, common.Hash{})
c.Assert(data1, checker.DeepEquals, res)
// revert up to the genesis state and ensure correct content
s.state.RevertToSnapshot(genesis)
c.Assert(s.state.GetState(stateobjaddr, storageaddr), checker.DeepEquals, common.Hash{})
c.Assert(s.state.GetCommittedState(stateobjaddr, storageaddr), checker.DeepEquals, common.Hash{})
}
func (s *StateSuite) TestSnapshotEmpty(c *checker.C) {
@ -208,24 +216,30 @@ func compareStateObjects(so0, so1 *stateObject, t *testing.T) {
t.Fatalf("Code mismatch: have %v, want %v", so0.code, so1.code)
}
if len(so1.cachedStorage) != len(so0.cachedStorage) {
t.Errorf("Storage size mismatch: have %d, want %d", len(so1.cachedStorage), len(so0.cachedStorage))
if len(so1.dirtyStorage) != len(so0.dirtyStorage) {
t.Errorf("Dirty storage size mismatch: have %d, want %d", len(so1.dirtyStorage), len(so0.dirtyStorage))
}
for k, v := range so1.cachedStorage {
if so0.cachedStorage[k] != v {
t.Errorf("Storage key %x mismatch: have %v, want %v", k, so0.cachedStorage[k], v)
for k, v := range so1.dirtyStorage {
if so0.dirtyStorage[k] != v {
t.Errorf("Dirty storage key %x mismatch: have %v, want %v", k, so0.dirtyStorage[k], v)
}
}
for k, v := range so0.cachedStorage {
if so1.cachedStorage[k] != v {
t.Errorf("Storage key %x mismatch: have %v, want none.", k, v)
for k, v := range so0.dirtyStorage {
if so1.dirtyStorage[k] != v {
t.Errorf("Dirty storage key %x mismatch: have %v, want none.", k, v)
}
}
if so0.suicided != so1.suicided {
t.Fatalf("suicided mismatch: have %v, want %v", so0.suicided, so1.suicided)
if len(so1.originStorage) != len(so0.originStorage) {
t.Errorf("Origin storage size mismatch: have %d, want %d", len(so1.originStorage), len(so0.originStorage))
}
for k, v := range so1.originStorage {
if so0.originStorage[k] != v {
t.Errorf("Origin storage key %x mismatch: have %v, want %v", k, so0.originStorage[k], v)
}
}
for k, v := range so0.originStorage {
if so1.originStorage[k] != v {
t.Errorf("Origin storage key %x mismatch: have %v, want none.", k, v)
}
if so0.deleted != so1.deleted {
t.Fatalf("Deleted mismatch: have %v, want %v", so0.deleted, so1.deleted)
}
}

View File

@ -169,11 +169,22 @@ func (self *StateDB) Preimages() map[common.Hash][]byte {
return self.preimages
}
// AddRefund adds gas to the refund counter
func (self *StateDB) AddRefund(gas uint64) {
self.journal.append(refundChange{prev: self.refund})
self.refund += gas
}
// SubRefund removes gas from the refund counter.
// This method will panic if the refund counter goes below zero
func (self *StateDB) SubRefund(gas uint64) {
self.journal.append(refundChange{prev: self.refund})
if gas > self.refund {
panic("Refund counter below zero")
}
self.refund -= gas
}
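A small usage sketch (the statedb variable is hypothetical) of the new refund-counter pair: SubRefund journals the change exactly like AddRefund, but refuses to let the counter underflow:

// statedb is assumed to be a *StateDB whose refund counter starts at zero.
statedb.AddRefund(params.NetSstoreClearRefund) // counter: 0 -> 15000
statedb.SubRefund(params.NetSstoreClearRefund) // counter: 15000 -> 0
// statedb.SubRefund(1)                        // would panic: "Refund counter below zero"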
// Exist reports whether the given account address exists in the state.
// Notably this also returns true for suicided accounts.
func (self *StateDB) Exist(addr common.Address) bool {
@ -236,10 +247,20 @@ func (self *StateDB) GetCodeHash(addr common.Address) common.Hash {
return common.BytesToHash(stateObject.CodeHash())
}
func (self *StateDB) GetState(addr common.Address, bhash common.Hash) common.Hash {
// GetState retrieves a value from the given account's storage trie.
func (self *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash {
stateObject := self.getStateObject(addr)
if stateObject != nil {
return stateObject.GetState(self.db, bhash)
return stateObject.GetState(self.db, hash)
}
return common.Hash{}
}
// GetCommittedState retrieves a value from the given account's committed storage trie.
func (self *StateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash {
stateObject := self.getStateObject(addr)
if stateObject != nil {
return stateObject.GetCommittedState(self.db, hash)
}
return common.Hash{}
}
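A rough sketch (state, addr and key are hypothetical) of what the new accessor adds: GetState sees writes made in the current execution context, while GetCommittedState reports the value as of the last commit:

// Assume addr was committed earlier with an empty slot at key.
state.SetState(addr, key, common.BytesToHash([]byte{0x01}))
cur := state.GetState(addr, key)          // reflects the pending write: ...01
org := state.GetCommittedState(addr, key) // still the committed value: common.Hash{}
_, _ = cur, org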
@ -435,19 +456,14 @@ func (db *StateDB) ForEachStorage(addr common.Address, cb func(key, value common
if so == nil {
return
}
// When iterating over the storage check the cache first
for h, value := range so.cachedStorage {
cb(h, value)
}
it := trie.NewIterator(so.getTrie(db.db).NodeIterator(nil))
for it.Next() {
// ignore cached values
key := common.BytesToHash(db.trie.GetKey(it.Key))
if _, ok := so.cachedStorage[key]; !ok {
cb(key, common.BytesToHash(it.Value))
if value, dirty := so.dirtyStorage[key]; dirty {
cb(key, value)
continue
}
cb(key, common.BytesToHash(it.Value))
}
}

View File

@ -381,11 +381,11 @@ func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error {
checkeq("GetCodeSize", state.GetCodeSize(addr), checkstate.GetCodeSize(addr))
// Check storage.
if obj := state.getStateObject(addr); obj != nil {
state.ForEachStorage(addr, func(key, val common.Hash) bool {
return checkeq("GetState("+key.Hex()+")", val, checkstate.GetState(addr, key))
state.ForEachStorage(addr, func(key, value common.Hash) bool {
return checkeq("GetState("+key.Hex()+")", checkstate.GetState(addr, key), value)
})
checkstate.ForEachStorage(addr, func(key, checkval common.Hash) bool {
return checkeq("GetState("+key.Hex()+")", state.GetState(addr, key), checkval)
checkstate.ForEachStorage(addr, func(key, value common.Hash) bool {
return checkeq("GetState("+key.Hex()+")", checkstate.GetState(addr, key), value)
})
}
if err != nil {

View File

@ -525,7 +525,7 @@ func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common
return pending, queued
}
// Pending retrieves all currently processable transactions, groupped by origin
// Pending retrieves all currently processable transactions, grouped by origin
// account and sorted by nonce. The returned transaction set is a copy and can be
// freely modified by calling code.
func (pool *TxPool) Pending() (map[common.Address]types.Transactions, error) {
@ -547,7 +547,7 @@ func (pool *TxPool) Locals() []common.Address {
return pool.locals.flatten()
}
// local retrieves all currently known local transactions, groupped by origin
// local retrieves all currently known local transactions, grouped by origin
// account and sorted by nonce. The returned transaction set is a copy and can be
// freely modified by calling code.
func (pool *TxPool) local() map[common.Address]types.Transactions {

View File

@ -136,10 +136,28 @@ func NewEVM(ctx Context, statedb StateDB, chainConfig *params.ChainConfig, vmCon
vmConfig: vmConfig,
chainConfig: chainConfig,
chainRules: chainConfig.Rules(ctx.BlockNumber),
interpreters: make([]Interpreter, 1),
interpreters: make([]Interpreter, 0, 1),
}
evm.interpreters[0] = NewEVMInterpreter(evm, vmConfig)
if chainConfig.IsEWASM(ctx.BlockNumber) {
// to be implemented by EVM-C and Wagon PRs.
// if vmConfig.EWASMInterpreter != "" {
// extIntOpts := strings.Split(vmConfig.EWASMInterpreter, ":")
// path := extIntOpts[0]
// options := []string{}
// if len(extIntOpts) > 1 {
// options = extIntOpts[1..]
// }
// evm.interpreters = append(evm.interpreters, NewEVMVCInterpreter(evm, vmConfig, options))
// } else {
// evm.interpreters = append(evm.interpreters, NewEWASMInterpreter(evm, vmConfig))
// }
panic("No supported ewasm interpreter yet.")
}
// vmConfig.EVMInterpreter will be used by EVM-C, it won't be checked here
// as we always want to have the built-in EVM as the failover option.
evm.interpreters = append(evm.interpreters, NewEVMInterpreter(evm, vmConfig))
evm.interpreter = evm.interpreters[0]
return evm

View File

@ -118,24 +118,69 @@ func gasReturnDataCopy(gt params.GasTable, evm *EVM, contract *Contract, stack *
func gasSStore(gt params.GasTable, evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
var (
y, x = stack.Back(1), stack.Back(0)
val = evm.StateDB.GetState(contract.Address(), common.BigToHash(x))
current = evm.StateDB.GetState(contract.Address(), common.BigToHash(x))
)
// This checks for 3 scenario's and calculates gas accordingly
// The legacy gas metering only takes into consideration the current state
if !evm.chainRules.IsConstantinople {
// This checks for 3 scenarios and calculates gas accordingly:
//
// 1. From a zero-value address to a non-zero value (NEW VALUE)
// 2. From a non-zero value address to a zero-value address (DELETE)
// 3. From a non-zero to a non-zero (CHANGE)
if val == (common.Hash{}) && y.Sign() != 0 {
// 0 => non 0
switch {
case current == (common.Hash{}) && y.Sign() != 0: // 0 => non 0
return params.SstoreSetGas, nil
} else if val != (common.Hash{}) && y.Sign() == 0 {
// non 0 => 0
case current != (common.Hash{}) && y.Sign() == 0: // non 0 => 0
evm.StateDB.AddRefund(params.SstoreRefundGas)
return params.SstoreClearGas, nil
} else {
// non 0 => non 0 (or 0 => 0)
default: // non 0 => non 0 (or 0 => 0)
return params.SstoreResetGas, nil
}
}
// The new gas metering is based on net gas costs (EIP-1283):
//
// 1. If current value equals new value (this is a no-op), 200 gas is deducted.
// 2. If current value does not equal new value
// 2.1. If original value equals current value (this storage slot has not been changed by the current execution context)
// 2.1.1. If original value is 0, 20000 gas is deducted.
// 2.1.2. Otherwise, 5000 gas is deducted. If new value is 0, add 15000 gas to refund counter.
// 2.2. If original value does not equal current value (this storage slot is dirty), 200 gas is deducted. Apply both of the following clauses.
// 2.2.1. If original value is not 0
// 2.2.1.1. If current value is 0 (also means that new value is not 0), remove 15000 gas from refund counter. We can prove that refund counter will never go below 0.
// 2.2.1.2. If new value is 0 (also means that current value is not 0), add 15000 gas to refund counter.
// 2.2.2. If original value equals new value (this storage slot is reset)
// 2.2.2.1. If original value is 0, add 19800 gas to refund counter.
// 2.2.2.2. Otherwise, add 4800 gas to refund counter.
value := common.BigToHash(y)
if current == value { // noop (1)
return params.NetSstoreNoopGas, nil
}
original := evm.StateDB.GetCommittedState(contract.Address(), common.BigToHash(x))
if original == current {
if original == (common.Hash{}) { // create slot (2.1.1)
return params.NetSstoreInitGas, nil
}
if value == (common.Hash{}) { // delete slot (2.1.2b)
evm.StateDB.AddRefund(params.NetSstoreClearRefund)
}
return params.NetSstoreCleanGas, nil // write existing slot (2.1.2)
}
if original != (common.Hash{}) {
if current == (common.Hash{}) { // recreate slot (2.2.1.1)
evm.StateDB.SubRefund(params.NetSstoreClearRefund)
} else if value == (common.Hash{}) { // delete slot (2.2.1.2)
evm.StateDB.AddRefund(params.NetSstoreClearRefund)
}
}
if original == value {
if original == (common.Hash{}) { // reset to original inexistent slot (2.2.2.1)
evm.StateDB.AddRefund(params.NetSstoreResetClearRefund)
} else { // reset to original existing slot (2.2.2.2)
evm.StateDB.AddRefund(params.NetSstoreResetRefund)
}
}
return params.NetSstoreDirtyGas, nil
}
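A hedged worked example of the EIP-1283 branches above; the gas and refund figures are taken straight from the numbered rules rather than re-derived:

// original=0, current=0, new=1 -> rule 2.1.1: 20000 gas (NetSstoreInitGas)
// original=1, current=1, new=0 -> rule 2.1.2: 5000 gas, refund +15000
// original=1, current=0, new=1 -> rules 2.2.1.1 + 2.2.2.2: 200 gas, refund -15000 +4800
// original=0, current=1, new=0 -> rule 2.2.2.1: 200 gas, refund +19800
// original=1, current=2, new=2 -> rule 1 (no-op): 200 gas (NetSstoreNoopGas)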
func makeGasLog(n uint64) gasFunc {
return func(gt params.GasTable, evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {

View File

@ -727,7 +727,7 @@ func opCreate2(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memo
}
func opCall(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
// Pop gas. The actual gas in in interpreter.evm.callGasTemp.
// Pop gas. The actual gas is in interpreter.evm.callGasTemp.
interpreter.intPool.put(stack.pop())
gas := interpreter.evm.callGasTemp
// Pop other call parameters.

View File

@ -40,8 +40,10 @@ type StateDB interface {
GetCodeSize(common.Address) int
AddRefund(uint64)
SubRefund(uint64)
GetRefund() uint64
GetCommittedState(common.Address, common.Hash) common.Hash
GetState(common.Address, common.Hash) common.Hash
SetState(common.Address, common.Hash, common.Hash)

View File

@ -39,6 +39,11 @@ type Config struct {
// may be left uninitialised and will be set to the default
// table.
JumpTable [256]operation
// Type of the EWASM interpreter
EWASMInterpreter string
// Type of the EVM interpreter
EVMInterpreter string
}
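A minimal sketch of how these new Config fields might be set; the string contents are placeholders, since the external interpreters themselves are still pending (see the eWASM branch in NewEVM above):

cfg := vm.Config{
	// Empty strings keep the built-in interpreters; non-empty values are
	// intended for external "path:options" selectors once they are supported.
	EWASMInterpreter: "",
	EVMInterpreter:   "",
}
_ = cfg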
// Interpreter is used to run Ethereum based contracts and will utilise the

View File

@ -41,11 +41,6 @@ func (d *dummyContractRef) SetBalance(*big.Int) {}
func (d *dummyContractRef) SetNonce(uint64) {}
func (d *dummyContractRef) Balance() *big.Int { return new(big.Int) }
type dummyStateDB struct {
NoopStateDB
ref *dummyContractRef
}
func TestStoreCapture(t *testing.T) {
var (
env = NewEVM(Context{}, nil, params.TestChainConfig, Config{})

View File

@ -29,7 +29,7 @@ type Memory struct {
lastGasCost uint64
}
// NewMemory returns a new memory memory model.
// NewMemory returns a new memory model.
func NewMemory() *Memory {
return &Memory{}
}

View File

@ -1,70 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package vm
import (
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
)
func NoopCanTransfer(db StateDB, from common.Address, balance *big.Int) bool {
return true
}
func NoopTransfer(db StateDB, from, to common.Address, amount *big.Int) {}
type NoopEVMCallContext struct{}
func (NoopEVMCallContext) Call(caller ContractRef, addr common.Address, data []byte, gas, value *big.Int) ([]byte, error) {
return nil, nil
}
func (NoopEVMCallContext) CallCode(caller ContractRef, addr common.Address, data []byte, gas, value *big.Int) ([]byte, error) {
return nil, nil
}
func (NoopEVMCallContext) Create(caller ContractRef, data []byte, gas, value *big.Int) ([]byte, common.Address, error) {
return nil, common.Address{}, nil
}
func (NoopEVMCallContext) DelegateCall(me ContractRef, addr common.Address, data []byte, gas *big.Int) ([]byte, error) {
return nil, nil
}
type NoopStateDB struct{}
func (NoopStateDB) CreateAccount(common.Address) {}
func (NoopStateDB) SubBalance(common.Address, *big.Int) {}
func (NoopStateDB) AddBalance(common.Address, *big.Int) {}
func (NoopStateDB) GetBalance(common.Address) *big.Int { return nil }
func (NoopStateDB) GetNonce(common.Address) uint64 { return 0 }
func (NoopStateDB) SetNonce(common.Address, uint64) {}
func (NoopStateDB) GetCodeHash(common.Address) common.Hash { return common.Hash{} }
func (NoopStateDB) GetCode(common.Address) []byte { return nil }
func (NoopStateDB) SetCode(common.Address, []byte) {}
func (NoopStateDB) GetCodeSize(common.Address) int { return 0 }
func (NoopStateDB) AddRefund(uint64) {}
func (NoopStateDB) GetRefund() uint64 { return 0 }
func (NoopStateDB) GetState(common.Address, common.Hash) common.Hash { return common.Hash{} }
func (NoopStateDB) SetState(common.Address, common.Hash, common.Hash) {}
func (NoopStateDB) Suicide(common.Address) bool { return false }
func (NoopStateDB) HasSuicided(common.Address) bool { return false }
func (NoopStateDB) Exist(common.Address) bool { return false }
func (NoopStateDB) Empty(common.Address) bool { return false }
func (NoopStateDB) RevertToSnapshot(int) {}
func (NoopStateDB) Snapshot() int { return 0 }
func (NoopStateDB) AddLog(*types.Log) {}
func (NoopStateDB) AddPreimage(common.Hash, []byte) {}
func (NoopStateDB) ForEachStorage(common.Address, func(common.Hash, common.Hash) bool) {}

View File

@ -54,7 +54,7 @@ static void secp256k1_num_mul(secp256k1_num *r, const secp256k1_num *a, const se
even if r was negative. */
static void secp256k1_num_mod(secp256k1_num *r, const secp256k1_num *m);
/** Right-shift the passed number by bits bits. */
/** Right-shift the passed number by bits. */
static void secp256k1_num_shift(secp256k1_num *r, int bits);
/** Check whether a number is zero. */

View File

@ -26,7 +26,6 @@
} while(0)
static void default_illegal_callback_fn(const char* str, void* data) {
(void)data;
fprintf(stderr, "[libsecp256k1] illegal argument: %s\n", str);
abort();
}
@ -37,7 +36,6 @@ static const secp256k1_callback default_illegal_callback = {
};
static void default_error_callback_fn(const char* str, void* data) {
(void)data;
fprintf(stderr, "[libsecp256k1] internal consistency check failed: %s\n", str);
abort();
}

View File

@ -127,7 +127,7 @@ func (api *PrivateDebugAPI) TraceChain(ctx context.Context, start, end rpc.Block
// traceChain configures a new tracer according to the provided configuration, and
// executes all the transactions contained within. The return value will be one item
// per transaction, dependent on the requestd tracer.
// per transaction, dependent on the requested tracer.
func (api *PrivateDebugAPI) traceChain(ctx context.Context, start, end *types.Block, config *TraceConfig) (*rpc.Subscription, error) {
// Tracing a chain is a **long** operation, only do with subscriptions
notifier, supported := rpc.NotifierFromContext(ctx)

View File

@ -149,10 +149,14 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
rawdb.WriteDatabaseVersion(chainDb, core.BlockChainVersion)
}
var (
vmConfig = vm.Config{EnablePreimageRecording: config.EnablePreimageRecording}
vmConfig = vm.Config{
EnablePreimageRecording: config.EnablePreimageRecording,
EWASMInterpreter: config.EWASMInterpreter,
EVMInterpreter: config.EVMInterpreter,
}
cacheConfig = &core.CacheConfig{Disabled: config.NoPruning, TrieNodeLimit: config.TrieCache, TrieTimeLimit: config.TrieTimeout}
)
eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, eth.chainConfig, eth.engine, vmConfig)
eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, eth.chainConfig, eth.engine, vmConfig, eth.shouldPreserve)
if err != nil {
return nil, err
}
@ -173,7 +177,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
return nil, err
}
eth.miner = miner.New(eth, eth.chainConfig, eth.EventMux(), eth.engine, config.MinerRecommit, config.MinerGasFloor, config.MinerGasCeil)
eth.miner = miner.New(eth, eth.chainConfig, eth.EventMux(), eth.engine, config.MinerRecommit, config.MinerGasFloor, config.MinerGasCeil, eth.isLocalBlock)
eth.miner.SetExtra(makeExtraData(config.MinerExtraData))
eth.APIBackend = &EthAPIBackend{eth, nil}
@ -330,6 +334,60 @@ func (s *Ethereum) Etherbase() (eb common.Address, err error) {
return common.Address{}, fmt.Errorf("etherbase must be explicitly specified")
}
// isLocalBlock checks whether the specified block is mined
// by local miner accounts.
//
// We regard two types of accounts as local miner accounts: the etherbase
// and any account specified via the `txpool.locals` flag.
func (s *Ethereum) isLocalBlock(block *types.Block) bool {
author, err := s.engine.Author(block.Header())
if err != nil {
log.Warn("Failed to retrieve block author", "number", block.NumberU64(), "hash", block.Hash(), "err", err)
return false
}
// Check whether the given address is etherbase.
s.lock.RLock()
etherbase := s.etherbase
s.lock.RUnlock()
if author == etherbase {
return true
}
// Check whether the given address is specified by `txpool.local`
// CLI flag.
for _, account := range s.config.TxPool.Locals {
if account == author {
return true
}
}
return false
}
// shouldPreserve checks whether we should preserve the given block
// during the chain reorg depending on whether the author of block
// is a local account.
func (s *Ethereum) shouldPreserve(block *types.Block) bool {
// The reason we need to disable the self-reorg preserving for clique
// is that it can potentially introduce a deadlock.
//
// e.g. If there are 7 available signers
//
// r1 A
// r2 B
// r3 C
// r4 D
// r5 A [X] F G
// r6 [X]
//
// In round 5, the in-turn signer E is offline, so the worst case is that A, F
// and G sign the block of round 5 and reject their opponents' blocks; if, in
// round 6, the last available signer B is also offline, the whole network is
// stuck.
if _, ok := s.engine.(*clique.Clique); ok {
return false
}
return s.isLocalBlock(block)
}
// SetEtherbase sets the mining reward address.
func (s *Ethereum) SetEtherbase(etherbase common.Address) {
s.lock.Lock()
@ -362,7 +420,7 @@ func (s *Ethereum) StartMining(threads int) error {
s.lock.RUnlock()
s.txPool.SetGasPrice(price)
// Configure the local mining addess
// Configure the local mining address
eb, err := s.Etherbase()
if err != nil {
log.Error("Cannot start mining without etherbase", "err", err)

View File

@ -121,6 +121,11 @@ type Config struct {
// Miscellaneous options
DocRoot string `toml:"-"`
// Type of the EWASM interpreter ("" for default)
EWASMInterpreter string
// Type of the EVM interpreter ("" for default)
EVMInterpreter string
}
type configMarshaling struct {

View File

@ -37,7 +37,7 @@ import (
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
)
@ -147,7 +147,7 @@ func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, ne
NodeInfo: func() interface{} {
return manager.NodeInfo()
},
PeerInfo: func(id discover.NodeID) interface{} {
PeerInfo: func(id enode.ID) interface{} {
if p := manager.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil {
return p.Info()
}

View File

@ -472,7 +472,7 @@ func testDAOChallenge(t *testing.T, localForked, remoteForked bool, timeout bool
config = &params.ChainConfig{DAOForkBlock: big.NewInt(1), DAOForkSupport: localForked}
gspec = &core.Genesis{Config: config}
genesis = gspec.MustCommit(db)
blockchain, _ = core.NewBlockChain(db, nil, config, pow, vm.Config{})
blockchain, _ = core.NewBlockChain(db, nil, config, pow, vm.Config{}, nil)
)
pm, err := NewProtocolManager(config, downloader.FullSync, DefaultConfig.NetworkId, evmux, new(testTxPool), pow, blockchain, db)
if err != nil {

View File

@ -37,7 +37,7 @@ import (
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/params"
)
@ -59,7 +59,7 @@ func newTestProtocolManager(mode downloader.SyncMode, blocks int, generator func
Alloc: core.GenesisAlloc{testBank: {Balance: big.NewInt(1000000)}},
}
genesis = gspec.MustCommit(db)
blockchain, _ = core.NewBlockChain(db, nil, gspec.Config, engine, vm.Config{})
blockchain, _ = core.NewBlockChain(db, nil, gspec.Config, engine, vm.Config{}, nil)
)
chain, _ := core.GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, blocks, generator)
if _, err := blockchain.InsertChain(chain); err != nil {
@ -148,7 +148,7 @@ func newTestPeer(name string, version int, pm *ProtocolManager, shake bool) (*te
app, net := p2p.MsgPipe()
// Generate a random id and create the peer
var id discover.NodeID
var id enode.ID
rand.Read(id[:])
peer := pm.newPeer(version, p2p.NewPeer(id, name, nil), net)

View File

@ -25,7 +25,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/enode"
)
const (
@ -64,7 +64,7 @@ func (pm *ProtocolManager) syncTransactions(p *peer) {
// the transactions in small packs to one peer at a time.
func (pm *ProtocolManager) txsyncLoop() {
var (
pending = make(map[discover.NodeID]*txsync)
pending = make(map[enode.ID]*txsync)
sending = false // whether a send is active
pack = new(txsync) // the pack that is being sent
done = make(chan error, 1) // result of the send

View File

@ -23,7 +23,7 @@ import (
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/enode"
)
// Tests that fast sync gets disabled as soon as a real block is successfully
@ -42,8 +42,8 @@ func TestFastSyncDisabling(t *testing.T) {
// Sync up the two peers
io1, io2 := p2p.MsgPipe()
go pmFull.handle(pmFull.newPeer(63, p2p.NewPeer(discover.NodeID{}, "empty", nil), io2))
go pmEmpty.handle(pmEmpty.newPeer(63, p2p.NewPeer(discover.NodeID{}, "full", nil), io1))
go pmFull.handle(pmFull.newPeer(63, p2p.NewPeer(enode.ID{}, "empty", nil), io2))
go pmEmpty.handle(pmEmpty.newPeer(63, p2p.NewPeer(enode.ID{}, "full", nil), io1))
time.Sleep(250 * time.Millisecond)
pmEmpty.synchronise(pmEmpty.peers.BestPeer())

View File

@ -143,9 +143,9 @@ func CopyFile(dst, src string, mode os.FileMode) {
// so that go commands executed by build use the same version of Go as the 'host' that runs
// build code. e.g.
//
// /usr/lib/go-1.8/bin/go run build/ci.go ...
// /usr/lib/go-1.11/bin/go run build/ci.go ...
//
// runs using go 1.8 and invokes go 1.8 tools from the same GOROOT. This is also important
// runs using go 1.11 and invokes go 1.11 tools from the same GOROOT. This is also important
// because runtime.Version checks on the host should match the tools that are run.
func GoTool(tool string, args ...string) *exec.Cmd {
args = append([]string{tool}, args...)

View File

@ -449,7 +449,7 @@ func (s *PrivateAccountAPI) Sign(ctx context.Context, data hexutil.Bytes, addr c
// addr = ecrecover(hash, signature)
//
// Note, the signature must conform to the secp256k1 curve R, S and V values, where
// the V value must be be 27 or 28 for legacy reasons.
// the V value must be 27 or 28 for legacy reasons.
//
// https://github.com/ethereum/go-ethereum/wiki/Management-APIs#personal_ecRecover
func (s *PrivateAccountAPI) EcRecover(ctx context.Context, data, sig hexutil.Bytes) (common.Address, error) {

View File

@ -1021,7 +1021,7 @@ var formatOutputInt = function (param) {
var value = param.staticPart() || "0";
// check if it's negative number
// it it is, return two's complement
// if it is, return two's complement
if (signedIsNegative(value)) {
return new BigNumber(value, 16).minus(new BigNumber('ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff', 16)).minus(1);
}
@ -2250,7 +2250,7 @@ var isAddress = function (address) {
// check if it has the basic requirements of an address
return false;
} else if (/^(0x)?[0-9a-f]{40}$/.test(address) || /^(0x)?[0-9A-F]{40}$/.test(address)) {
// If it's all small caps or all all caps, return true
// If it's all small caps or all caps, return true
return true;
} else {
// Otherwise check each case

View File

@ -26,7 +26,7 @@ import (
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/params"
)
@ -47,7 +47,7 @@ type NodeInfo struct {
Genesis common.Hash `json:"genesis"` // SHA3 hash of the host's genesis block
Config *params.ChainConfig `json:"config"` // Chain configuration for the fork rules
Head common.Hash `json:"head"` // SHA3 hash of the host's best owned block
CHT light.TrustedCheckpoint `json:"cht"` // Trused CHT checkpoint for fast catchup
CHT params.TrustedCheckpoint `json:"cht"` // Trusted CHT checkpoint for fast catchup
}
// makeProtocols creates protocol descriptors for the given LES versions.
@ -63,7 +63,7 @@ func (c *lesCommons) makeProtocols(versions []uint) []p2p.Protocol {
Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
return c.protocolManager.runPeer(version, p, rw)
},
PeerInfo: func(id discover.NodeID) interface{} {
PeerInfo: func(id enode.ID) interface{} {
if p := c.protocolManager.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil {
return p.Info()
}
@ -76,7 +76,7 @@ func (c *lesCommons) makeProtocols(versions []uint) []p2p.Protocol {
// nodeInfo retrieves some protocol metadata about the running host node.
func (c *lesCommons) nodeInfo() interface{} {
var cht light.TrustedCheckpoint
var cht params.TrustedCheckpoint
sections, _, _ := c.chtIndexer.Sections()
sections2, _, _ := c.bloomTrieIndexer.Sections()
@ -98,8 +98,8 @@ func (c *lesCommons) nodeInfo() interface{} {
idxV2 := (sectionIndex+1)*c.iConfig.PairChtSize/c.iConfig.ChtSize - 1
chtRoot = light.GetChtRoot(c.chainDb, idxV2, sectionHead)
}
cht = light.TrustedCheckpoint{
SectionIdx: sectionIndex,
cht = params.TrustedCheckpoint{
SectionIndex: sectionIndex,
SectionHead: sectionHead,
CHTRoot: chtRoot,
BloomRoot: light.GetBloomTrieRoot(c.chainDb, sectionIndex, sectionHead),

View File

@ -213,8 +213,7 @@ func (pm *ProtocolManager) runPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWrit
var entry *poolEntry
peer := pm.newPeer(int(version), pm.networkId, p, rw)
if pm.serverPool != nil {
addr := p.RemoteAddr().(*net.TCPAddr)
entry = pm.serverPool.connect(peer, addr.IP, uint16(addr.Port))
entry = pm.serverPool.connect(peer, peer.Node())
}
peer.poolEntry = entry
select {
@ -382,7 +381,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
}
if p.requestAnnounceType == announceTypeSigned {
if err := req.checkSignature(p.pubKey); err != nil {
if err := req.checkSignature(p.ID()); err != nil {
p.Log().Trace("Invalid announcement signature", "err", err)
return err
}

View File

@ -38,7 +38,7 @@ import (
"github.com/ethereum/go-ethereum/les/flowcontrol"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/params"
)
@ -164,7 +164,7 @@ func newTestProtocolManager(lightSync bool, blocks int, generator func(int, *cor
if lightSync {
chain, _ = light.NewLightChain(odr, gspec.Config, engine)
} else {
blockchain, _ := core.NewBlockChain(db, nil, gspec.Config, engine, vm.Config{})
blockchain, _ := core.NewBlockChain(db, nil, gspec.Config, engine, vm.Config{}, nil)
gchain, _ := core.GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, blocks, generator)
if _, err := blockchain.InsertChain(gchain); err != nil {
panic(err)
@ -221,7 +221,7 @@ func newTestPeer(t *testing.T, name string, version int, pm *ProtocolManager, sh
app, net := p2p.MsgPipe()
// Generate a random id and create the peer
var id discover.NodeID
var id enode.ID
rand.Read(id[:])
peer := pm.newPeer(version, NetworkId, p2p.NewPeer(id, name, nil), net)
@ -258,7 +258,7 @@ func newTestPeerPair(name string, version int, pm, pm2 *ProtocolManager) (*peer,
app, net := p2p.MsgPipe()
// Generate a random id and create the peer
var id discover.NodeID
var id enode.ID
rand.Read(id[:])
peer := pm.newPeer(version, NetworkId, p2p.NewPeer(id, name, nil), net)

View File

@ -478,7 +478,7 @@ func (r *ChtRequest) Validate(db ethdb.Database, msg *Msg) error {
}
type BloomReq struct {
BloomTrieNum, BitIdx, SectionIdx, FromLevel uint64
BloomTrieNum, BitIdx, SectionIndex, FromLevel uint64
}
// ODR request type for requesting headers by Canonical Hash Trie, see LesOdrRequest interface
@ -487,7 +487,7 @@ type BloomRequest light.BloomRequest
// GetCost returns the cost of the given ODR request according to the serving
// peer's cost table (implementation of LesOdrRequest)
func (r *BloomRequest) GetCost(peer *peer) uint64 {
return peer.GetRequestCost(GetHelperTrieProofsMsg, len(r.SectionIdxList))
return peer.GetRequestCost(GetHelperTrieProofsMsg, len(r.SectionIndexList))
}
// CanSend tells if a certain peer is suitable for serving the given request
@ -503,13 +503,13 @@ func (r *BloomRequest) CanSend(peer *peer) bool {
// Request sends an ODR request to the LES network (implementation of LesOdrRequest)
func (r *BloomRequest) Request(reqID uint64, peer *peer) error {
peer.Log().Debug("Requesting BloomBits", "bloomTrie", r.BloomTrieNum, "bitIdx", r.BitIdx, "sections", r.SectionIdxList)
reqs := make([]HelperTrieReq, len(r.SectionIdxList))
peer.Log().Debug("Requesting BloomBits", "bloomTrie", r.BloomTrieNum, "bitIdx", r.BitIdx, "sections", r.SectionIndexList)
reqs := make([]HelperTrieReq, len(r.SectionIndexList))
var encNumber [10]byte
binary.BigEndian.PutUint16(encNumber[:2], uint16(r.BitIdx))
for i, sectionIdx := range r.SectionIdxList {
for i, sectionIdx := range r.SectionIndexList {
binary.BigEndian.PutUint64(encNumber[2:], sectionIdx)
reqs[i] = HelperTrieReq{
Type: htBloomBits,
@ -524,7 +524,7 @@ func (r *BloomRequest) Request(reqID uint64, peer *peer) error {
// returns true and stores results in memory if the message was a valid reply
// to the request (implementation of LesOdrRequest)
func (r *BloomRequest) Validate(db ethdb.Database, msg *Msg) error {
log.Debug("Validating BloomBits", "bloomTrie", r.BloomTrieNum, "bitIdx", r.BitIdx, "sections", r.SectionIdxList)
log.Debug("Validating BloomBits", "bloomTrie", r.BloomTrieNum, "bitIdx", r.BitIdx, "sections", r.SectionIndexList)
// Ensure we have a correct message with a single proof element
if msg.MsgType != MsgHelperTrieProofs {
@ -535,13 +535,13 @@ func (r *BloomRequest) Validate(db ethdb.Database, msg *Msg) error {
nodeSet := proofs.NodeSet()
reads := &readTraceDB{db: nodeSet}
r.BloomBits = make([][]byte, len(r.SectionIdxList))
r.BloomBits = make([][]byte, len(r.SectionIndexList))
// Verify the proofs
var encNumber [10]byte
binary.BigEndian.PutUint16(encNumber[:2], uint16(r.BitIdx))
for i, idx := range r.SectionIdxList {
for i, idx := range r.SectionIndexList {
binary.BigEndian.PutUint64(encNumber[2:], idx)
value, _, err := trie.VerifyProof(r.BloomTrieRoot, encNumber[:], reads)
if err != nil {

View File

@ -18,7 +18,6 @@
package les
import (
"crypto/ecdsa"
"errors"
"fmt"
"math/big"
@ -51,7 +50,6 @@ const (
type peer struct {
*p2p.Peer
pubKey *ecdsa.PublicKey
rw p2p.MsgReadWriter
@ -80,11 +78,9 @@ type peer struct {
func newPeer(version int, network uint64, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
id := p.ID()
pubKey, _ := id.Pubkey()
return &peer{
Peer: p,
pubKey: pubKey,
rw: rw,
version: version,
network: network,

View File

@ -18,9 +18,7 @@
package les
import (
"bytes"
"crypto/ecdsa"
"crypto/elliptic"
"errors"
"fmt"
"io"
@ -30,7 +28,7 @@ import (
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/secp256k1"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/rlp"
)
@ -148,21 +146,20 @@ func (a *announceData) sign(privKey *ecdsa.PrivateKey) {
}
// checkSignature verifies if the block announcement has a valid signature by the given pubKey
func (a *announceData) checkSignature(pubKey *ecdsa.PublicKey) error {
func (a *announceData) checkSignature(id enode.ID) error {
var sig []byte
if err := a.Update.decode().get("sign", &sig); err != nil {
return err
}
rlp, _ := rlp.EncodeToBytes(announceBlock{a.Hash, a.Number, a.Td})
recPubkey, err := secp256k1.RecoverPubkey(crypto.Keccak256(rlp), sig)
recPubkey, err := crypto.SigToPub(crypto.Keccak256(rlp), sig)
if err != nil {
return err
}
pbytes := elliptic.Marshal(pubKey.Curve, pubKey.X, pubKey.Y)
if bytes.Equal(pbytes, recPubkey) {
if id == enode.PubkeyToIDV4(recPubkey) {
return nil
}
return errors.New("Wrong signature")
return errors.New("wrong signature")
}
type blockInfo struct {

View File

@ -217,6 +217,13 @@ func (r *sentReq) stateRequesting() reqStateFn {
go r.tryRequest()
r.lastReqQueued = true
return r.stateRequesting
case rpDeliveredInvalid:
// if it was the last sent request (set to nil by update) then start a new one
if !r.lastReqQueued && r.lastReqSentTo == nil {
go r.tryRequest()
r.lastReqQueued = true
}
return r.stateRequesting
case rpDeliveredValid:
r.stop(nil)
return r.stateStopped
@ -242,7 +249,11 @@ func (r *sentReq) stateNoMorePeers() reqStateFn {
r.stop(nil)
return r.stateStopped
}
if r.waiting() {
return r.stateNoMorePeers
}
r.stop(light.ErrNoPeers)
return nil
case <-r.stopCh:
return r.stateStopped
}

View File

@ -18,6 +18,7 @@
package les
import (
"crypto/ecdsa"
"fmt"
"io"
"math"
@ -28,11 +29,12 @@ import (
"time"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/discv5"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/rlp"
)
@ -90,8 +92,7 @@ const (
// connReq represents a request for peer connection.
type connReq struct {
p *peer
ip net.IP
port uint16
node *enode.Node
result chan *poolEntry
}
@ -122,10 +123,10 @@ type serverPool struct {
topic discv5.Topic
discSetPeriod chan time.Duration
discNodes chan *discv5.Node
discNodes chan *enode.Node
discLookups chan bool
entries map[discover.NodeID]*poolEntry
entries map[enode.ID]*poolEntry
timeout, enableRetry chan *poolEntry
adjustStats chan poolStatAdjust
@ -145,7 +146,7 @@ func newServerPool(db ethdb.Database, quit chan struct{}, wg *sync.WaitGroup) *s
db: db,
quit: quit,
wg: wg,
entries: make(map[discover.NodeID]*poolEntry),
entries: make(map[enode.ID]*poolEntry),
timeout: make(chan *poolEntry, 1),
adjustStats: make(chan poolStatAdjust, 100),
enableRetry: make(chan *poolEntry, 1),
@ -170,22 +171,38 @@ func (pool *serverPool) start(server *p2p.Server, topic discv5.Topic) {
if pool.server.DiscV5 != nil {
pool.discSetPeriod = make(chan time.Duration, 1)
pool.discNodes = make(chan *discv5.Node, 100)
pool.discNodes = make(chan *enode.Node, 100)
pool.discLookups = make(chan bool, 100)
go pool.server.DiscV5.SearchTopic(pool.topic, pool.discSetPeriod, pool.discNodes, pool.discLookups)
go pool.discoverNodes()
}
pool.checkDial()
go pool.eventLoop()
}
// discoverNodes wraps SearchTopic, converting result nodes to enode.Node.
func (pool *serverPool) discoverNodes() {
ch := make(chan *discv5.Node)
go func() {
pool.server.DiscV5.SearchTopic(pool.topic, pool.discSetPeriod, ch, pool.discLookups)
close(ch)
}()
for n := range ch {
pubkey, err := decodePubkey64(n.ID[:])
if err != nil {
continue
}
pool.discNodes <- enode.NewV4(pubkey, n.IP, int(n.TCP), int(n.UDP))
}
}
// connect should be called upon any incoming connection. If the connection has been
// dialed by the server pool recently, the appropriate pool entry is returned.
// Otherwise, the connection should be rejected.
// Note that whenever a connection has been accepted and a pool entry has been returned,
// disconnect should also always be called.
func (pool *serverPool) connect(p *peer, ip net.IP, port uint16) *poolEntry {
func (pool *serverPool) connect(p *peer, node *enode.Node) *poolEntry {
log.Debug("Connect new entry", "enode", p.id)
req := &connReq{p: p, ip: ip, port: port, result: make(chan *poolEntry, 1)}
req := &connReq{p: p, node: node, result: make(chan *poolEntry, 1)}
select {
case pool.connCh <- req:
case <-pool.quit:
@ -196,7 +213,7 @@ func (pool *serverPool) connect(p *peer, ip net.IP, port uint16) *poolEntry {
// registered should be called after a successful handshake
func (pool *serverPool) registered(entry *poolEntry) {
log.Debug("Registered new entry", "enode", entry.id)
log.Debug("Registered new entry", "enode", entry.node.ID())
req := &registerReq{entry: entry, done: make(chan struct{})}
select {
case pool.registerCh <- req:
@ -216,7 +233,7 @@ func (pool *serverPool) disconnect(entry *poolEntry) {
stopped = true
default:
}
log.Debug("Disconnected old entry", "enode", entry.id)
log.Debug("Disconnected old entry", "enode", entry.node.ID())
req := &disconnReq{entry: entry, stopped: stopped, done: make(chan struct{})}
// Block until disconnection request is served.
@ -320,7 +337,7 @@ func (pool *serverPool) eventLoop() {
}
case node := <-pool.discNodes:
entry := pool.findOrNewNode(discover.NodeID(node.ID), node.IP, node.TCP)
entry := pool.findOrNewNode(node)
pool.updateCheckDial(entry)
case conv := <-pool.discLookups:
@ -341,7 +358,7 @@ func (pool *serverPool) eventLoop() {
// Handle peer connection requests.
entry := pool.entries[req.p.ID()]
if entry == nil {
entry = pool.findOrNewNode(req.p.ID(), req.ip, req.port)
entry = pool.findOrNewNode(req.node)
}
if entry.state == psConnected || entry.state == psRegistered {
req.result <- nil
@ -351,8 +368,8 @@ func (pool *serverPool) eventLoop() {
entry.peer = req.p
entry.state = psConnected
addr := &poolEntryAddress{
ip: req.ip,
port: req.port,
ip: req.node.IP(),
port: uint16(req.node.TCP()),
lastSeen: mclock.Now(),
}
entry.lastConnected = addr
@ -401,18 +418,18 @@ func (pool *serverPool) eventLoop() {
}
}
func (pool *serverPool) findOrNewNode(id discover.NodeID, ip net.IP, port uint16) *poolEntry {
func (pool *serverPool) findOrNewNode(node *enode.Node) *poolEntry {
now := mclock.Now()
entry := pool.entries[id]
entry := pool.entries[node.ID()]
if entry == nil {
log.Debug("Discovered new entry", "id", id)
log.Debug("Discovered new entry", "id", node.ID())
entry = &poolEntry{
id: id,
node: node,
addr: make(map[string]*poolEntryAddress),
addrSelect: *newWeightedRandomSelect(),
shortRetry: shortRetryCnt,
}
pool.entries[id] = entry
pool.entries[node.ID()] = entry
// initialize previously unknown peers with good statistics to give a chance to prove themselves
entry.connectStats.add(1, initStatsWeight)
entry.delayStats.add(0, initStatsWeight)
@ -420,10 +437,7 @@ func (pool *serverPool) findOrNewNode(id discover.NodeID, ip net.IP, port uint16
entry.timeoutStats.add(0, initStatsWeight)
}
entry.lastDiscovered = now
addr := &poolEntryAddress{
ip: ip,
port: port,
}
addr := &poolEntryAddress{ip: node.IP(), port: uint16(node.TCP())}
if a, ok := entry.addr[addr.strKey()]; ok {
addr = a
} else {
@ -450,12 +464,12 @@ func (pool *serverPool) loadNodes() {
return
}
for _, e := range list {
log.Debug("Loaded server stats", "id", e.id, "fails", e.lastConnected.fails,
log.Debug("Loaded server stats", "id", e.node.ID(), "fails", e.lastConnected.fails,
"conn", fmt.Sprintf("%v/%v", e.connectStats.avg, e.connectStats.weight),
"delay", fmt.Sprintf("%v/%v", time.Duration(e.delayStats.avg), e.delayStats.weight),
"response", fmt.Sprintf("%v/%v", time.Duration(e.responseStats.avg), e.responseStats.weight),
"timeout", fmt.Sprintf("%v/%v", e.timeoutStats.avg, e.timeoutStats.weight))
pool.entries[e.id] = e
pool.entries[e.node.ID()] = e
pool.knownQueue.setLatest(e)
pool.knownSelect.update((*knownEntry)(e))
}
@ -481,7 +495,7 @@ func (pool *serverPool) removeEntry(entry *poolEntry) {
pool.newSelect.remove((*discoveredEntry)(entry))
pool.knownSelect.remove((*knownEntry)(entry))
entry.removed = true
delete(pool.entries, entry.id)
delete(pool.entries, entry.node.ID())
}
// setRetryDial starts the timer which will enable dialing a certain node again
@ -559,10 +573,10 @@ func (pool *serverPool) dial(entry *poolEntry, knownSelected bool) {
pool.newSelected++
}
addr := entry.addrSelect.choose().(*poolEntryAddress)
log.Debug("Dialing new peer", "lesaddr", entry.id.String()+"@"+addr.strKey(), "set", len(entry.addr), "known", knownSelected)
log.Debug("Dialing new peer", "lesaddr", entry.node.ID().String()+"@"+addr.strKey(), "set", len(entry.addr), "known", knownSelected)
entry.dialed = addr
go func() {
pool.server.AddPeer(discover.NewNode(entry.id, addr.ip, addr.port, addr.port))
pool.server.AddPeer(entry.node)
select {
case <-pool.quit:
case <-time.After(dialTimeout):
@ -580,7 +594,7 @@ func (pool *serverPool) checkDialTimeout(entry *poolEntry) {
if entry.state != psDialed {
return
}
log.Debug("Dial timeout", "lesaddr", entry.id.String()+"@"+entry.dialed.strKey())
log.Debug("Dial timeout", "lesaddr", entry.node.ID().String()+"@"+entry.dialed.strKey())
entry.state = psNotConnected
if entry.knownSelected {
pool.knownSelected--
@ -602,8 +616,9 @@ const (
// poolEntry represents a server node and stores its current state and statistics.
type poolEntry struct {
peer *peer
id discover.NodeID
pubkey [64]byte // secp256k1 key of the node
addr map[string]*poolEntryAddress
node *enode.Node
lastConnected, dialed *poolEntryAddress
addrSelect weightedRandomSelect
@ -620,23 +635,39 @@ type poolEntry struct {
shortRetry int
}
func (e *poolEntry) EncodeRLP(w io.Writer) error {
return rlp.Encode(w, []interface{}{e.id, e.lastConnected.ip, e.lastConnected.port, e.lastConnected.fails, &e.connectStats, &e.delayStats, &e.responseStats, &e.timeoutStats})
}
func (e *poolEntry) DecodeRLP(s *rlp.Stream) error {
var entry struct {
ID discover.NodeID
// poolEntryEnc is the RLP encoding of poolEntry.
type poolEntryEnc struct {
Pubkey []byte
IP net.IP
Port uint16
Fails uint
CStat, DStat, RStat, TStat poolStats
}
func (e *poolEntry) EncodeRLP(w io.Writer) error {
return rlp.Encode(w, &poolEntryEnc{
Pubkey: encodePubkey64(e.node.Pubkey()),
IP: e.lastConnected.ip,
Port: e.lastConnected.port,
Fails: e.lastConnected.fails,
CStat: e.connectStats,
DStat: e.delayStats,
RStat: e.responseStats,
TStat: e.timeoutStats,
})
}
func (e *poolEntry) DecodeRLP(s *rlp.Stream) error {
var entry poolEntryEnc
if err := s.Decode(&entry); err != nil {
return err
}
pubkey, err := decodePubkey64(entry.Pubkey)
if err != nil {
return err
}
addr := &poolEntryAddress{ip: entry.IP, port: entry.Port, fails: entry.Fails, lastSeen: mclock.Now()}
e.id = entry.ID
e.node = enode.NewV4(pubkey, entry.IP, int(entry.Port), int(entry.Port))
e.addr = make(map[string]*poolEntryAddress)
e.addr[addr.strKey()] = addr
e.addrSelect = *newWeightedRandomSelect()
@ -651,6 +682,14 @@ func (e *poolEntry) DecodeRLP(s *rlp.Stream) error {
return nil
}
func encodePubkey64(pub *ecdsa.PublicKey) []byte {
return crypto.FromECDSAPub(pub)[1:]
}
func decodePubkey64(b []byte) (*ecdsa.PublicKey, error) {
return crypto.UnmarshalPubkey(append([]byte{0x04}, b...))
}
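A quick round-trip sketch for these helpers (the pub variable is hypothetical): encodePubkey64 drops the 0x04 uncompressed-point prefix, leaving the 64-byte X||Y form, and decodePubkey64 restores the prefix before unmarshalling:

// pub is assumed to be a valid *ecdsa.PublicKey on the secp256k1 curve.
b := encodePubkey64(pub) // len(b) == 64
if back, err := decodePubkey64(b); err == nil {
	_ = back.X.Cmp(pub.X) == 0 && back.Y.Cmp(pub.Y) == 0 // round trip holds
}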
// discoveredEntry implements wrsItem
type discoveredEntry poolEntry

View File

@ -118,19 +118,19 @@ func NewLightChain(odr OdrBackend, config *params.ChainConfig, engine consensus.
}
// addTrustedCheckpoint adds a trusted checkpoint to the blockchain
func (self *LightChain) addTrustedCheckpoint(cp TrustedCheckpoint) {
func (self *LightChain) addTrustedCheckpoint(cp *params.TrustedCheckpoint) {
if self.odr.ChtIndexer() != nil {
StoreChtRoot(self.chainDb, cp.SectionIdx, cp.SectionHead, cp.CHTRoot)
self.odr.ChtIndexer().AddCheckpoint(cp.SectionIdx, cp.SectionHead)
StoreChtRoot(self.chainDb, cp.SectionIndex, cp.SectionHead, cp.CHTRoot)
self.odr.ChtIndexer().AddCheckpoint(cp.SectionIndex, cp.SectionHead)
}
if self.odr.BloomTrieIndexer() != nil {
StoreBloomTrieRoot(self.chainDb, cp.SectionIdx, cp.SectionHead, cp.BloomRoot)
self.odr.BloomTrieIndexer().AddCheckpoint(cp.SectionIdx, cp.SectionHead)
StoreBloomTrieRoot(self.chainDb, cp.SectionIndex, cp.SectionHead, cp.BloomRoot)
self.odr.BloomTrieIndexer().AddCheckpoint(cp.SectionIndex, cp.SectionHead)
}
if self.odr.BloomIndexer() != nil {
self.odr.BloomIndexer().AddCheckpoint(cp.SectionIdx, cp.SectionHead)
self.odr.BloomIndexer().AddCheckpoint(cp.SectionIndex, cp.SectionHead)
}
log.Info("Added trusted checkpoint", "chain", cp.name, "block", (cp.SectionIdx+1)*self.indexerConfig.ChtSize-1, "hash", cp.SectionHead)
log.Info("Added trusted checkpoint", "chain", cp.Name, "block", (cp.SectionIndex+1)*self.indexerConfig.ChtSize-1, "hash", cp.SectionHead)
}
func (self *LightChain) getProcInterrupt() bool {
@ -157,7 +157,7 @@ func (self *LightChain) loadLastState() error {
// Issue a status log and return
header := self.hc.CurrentHeader()
headerTd := self.GetTd(header.Hash(), header.Number.Uint64())
log.Info("Loaded most recent local header", "number", header.Number, "hash", header.Hash(), "td", headerTd)
log.Info("Loaded most recent local header", "number", header.Number, "hash", header.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(header.Time.Int64(), 0)))
return nil
}
@ -488,7 +488,7 @@ func (self *LightChain) SyncCht(ctx context.Context) bool {
// Ensure the chain didn't move past the latest block while retrieving it
if self.hc.CurrentHeader().Number.Uint64() < header.Number.Uint64() {
log.Info("Updated latest header based on CHT", "number", header.Number, "hash", header.Hash())
log.Info("Updated latest header based on CHT", "number", header.Number, "hash", header.Hash(), "age", common.PrettyAge(time.Unix(header.Time.Int64(), 0)))
self.hc.SetCurrentHeader(header)
}
return true

View File

@ -160,7 +160,7 @@ type BloomRequest struct {
Config *IndexerConfig
BloomTrieNum uint64
BitIdx uint
SectionIdxList []uint64
SectionIndexList []uint64
BloomTrieRoot common.Hash
BloomBits [][]byte
Proofs *NodeSet
@ -168,7 +168,7 @@ type BloomRequest struct {
// StoreResult stores the retrieved data in local database
func (req *BloomRequest) StoreResult(db ethdb.Database) {
for i, sectionIdx := range req.SectionIdxList {
for i, sectionIdx := range req.SectionIndexList {
sectionHead := rawdb.ReadCanonicalHash(db, (sectionIdx+1)*req.Config.BloomTrieSize-1)
// if we don't have the canonical hash stored for this section head number, we'll still store it under
// a key with a zero sectionHead. GetBloomBits will look there too if we still don't have the canonical

View File

@ -257,7 +257,7 @@ func testChainOdr(t *testing.T, protocol int, fn odrTestFn) {
)
gspec.MustCommit(ldb)
// Assemble the test environment
blockchain, _ := core.NewBlockChain(sdb, nil, params.TestChainConfig, ethash.NewFullFaker(), vm.Config{})
blockchain, _ := core.NewBlockChain(sdb, nil, params.TestChainConfig, ethash.NewFullFaker(), vm.Config{}, nil)
gchain, _ := core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), sdb, 4, testChainGen)
if _, err := blockchain.InsertChain(gchain); err != nil {
t.Fatal(err)

View File

@ -222,7 +222,7 @@ func GetBloomBits(ctx context.Context, odr OdrBackend, bitIdx uint, sectionIdxLi
}
r := &BloomRequest{BloomTrieRoot: GetBloomTrieRoot(db, bloomTrieCount-1, sectionHead), BloomTrieNum: bloomTrieCount - 1,
BitIdx: bitIdx, SectionIdxList: reqList, Config: odr.IndexerConfig()}
BitIdx: bitIdx, SectionIndexList: reqList, Config: odr.IndexerConfig()}
if err := odr.Retrieve(ctx, r); err != nil {
return nil, err
} else {

View File

@ -104,38 +104,11 @@ var (
}
)
// trustedCheckpoint represents a set of post-processed trie roots (CHT and BloomTrie) associated with
// the appropriate section index and head hash. It is used to start light syncing from this checkpoint
// and avoid downloading the entire header chain while still being able to securely access old headers/logs.
type TrustedCheckpoint struct {
name string
SectionIdx uint64
SectionHead, CHTRoot, BloomRoot common.Hash
}
// trustedCheckpoints associates each known checkpoint with the genesis hash of the chain it belongs to
var trustedCheckpoints = map[common.Hash]TrustedCheckpoint{
params.MainnetGenesisHash: {
name: "mainnet",
SectionIdx: 187,
SectionHead: common.HexToHash("e6baa034efa31562d71ff23676512dec6562c1ad0301e08843b907e81958c696"),
CHTRoot: common.HexToHash("28001955219719cf06de1b08648969139d123a9835fc760547a1e4dabdabc15a"),
BloomRoot: common.HexToHash("395ca2373fc662720ac6b58b3bbe71f68aa0f38b63b2d3553dd32ff3c51eebc4"),
},
params.TestnetGenesisHash: {
name: "ropsten",
SectionIdx: 117,
SectionHead: common.HexToHash("9529b38631ae30783f56cbe4c3b9f07575b770ecba4f6e20a274b1e2f40fede1"),
CHTRoot: common.HexToHash("6f48e9f101f1fac98e7d74fbbcc4fda138358271ffd974d40d2506f0308bb363"),
BloomRoot: common.HexToHash("8242342e66e942c0cd893484e6736b9862ceb88b43ca344bb06a8285ac1b6d64"),
},
params.RinkebyGenesisHash: {
name: "rinkeby",
SectionIdx: 85,
SectionHead: common.HexToHash("92cfa67afc4ad8ab0dcbc6fa49efd14b5b19402442e7317e6bc879d85f89d64d"),
CHTRoot: common.HexToHash("2802ec92cd7a54a75bca96afdc666ae7b99e5d96cf8192dcfb09588812f51564"),
BloomRoot: common.HexToHash("ebefeb31a9a42866d8cf2d2477704b4c3d7c20d0e4e9b5aaa77f396e016a1263"),
},
var trustedCheckpoints = map[common.Hash]*params.TrustedCheckpoint{
params.MainnetGenesisHash: params.MainnetTrustedCheckpoint,
params.TestnetGenesisHash: params.TestnetTrustedCheckpoint,
params.RinkebyGenesisHash: params.RinkebyTrustedCheckpoint,
}
var (
@ -329,7 +302,7 @@ func (b *BloomTrieIndexerBackend) fetchMissingNodes(ctx context.Context, section
for i := 0; i < 20; i++ {
go func() {
for bitIndex := range indexCh {
r := &BloomRequest{BloomTrieRoot: root, BloomTrieNum: section - 1, BitIdx: bitIndex, SectionIdxList: []uint64{section - 1}, Config: b.odr.IndexerConfig()}
r := &BloomRequest{BloomTrieRoot: root, BloomTrieNum: section - 1, BitIdx: bitIndex, SectionIndexList: []uint64{section - 1}, Config: b.odr.IndexerConfig()}
for {
if err := b.odr.Retrieve(ctx, r); err == ErrNoPeers {
// if there are no peers to serve, retry later

View File

@ -40,7 +40,7 @@ func TestNodeIterator(t *testing.T) {
genesis = gspec.MustCommit(fulldb)
)
gspec.MustCommit(lightdb)
blockchain, _ := core.NewBlockChain(fulldb, nil, params.TestChainConfig, ethash.NewFullFaker(), vm.Config{})
blockchain, _ := core.NewBlockChain(fulldb, nil, params.TestChainConfig, ethash.NewFullFaker(), vm.Config{}, nil)
gchain, _ := core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), fulldb, 4, testChainGen)
if _, err := blockchain.InsertChain(gchain); err != nil {
panic(err)

View File

@ -88,7 +88,7 @@ func TestTxPool(t *testing.T) {
)
gspec.MustCommit(ldb)
// Assemble the test environment
blockchain, _ := core.NewBlockChain(sdb, nil, params.TestChainConfig, ethash.NewFullFaker(), vm.Config{})
blockchain, _ := core.NewBlockChain(sdb, nil, params.TestChainConfig, ethash.NewFullFaker(), vm.Config{}, nil)
gchain, _ := core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), sdb, poolTestBlocks, txPoolTestChainGen)
if _, err := blockchain.InsertChain(gchain); err != nil {
panic(err)

View File

@ -52,13 +52,13 @@ type Miner struct {
shouldStart int32 // should start indicates whether we should start after sync
}
func New(eth Backend, config *params.ChainConfig, mux *event.TypeMux, engine consensus.Engine, recommit time.Duration, gasFloor, gasCeil uint64) *Miner {
func New(eth Backend, config *params.ChainConfig, mux *event.TypeMux, engine consensus.Engine, recommit time.Duration, gasFloor, gasCeil uint64, isLocalBlock func(block *types.Block) bool) *Miner {
miner := &Miner{
eth: eth,
mux: mux,
engine: engine,
exitCh: make(chan struct{}),
worker: newWorker(config, engine, eth, mux, recommit, gasFloor, gasCeil),
worker: newWorker(config, engine, eth, mux, recommit, gasFloor, gasCeil, isLocalBlock),
canStart: 1,
}
go miner.update()

View File

@ -43,7 +43,7 @@ type unconfirmedBlock struct {
}
// unconfirmedBlocks implements a data structure to maintain locally mined blocks
// have have not yet reached enough maturity to guarantee chain inclusion. It is
// have not yet reached enough maturity to guarantee chain inclusion. It is
// used by the miner to provide logs to the user when a previously mined block
// has a high enough guarantee to not be reorged out of the canonical chain.
type unconfirmedBlocks struct {

View File

@ -150,7 +150,8 @@ type worker struct {
resubmitAdjustCh chan *intervalAdjust
current *environment // An environment for current running cycle.
possibleUncles map[common.Hash]*types.Block // A set of side blocks as the possible uncle blocks.
localUncles map[common.Hash]*types.Block // A set of side blocks generated locally as the possible uncle blocks.
remoteUncles map[common.Hash]*types.Block // A set of side blocks as the possible uncle blocks.
unconfirmed *unconfirmedBlocks // A set of locally mined blocks pending canonicalness confirmations.
mu sync.RWMutex // The lock used to protect the coinbase and extra fields
@ -168,6 +169,9 @@ type worker struct {
running int32 // The indicator whether the consensus engine is running or not.
newTxs int32 // New arrival transaction count since last sealing work submitting.
// External functions
isLocalBlock func(block *types.Block) bool // Function used to determine whether the specified block is mined by local miner.
// Test hooks
newTaskHook func(*task) // Method to call upon receiving a new sealing task.
skipSealHook func(*task) bool // Method to decide whether skipping the sealing.
@ -175,7 +179,7 @@ type worker struct {
resubmitHook func(time.Duration, time.Duration) // Method to call upon updating resubmitting interval.
}
func newWorker(config *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, recommit time.Duration, gasFloor, gasCeil uint64) *worker {
func newWorker(config *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, recommit time.Duration, gasFloor, gasCeil uint64, isLocalBlock func(*types.Block) bool) *worker {
worker := &worker{
config: config,
engine: engine,
@ -184,7 +188,9 @@ func newWorker(config *params.ChainConfig, engine consensus.Engine, eth Backend,
chain: eth.BlockChain(),
gasFloor: gasFloor,
gasCeil: gasCeil,
possibleUncles: make(map[common.Hash]*types.Block),
isLocalBlock: isLocalBlock,
localUncles: make(map[common.Hash]*types.Block),
remoteUncles: make(map[common.Hash]*types.Block),
unconfirmed: newUnconfirmedBlocks(eth.BlockChain(), miningLogAtDepth),
pendingTasks: make(map[common.Hash]*task),
txsCh: make(chan core.NewTxsEvent, txChanSize),
@ -405,11 +411,19 @@ func (w *worker) mainLoop() {
w.commitNewWork(req.interrupt, req.noempty, req.timestamp)
case ev := <-w.chainSideCh:
if _, exist := w.possibleUncles[ev.Block.Hash()]; exist {
// Short circuit for duplicate side blocks
if _, exist := w.localUncles[ev.Block.Hash()]; exist {
continue
}
// Add side block to possible uncle block set.
w.possibleUncles[ev.Block.Hash()] = ev.Block
if _, exist := w.remoteUncles[ev.Block.Hash()]; exist {
continue
}
// Add side block to possible uncle block set depending on the author.
if w.isLocalBlock != nil && w.isLocalBlock(ev.Block) {
w.localUncles[ev.Block.Hash()] = ev.Block
} else {
w.remoteUncles[ev.Block.Hash()] = ev.Block
}
// If our mining block contains less than 2 uncle blocks,
// add the new uncle block if valid and regenerate a mining block.
if w.isRunning() && w.current != nil && w.current.uncles.Cardinality() < 2 {
@ -421,7 +435,10 @@ func (w *worker) mainLoop() {
if !ok {
return false
}
uncle, exist := w.possibleUncles[hash]
uncle, exist := w.localUncles[hash]
if !exist {
uncle, exist = w.remoteUncles[hash]
}
if !exist {
return false
}
@ -651,7 +668,10 @@ func (w *worker) updateSnapshot() {
if !ok {
return false
}
uncle, exist := w.possibleUncles[hash]
uncle, exist := w.localUncles[hash]
if !exist {
uncle, exist = w.remoteUncles[hash]
}
if !exist {
return false
}
@ -859,13 +879,15 @@ func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64)
misc.ApplyDAOHardFork(env.state)
}
// Accumulate the uncles for the current block
for hash, uncle := range w.possibleUncles {
if uncle.NumberU64()+staleThreshold <= header.Number.Uint64() {
delete(w.possibleUncles, hash)
}
}
uncles := make([]*types.Header, 0, 2)
for hash, uncle := range w.possibleUncles {
commitUncles := func(blocks map[common.Hash]*types.Block) {
// Clean up stale uncle blocks first
for hash, uncle := range blocks {
if uncle.NumberU64()+staleThreshold <= header.Number.Uint64() {
delete(blocks, hash)
}
}
for hash, uncle := range blocks {
if len(uncles) == 2 {
break
}
@ -876,6 +898,10 @@ func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64)
uncles = append(uncles, uncle.Header())
}
}
}
// Prefer locally generated uncles
commitUncles(w.localUncles)
commitUncles(w.remoteUncles)
if !noempty {
// Create an empty block based on temporarily copied state for sealing in advance, without waiting for block execution to finish
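Below is a minimal, self-contained sketch of the selection order the new commitUncles closure implements: locally generated side blocks fill the (at most two) uncle slots first, and remote ones only take whatever is left. The function and map names are illustrative, not part of the diff; stale-height filtering and commitUncle validation are omitted.

package main

import "fmt"

// pickUncles mirrors the commitUncles pattern above in simplified form:
// it walks the local pool first, then the remote pool, stopping at two.
func pickUncles(local, remote map[string]struct{}) []string {
	picked := make([]string, 0, 2)
	commit := func(pool map[string]struct{}) {
		for hash := range pool {
			if len(picked) == 2 {
				break
			}
			picked = append(picked, hash)
		}
	}
	commit(local)  // locally mined side blocks are preferred
	commit(remote) // remote side blocks only fill the remaining slots
	return picked
}

func main() {
	local := map[string]struct{}{"0xaa": {}}
	remote := map[string]struct{}{"0xbb": {}, "0xcc": {}}
	fmt.Println(pickUncles(local, remote)) // always contains 0xaa, plus one remote hash
}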


@ -96,7 +96,7 @@ func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine
}
genesis := gspec.MustCommit(db)
chain, _ := core.NewBlockChain(db, nil, gspec.Config, engine, vm.Config{})
chain, _ := core.NewBlockChain(db, nil, gspec.Config, engine, vm.Config{}, nil)
txpool := core.NewTxPool(testTxPoolConfig, chainConfig, chain)
// Generate a small n-block chain and an uncle block for it
@ -133,7 +133,7 @@ func (b *testWorkerBackend) PostChainEvents(events []interface{}) {
func newTestWorker(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, blocks int) (*worker, *testWorkerBackend) {
backend := newTestWorkerBackend(t, chainConfig, engine, blocks)
backend.txPool.AddLocals(pendingTxs)
w := newWorker(chainConfig, engine, backend, new(event.TypeMux), time.Second, params.GenesisGasLimit, params.GenesisGasLimit)
w := newWorker(chainConfig, engine, backend, new(event.TypeMux), time.Second, params.GenesisGasLimit, params.GenesisGasLimit, nil)
w.setEtherbase(testBankAddress)
return w, backend
}
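For context on the new trailing parameter, here is a sketch of the kind of callback newWorker now accepts. Comparing the block's coinbase against the node's etherbase is one plausible criterion, and the etherbase variable is assumed, so treat this as illustrative rather than the exact rule wired up by the caller.

// Hypothetical isLocalBlock callback for the miner package: a side block is
// treated as "local" when it was sealed with this node's etherbase as coinbase.
isLocal := func(block *types.Block) bool {
	return block.Coinbase() == etherbase // etherbase: assumed common.Address
}
w := newWorker(chainConfig, engine, backend, new(event.TypeMux), time.Second,
	params.GenesisGasLimit, params.GenesisGasLimit, isLocal)

The test above passes nil instead, which simply routes every side block into remoteUncles.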


@ -26,7 +26,7 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/rpc"
)
@ -51,7 +51,7 @@ func (api *PrivateAdminAPI) AddPeer(url string) (bool, error) {
return false, ErrNodeStopped
}
// Try to add the url as a static peer and return
node, err := discover.ParseNode(url)
node, err := enode.ParseV4(url)
if err != nil {
return false, fmt.Errorf("invalid enode: %v", err)
}
@ -67,7 +67,7 @@ func (api *PrivateAdminAPI) RemovePeer(url string) (bool, error) {
return false, ErrNodeStopped
}
// Try to remove the url as a static peer and return
node, err := discover.ParseNode(url)
node, err := enode.ParseV4(url)
if err != nil {
return false, fmt.Errorf("invalid enode: %v", err)
}
@ -82,7 +82,7 @@ func (api *PrivateAdminAPI) AddTrustedPeer(url string) (bool, error) {
if server == nil {
return false, ErrNodeStopped
}
node, err := discover.ParseNode(url)
node, err := enode.ParseV4(url)
if err != nil {
return false, fmt.Errorf("invalid enode: %v", err)
}
@ -98,7 +98,7 @@ func (api *PrivateAdminAPI) RemoveTrustedPeer(url string) (bool, error) {
if server == nil {
return false, ErrNodeStopped
}
node, err := discover.ParseNode(url)
node, err := enode.ParseV4(url)
if err != nil {
return false, fmt.Errorf("invalid enode: %v", err)
}
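A standalone sketch of the enode.ParseV4 call that replaces discover.ParseNode in these admin handlers; the URL below uses the example node ID from the enode URL documentation and is illustrative only.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/p2p/enode"
)

func main() {
	// Same enode://<id>@<ip>:<tcp>?discport=<udp> format as before,
	// but parsing now yields an *enode.Node with accessor methods.
	url := "enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@10.3.58.6:30303?discport=30301"

	node, err := enode.ParseV4(url)
	if err != nil {
		fmt.Println("invalid enode:", err) // same error path the API handlers take
		return
	}
	fmt.Println(node.ID(), node.IP(), node.TCP(), node.UDP())
}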


@ -32,7 +32,7 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/rpc"
)
@ -336,18 +336,18 @@ func (c *Config) NodeKey() *ecdsa.PrivateKey {
}
// StaticNodes returns a list of node enode URLs configured as static nodes.
func (c *Config) StaticNodes() []*discover.Node {
func (c *Config) StaticNodes() []*enode.Node {
return c.parsePersistentNodes(c.ResolvePath(datadirStaticNodes))
}
// TrustedNodes returns a list of node enode URLs configured as trusted nodes.
func (c *Config) TrustedNodes() []*discover.Node {
func (c *Config) TrustedNodes() []*enode.Node {
return c.parsePersistentNodes(c.ResolvePath(datadirTrustedNodes))
}
// parsePersistentNodes parses a list of discovery node URLs loaded from a .json
// file from within the data directory.
func (c *Config) parsePersistentNodes(path string) []*discover.Node {
func (c *Config) parsePersistentNodes(path string) []*enode.Node {
// Short circuit if no node config is present
if c.DataDir == "" {
return nil
@ -362,12 +362,12 @@ func (c *Config) parsePersistentNodes(path string) []*discover.Node {
return nil
}
// Interpret the list as a discovery node array
var nodes []*discover.Node
var nodes []*enode.Node
for _, url := range nodelist {
if url == "" {
continue
}
node, err := discover.ParseNode(url)
node, err := enode.ParseV4(url)
if err != nil {
log.Error(fmt.Sprintf("Node URL %s: %v\n", url, err))
continue
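For reference, the files parsed here (static-nodes.json and trusted-nodes.json under the data directory) are plain JSON arrays of enode URLs. A short sketch of the expected shape and a standard-library decode, with placeholder addresses and an assumed path variable:

// Shape of <datadir>/static-nodes.json (trusted-nodes.json is identical);
// the addresses are placeholders and the node IDs are elided:
//
//   [
//     "enode://<128 hex chars>@203.0.113.4:30303",
//     "enode://<128 hex chars>@203.0.113.5:30303?discport=30301"
//   ]
//
var nodelist []string
if raw, err := ioutil.ReadFile(path); err == nil { // path: the resolved node file
	if err := json.Unmarshal(raw, &nodelist); err != nil {
		log.Error(fmt.Sprintf("Node file %s: %v", path, err))
	}
}
// Each surviving entry then goes through enode.ParseV4 exactly as in the loop above.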


@ -18,14 +18,13 @@ package p2p
import (
"container/heap"
"crypto/rand"
"errors"
"fmt"
"net"
"time"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/netutil"
)
@ -50,7 +49,7 @@ const (
// NodeDialer is used to connect to nodes in the network, typically by using
// an underlying net.Dialer but also using net.Pipe in tests
type NodeDialer interface {
Dial(*discover.Node) (net.Conn, error)
Dial(*enode.Node) (net.Conn, error)
}
// TCPDialer implements the NodeDialer interface by using a net.Dialer to
@ -60,8 +59,8 @@ type TCPDialer struct {
}
// Dial creates a TCP connection to the node
func (t TCPDialer) Dial(dest *discover.Node) (net.Conn, error) {
addr := &net.TCPAddr{IP: dest.IP, Port: int(dest.TCP)}
func (t TCPDialer) Dial(dest *enode.Node) (net.Conn, error) {
addr := &net.TCPAddr{IP: dest.IP(), Port: dest.TCP()}
return t.Dialer.Dial("tcp", addr.String())
}
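The interface change ripples into anything that fakes dialing in tests. Below is a hypothetical pipe-backed NodeDialer, written as if inside package p2p, showing the new *enode.Node parameter and accessor-style address handling; the type name is made up.

// pipeDialer is a made-up NodeDialer for tests: it ignores the wire and hands
// back one end of an in-memory pipe. Note the destination is now *enode.Node,
// and its address is read through IP()/TCP() accessors rather than fields.
type pipeDialer struct{}

func (pipeDialer) Dial(dest *enode.Node) (net.Conn, error) {
	_ = &net.TCPAddr{IP: dest.IP(), Port: dest.TCP()} // where a real dialer would connect
	c, _ := net.Pipe()
	return c, nil
}

// pipeDialer satisfies NodeDialer and could be plugged into Config.Dialer.
var _ NodeDialer = pipeDialer{}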
@ -74,22 +73,22 @@ type dialstate struct {
netrestrict *netutil.Netlist
lookupRunning bool
dialing map[discover.NodeID]connFlag
lookupBuf []*discover.Node // current discovery lookup results
randomNodes []*discover.Node // filled from Table
static map[discover.NodeID]*dialTask
dialing map[enode.ID]connFlag
lookupBuf []*enode.Node // current discovery lookup results
randomNodes []*enode.Node // filled from Table
static map[enode.ID]*dialTask
hist *dialHistory
start time.Time // time when the dialer was first used
bootnodes []*discover.Node // default dials when there are no peers
bootnodes []*enode.Node // default dials when there are no peers
}
type discoverTable interface {
Self() *discover.Node
Self() *enode.Node
Close()
Resolve(target discover.NodeID) *discover.Node
Lookup(target discover.NodeID) []*discover.Node
ReadRandomNodes([]*discover.Node) int
Resolve(*enode.Node) *enode.Node
LookupRandom() []*enode.Node
ReadRandomNodes([]*enode.Node) int
}
// the dial history remembers recent dials.
@ -97,7 +96,7 @@ type dialHistory []pastDial
// pastDial is an entry in the dial history.
type pastDial struct {
id discover.NodeID
id enode.ID
exp time.Time
}
@ -109,7 +108,7 @@ type task interface {
// fields cannot be accessed while the task is running.
type dialTask struct {
flags connFlag
dest *discover.Node
dest *enode.Node
lastResolved time.Time
resolveDelay time.Duration
}
@ -118,7 +117,7 @@ type dialTask struct {
// Only one discoverTask is active at any time.
// discoverTask.Do performs a random lookup.
type discoverTask struct {
results []*discover.Node
results []*enode.Node
}
// A waitExpireTask is generated if there are no other tasks
@ -127,15 +126,15 @@ type waitExpireTask struct {
time.Duration
}
func newDialState(static []*discover.Node, bootnodes []*discover.Node, ntab discoverTable, maxdyn int, netrestrict *netutil.Netlist) *dialstate {
func newDialState(static []*enode.Node, bootnodes []*enode.Node, ntab discoverTable, maxdyn int, netrestrict *netutil.Netlist) *dialstate {
s := &dialstate{
maxDynDials: maxdyn,
ntab: ntab,
netrestrict: netrestrict,
static: make(map[discover.NodeID]*dialTask),
dialing: make(map[discover.NodeID]connFlag),
bootnodes: make([]*discover.Node, len(bootnodes)),
randomNodes: make([]*discover.Node, maxdyn/2),
static: make(map[enode.ID]*dialTask),
dialing: make(map[enode.ID]connFlag),
bootnodes: make([]*enode.Node, len(bootnodes)),
randomNodes: make([]*enode.Node, maxdyn/2),
hist: new(dialHistory),
}
copy(s.bootnodes, bootnodes)
@ -145,32 +144,32 @@ func newDialState(static []*discover.Node, bootnodes []*discover.Node, ntab disc
return s
}
func (s *dialstate) addStatic(n *discover.Node) {
func (s *dialstate) addStatic(n *enode.Node) {
// This overwrites the task instead of updating an existing
// entry, giving users the opportunity to force a resolve operation.
s.static[n.ID] = &dialTask{flags: staticDialedConn, dest: n}
s.static[n.ID()] = &dialTask{flags: staticDialedConn, dest: n}
}
func (s *dialstate) removeStatic(n *discover.Node) {
func (s *dialstate) removeStatic(n *enode.Node) {
// This removes a task so future attempts to connect will not be made.
delete(s.static, n.ID)
delete(s.static, n.ID())
// This removes a previous dial timestamp so that the application
// can force a server to reconnect with a chosen peer immediately.
s.hist.remove(n.ID)
s.hist.remove(n.ID())
}
func (s *dialstate) newTasks(nRunning int, peers map[discover.NodeID]*Peer, now time.Time) []task {
func (s *dialstate) newTasks(nRunning int, peers map[enode.ID]*Peer, now time.Time) []task {
if s.start.IsZero() {
s.start = now
}
var newtasks []task
addDial := func(flag connFlag, n *discover.Node) bool {
addDial := func(flag connFlag, n *enode.Node) bool {
if err := s.checkDial(n, peers); err != nil {
log.Trace("Skipping dial candidate", "id", n.ID, "addr", &net.TCPAddr{IP: n.IP, Port: int(n.TCP)}, "err", err)
log.Trace("Skipping dial candidate", "id", n.ID(), "addr", &net.TCPAddr{IP: n.IP(), Port: n.TCP()}, "err", err)
return false
}
s.dialing[n.ID] = flag
s.dialing[n.ID()] = flag
newtasks = append(newtasks, &dialTask{flags: flag, dest: n})
return true
}
@ -196,8 +195,8 @@ func (s *dialstate) newTasks(nRunning int, peers map[discover.NodeID]*Peer, now
err := s.checkDial(t.dest, peers)
switch err {
case errNotWhitelisted, errSelf:
log.Warn("Removing static dial candidate", "id", t.dest.ID, "addr", &net.TCPAddr{IP: t.dest.IP, Port: int(t.dest.TCP)}, "err", err)
delete(s.static, t.dest.ID)
log.Warn("Removing static dial candidate", "id", t.dest.ID, "addr", &net.TCPAddr{IP: t.dest.IP(), Port: t.dest.TCP()}, "err", err)
delete(s.static, t.dest.ID())
case nil:
s.dialing[id] = t.flags
newtasks = append(newtasks, t)
@ -260,18 +259,18 @@ var (
errNotWhitelisted = errors.New("not contained in netrestrict whitelist")
)
func (s *dialstate) checkDial(n *discover.Node, peers map[discover.NodeID]*Peer) error {
_, dialing := s.dialing[n.ID]
func (s *dialstate) checkDial(n *enode.Node, peers map[enode.ID]*Peer) error {
_, dialing := s.dialing[n.ID()]
switch {
case dialing:
return errAlreadyDialing
case peers[n.ID] != nil:
case peers[n.ID()] != nil:
return errAlreadyConnected
case s.ntab != nil && n.ID == s.ntab.Self().ID:
case s.ntab != nil && n.ID() == s.ntab.Self().ID():
return errSelf
case s.netrestrict != nil && !s.netrestrict.Contains(n.IP):
case s.netrestrict != nil && !s.netrestrict.Contains(n.IP()):
return errNotWhitelisted
case s.hist.contains(n.ID):
case s.hist.contains(n.ID()):
return errRecentlyDialed
}
return nil
@ -280,8 +279,8 @@ func (s *dialstate) checkDial(n *discover.Node, peers map[discover.NodeID]*Peer)
func (s *dialstate) taskDone(t task, now time.Time) {
switch t := t.(type) {
case *dialTask:
s.hist.add(t.dest.ID, now.Add(dialHistoryExpiration))
delete(s.dialing, t.dest.ID)
s.hist.add(t.dest.ID(), now.Add(dialHistoryExpiration))
delete(s.dialing, t.dest.ID())
case *discoverTask:
s.lookupRunning = false
s.lookupBuf = append(s.lookupBuf, t.results...)
@ -323,7 +322,7 @@ func (t *dialTask) resolve(srv *Server) bool {
if time.Since(t.lastResolved) < t.resolveDelay {
return false
}
resolved := srv.ntab.Resolve(t.dest.ID)
resolved := srv.ntab.Resolve(t.dest)
t.lastResolved = time.Now()
if resolved == nil {
t.resolveDelay *= 2
@ -336,7 +335,7 @@ func (t *dialTask) resolve(srv *Server) bool {
// The node was found.
t.resolveDelay = initialResolveDelay
t.dest = resolved
log.Debug("Resolved node", "id", t.dest.ID, "addr", &net.TCPAddr{IP: t.dest.IP, Port: int(t.dest.TCP)})
log.Debug("Resolved node", "id", t.dest.ID, "addr", &net.TCPAddr{IP: t.dest.IP(), Port: t.dest.TCP()})
return true
}
@ -345,7 +344,7 @@ type dialError struct {
}
// dial performs the actual connection attempt.
func (t *dialTask) dial(srv *Server, dest *discover.Node) error {
func (t *dialTask) dial(srv *Server, dest *enode.Node) error {
fd, err := srv.Dialer.Dial(dest)
if err != nil {
return &dialError{err}
@ -355,7 +354,8 @@ func (t *dialTask) dial(srv *Server, dest *discover.Node) error {
}
func (t *dialTask) String() string {
return fmt.Sprintf("%v %x %v:%d", t.flags, t.dest.ID[:8], t.dest.IP, t.dest.TCP)
id := t.dest.ID()
return fmt.Sprintf("%v %x %v:%d", t.flags, id[:8], t.dest.IP(), t.dest.TCP())
}
func (t *discoverTask) Do(srv *Server) {
@ -367,9 +367,7 @@ func (t *discoverTask) Do(srv *Server) {
time.Sleep(next.Sub(now))
}
srv.lastLookup = time.Now()
var target discover.NodeID
rand.Read(target[:])
t.results = srv.ntab.Lookup(target)
t.results = srv.ntab.LookupRandom()
}
func (t *discoverTask) String() string {
@ -391,11 +389,11 @@ func (t waitExpireTask) String() string {
func (h dialHistory) min() pastDial {
return h[0]
}
func (h *dialHistory) add(id discover.NodeID, exp time.Time) {
func (h *dialHistory) add(id enode.ID, exp time.Time) {
heap.Push(h, pastDial{id, exp})
}
func (h *dialHistory) remove(id discover.NodeID) bool {
func (h *dialHistory) remove(id enode.ID) bool {
for i, v := range *h {
if v.id == id {
heap.Remove(h, i)
@ -404,7 +402,7 @@ func (h *dialHistory) remove(id discover.NodeID) bool {
}
return false
}
func (h dialHistory) contains(id discover.NodeID) bool {
func (h dialHistory) contains(id enode.ID) bool {
for _, v := range h {
if v.id == id {
return true


@ -24,7 +24,8 @@ import (
"time"
"github.com/davecgh/go-spew/spew"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/ethereum/go-ethereum/p2p/netutil"
)
@ -48,10 +49,10 @@ func runDialTest(t *testing.T, test dialtest) {
vtime time.Time
running int
)
pm := func(ps []*Peer) map[discover.NodeID]*Peer {
m := make(map[discover.NodeID]*Peer)
pm := func(ps []*Peer) map[enode.ID]*Peer {
m := make(map[enode.ID]*Peer)
for _, p := range ps {
m[p.rw.id] = p
m[p.ID()] = p
}
return m
}
@ -69,6 +70,7 @@ func runDialTest(t *testing.T, test dialtest) {
t.Errorf("round %d: new tasks mismatch:\ngot %v\nwant %v\nstate: %v\nrunning: %v\n",
i, spew.Sdump(new), spew.Sdump(round.new), spew.Sdump(test.init), spew.Sdump(running))
}
t.Log("tasks:", spew.Sdump(new))
// Time advances by 16 seconds on every round.
vtime = vtime.Add(16 * time.Second)
@ -76,13 +78,13 @@ func runDialTest(t *testing.T, test dialtest) {
}
}
type fakeTable []*discover.Node
type fakeTable []*enode.Node
func (t fakeTable) Self() *discover.Node { return new(discover.Node) }
func (t fakeTable) Self() *enode.Node { return new(enode.Node) }
func (t fakeTable) Close() {}
func (t fakeTable) Lookup(discover.NodeID) []*discover.Node { return nil }
func (t fakeTable) Resolve(discover.NodeID) *discover.Node { return nil }
func (t fakeTable) ReadRandomNodes(buf []*discover.Node) int { return copy(buf, t) }
func (t fakeTable) LookupRandom() []*enode.Node { return nil }
func (t fakeTable) Resolve(*enode.Node) *enode.Node { return nil }
func (t fakeTable) ReadRandomNodes(buf []*enode.Node) int { return copy(buf, t) }
// This test checks that dynamic dials are launched from discovery results.
func TestDialStateDynDial(t *testing.T) {
@ -92,63 +94,63 @@ func TestDialStateDynDial(t *testing.T) {
// A discovery query is launched.
{
peers: []*Peer{
{rw: &conn{flags: staticDialedConn, id: uintID(0)}},
{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
{rw: &conn{flags: dynDialedConn, id: uintID(2)}},
{rw: &conn{flags: staticDialedConn, node: newNode(uintID(0), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}},
},
new: []task{&discoverTask{}},
},
// Dynamic dials are launched when it completes.
{
peers: []*Peer{
{rw: &conn{flags: staticDialedConn, id: uintID(0)}},
{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
{rw: &conn{flags: dynDialedConn, id: uintID(2)}},
{rw: &conn{flags: staticDialedConn, node: newNode(uintID(0), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}},
},
done: []task{
&discoverTask{results: []*discover.Node{
{ID: uintID(2)}, // this one is already connected and not dialed.
{ID: uintID(3)},
{ID: uintID(4)},
{ID: uintID(5)},
{ID: uintID(6)}, // these are not tried because max dyn dials is 5
{ID: uintID(7)}, // ...
&discoverTask{results: []*enode.Node{
newNode(uintID(2), nil), // this one is already connected and not dialed.
newNode(uintID(3), nil),
newNode(uintID(4), nil),
newNode(uintID(5), nil),
newNode(uintID(6), nil), // these are not tried because max dyn dials is 5
newNode(uintID(7), nil), // ...
}},
},
new: []task{
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(3)}},
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(4)}},
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(5)}},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(3), nil)},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(4), nil)},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(5), nil)},
},
},
// Some of the dials complete but no new ones are launched yet because
// the sum of active dial count and dynamic peer count is == maxDynDials.
{
peers: []*Peer{
{rw: &conn{flags: staticDialedConn, id: uintID(0)}},
{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
{rw: &conn{flags: dynDialedConn, id: uintID(2)}},
{rw: &conn{flags: dynDialedConn, id: uintID(3)}},
{rw: &conn{flags: dynDialedConn, id: uintID(4)}},
{rw: &conn{flags: staticDialedConn, node: newNode(uintID(0), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(3), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(4), nil)}},
},
done: []task{
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(3)}},
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(4)}},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(3), nil)},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(4), nil)},
},
},
// No new dial tasks are launched in this round because
// maxDynDials has been reached.
{
peers: []*Peer{
{rw: &conn{flags: staticDialedConn, id: uintID(0)}},
{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
{rw: &conn{flags: dynDialedConn, id: uintID(2)}},
{rw: &conn{flags: dynDialedConn, id: uintID(3)}},
{rw: &conn{flags: dynDialedConn, id: uintID(4)}},
{rw: &conn{flags: dynDialedConn, id: uintID(5)}},
{rw: &conn{flags: staticDialedConn, node: newNode(uintID(0), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(3), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(4), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(5), nil)}},
},
done: []task{
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(5)}},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(5), nil)},
},
new: []task{
&waitExpireTask{Duration: 14 * time.Second},
@ -158,14 +160,14 @@ func TestDialStateDynDial(t *testing.T) {
// results from last discovery lookup are reused.
{
peers: []*Peer{
{rw: &conn{flags: staticDialedConn, id: uintID(0)}},
{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
{rw: &conn{flags: dynDialedConn, id: uintID(3)}},
{rw: &conn{flags: dynDialedConn, id: uintID(4)}},
{rw: &conn{flags: dynDialedConn, id: uintID(5)}},
{rw: &conn{flags: staticDialedConn, node: newNode(uintID(0), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(3), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(4), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(5), nil)}},
},
new: []task{
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(6)}},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(6), nil)},
},
},
// More peers (3,4) drop off and dial for ID 6 completes.
@ -173,15 +175,15 @@ func TestDialStateDynDial(t *testing.T) {
// and a new one is spawned because more candidates are needed.
{
peers: []*Peer{
{rw: &conn{flags: staticDialedConn, id: uintID(0)}},
{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
{rw: &conn{flags: dynDialedConn, id: uintID(5)}},
{rw: &conn{flags: staticDialedConn, node: newNode(uintID(0), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(5), nil)}},
},
done: []task{
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(6)}},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(6), nil)},
},
new: []task{
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(7)}},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(7), nil)},
&discoverTask{},
},
},
@ -190,23 +192,23 @@ func TestDialStateDynDial(t *testing.T) {
// no new one is started.
{
peers: []*Peer{
{rw: &conn{flags: staticDialedConn, id: uintID(0)}},
{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
{rw: &conn{flags: dynDialedConn, id: uintID(5)}},
{rw: &conn{flags: dynDialedConn, id: uintID(7)}},
{rw: &conn{flags: staticDialedConn, node: newNode(uintID(0), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(5), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(7), nil)}},
},
done: []task{
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(7)}},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(7), nil)},
},
},
// Finish the running node discovery with an empty set. A new lookup
// should be immediately requested.
{
peers: []*Peer{
{rw: &conn{flags: staticDialedConn, id: uintID(0)}},
{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
{rw: &conn{flags: dynDialedConn, id: uintID(5)}},
{rw: &conn{flags: dynDialedConn, id: uintID(7)}},
{rw: &conn{flags: staticDialedConn, node: newNode(uintID(0), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(5), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(7), nil)}},
},
done: []task{
&discoverTask{},
@ -221,17 +223,17 @@ func TestDialStateDynDial(t *testing.T) {
// Tests that bootnodes are dialed if no peers are connected, but not otherwise.
func TestDialStateDynDialBootnode(t *testing.T) {
bootnodes := []*discover.Node{
{ID: uintID(1)},
{ID: uintID(2)},
{ID: uintID(3)},
bootnodes := []*enode.Node{
newNode(uintID(1), nil),
newNode(uintID(2), nil),
newNode(uintID(3), nil),
}
table := fakeTable{
{ID: uintID(4)},
{ID: uintID(5)},
{ID: uintID(6)},
{ID: uintID(7)},
{ID: uintID(8)},
newNode(uintID(4), nil),
newNode(uintID(5), nil),
newNode(uintID(6), nil),
newNode(uintID(7), nil),
newNode(uintID(8), nil),
}
runDialTest(t, dialtest{
init: newDialState(nil, bootnodes, table, 5, nil),
@ -239,16 +241,16 @@ func TestDialStateDynDialBootnode(t *testing.T) {
// 2 dynamic dials attempted, bootnodes pending fallback interval
{
new: []task{
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(4)}},
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(5)}},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(4), nil)},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(5), nil)},
&discoverTask{},
},
},
// No dials succeed, bootnodes still pending fallback interval
{
done: []task{
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(4)}},
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(5)}},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(4), nil)},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(5), nil)},
},
},
// No dials succeed, bootnodes still pending fallback interval
@ -256,51 +258,51 @@ func TestDialStateDynDialBootnode(t *testing.T) {
// No dials succeed, 2 dynamic dials attempted and 1 bootnode too as fallback interval was reached
{
new: []task{
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(1)}},
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(4)}},
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(5)}},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(1), nil)},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(4), nil)},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(5), nil)},
},
},
// No dials succeed, 2nd bootnode is attempted
{
done: []task{
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(1)}},
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(4)}},
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(5)}},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(1), nil)},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(4), nil)},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(5), nil)},
},
new: []task{
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(2)}},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(2), nil)},
},
},
// No dials succeed, 3rd bootnode is attempted
{
done: []task{
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(2)}},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(2), nil)},
},
new: []task{
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(3)}},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(3), nil)},
},
},
// No dials succeed, 1st bootnode is attempted again, expired random nodes retried
{
done: []task{
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(3)}},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(3), nil)},
},
new: []task{
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(1)}},
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(4)}},
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(5)}},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(1), nil)},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(4), nil)},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(5), nil)},
},
},
// Random dial succeeds, no more bootnodes are attempted
{
peers: []*Peer{
{rw: &conn{flags: dynDialedConn, id: uintID(4)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(4), nil)}},
},
done: []task{
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(1)}},
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(4)}},
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(5)}},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(1), nil)},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(4), nil)},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(5), nil)},
},
},
},
@ -311,14 +313,14 @@ func TestDialStateDynDialFromTable(t *testing.T) {
// This table always returns the same random nodes
// in the order given below.
table := fakeTable{
{ID: uintID(1)},
{ID: uintID(2)},
{ID: uintID(3)},
{ID: uintID(4)},
{ID: uintID(5)},
{ID: uintID(6)},
{ID: uintID(7)},
{ID: uintID(8)},
newNode(uintID(1), nil),
newNode(uintID(2), nil),
newNode(uintID(3), nil),
newNode(uintID(4), nil),
newNode(uintID(5), nil),
newNode(uintID(6), nil),
newNode(uintID(7), nil),
newNode(uintID(8), nil),
}
runDialTest(t, dialtest{
@ -327,63 +329,63 @@ func TestDialStateDynDialFromTable(t *testing.T) {
// 5 out of 8 of the nodes returned by ReadRandomNodes are dialed.
{
new: []task{
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(1)}},
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(2)}},
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(3)}},
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(4)}},
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(5)}},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(1), nil)},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(2), nil)},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(3), nil)},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(4), nil)},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(5), nil)},
&discoverTask{},
},
},
// Dialing nodes 1,2 succeeds. Dials from the lookup are launched.
{
peers: []*Peer{
{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
{rw: &conn{flags: dynDialedConn, id: uintID(2)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}},
},
done: []task{
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(1)}},
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(2)}},
&discoverTask{results: []*discover.Node{
{ID: uintID(10)},
{ID: uintID(11)},
{ID: uintID(12)},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(1), nil)},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(2), nil)},
&discoverTask{results: []*enode.Node{
newNode(uintID(10), nil),
newNode(uintID(11), nil),
newNode(uintID(12), nil),
}},
},
new: []task{
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(10)}},
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(11)}},
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(12)}},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(10), nil)},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(11), nil)},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(12), nil)},
&discoverTask{},
},
},
// Dialing nodes 3,4,5 fails. The dials from the lookup succeed.
{
peers: []*Peer{
{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
{rw: &conn{flags: dynDialedConn, id: uintID(2)}},
{rw: &conn{flags: dynDialedConn, id: uintID(10)}},
{rw: &conn{flags: dynDialedConn, id: uintID(11)}},
{rw: &conn{flags: dynDialedConn, id: uintID(12)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(10), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(11), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(12), nil)}},
},
done: []task{
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(3)}},
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(4)}},
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(5)}},
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(10)}},
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(11)}},
&dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(12)}},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(3), nil)},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(4), nil)},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(5), nil)},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(10), nil)},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(11), nil)},
&dialTask{flags: dynDialedConn, dest: newNode(uintID(12), nil)},
},
},
// Waiting for expiry. No waitExpireTask is launched because the
// discovery query is still running.
{
peers: []*Peer{
{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
{rw: &conn{flags: dynDialedConn, id: uintID(2)}},
{rw: &conn{flags: dynDialedConn, id: uintID(10)}},
{rw: &conn{flags: dynDialedConn, id: uintID(11)}},
{rw: &conn{flags: dynDialedConn, id: uintID(12)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(10), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(11), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(12), nil)}},
},
},
// Nodes 3,4 are not tried again because only the first two
@ -391,30 +393,38 @@ func TestDialStateDynDialFromTable(t *testing.T) {
// already connected.
{
peers: []*Peer{
{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
{rw: &conn{flags: dynDialedConn, id: uintID(2)}},
{rw: &conn{flags: dynDialedConn, id: uintID(10)}},
{rw: &conn{flags: dynDialedConn, id: uintID(11)}},
{rw: &conn{flags: dynDialedConn, id: uintID(12)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(10), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(11), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(12), nil)}},
},
},
},
})
}
func newNode(id enode.ID, ip net.IP) *enode.Node {
var r enr.Record
if ip != nil {
r.Set(enr.IP(ip))
}
return enode.SignNull(&r, id)
}
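A short usage sketch of the helper above: enode.SignNull signs a throwaway record with the null identity scheme, so tests get a node with a chosen ID (and optional IP) without generating a key pair. The printed values are what the accessors would return for this example input.

n := newNode(uintID(3), net.ParseIP("127.0.0.3"))
fmt.Println(n.ID())  // 0000000300000000... — the uint32 sits in the first four bytes
fmt.Println(n.IP())  // 127.0.0.3
fmt.Println(n.TCP()) // 0 — no TCP port was put on the record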
// This test checks that candidates that do not match the netrestrict list are not dialed.
func TestDialStateNetRestrict(t *testing.T) {
// This table always returns the same random nodes
// in the order given below.
table := fakeTable{
{ID: uintID(1), IP: net.ParseIP("127.0.0.1")},
{ID: uintID(2), IP: net.ParseIP("127.0.0.2")},
{ID: uintID(3), IP: net.ParseIP("127.0.0.3")},
{ID: uintID(4), IP: net.ParseIP("127.0.0.4")},
{ID: uintID(5), IP: net.ParseIP("127.0.2.5")},
{ID: uintID(6), IP: net.ParseIP("127.0.2.6")},
{ID: uintID(7), IP: net.ParseIP("127.0.2.7")},
{ID: uintID(8), IP: net.ParseIP("127.0.2.8")},
newNode(uintID(1), net.ParseIP("127.0.0.1")),
newNode(uintID(2), net.ParseIP("127.0.0.2")),
newNode(uintID(3), net.ParseIP("127.0.0.3")),
newNode(uintID(4), net.ParseIP("127.0.0.4")),
newNode(uintID(5), net.ParseIP("127.0.2.5")),
newNode(uintID(6), net.ParseIP("127.0.2.6")),
newNode(uintID(7), net.ParseIP("127.0.2.7")),
newNode(uintID(8), net.ParseIP("127.0.2.8")),
}
restrict := new(netutil.Netlist)
restrict.Add("127.0.2.0/24")
@ -434,12 +444,12 @@ func TestDialStateNetRestrict(t *testing.T) {
// This test checks that static dials are launched.
func TestDialStateStaticDial(t *testing.T) {
wantStatic := []*discover.Node{
{ID: uintID(1)},
{ID: uintID(2)},
{ID: uintID(3)},
{ID: uintID(4)},
{ID: uintID(5)},
wantStatic := []*enode.Node{
newNode(uintID(1), nil),
newNode(uintID(2), nil),
newNode(uintID(3), nil),
newNode(uintID(4), nil),
newNode(uintID(5), nil),
}
runDialTest(t, dialtest{
@ -449,40 +459,40 @@ func TestDialStateStaticDial(t *testing.T) {
// aren't yet connected.
{
peers: []*Peer{
{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
{rw: &conn{flags: dynDialedConn, id: uintID(2)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}},
},
new: []task{
&dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(3)}},
&dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(4)}},
&dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(5)}},
&dialTask{flags: staticDialedConn, dest: newNode(uintID(3), nil)},
&dialTask{flags: staticDialedConn, dest: newNode(uintID(4), nil)},
&dialTask{flags: staticDialedConn, dest: newNode(uintID(5), nil)},
},
},
// No new tasks are launched in this round because all static
// nodes are either connected or still being dialed.
{
peers: []*Peer{
{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
{rw: &conn{flags: dynDialedConn, id: uintID(2)}},
{rw: &conn{flags: staticDialedConn, id: uintID(3)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}},
{rw: &conn{flags: staticDialedConn, node: newNode(uintID(3), nil)}},
},
done: []task{
&dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(3)}},
&dialTask{flags: staticDialedConn, dest: newNode(uintID(3), nil)},
},
},
// No new dial tasks are launched because all static
// nodes are now connected.
{
peers: []*Peer{
{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
{rw: &conn{flags: dynDialedConn, id: uintID(2)}},
{rw: &conn{flags: staticDialedConn, id: uintID(3)}},
{rw: &conn{flags: staticDialedConn, id: uintID(4)}},
{rw: &conn{flags: staticDialedConn, id: uintID(5)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}},
{rw: &conn{flags: staticDialedConn, node: newNode(uintID(3), nil)}},
{rw: &conn{flags: staticDialedConn, node: newNode(uintID(4), nil)}},
{rw: &conn{flags: staticDialedConn, node: newNode(uintID(5), nil)}},
},
done: []task{
&dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(4)}},
&dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(5)}},
&dialTask{flags: staticDialedConn, dest: newNode(uintID(4), nil)},
&dialTask{flags: staticDialedConn, dest: newNode(uintID(5), nil)},
},
new: []task{
&waitExpireTask{Duration: 14 * time.Second},
@ -491,24 +501,24 @@ func TestDialStateStaticDial(t *testing.T) {
// Wait a round for dial history to expire, no new tasks should spawn.
{
peers: []*Peer{
{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
{rw: &conn{flags: dynDialedConn, id: uintID(2)}},
{rw: &conn{flags: staticDialedConn, id: uintID(3)}},
{rw: &conn{flags: staticDialedConn, id: uintID(4)}},
{rw: &conn{flags: staticDialedConn, id: uintID(5)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}},
{rw: &conn{flags: staticDialedConn, node: newNode(uintID(3), nil)}},
{rw: &conn{flags: staticDialedConn, node: newNode(uintID(4), nil)}},
{rw: &conn{flags: staticDialedConn, node: newNode(uintID(5), nil)}},
},
},
// If a static node is dropped, it should be immediately redialed,
// irrespective of whether it was originally static or dynamic.
{
peers: []*Peer{
{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
{rw: &conn{flags: staticDialedConn, id: uintID(3)}},
{rw: &conn{flags: staticDialedConn, id: uintID(5)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
{rw: &conn{flags: staticDialedConn, node: newNode(uintID(3), nil)}},
{rw: &conn{flags: staticDialedConn, node: newNode(uintID(5), nil)}},
},
new: []task{
&dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(2)}},
&dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(4)}},
&dialTask{flags: staticDialedConn, dest: newNode(uintID(2), nil)},
&dialTask{flags: staticDialedConn, dest: newNode(uintID(4), nil)},
},
},
},
@ -517,9 +527,9 @@ func TestDialStateStaticDial(t *testing.T) {
// This test checks that static peers will be redialed immediately if they were re-added to a static list.
func TestDialStaticAfterReset(t *testing.T) {
wantStatic := []*discover.Node{
{ID: uintID(1)},
{ID: uintID(2)},
wantStatic := []*enode.Node{
newNode(uintID(1), nil),
newNode(uintID(2), nil),
}
rounds := []round{
@ -527,19 +537,19 @@ func TestDialStaticAfterReset(t *testing.T) {
{
peers: nil,
new: []task{
&dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(1)}},
&dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(2)}},
&dialTask{flags: staticDialedConn, dest: newNode(uintID(1), nil)},
&dialTask{flags: staticDialedConn, dest: newNode(uintID(2), nil)},
},
},
// No new dial tasks, all peers are connected.
{
peers: []*Peer{
{rw: &conn{flags: staticDialedConn, id: uintID(1)}},
{rw: &conn{flags: staticDialedConn, id: uintID(2)}},
{rw: &conn{flags: staticDialedConn, node: newNode(uintID(1), nil)}},
{rw: &conn{flags: staticDialedConn, node: newNode(uintID(2), nil)}},
},
done: []task{
&dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(1)}},
&dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(2)}},
&dialTask{flags: staticDialedConn, dest: newNode(uintID(1), nil)},
&dialTask{flags: staticDialedConn, dest: newNode(uintID(2), nil)},
},
new: []task{
&waitExpireTask{Duration: 30 * time.Second},
@ -561,10 +571,10 @@ func TestDialStaticAfterReset(t *testing.T) {
// This test checks that past dials are not retried for some time.
func TestDialStateCache(t *testing.T) {
wantStatic := []*discover.Node{
{ID: uintID(1)},
{ID: uintID(2)},
{ID: uintID(3)},
wantStatic := []*enode.Node{
newNode(uintID(1), nil),
newNode(uintID(2), nil),
newNode(uintID(3), nil),
}
runDialTest(t, dialtest{
@ -575,32 +585,32 @@ func TestDialStateCache(t *testing.T) {
{
peers: nil,
new: []task{
&dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(1)}},
&dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(2)}},
&dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(3)}},
&dialTask{flags: staticDialedConn, dest: newNode(uintID(1), nil)},
&dialTask{flags: staticDialedConn, dest: newNode(uintID(2), nil)},
&dialTask{flags: staticDialedConn, dest: newNode(uintID(3), nil)},
},
},
// No new tasks are launched in this round because all static
// nodes are either connected or still being dialed.
{
peers: []*Peer{
{rw: &conn{flags: staticDialedConn, id: uintID(1)}},
{rw: &conn{flags: staticDialedConn, id: uintID(2)}},
{rw: &conn{flags: staticDialedConn, node: newNode(uintID(1), nil)}},
{rw: &conn{flags: staticDialedConn, node: newNode(uintID(2), nil)}},
},
done: []task{
&dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(1)}},
&dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(2)}},
&dialTask{flags: staticDialedConn, dest: newNode(uintID(1), nil)},
&dialTask{flags: staticDialedConn, dest: newNode(uintID(2), nil)},
},
},
// A salvage task is launched to wait for node 3's history
// entry to expire.
{
peers: []*Peer{
{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
{rw: &conn{flags: dynDialedConn, id: uintID(2)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}},
},
done: []task{
&dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(3)}},
&dialTask{flags: staticDialedConn, dest: newNode(uintID(3), nil)},
},
new: []task{
&waitExpireTask{Duration: 14 * time.Second},
@ -609,18 +619,18 @@ func TestDialStateCache(t *testing.T) {
// Still waiting for node 3's entry to expire in the cache.
{
peers: []*Peer{
{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
{rw: &conn{flags: dynDialedConn, id: uintID(2)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}},
},
},
// The cache entry for node 3 has expired and is retried.
{
peers: []*Peer{
{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
{rw: &conn{flags: dynDialedConn, id: uintID(2)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
{rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}},
},
new: []task{
&dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(3)}},
&dialTask{flags: staticDialedConn, dest: newNode(uintID(3), nil)},
},
},
},
@ -628,12 +638,12 @@ func TestDialStateCache(t *testing.T) {
}
func TestDialResolve(t *testing.T) {
resolved := discover.NewNode(uintID(1), net.IP{127, 0, 55, 234}, 3333, 4444)
resolved := newNode(uintID(1), net.IP{127, 0, 55, 234})
table := &resolveMock{answer: resolved}
state := newDialState(nil, nil, table, 0, nil)
// Check that the task is generated with an incomplete ID.
dest := discover.NewNode(uintID(1), nil, 0, 0)
dest := newNode(uintID(1), nil)
state.addStatic(dest)
tasks := state.newTasks(0, nil, time.Time{})
if !reflect.DeepEqual(tasks, []task{&dialTask{flags: staticDialedConn, dest: dest}}) {
@ -644,7 +654,7 @@ func TestDialResolve(t *testing.T) {
config := Config{Dialer: TCPDialer{&net.Dialer{Deadline: time.Now().Add(-5 * time.Minute)}}}
srv := &Server{ntab: table, Config: config}
tasks[0].Do(srv)
if !reflect.DeepEqual(table.resolveCalls, []discover.NodeID{dest.ID}) {
if !reflect.DeepEqual(table.resolveCalls, []*enode.Node{dest}) {
t.Fatalf("wrong resolve calls, got %v", table.resolveCalls)
}
@ -672,25 +682,24 @@ next:
return true
}
func uintID(i uint32) discover.NodeID {
var id discover.NodeID
func uintID(i uint32) enode.ID {
var id enode.ID
binary.BigEndian.PutUint32(id[:], i)
return id
}
// implements discoverTable for TestDialResolve
type resolveMock struct {
resolveCalls []discover.NodeID
answer *discover.Node
resolveCalls []*enode.Node
answer *enode.Node
}
func (t *resolveMock) Resolve(id discover.NodeID) *discover.Node {
t.resolveCalls = append(t.resolveCalls, id)
func (t *resolveMock) Resolve(n *enode.Node) *enode.Node {
t.resolveCalls = append(t.resolveCalls, n)
return t.answer
}
func (t *resolveMock) Self() *discover.Node { return new(discover.Node) }
func (t *resolveMock) Self() *enode.Node { return new(enode.Node) }
func (t *resolveMock) Close() {}
func (t *resolveMock) Bootstrap([]*discover.Node) {}
func (t *resolveMock) Lookup(discover.NodeID) []*discover.Node { return nil }
func (t *resolveMock) ReadRandomNodes(buf []*discover.Node) int { return 0 }
func (t *resolveMock) LookupRandom() []*enode.Node { return nil }
func (t *resolveMock) ReadRandomNodes(buf []*enode.Node) int { return 0 }


@ -18,415 +18,87 @@ package discover
import (
"crypto/ecdsa"
"crypto/elliptic"
"encoding/hex"
"errors"
"fmt"
"math/big"
"math/rand"
"net"
"net/url"
"regexp"
"strconv"
"strings"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/secp256k1"
"github.com/ethereum/go-ethereum/p2p/enode"
)
const NodeIDBits = 512
// Node represents a host on the network.
// node represents a host on the network.
// The fields of Node may not be modified.
type Node struct {
IP net.IP // len 4 for IPv4 or 16 for IPv6
UDP, TCP uint16 // port numbers
ID NodeID // the node's public key
// This is a cached copy of sha3(ID) which is used for node
// distance calculations. This is part of Node in order to make it
// possible to write tests that need a node at a certain distance.
// In those tests, the content of sha will not actually correspond
// with ID.
sha common.Hash
// Time when the node was added to the table.
addedAt time.Time
type node struct {
enode.Node
addedAt time.Time // time when the node was added to the table
}
// NewNode creates a new node. It is mostly meant to be used for
// testing purposes.
func NewNode(id NodeID, ip net.IP, udpPort, tcpPort uint16) *Node {
if ipv4 := ip.To4(); ipv4 != nil {
ip = ipv4
}
return &Node{
IP: ip,
UDP: udpPort,
TCP: tcpPort,
ID: id,
sha: crypto.Keccak256Hash(id[:]),
}
type encPubkey [64]byte
func encodePubkey(key *ecdsa.PublicKey) encPubkey {
var e encPubkey
math.ReadBits(key.X, e[:len(e)/2])
math.ReadBits(key.Y, e[len(e)/2:])
return e
}
func (n *Node) addr() *net.UDPAddr {
return &net.UDPAddr{IP: n.IP, Port: int(n.UDP)}
}
// Incomplete returns true for nodes with no IP address.
func (n *Node) Incomplete() bool {
return n.IP == nil
}
// checks whether n is a valid complete node.
func (n *Node) validateComplete() error {
if n.Incomplete() {
return errors.New("incomplete node")
}
if n.UDP == 0 {
return errors.New("missing UDP port")
}
if n.TCP == 0 {
return errors.New("missing TCP port")
}
if n.IP.IsMulticast() || n.IP.IsUnspecified() {
return errors.New("invalid IP (multicast/unspecified)")
}
_, err := n.ID.Pubkey() // validate the key (on curve, etc.)
return err
}
// The string representation of a Node is a URL.
// Please see ParseNode for a description of the format.
func (n *Node) String() string {
u := url.URL{Scheme: "enode"}
if n.Incomplete() {
u.Host = fmt.Sprintf("%x", n.ID[:])
} else {
addr := net.TCPAddr{IP: n.IP, Port: int(n.TCP)}
u.User = url.User(fmt.Sprintf("%x", n.ID[:]))
u.Host = addr.String()
if n.UDP != n.TCP {
u.RawQuery = "discport=" + strconv.Itoa(int(n.UDP))
}
}
return u.String()
}
var incompleteNodeURL = regexp.MustCompile("(?i)^(?:enode://)?([0-9a-f]+)$")
// ParseNode parses a node designator.
//
// There are two basic forms of node designators
// - incomplete nodes, which only have the public key (node ID)
// - complete nodes, which contain the public key and IP/Port information
//
// For incomplete nodes, the designator must look like one of these
//
// enode://<hex node id>
// <hex node id>
//
// For complete nodes, the node ID is encoded in the username portion
// of the URL, separated from the host by an @ sign. The hostname can
// only be given as an IP address, DNS domain names are not allowed.
// The port in the host name section is the TCP listening port. If the
// TCP and UDP (discovery) ports differ, the UDP port is specified as
// query parameter "discport".
//
// In the following example, the node URL describes
// a node with IP address 10.3.58.6, TCP listening port 30303
// and UDP discovery port 30301.
//
// enode://<hex node id>@10.3.58.6:30303?discport=30301
func ParseNode(rawurl string) (*Node, error) {
if m := incompleteNodeURL.FindStringSubmatch(rawurl); m != nil {
id, err := HexID(m[1])
if err != nil {
return nil, fmt.Errorf("invalid node ID (%v)", err)
}
return NewNode(id, nil, 0, 0), nil
}
return parseComplete(rawurl)
}
func parseComplete(rawurl string) (*Node, error) {
var (
id NodeID
ip net.IP
tcpPort, udpPort uint64
)
u, err := url.Parse(rawurl)
if err != nil {
return nil, err
}
if u.Scheme != "enode" {
return nil, errors.New("invalid URL scheme, want \"enode\"")
}
// Parse the Node ID from the user portion.
if u.User == nil {
return nil, errors.New("does not contain node ID")
}
if id, err = HexID(u.User.String()); err != nil {
return nil, fmt.Errorf("invalid node ID (%v)", err)
}
// Parse the IP address.
host, port, err := net.SplitHostPort(u.Host)
if err != nil {
return nil, fmt.Errorf("invalid host: %v", err)
}
if ip = net.ParseIP(host); ip == nil {
return nil, errors.New("invalid IP address")
}
// Ensure the IP is 4 bytes long for IPv4 addresses.
if ipv4 := ip.To4(); ipv4 != nil {
ip = ipv4
}
// Parse the port numbers.
if tcpPort, err = strconv.ParseUint(port, 10, 16); err != nil {
return nil, errors.New("invalid port")
}
udpPort = tcpPort
qv := u.Query()
if qv.Get("discport") != "" {
udpPort, err = strconv.ParseUint(qv.Get("discport"), 10, 16)
if err != nil {
return nil, errors.New("invalid discport in query")
}
}
return NewNode(id, ip, uint16(udpPort), uint16(tcpPort)), nil
}
// MustParseNode parses a node URL. It panics if the URL is not valid.
func MustParseNode(rawurl string) *Node {
n, err := ParseNode(rawurl)
if err != nil {
panic("invalid node URL: " + err.Error())
}
return n
}
// MarshalText implements encoding.TextMarshaler.
func (n *Node) MarshalText() ([]byte, error) {
return []byte(n.String()), nil
}
// UnmarshalText implements encoding.TextUnmarshaler.
func (n *Node) UnmarshalText(text []byte) error {
dec, err := ParseNode(string(text))
if err == nil {
*n = *dec
}
return err
}
// NodeID is a unique identifier for each node.
// The node identifier is a marshaled elliptic curve public key.
type NodeID [NodeIDBits / 8]byte
// Bytes returns a byte slice representation of the NodeID
func (n NodeID) Bytes() []byte {
return n[:]
}
// NodeID prints as a long hexadecimal number.
func (n NodeID) String() string {
return fmt.Sprintf("%x", n[:])
}
// The Go syntax representation of a NodeID is a call to HexID.
func (n NodeID) GoString() string {
return fmt.Sprintf("discover.HexID(\"%x\")", n[:])
}
// TerminalString returns a shortened hex string for terminal logging.
func (n NodeID) TerminalString() string {
return hex.EncodeToString(n[:8])
}
// MarshalText implements the encoding.TextMarshaler interface.
func (n NodeID) MarshalText() ([]byte, error) {
return []byte(hex.EncodeToString(n[:])), nil
}
// UnmarshalText implements the encoding.TextUnmarshaler interface.
func (n *NodeID) UnmarshalText(text []byte) error {
id, err := HexID(string(text))
if err != nil {
return err
}
*n = id
return nil
}
// BytesID converts a byte slice to a NodeID
func BytesID(b []byte) (NodeID, error) {
var id NodeID
if len(b) != len(id) {
return id, fmt.Errorf("wrong length, want %d bytes", len(id))
}
copy(id[:], b)
return id, nil
}
// MustBytesID converts a byte slice to a NodeID.
// It panics if the byte slice is not a valid NodeID.
func MustBytesID(b []byte) NodeID {
id, err := BytesID(b)
if err != nil {
panic(err)
}
return id
}
// HexID converts a hex string to a NodeID.
// The string may be prefixed with 0x.
func HexID(in string) (NodeID, error) {
var id NodeID
b, err := hex.DecodeString(strings.TrimPrefix(in, "0x"))
if err != nil {
return id, err
} else if len(b) != len(id) {
return id, fmt.Errorf("wrong length, want %d hex chars", len(id)*2)
}
copy(id[:], b)
return id, nil
}
// MustHexID converts a hex string to a NodeID.
// It panics if the string is not a valid NodeID.
func MustHexID(in string) NodeID {
id, err := HexID(in)
if err != nil {
panic(err)
}
return id
}
// PubkeyID returns a marshaled representation of the given public key.
func PubkeyID(pub *ecdsa.PublicKey) NodeID {
var id NodeID
pbytes := elliptic.Marshal(pub.Curve, pub.X, pub.Y)
if len(pbytes)-1 != len(id) {
panic(fmt.Errorf("need %d bit pubkey, got %d bits", (len(id)+1)*8, len(pbytes)))
}
copy(id[:], pbytes[1:])
return id
}
// Pubkey returns the public key represented by the node ID.
// It returns an error if the ID is not a point on the curve.
func (id NodeID) Pubkey() (*ecdsa.PublicKey, error) {
func decodePubkey(e encPubkey) (*ecdsa.PublicKey, error) {
p := &ecdsa.PublicKey{Curve: crypto.S256(), X: new(big.Int), Y: new(big.Int)}
half := len(id) / 2
p.X.SetBytes(id[:half])
p.Y.SetBytes(id[half:])
half := len(e) / 2
p.X.SetBytes(e[:half])
p.Y.SetBytes(e[half:])
if !p.Curve.IsOnCurve(p.X, p.Y) {
return nil, errors.New("id is invalid secp256k1 curve point")
return nil, errors.New("invalid secp256k1 curve point")
}
return p, nil
}
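A sketch of how the new 64-byte encPubkey round-trips a key, as an in-package test might exercise it (encodePubkey and decodePubkey are unexported); crypto.GenerateKey is go-ethereum's secp256k1 key generator.

key, _ := crypto.GenerateKey()      // *ecdsa.PrivateKey on the secp256k1 curve
enc := encodePubkey(&key.PublicKey) // X||Y packed into 64 bytes

pub, err := decodePubkey(enc) // rejects byte strings that are not on the curve
if err != nil || pub.X.Cmp(key.PublicKey.X) != 0 || pub.Y.Cmp(key.PublicKey.Y) != 0 {
	panic("round-trip failed")
}
// The discovery table keys entries by enode.ID, i.e. Keccak256 of those 64 bytes:
fmt.Println(enc.id() == enode.ID(crypto.Keccak256Hash(enc[:]))) // true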
// recoverNodeID computes the public key used to sign the
func (e encPubkey) id() enode.ID {
return enode.ID(crypto.Keccak256Hash(e[:]))
}
// recoverNodeKey computes the public key used to sign the
// given hash from the signature.
func recoverNodeID(hash, sig []byte) (id NodeID, err error) {
func recoverNodeKey(hash, sig []byte) (key encPubkey, err error) {
pubkey, err := secp256k1.RecoverPubkey(hash, sig)
if err != nil {
return id, err
return key, err
}
if len(pubkey)-1 != len(id) {
return id, fmt.Errorf("recovered pubkey has %d bits, want %d bits", len(pubkey)*8, (len(id)+1)*8)
}
for i := range id {
id[i] = pubkey[i+1]
}
return id, nil
copy(key[:], pubkey[1:])
return key, nil
}
// distcmp compares the distances a->target and b->target.
// Returns -1 if a is closer to target, 1 if b is closer to target
// and 0 if they are equal.
func distcmp(target, a, b common.Hash) int {
for i := range target {
da := a[i] ^ target[i]
db := b[i] ^ target[i]
if da > db {
return 1
} else if da < db {
return -1
}
}
return 0
func wrapNode(n *enode.Node) *node {
return &node{Node: *n}
}
// table of leading zero counts for bytes [0..255]
var lzcount = [256]int{
8, 7, 6, 6, 5, 5, 5, 5,
4, 4, 4, 4, 4, 4, 4, 4,
3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3,
2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
func wrapNodes(ns []*enode.Node) []*node {
result := make([]*node, len(ns))
for i, n := range ns {
result[i] = wrapNode(n)
}
return result
}
// logdist returns the logarithmic distance between a and b, log2(a ^ b).
func logdist(a, b common.Hash) int {
lz := 0
for i := range a {
x := a[i] ^ b[i]
if x == 0 {
lz += 8
} else {
lz += lzcount[x]
break
}
}
return len(a)*8 - lz
func unwrapNode(n *node) *enode.Node {
return &n.Node
}
// hashAtDistance returns a random hash such that logdist(a, b) == n
func hashAtDistance(a common.Hash, n int) (b common.Hash) {
if n == 0 {
return a
func unwrapNodes(ns []*node) []*enode.Node {
result := make([]*enode.Node, len(ns))
for i, n := range ns {
result[i] = unwrapNode(n)
}
// flip bit at position n, fill the rest with random bits
b = a
pos := len(a) - n/8 - 1
bit := byte(0x01) << (byte(n%8) - 1)
if bit == 0 {
pos++
bit = 0x80
return result
}
b[pos] = a[pos]&^bit | ^a[pos]&bit // TODO: randomize end bits
for i := pos + 1; i < len(a); i++ {
b[i] = byte(rand.Intn(255))
func (n *node) addr() *net.UDPAddr {
return &net.UDPAddr{IP: n.IP(), Port: n.UDP()}
}
return b
func (n *node) String() string {
return n.Node.String()
}
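These node.go helpers drop the local distcmp/logdist/hashAtDistance machinery in favour of enode.DistCmp and enode.LogDist. For reference, a self-contained sketch of the same XOR log distance, using math/bits in place of the lzcount table (illustrative names, standard library only):

package main

import (
	"fmt"
	"math/bits"
)

// logDist mirrors the logdist removed above: log2(a ^ b), i.e. 256 minus
// the number of leading zero bits of the XOR of two 32-byte IDs.
func logDist(a, b [32]byte) int {
	lz := 0
	for i := range a {
		x := a[i] ^ b[i]
		if x == 0 {
			lz += 8
			continue
		}
		lz += bits.LeadingZeros8(x)
		break
	}
	return len(a)*8 - lz
}

func main() {
	var a, b [32]byte
	fmt.Println(logDist(a, a)) // 0: identical IDs
	b[0] = 0x80
	fmt.Println(logDist(a, b)) // 256: differ in the most significant bit
	b[0] = 0x01
	fmt.Println(logDist(a, b)) // 249
}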

View File

@@ -23,6 +23,7 @@
package discover
import (
"crypto/ecdsa"
crand "crypto/rand"
"encoding/binary"
"fmt"
@@ -35,6 +36,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/netutil"
)
@@ -65,49 +67,44 @@ const (
type Table struct {
mutex sync.Mutex // protects buckets, bucket content, nursery, rand
buckets [nBuckets]*bucket // index of known nodes by distance
nursery []*Node // bootstrap nodes
nursery []*node // bootstrap nodes
rand *mrand.Rand // source of randomness, periodically reseeded
ips netutil.DistinctNetSet
db *nodeDB // database of known nodes
db *enode.DB // database of known nodes
refreshReq chan chan struct{}
initDone chan struct{}
closeReq chan struct{}
closed chan struct{}
nodeAddedHook func(*Node) // for testing
nodeAddedHook func(*node) // for testing
net transport
self *Node // metadata of the local node
self *node // metadata of the local node
}
// transport is implemented by the UDP transport.
// it is an interface so we can test without opening lots of UDP
// sockets and without generating a private key.
type transport interface {
ping(NodeID, *net.UDPAddr) error
findnode(toid NodeID, addr *net.UDPAddr, target NodeID) ([]*Node, error)
ping(enode.ID, *net.UDPAddr) error
findnode(toid enode.ID, addr *net.UDPAddr, target encPubkey) ([]*node, error)
close()
}
// bucket contains nodes, ordered by their last activity. the entry
// that was most recently active is the first element in entries.
type bucket struct {
entries []*Node // live entries, sorted by time of last contact
replacements []*Node // recently seen nodes to be used if revalidation fails
entries []*node // live entries, sorted by time of last contact
replacements []*node // recently seen nodes to be used if revalidation fails
ips netutil.DistinctNetSet
}
func newTable(t transport, ourID NodeID, ourAddr *net.UDPAddr, nodeDBPath string, bootnodes []*Node) (*Table, error) {
// If no node database was given, use an in-memory one
db, err := newNodeDB(nodeDBPath, nodeDBVersion, ourID)
if err != nil {
return nil, err
}
func newTable(t transport, self *enode.Node, db *enode.DB, bootnodes []*enode.Node) (*Table, error) {
tab := &Table{
net: t,
db: db,
self: NewNode(ourID, ourAddr.IP, uint16(ourAddr.Port), uint16(ourAddr.Port)),
self: wrapNode(self),
refreshReq: make(chan chan struct{}),
initDone: make(chan struct{}),
closeReq: make(chan struct{}),
@@ -125,10 +122,7 @@ func newTable(t transport, ourID NodeID, ourAddr *net.UDPAddr, nodeDBPath string
}
tab.seedRand()
tab.loadSeedNodes()
// Start the background expiration goroutine after loading seeds so that the search for
// seed nodes also considers older nodes that would otherwise be removed by the
// expiration.
tab.db.ensureExpirer()
go tab.loop()
return tab, nil
}
@@ -143,15 +137,13 @@ func (tab *Table) seedRand() {
}
// Self returns the local node.
// The returned node should not be modified by the caller.
func (tab *Table) Self() *Node {
return tab.self
func (tab *Table) Self() *enode.Node {
return unwrapNode(tab.self)
}
// ReadRandomNodes fills the given slice with random nodes from the
// table. It will not write the same node more than once. The nodes in
// the slice are copies and can be modified by the caller.
func (tab *Table) ReadRandomNodes(buf []*Node) (n int) {
// ReadRandomNodes fills the given slice with random nodes from the table. The results
// are guaranteed to be unique for a single invocation, no node will appear twice.
func (tab *Table) ReadRandomNodes(buf []*enode.Node) (n int) {
if !tab.isInitDone() {
return 0
}
@@ -159,7 +151,7 @@ func (tab *Table) ReadRandomNodes(buf []*Node) (n int) {
defer tab.mutex.Unlock()
// Find all non-empty buckets and get a fresh slice of their entries.
var buckets [][]*Node
var buckets [][]*node
for _, b := range &tab.buckets {
if len(b.entries) > 0 {
buckets = append(buckets, b.entries)
@@ -177,7 +169,7 @@ func (tab *Table) ReadRandomNodes(buf []*Node) (n int) {
var i, j int
for ; i < len(buf); i, j = i+1, (j+1)%len(buckets) {
b := buckets[j]
buf[i] = &(*b[0])
buf[i] = unwrapNode(b[0])
buckets[j] = b[1:]
if len(b) == 1 {
buckets = append(buckets[:j], buckets[j+1:]...)
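The uniqueness guarantee in the new ReadRandomNodes comment follows from this round-robin drain: each bucket slice only ever gives up its current head. A standalone sketch of the same pattern on plain strings (hypothetical helper, standard library only):

package main

import "fmt"

// drainRoundRobin fills buf by repeatedly taking the head of each bucket in
// turn. Because a bucket only ever gives up its current head, no value can
// appear twice in the output.
func drainRoundRobin(buckets [][]string, buf []string) int {
	if len(buckets) == 0 {
		return 0
	}
	var i, j int
	for ; i < len(buf); i, j = i+1, (j+1)%len(buckets) {
		b := buckets[j]
		buf[i] = b[0]
		buckets[j] = b[1:]
		if len(b) == 1 {
			// bucket exhausted: drop it from the rotation
			buckets = append(buckets[:j], buckets[j+1:]...)
		}
		if len(buckets) == 0 {
			return i + 1
		}
	}
	return i
}

func main() {
	buckets := [][]string{{"a1", "a2"}, {"b1"}, {"c1", "c2", "c3"}}
	buf := make([]string, 5)
	n := drainRoundRobin(buckets, buf)
	fmt.Println(buf[:n]) // [a1 b1 a2 c1 c2]
}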
@@ -202,20 +194,13 @@ func (tab *Table) Close() {
// setFallbackNodes sets the initial points of contact. These nodes
// are used to connect to the network if the table is empty and there
// are no known nodes in the database.
func (tab *Table) setFallbackNodes(nodes []*Node) error {
func (tab *Table) setFallbackNodes(nodes []*enode.Node) error {
for _, n := range nodes {
if err := n.validateComplete(); err != nil {
return fmt.Errorf("bad bootstrap/fallback node %q (%v)", n, err)
if err := n.ValidateComplete(); err != nil {
return fmt.Errorf("bad bootstrap node %q: %v", n, err)
}
}
tab.nursery = make([]*Node, 0, len(nodes))
for _, n := range nodes {
cpy := *n
// Recompute cpy.sha because the node might not have been
// created by NewNode or ParseNode.
cpy.sha = crypto.Keccak256Hash(n.ID[:])
tab.nursery = append(tab.nursery, &cpy)
}
tab.nursery = wrapNodes(nodes)
return nil
}
@@ -231,47 +216,48 @@ func (tab *Table) isInitDone() bool {
// Resolve searches for a specific node with the given ID.
// It returns nil if the node could not be found.
func (tab *Table) Resolve(targetID NodeID) *Node {
func (tab *Table) Resolve(n *enode.Node) *enode.Node {
// If the node is present in the local table, no
// network interaction is required.
hash := crypto.Keccak256Hash(targetID[:])
hash := n.ID()
tab.mutex.Lock()
cl := tab.closest(hash, 1)
tab.mutex.Unlock()
if len(cl.entries) > 0 && cl.entries[0].ID == targetID {
return cl.entries[0]
if len(cl.entries) > 0 && cl.entries[0].ID() == hash {
return unwrapNode(cl.entries[0])
}
// Otherwise, do a network lookup.
result := tab.Lookup(targetID)
result := tab.lookup(encodePubkey(n.Pubkey()), true)
for _, n := range result {
if n.ID == targetID {
return n
if n.ID() == hash {
return unwrapNode(n)
}
}
return nil
}
// Lookup performs a network search for nodes close
// to the given target. It approaches the target by querying
// nodes that are closer to it on each iteration.
// The given target does not need to be an actual node
// identifier.
func (tab *Table) Lookup(targetID NodeID) []*Node {
return tab.lookup(targetID, true)
// LookupRandom finds random nodes in the network.
func (tab *Table) LookupRandom() []*enode.Node {
var target encPubkey
crand.Read(target[:])
return unwrapNodes(tab.lookup(target, true))
}
func (tab *Table) lookup(targetID NodeID, refreshIfEmpty bool) []*Node {
// lookup performs a network search for nodes close to the given target. It approaches the
// target by querying nodes that are closer to it on each iteration. The given target does
// not need to be an actual node identifier.
func (tab *Table) lookup(targetKey encPubkey, refreshIfEmpty bool) []*node {
var (
target = crypto.Keccak256Hash(targetID[:])
asked = make(map[NodeID]bool)
seen = make(map[NodeID]bool)
reply = make(chan []*Node, alpha)
target = enode.ID(crypto.Keccak256Hash(targetKey[:]))
asked = make(map[enode.ID]bool)
seen = make(map[enode.ID]bool)
reply = make(chan []*node, alpha)
pendingQueries = 0
result *nodesByDistance
)
// don't query further if we hit ourself.
// unlikely to happen often in practice.
asked[tab.self.ID] = true
asked[tab.self.ID()] = true
for {
tab.mutex.Lock()
@@ -293,10 +279,10 @@ func (tab *Table) lookup(targetID NodeID, refreshIfEmpty bool) []*Node {
// ask the alpha closest nodes that we haven't asked yet
for i := 0; i < len(result.entries) && pendingQueries < alpha; i++ {
n := result.entries[i]
if !asked[n.ID] {
asked[n.ID] = true
if !asked[n.ID()] {
asked[n.ID()] = true
pendingQueries++
go tab.findnode(n, targetID, reply)
go tab.findnode(n, targetKey, reply)
}
}
if pendingQueries == 0 {
@@ -305,8 +291,8 @@ func (tab *Table) lookup(targetID NodeID, refreshIfEmpty bool) []*Node {
}
// wait for the next reply
for _, n := range <-reply {
if n != nil && !seen[n.ID] {
seen[n.ID] = true
if n != nil && !seen[n.ID()] {
seen[n.ID()] = true
result.push(n, bucketSize)
}
}
@@ -315,19 +301,19 @@ func (tab *Table) lookup(targetID NodeID, refreshIfEmpty bool) []*Node {
return result.entries
}
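The lookup above is the classic Kademlia convergence loop: ask the alpha closest unasked candidates, merge unseen replies into a distance-sorted result of at most bucketSize entries, and stop when nothing unasked remains. A compressed, synchronous sketch of that shape on toy integer IDs (no networking, illustrative names only):

package main

import (
	"fmt"
	"sort"
)

const (
	alpha      = 3  // queries per round; concurrent in the real code, sequential here
	bucketSize = 16 // maximum size of the result set
)

// lookupSketch mirrors the shape of lookup above: each round it asks up to
// alpha of the closest not-yet-asked candidates, merges unseen replies into
// the result, and stops once every remaining candidate has been asked.
func lookupSketch(seed []int, target int, findnode func(int) []int) []int {
	dist := func(a int) int { return a ^ target } // toy stand-in for the XOR metric
	asked := map[int]bool{}
	seen := map[int]bool{}
	var result []int
	for _, n := range seed {
		if !seen[n] {
			seen[n] = true
			result = append(result, n)
		}
	}
	for {
		sort.Slice(result, func(i, j int) bool { return dist(result[i]) < dist(result[j]) })
		if len(result) > bucketSize {
			result = result[:bucketSize]
		}
		queried := 0
		for _, n := range result {
			if queried >= alpha {
				break
			}
			if asked[n] {
				continue
			}
			asked[n] = true
			queried++
			for _, r := range findnode(n) {
				if !seen[r] {
					seen[r] = true
					result = append(result, r)
				}
			}
		}
		if queried == 0 {
			return result // nothing left to ask: converged
		}
	}
}

func main() {
	// Toy network: every node knows its two neighbours on the integer line.
	findnode := func(n int) []int { return []int{n - 1, n + 1} }
	// Converges on the bucketSize ints XOR-closest to 64, i.e. 64 through 79.
	fmt.Println(lookupSketch([]int{40, 90}, 64, findnode))
}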
func (tab *Table) findnode(n *Node, targetID NodeID, reply chan<- []*Node) {
fails := tab.db.findFails(n.ID)
r, err := tab.net.findnode(n.ID, n.addr(), targetID)
func (tab *Table) findnode(n *node, targetKey encPubkey, reply chan<- []*node) {
fails := tab.db.FindFails(n.ID())
r, err := tab.net.findnode(n.ID(), n.addr(), targetKey)
if err != nil || len(r) == 0 {
fails++
tab.db.updateFindFails(n.ID, fails)
log.Trace("Findnode failed", "id", n.ID, "failcount", fails, "err", err)
tab.db.UpdateFindFails(n.ID(), fails)
log.Trace("Findnode failed", "id", n.ID(), "failcount", fails, "err", err)
if fails >= maxFindnodeFailures {
log.Trace("Too many findnode failures, dropping", "id", n.ID, "failcount", fails)
log.Trace("Too many findnode failures, dropping", "id", n.ID(), "failcount", fails)
tab.delete(n)
}
} else if fails > 0 {
tab.db.updateFindFails(n.ID, fails-1)
tab.db.UpdateFindFails(n.ID(), fails-1)
}
// Grab as many nodes as possible. Some of them might not be alive anymore, but we'll
@@ -405,7 +391,6 @@ loop:
for _, ch := range waiting {
close(ch)
}
tab.db.close()
close(tab.closed)
}
@@ -421,7 +406,11 @@ func (tab *Table) doRefresh(done chan struct{}) {
tab.loadSeedNodes()
// Run self lookup to discover new neighbor nodes.
tab.lookup(tab.self.ID, false)
// We can only do this if we have a secp256k1 identity.
var key ecdsa.PublicKey
if err := tab.self.Load((*enode.Secp256k1)(&key)); err == nil {
tab.lookup(encodePubkey(&key), false)
}
// The Kademlia paper specifies that the bucket refresh should
// perform a lookup in the least recently used bucket. We cannot
@@ -430,19 +419,19 @@ func (tab *Table) doRefresh(done chan struct{}) {
// sha3 preimage that falls into a chosen bucket.
// We perform a few lookups with a random target instead.
for i := 0; i < 3; i++ {
var target NodeID
var target encPubkey
crand.Read(target[:])
tab.lookup(target, false)
}
}
func (tab *Table) loadSeedNodes() {
seeds := tab.db.querySeeds(seedCount, seedMaxAge)
seeds := wrapNodes(tab.db.QuerySeeds(seedCount, seedMaxAge))
seeds = append(seeds, tab.nursery...)
for i := range seeds {
seed := seeds[i]
age := log.Lazy{Fn: func() interface{} { return time.Since(tab.db.lastPongReceived(seed.ID)) }}
log.Debug("Found seed node in database", "id", seed.ID, "addr", seed.addr(), "age", age)
age := log.Lazy{Fn: func() interface{} { return time.Since(tab.db.LastPongReceived(seed.ID())) }}
log.Debug("Found seed node in database", "id", seed.ID(), "addr", seed.addr(), "age", age)
tab.add(seed)
}
}
@@ -459,28 +448,28 @@ func (tab *Table) doRevalidate(done chan<- struct{}) {
}
// Ping the selected node and wait for a pong.
err := tab.net.ping(last.ID, last.addr())
err := tab.net.ping(last.ID(), last.addr())
tab.mutex.Lock()
defer tab.mutex.Unlock()
b := tab.buckets[bi]
if err == nil {
// The node responded, move it to the front.
log.Trace("Revalidated node", "b", bi, "id", last.ID)
log.Debug("Revalidated node", "b", bi, "id", last.ID())
b.bump(last)
return
}
// No reply received, pick a replacement or delete the node if there aren't
// any replacements.
if r := tab.replace(b, last); r != nil {
log.Trace("Replaced dead node", "b", bi, "id", last.ID, "ip", last.IP, "r", r.ID, "rip", r.IP)
log.Debug("Replaced dead node", "b", bi, "id", last.ID(), "ip", last.IP(), "r", r.ID(), "rip", r.IP())
} else {
log.Trace("Removed dead node", "b", bi, "id", last.ID, "ip", last.IP)
log.Debug("Removed dead node", "b", bi, "id", last.ID(), "ip", last.IP())
}
}
// nodeToRevalidate returns the last node in a random, non-empty bucket.
func (tab *Table) nodeToRevalidate() (n *Node, bi int) {
func (tab *Table) nodeToRevalidate() (n *node, bi int) {
tab.mutex.Lock()
defer tab.mutex.Unlock()
@@ -511,7 +500,7 @@ func (tab *Table) copyLiveNodes() {
for _, b := range &tab.buckets {
for _, n := range b.entries {
if now.Sub(n.addedAt) >= seedMinTableTime {
tab.db.updateNode(n)
tab.db.UpdateNode(unwrapNode(n))
}
}
}
@@ -519,7 +508,7 @@ func (tab *Table) copyLiveNodes() {
// closest returns the n nodes in the table that are closest to the
// given id. The caller must hold tab.mutex.
func (tab *Table) closest(target common.Hash, nresults int) *nodesByDistance {
func (tab *Table) closest(target enode.ID, nresults int) *nodesByDistance {
// This is a very wasteful way to find the closest nodes but
// obviously correct. I believe that tree-based buckets would make
// this easier to implement efficiently.
@@ -540,8 +529,8 @@ func (tab *Table) len() (n int) {
}
// bucket returns the bucket for the given node ID hash.
func (tab *Table) bucket(sha common.Hash) *bucket {
d := logdist(tab.self.sha, sha)
func (tab *Table) bucket(id enode.ID) *bucket {
d := enode.LogDist(tab.self.ID(), id)
if d <= bucketMinDistance {
return tab.buckets[0]
}
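A small sketch of how a log distance maps onto a bucket index here. The constants and the index formula for the non-first buckets are assumptions mirroring the unchanged remainder of this file (nBuckets = 17, bucketMinDistance = 239, index d-bucketMinDistance-1), not part of the diff above:

package main

import "fmt"

// Assumed constants; not shown in the hunk above.
const (
	nBuckets          = 17
	bucketMinDistance = 256 - nBuckets // 239
)

// bucketIndex maps a log distance in [0,256] onto a bucket index: everything
// at or below the minimum distance collapses into bucket 0, the rest spread
// over the remaining buckets.
func bucketIndex(d int) int {
	if d <= bucketMinDistance {
		return 0
	}
	return d - bucketMinDistance - 1 // assumption: matches the function's tail
}

func main() {
	for _, d := range []int{0, 239, 240, 250, 256} {
		fmt.Printf("logdist %3d -> bucket %d\n", d, bucketIndex(d))
	}
}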
@@ -553,11 +542,14 @@ func (tab *Table) bucket(sha common.Hash) *bucket {
// least recently active node in the bucket does not respond to a ping packet.
//
// The caller must not hold tab.mutex.
func (tab *Table) add(n *Node) {
func (tab *Table) add(n *node) {
if n.ID() == tab.self.ID() {
return
}
tab.mutex.Lock()
defer tab.mutex.Unlock()
b := tab.bucket(n.sha)
b := tab.bucket(n.ID())
if !tab.bumpOrAdd(b, n) {
// Node is not in table. Add it to the replacement list.
tab.addReplacement(b, n)
@@ -570,7 +562,7 @@ func (tab *Table) add(n *Node) {
// table could be filled by just sending ping repeatedly.
//
// The caller must not hold tab.mutex.
func (tab *Table) addThroughPing(n *Node) {
func (tab *Table) addThroughPing(n *node) {
if !tab.isInitDone() {
return
}
@@ -579,15 +571,15 @@ func (tab *Table) addThroughPing(n *Node) {
// stuff adds nodes to the table, appending them to the end of their corresponding
// bucket if the bucket is not full. The caller must not hold tab.mutex.
func (tab *Table) stuff(nodes []*Node) {
func (tab *Table) stuff(nodes []*node) {
tab.mutex.Lock()
defer tab.mutex.Unlock()
for _, n := range nodes {
if n.ID == tab.self.ID {
if n.ID() == tab.self.ID() {
continue // don't add self
}
b := tab.bucket(n.sha)
b := tab.bucket(n.ID())
if len(b.entries) < bucketSize {
tab.bumpOrAdd(b, n)
}
@@ -595,11 +587,11 @@ func (tab *Table) stuff(nodes []*Node) {
}
// delete removes an entry from the node table. It is used to evacuate dead nodes.
func (tab *Table) delete(node *Node) {
func (tab *Table) delete(node *node) {
tab.mutex.Lock()
defer tab.mutex.Unlock()
tab.deleteInBucket(tab.bucket(node.sha), node)
tab.deleteInBucket(tab.bucket(node.ID()), node)
}
func (tab *Table) addIP(b *bucket, ip net.IP) bool {
@@ -626,27 +618,27 @@ func (tab *Table) removeIP(b *bucket, ip net.IP) {
b.ips.Remove(ip)
}
func (tab *Table) addReplacement(b *bucket, n *Node) {
func (tab *Table) addReplacement(b *bucket, n *node) {
for _, e := range b.replacements {
if e.ID == n.ID {
if e.ID() == n.ID() {
return // already in list
}
}
if !tab.addIP(b, n.IP) {
if !tab.addIP(b, n.IP()) {
return
}
var removed *Node
var removed *node
b.replacements, removed = pushNode(b.replacements, n, maxReplacements)
if removed != nil {
tab.removeIP(b, removed.IP)
tab.removeIP(b, removed.IP())
}
}
// replace removes n from the replacement list and replaces 'last' with it if it is the
// last entry in the bucket. If 'last' isn't the last entry, it has either been replaced
// with someone else or became active.
func (tab *Table) replace(b *bucket, last *Node) *Node {
if len(b.entries) == 0 || b.entries[len(b.entries)-1].ID != last.ID {
func (tab *Table) replace(b *bucket, last *node) *node {
if len(b.entries) == 0 || b.entries[len(b.entries)-1].ID() != last.ID() {
// Entry has moved, don't replace it.
return nil
}
@@ -658,15 +650,15 @@ func (tab *Table) replace(b *bucket, last *Node) *Node {
r := b.replacements[tab.rand.Intn(len(b.replacements))]
b.replacements = deleteNode(b.replacements, r)
b.entries[len(b.entries)-1] = r
tab.removeIP(b, last.IP)
tab.removeIP(b, last.IP())
return r
}
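A package-independent sketch of the replacement invariant enforced above: the dead node must still be the last live entry, otherwise nothing happens; with no replacements available it is simply dropped (hypothetical names, strings instead of nodes):

package main

import (
	"fmt"
	"math/rand"
)

// replaceLast swaps the last element of entries for a random replacement,
// but only if the last element is still the node we expected to find there.
func replaceLast(entries, replacements []string, last string) ([]string, []string, string) {
	if len(entries) == 0 || entries[len(entries)-1] != last {
		return entries, replacements, "" // entry moved or was bumped; do nothing
	}
	if len(replacements) == 0 {
		return entries[:len(entries)-1], replacements, "" // no stand-in: just drop it
	}
	i := rand.Intn(len(replacements))
	r := replacements[i]
	replacements = append(replacements[:i], replacements[i+1:]...)
	entries[len(entries)-1] = r
	return entries, replacements, r
}

func main() {
	entries := []string{"a", "b", "dead"}
	replacements := []string{"x", "y"}
	entries, replacements, r := replaceLast(entries, replacements, "dead")
	fmt.Println(entries, replacements, r) // e.g. [a b x] [y] x
}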
// bump moves the given node to the front of the bucket entry list
// if it is contained in that list.
func (b *bucket) bump(n *Node) bool {
func (b *bucket) bump(n *node) bool {
for i := range b.entries {
if b.entries[i].ID == n.ID {
if b.entries[i].ID() == n.ID() {
// move it to the front
copy(b.entries[1:], b.entries[:i])
b.entries[0] = n
@@ -678,11 +670,11 @@ func (b *bucket) bump(n *Node) bool {
// bumpOrAdd moves n to the front of the bucket entry list or adds it if the list isn't
// full. The return value is true if n is in the bucket.
func (tab *Table) bumpOrAdd(b *bucket, n *Node) bool {
func (tab *Table) bumpOrAdd(b *bucket, n *node) bool {
if b.bump(n) {
return true
}
if len(b.entries) >= bucketSize || !tab.addIP(b, n.IP) {
if len(b.entries) >= bucketSize || !tab.addIP(b, n.IP()) {
return false
}
b.entries, _ = pushNode(b.entries, n, bucketSize)
@@ -694,13 +686,13 @@ func (tab *Table) bumpOrAdd(b *bucket, n *Node) bool {
return true
}
func (tab *Table) deleteInBucket(b *bucket, n *Node) {
func (tab *Table) deleteInBucket(b *bucket, n *node) {
b.entries = deleteNode(b.entries, n)
tab.removeIP(b, n.IP)
tab.removeIP(b, n.IP())
}
// pushNode adds n to the front of list, keeping at most max items.
func pushNode(list []*Node, n *Node, max int) ([]*Node, *Node) {
func pushNode(list []*node, n *node, max int) ([]*node, *node) {
if len(list) < max {
list = append(list, nil)
}
@@ -711,9 +703,9 @@ func pushNode(list []*Node, n *Node, max int) ([]*Node, *Node) {
}
// deleteNode removes n from list.
func deleteNode(list []*Node, n *Node) []*Node {
func deleteNode(list []*node, n *node) []*node {
for i := range list {
if list[i].ID == n.ID {
if list[i].ID() == n.ID() {
return append(list[:i], list[i+1:]...)
}
}
@@ -723,14 +715,14 @@ func deleteNode(list []*Node, n *Node) []*Node {
// nodesByDistance is a list of nodes, ordered by
// distance to target.
type nodesByDistance struct {
entries []*Node
target common.Hash
entries []*node
target enode.ID
}
// push adds the given node to the list, keeping the total size below maxElems.
func (h *nodesByDistance) push(n *Node, maxElems int) {
func (h *nodesByDistance) push(n *node, maxElems int) {
ix := sort.Search(len(h.entries), func(i int) bool {
return distcmp(h.target, h.entries[i].sha, n.sha) > 0
return enode.DistCmp(h.target, h.entries[i].ID(), n.ID()) > 0
})
if len(h.entries) < maxElems {
h.entries = append(h.entries, n)
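push is a bounded insertion step built on sort.Search. A self-contained sketch of the same idea on ascending ints (hypothetical names; when the slice is already full, whichever element ends up farthest is dropped, just as the farthest node is dropped here):

package main

import (
	"fmt"
	"sort"
)

// push inserts x into the ascending slice s while keeping at most maxElems
// entries; if s is full and x is larger than everything present, x is dropped.
func push(s []int, x, maxElems int) []int {
	ix := sort.Search(len(s), func(i int) bool { return s[i] > x })
	if len(s) < maxElems {
		s = append(s, x) // grow; x sits at the end for now
	}
	if ix < len(s) {
		// slide larger entries down and put x in its sorted slot
		copy(s[ix+1:], s[ix:])
		s[ix] = x
	}
	return s
}

func main() {
	var s []int
	for _, x := range []int{5, 1, 9, 3, 7} {
		s = push(s, x, 3)
	}
	fmt.Println(s) // [1 3 5]
}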

View File

@@ -20,7 +20,6 @@ import (
"crypto/ecdsa"
"fmt"
"math/rand"
"sync"
"net"
"reflect"
@@ -28,8 +27,9 @@
"testing/quick"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
)
func TestTable_pingReplace(t *testing.T) {
@@ -49,24 +49,26 @@ func TestTable_pingReplace(t *testing.T) {
func testPingReplace(t *testing.T, newNodeIsResponding, lastInBucketIsResponding bool) {
transport := newPingRecorder()
tab, _ := newTable(transport, NodeID{}, &net.UDPAddr{}, "", nil)
tab, db := newTestTable(transport)
defer tab.Close()
defer db.Close()
<-tab.initDone
// Fill up the sender's bucket.
pingSender := NewNode(MustHexID("a502af0f59b2aab7746995408c79e9ca312d2793cc997e44fc55eda62f0150bbb8c59a6f9269ba3a081518b62699ee807c7c19c20125ddfccca872608af9e370"), net.IP{}, 99, 99)
pingKey, _ := crypto.HexToECDSA("45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8")
pingSender := wrapNode(enode.NewV4(&pingKey.PublicKey, net.IP{}, 99, 99))
last := fillBucket(tab, pingSender)
// Add the sender as if it just pinged us. Revalidate should replace the last node in
// its bucket if it is unresponsive. Revalidate again so that an unresponsive replacement is removed as well.
transport.dead[last.ID] = !lastInBucketIsResponding
transport.dead[pingSender.ID] = !newNodeIsResponding
transport.dead[last.ID()] = !lastInBucketIsResponding
transport.dead[pingSender.ID()] = !newNodeIsResponding
tab.add(pingSender)
tab.doRevalidate(make(chan struct{}, 1))
tab.doRevalidate(make(chan struct{}, 1))
if !transport.pinged[last.ID] {
if !transport.pinged[last.ID()] {
// Oldest node in bucket is pinged to see whether it is still alive.
t.Error("table did not ping last node in bucket")
}
@@ -77,14 +79,14 @@ func testPingReplace(t *testing.T, newNodeIsResponding, lastInBucketIsResponding
if !lastInBucketIsResponding && !newNodeIsResponding {
wantSize--
}
if l := len(tab.bucket(pingSender.sha).entries); l != wantSize {
t.Errorf("wrong bucket size after add: got %d, want %d", l, wantSize)
if l := len(tab.bucket(pingSender.ID()).entries); l != wantSize {
t.Errorf("wrong bucket size after bond: got %d, want %d", l, wantSize)
}
if found := contains(tab.bucket(pingSender.sha).entries, last.ID); found != lastInBucketIsResponding {
if found := contains(tab.bucket(pingSender.ID()).entries, last.ID()); found != lastInBucketIsResponding {
t.Errorf("last entry found: %t, want: %t", found, lastInBucketIsResponding)
}
wantNewEntry := newNodeIsResponding && !lastInBucketIsResponding
if found := contains(tab.bucket(pingSender.sha).entries, pingSender.ID); found != wantNewEntry {
if found := contains(tab.bucket(pingSender.ID()).entries, pingSender.ID()); found != wantNewEntry {
t.Errorf("new entry found: %t, want: %t", found, wantNewEntry)
}
}
@@ -97,9 +99,9 @@ func TestBucket_bumpNoDuplicates(t *testing.T) {
Values: func(args []reflect.Value, rand *rand.Rand) {
// generate a random list of nodes. this will be the content of the bucket.
n := rand.Intn(bucketSize-1) + 1
nodes := make([]*Node, n)
nodes := make([]*node, n)
for i := range nodes {
nodes[i] = nodeAtDistance(common.Hash{}, 200)
nodes[i] = nodeAtDistance(enode.ID{}, 200, intIP(200))
}
args[0] = reflect.ValueOf(nodes)
// generate random bump positions.
@@ -111,8 +113,8 @@ func TestBucket_bumpNoDuplicates(t *testing.T) {
},
}
prop := func(nodes []*Node, bumps []int) (ok bool) {
b := &bucket{entries: make([]*Node, len(nodes))}
prop := func(nodes []*node, bumps []int) (ok bool) {
b := &bucket{entries: make([]*node, len(nodes))}
copy(b.entries, nodes)
for i, pos := range bumps {
b.bump(b.entries[pos])
@@ -134,12 +136,12 @@ func TestBucket_bumpNoDuplicates(t *testing.T) {
// This checks that the table-wide IP limit is applied correctly.
func TestTable_IPLimit(t *testing.T) {
transport := newPingRecorder()
tab, _ := newTable(transport, NodeID{}, &net.UDPAddr{}, "", nil)
tab, db := newTestTable(transport)
defer tab.Close()
defer db.Close()
for i := 0; i < tableIPLimit+1; i++ {
n := nodeAtDistance(tab.self.sha, i)
n.IP = net.IP{172, 0, 1, byte(i)}
n := nodeAtDistance(tab.self.ID(), i, net.IP{172, 0, 1, byte(i)})
tab.add(n)
}
if tab.len() > tableIPLimit {
@@ -147,16 +149,16 @@ func TestTable_IPLimit(t *testing.T) {
}
}
// This checks that the table-wide IP limit is applied correctly.
// This checks that the per-bucket IP limit is applied correctly.
func TestTable_BucketIPLimit(t *testing.T) {
transport := newPingRecorder()
tab, _ := newTable(transport, NodeID{}, &net.UDPAddr{}, "", nil)
tab, db := newTestTable(transport)
defer tab.Close()
defer db.Close()
d := 3
for i := 0; i < bucketIPLimit+1; i++ {
n := nodeAtDistance(tab.self.sha, d)
n.IP = net.IP{172, 0, 1, byte(i)}
n := nodeAtDistance(tab.self.ID(), d, net.IP{172, 0, 1, byte(i)})
tab.add(n)
}
if tab.len() > bucketIPLimit {
@@ -164,69 +166,18 @@ func TestTable_BucketIPLimit(t *testing.T) {
}
}
// fillBucket inserts nodes into the given bucket until
// it is full. The node's IDs don't correspond to their
// hashes.
func fillBucket(tab *Table, n *Node) (last *Node) {
ld := logdist(tab.self.sha, n.sha)
b := tab.bucket(n.sha)
for len(b.entries) < bucketSize {
b.entries = append(b.entries, nodeAtDistance(tab.self.sha, ld))
}
return b.entries[bucketSize-1]
}
// nodeAtDistance creates a node for which logdist(base, n.sha) == ld.
// The node's ID does not correspond to n.sha.
func nodeAtDistance(base common.Hash, ld int) (n *Node) {
n = new(Node)
n.sha = hashAtDistance(base, ld)
n.IP = net.IP{byte(ld), 0, 2, byte(ld)}
copy(n.ID[:], n.sha[:]) // ensure the node still has a unique ID
return n
}
type pingRecorder struct {
mu sync.Mutex
dead, pinged map[NodeID]bool
}
func newPingRecorder() *pingRecorder {
return &pingRecorder{
dead: make(map[NodeID]bool),
pinged: make(map[NodeID]bool),
}
}
func (t *pingRecorder) findnode(toid NodeID, toaddr *net.UDPAddr, target NodeID) ([]*Node, error) {
return nil, nil
}
func (t *pingRecorder) ping(toid NodeID, toaddr *net.UDPAddr) error {
t.mu.Lock()
defer t.mu.Unlock()
t.pinged[toid] = true
if t.dead[toid] {
return errTimeout
} else {
return nil
}
}
func (t *pingRecorder) close() {}
func TestTable_closest(t *testing.T) {
t.Parallel()
test := func(test *closeTest) bool {
// for any node table, Target and N
transport := newPingRecorder()
tab, _ := newTable(transport, test.Self, &net.UDPAddr{}, "", nil)
tab, db := newTestTable(transport)
defer tab.Close()
defer db.Close()
tab.stuff(test.All)
// check that doClosest(Target, N) returns nodes
// check that closest(Target, N) returns nodes
result := tab.closest(test.Target, test.N).entries
if hasDuplicates(result) {
t.Errorf("result contains duplicates")
@@ -252,15 +203,15 @@ func TestTable_closest(t *testing.T) {
// check that the result nodes have minimum distance to target.
for _, b := range tab.buckets {
for _, n := range b.entries {
if contains(result, n.ID) {
if contains(result, n.ID()) {
continue // don't run the check below for nodes in result
}
farthestResult := result[len(result)-1].sha
if distcmp(test.Target, n.sha, farthestResult) < 0 {
farthestResult := result[len(result)-1].ID()
if enode.DistCmp(test.Target, n.ID(), farthestResult) < 0 {
t.Errorf("table contains node that is closer to target but it's not in result")
t.Logf(" Target: %v", test.Target)
t.Logf(" Farthest Result: %v", farthestResult)
t.Logf(" ID: %v", n.ID)
t.Logf(" ID: %v", n.ID())
return false
}
}
@@ -277,25 +228,26 @@ func TestTable_ReadRandomNodesGetAll(t *testing.T) {
MaxCount: 200,
Rand: rand.New(rand.NewSource(time.Now().Unix())),
Values: func(args []reflect.Value, rand *rand.Rand) {
args[0] = reflect.ValueOf(make([]*Node, rand.Intn(1000)))
args[0] = reflect.ValueOf(make([]*enode.Node, rand.Intn(1000)))
},
}
test := func(buf []*Node) bool {
test := func(buf []*enode.Node) bool {
transport := newPingRecorder()
tab, _ := newTable(transport, NodeID{}, &net.UDPAddr{}, "", nil)
tab, db := newTestTable(transport)
defer tab.Close()
defer db.Close()
<-tab.initDone
for i := 0; i < len(buf); i++ {
ld := cfg.Rand.Intn(len(tab.buckets))
tab.stuff([]*Node{nodeAtDistance(tab.self.sha, ld)})
tab.stuff([]*node{nodeAtDistance(tab.self.ID(), ld, intIP(ld))})
}
gotN := tab.ReadRandomNodes(buf)
if gotN != tab.len() {
t.Errorf("wrong number of nodes, got %d, want %d", gotN, tab.len())
return false
}
if hasDuplicates(buf[:gotN]) {
if hasDuplicates(wrapNodes(buf[:gotN])) {
t.Errorf("result contains duplicates")
return false
}
@@ -307,41 +259,43 @@ func TestTable_ReadRandomNodesGetAll(t *testing.T) {
}
type closeTest struct {
Self NodeID
Target common.Hash
All []*Node
Self enode.ID
Target enode.ID
All []*node
N int
}
func (*closeTest) Generate(rand *rand.Rand, size int) reflect.Value {
t := &closeTest{
Self: gen(NodeID{}, rand).(NodeID),
Target: gen(common.Hash{}, rand).(common.Hash),
Self: gen(enode.ID{}, rand).(enode.ID),
Target: gen(enode.ID{}, rand).(enode.ID),
N: rand.Intn(bucketSize),
}
for _, id := range gen([]NodeID{}, rand).([]NodeID) {
t.All = append(t.All, &Node{ID: id})
for _, id := range gen([]enode.ID{}, rand).([]enode.ID) {
n := enode.SignNull(new(enr.Record), id)
t.All = append(t.All, wrapNode(n))
}
return reflect.ValueOf(t)
}
func TestTable_Lookup(t *testing.T) {
self := nodeAtDistance(common.Hash{}, 0)
tab, _ := newTable(lookupTestnet, self.ID, &net.UDPAddr{}, "", nil)
tab, db := newTestTable(lookupTestnet)
defer tab.Close()
defer db.Close()
// lookup on empty table returns no nodes
if results := tab.Lookup(lookupTestnet.target); len(results) > 0 {
if results := tab.lookup(lookupTestnet.target, false); len(results) > 0 {
t.Fatalf("lookup on empty table returned %d results: %#v", len(results), results)
}
// seed table with initial node (otherwise lookup will terminate immediately)
seed := NewNode(lookupTestnet.dists[256][0], net.IP{}, 256, 0)
tab.stuff([]*Node{seed})
seedKey, _ := decodePubkey(lookupTestnet.dists[256][0])
seed := wrapNode(enode.NewV4(seedKey, net.IP{}, 0, 256))
tab.stuff([]*node{seed})
results := tab.Lookup(lookupTestnet.target)
results := tab.lookup(lookupTestnet.target, true)
t.Logf("results:")
for _, e := range results {
t.Logf(" ld=%d, %x", logdist(lookupTestnet.targetSha, e.sha), e.sha[:])
t.Logf(" ld=%d, %x", enode.LogDist(lookupTestnet.targetSha, e.ID()), e.ID().Bytes())
}
if len(results) != bucketSize {
t.Errorf("wrong number of results: got %d, want %d", len(results), bucketSize)
@@ -358,235 +312,236 @@ func TestTable_Lookup(t *testing.T) {
// This is the test network for the Lookup test.
// The nodes were obtained by running testnet.mine with a random NodeID as target.
var lookupTestnet = &preminedTestnet{
target: MustHexID("166aea4f556532c6d34e8b740e5d314af7e9ac0ca79833bd751d6b665f12dfd38ec563c363b32f02aef4a80b44fd3def94612d497b99cb5f17fd24de454927ec"),
targetSha: common.Hash{0x5c, 0x94, 0x4e, 0xe5, 0x1c, 0x5a, 0xe9, 0xf7, 0x2a, 0x95, 0xec, 0xcb, 0x8a, 0xed, 0x3, 0x74, 0xee, 0xcb, 0x51, 0x19, 0xd7, 0x20, 0xcb, 0xea, 0x68, 0x13, 0xe8, 0xe0, 0xd6, 0xad, 0x92, 0x61},
dists: [257][]NodeID{
target: hexEncPubkey("166aea4f556532c6d34e8b740e5d314af7e9ac0ca79833bd751d6b665f12dfd38ec563c363b32f02aef4a80b44fd3def94612d497b99cb5f17fd24de454927ec"),
targetSha: enode.HexID("5c944ee51c5ae9f72a95eccb8aed0374eecb5119d720cbea6813e8e0d6ad9261"),
dists: [257][]encPubkey{
240: {
MustHexID("2001ad5e3e80c71b952161bc0186731cf5ffe942d24a79230a0555802296238e57ea7a32f5b6f18564eadc1c65389448481f8c9338df0a3dbd18f708cbc2cbcb"),
MustHexID("6ba3f4f57d084b6bf94cc4555b8c657e4a8ac7b7baf23c6874efc21dd1e4f56b7eb2721e07f5242d2f1d8381fc8cae535e860197c69236798ba1ad231b105794"),
hexEncPubkey("2001ad5e3e80c71b952161bc0186731cf5ffe942d24a79230a0555802296238e57ea7a32f5b6f18564eadc1c65389448481f8c9338df0a3dbd18f708cbc2cbcb"),
hexEncPubkey("6ba3f4f57d084b6bf94cc4555b8c657e4a8ac7b7baf23c6874efc21dd1e4f56b7eb2721e07f5242d2f1d8381fc8cae535e860197c69236798ba1ad231b105794"),
},
244: {
MustHexID("696ba1f0a9d55c59246f776600542a9e6432490f0cd78f8bb55a196918df2081a9b521c3c3ba48e465a75c10768807717f8f689b0b4adce00e1c75737552a178"),
hexEncPubkey("696ba1f0a9d55c59246f776600542a9e6432490f0cd78f8bb55a196918df2081a9b521c3c3ba48e465a75c10768807717f8f689b0b4adce00e1c75737552a178"),
},
246: {
MustHexID("d6d32178bdc38416f46ffb8b3ec9e4cb2cfff8d04dd7e4311a70e403cb62b10be1b447311b60b4f9ee221a8131fc2cbd45b96dd80deba68a949d467241facfa8"),
MustHexID("3ea3d04a43a3dfb5ac11cffc2319248cf41b6279659393c2f55b8a0a5fc9d12581a9d97ef5d8ff9b5abf3321a290e8f63a4f785f450dc8a672aba3ba2ff4fdab"),
MustHexID("2fc897f05ae585553e5c014effd3078f84f37f9333afacffb109f00ca8e7a3373de810a3946be971cbccdfd40249f9fe7f322118ea459ac71acca85a1ef8b7f4"),
hexEncPubkey("d6d32178bdc38416f46ffb8b3ec9e4cb2cfff8d04dd7e4311a70e403cb62b10be1b447311b60b4f9ee221a8131fc2cbd45b96dd80deba68a949d467241facfa8"),
hexEncPubkey("3ea3d04a43a3dfb5ac11cffc2319248cf41b6279659393c2f55b8a0a5fc9d12581a9d97ef5d8ff9b5abf3321a290e8f63a4f785f450dc8a672aba3ba2ff4fdab"),
hexEncPubkey("2fc897f05ae585553e5c014effd3078f84f37f9333afacffb109f00ca8e7a3373de810a3946be971cbccdfd40249f9fe7f322118ea459ac71acca85a1ef8b7f4"),
},
247: {
MustHexID("3155e1427f85f10a5c9a7755877748041af1bcd8d474ec065eb33df57a97babf54bfd2103575fa829115d224c523596b401065a97f74010610fce76382c0bf32"),
MustHexID("312c55512422cf9b8a4097e9a6ad79402e87a15ae909a4bfefa22398f03d20951933beea1e4dfa6f968212385e829f04c2d314fc2d4e255e0d3bc08792b069db"),
MustHexID("38643200b172dcfef857492156971f0e6aa2c538d8b74010f8e140811d53b98c765dd2d96126051913f44582e8c199ad7c6d6819e9a56483f637feaac9448aac"),
MustHexID("8dcab8618c3253b558d459da53bd8fa68935a719aff8b811197101a4b2b47dd2d47295286fc00cc081bb542d760717d1bdd6bec2c37cd72eca367d6dd3b9df73"),
MustHexID("8b58c6073dd98bbad4e310b97186c8f822d3a5c7d57af40e2136e88e315afd115edb27d2d0685a908cfe5aa49d0debdda6e6e63972691d6bd8c5af2d771dd2a9"),
MustHexID("2cbb718b7dc682da19652e7d9eb4fefaf7b7147d82c1c2b6805edf77b85e29fde9f6da195741467ff2638dc62c8d3e014ea5686693c15ed0080b6de90354c137"),
MustHexID("e84027696d3f12f2de30a9311afea8fbd313c2360daff52bb5fc8c7094d5295758bec3134e4eef24e4cdf377b40da344993284628a7a346eba94f74160998feb"),
MustHexID("f1357a4f04f9d33753a57c0b65ba20a5d8777abbffd04e906014491c9103fb08590e45548d37aa4bd70965e2e81ddba94f31860348df01469eec8c1829200a68"),
MustHexID("4ab0a75941b12892369b4490a1928c8ca52a9ad6d3dffbd1d8c0b907bc200fe74c022d011ec39b64808a39c0ca41f1d3254386c3e7733e7044c44259486461b6"),
MustHexID("d45150a72dc74388773e68e03133a3b5f51447fe91837d566706b3c035ee4b56f160c878c6273394daee7f56cc398985269052f22f75a8057df2fe6172765354"),
hexEncPubkey("3155e1427f85f10a5c9a7755877748041af1bcd8d474ec065eb33df57a97babf54bfd2103575fa829115d224c523596b401065a97f74010610fce76382c0bf32"),
hexEncPubkey("312c55512422cf9b8a4097e9a6ad79402e87a15ae909a4bfefa22398f03d20951933beea1e4dfa6f968212385e829f04c2d314fc2d4e255e0d3bc08792b069db"),
hexEncPubkey("38643200b172dcfef857492156971f0e6aa2c538d8b74010f8e140811d53b98c765dd2d96126051913f44582e8c199ad7c6d6819e9a56483f637feaac9448aac"),
hexEncPubkey("8dcab8618c3253b558d459da53bd8fa68935a719aff8b811197101a4b2b47dd2d47295286fc00cc081bb542d760717d1bdd6bec2c37cd72eca367d6dd3b9df73"),
hexEncPubkey("8b58c6073dd98bbad4e310b97186c8f822d3a5c7d57af40e2136e88e315afd115edb27d2d0685a908cfe5aa49d0debdda6e6e63972691d6bd8c5af2d771dd2a9"),
hexEncPubkey("2cbb718b7dc682da19652e7d9eb4fefaf7b7147d82c1c2b6805edf77b85e29fde9f6da195741467ff2638dc62c8d3e014ea5686693c15ed0080b6de90354c137"),
hexEncPubkey("e84027696d3f12f2de30a9311afea8fbd313c2360daff52bb5fc8c7094d5295758bec3134e4eef24e4cdf377b40da344993284628a7a346eba94f74160998feb"),
hexEncPubkey("f1357a4f04f9d33753a57c0b65ba20a5d8777abbffd04e906014491c9103fb08590e45548d37aa4bd70965e2e81ddba94f31860348df01469eec8c1829200a68"),
hexEncPubkey("4ab0a75941b12892369b4490a1928c8ca52a9ad6d3dffbd1d8c0b907bc200fe74c022d011ec39b64808a39c0ca41f1d3254386c3e7733e7044c44259486461b6"),
hexEncPubkey("d45150a72dc74388773e68e03133a3b5f51447fe91837d566706b3c035ee4b56f160c878c6273394daee7f56cc398985269052f22f75a8057df2fe6172765354"),
},
248: {
MustHexID("6aadfce366a189bab08ac84721567483202c86590642ea6d6a14f37ca78d82bdb6509eb7b8b2f6f63c78ae3ae1d8837c89509e41497d719b23ad53dd81574afa"),
MustHexID("a605ecfd6069a4cf4cf7f5840e5bc0ce10d23a3ac59e2aaa70c6afd5637359d2519b4524f56fc2ca180cdbebe54262f720ccaae8c1b28fd553c485675831624d"),
MustHexID("29701451cb9448ca33fc33680b44b840d815be90146eb521641efbffed0859c154e8892d3906eae9934bfacee72cd1d2fa9dd050fd18888eea49da155ab0efd2"),
MustHexID("3ed426322dee7572b08592e1e079f8b6c6b30e10e6243edd144a6a48fdbdb83df73a6e41b1143722cb82604f2203a32758610b5d9544f44a1a7921ba001528c1"),
MustHexID("b2e2a2b7fdd363572a3256e75435fab1da3b16f7891a8bd2015f30995dae665d7eabfd194d87d99d5df628b4bbc7b04e5b492c596422dd8272746c7a1b0b8e4f"),
MustHexID("0c69c9756162c593e85615b814ce57a2a8ca2df6c690b9c4e4602731b61e1531a3bbe3f7114271554427ffabea80ad8f36fa95a49fa77b675ae182c6ccac1728"),
MustHexID("8d28be21d5a97b0876442fa4f5e5387f5bf3faad0b6f13b8607b64d6e448c0991ca28dd7fe2f64eb8eadd7150bff5d5666aa6ed868b84c71311f4ba9a38569dd"),
MustHexID("2c677e1c64b9c9df6359348a7f5f33dc79e22f0177042486d125f8b6ca7f0dc756b1f672aceee5f1746bcff80aaf6f92a8dc0c9fbeb259b3fa0da060de5ab7e8"),
MustHexID("3994880f94a8678f0cd247a43f474a8af375d2a072128da1ad6cae84a244105ff85e94fc7d8496f639468de7ee998908a91c7e33ef7585fff92e984b210941a1"),
MustHexID("b45a9153c08d002a48090d15d61a7c7dad8c2af85d4ff5bd36ce23a9a11e0709bf8d56614c7b193bc028c16cbf7f20dfbcc751328b64a924995d47b41e452422"),
MustHexID("057ab3a9e53c7a84b0f3fc586117a525cdd18e313f52a67bf31798d48078e325abe5cfee3f6c2533230cb37d0549289d692a29dd400e899b8552d4b928f6f907"),
MustHexID("0ddf663d308791eb92e6bd88a2f8cb45e4f4f35bb16708a0e6ff7f1362aa6a73fedd0a1b1557fb3365e38e1b79d6918e2fae2788728b70c9ab6b51a3b94a4338"),
MustHexID("f637e07ff50cc1e3731735841c4798411059f2023abcf3885674f3e8032531b0edca50fd715df6feb489b6177c345374d64f4b07d257a7745de393a107b013a5"),
MustHexID("e24ec7c6eec094f63c7b3239f56d311ec5a3e45bc4e622a1095a65b95eea6fe13e29f3b6b7a2cbfe40906e3989f17ac834c3102dd0cadaaa26e16ee06d782b72"),
MustHexID("b76ea1a6fd6506ef6e3506a4f1f60ed6287fff8114af6141b2ff13e61242331b54082b023cfea5b3083354a4fb3f9eb8be01fb4a518f579e731a5d0707291a6b"),
MustHexID("9b53a37950ca8890ee349b325032d7b672cab7eced178d3060137b24ef6b92a43977922d5bdfb4a3409a2d80128e02f795f9dae6d7d99973ad0e23a2afb8442f"),
hexEncPubkey("6aadfce366a189bab08ac84721567483202c86590642ea6d6a14f37ca78d82bdb6509eb7b8b2f6f63c78ae3ae1d8837c89509e41497d719b23ad53dd81574afa"),
hexEncPubkey("a605ecfd6069a4cf4cf7f5840e5bc0ce10d23a3ac59e2aaa70c6afd5637359d2519b4524f56fc2ca180cdbebe54262f720ccaae8c1b28fd553c485675831624d"),
hexEncPubkey("29701451cb9448ca33fc33680b44b840d815be90146eb521641efbffed0859c154e8892d3906eae9934bfacee72cd1d2fa9dd050fd18888eea49da155ab0efd2"),
hexEncPubkey("3ed426322dee7572b08592e1e079f8b6c6b30e10e6243edd144a6a48fdbdb83df73a6e41b1143722cb82604f2203a32758610b5d9544f44a1a7921ba001528c1"),
hexEncPubkey("b2e2a2b7fdd363572a3256e75435fab1da3b16f7891a8bd2015f30995dae665d7eabfd194d87d99d5df628b4bbc7b04e5b492c596422dd8272746c7a1b0b8e4f"),
hexEncPubkey("0c69c9756162c593e85615b814ce57a2a8ca2df6c690b9c4e4602731b61e1531a3bbe3f7114271554427ffabea80ad8f36fa95a49fa77b675ae182c6ccac1728"),
hexEncPubkey("8d28be21d5a97b0876442fa4f5e5387f5bf3faad0b6f13b8607b64d6e448c0991ca28dd7fe2f64eb8eadd7150bff5d5666aa6ed868b84c71311f4ba9a38569dd"),
hexEncPubkey("2c677e1c64b9c9df6359348a7f5f33dc79e22f0177042486d125f8b6ca7f0dc756b1f672aceee5f1746bcff80aaf6f92a8dc0c9fbeb259b3fa0da060de5ab7e8"),
hexEncPubkey("3994880f94a8678f0cd247a43f474a8af375d2a072128da1ad6cae84a244105ff85e94fc7d8496f639468de7ee998908a91c7e33ef7585fff92e984b210941a1"),
hexEncPubkey("b45a9153c08d002a48090d15d61a7c7dad8c2af85d4ff5bd36ce23a9a11e0709bf8d56614c7b193bc028c16cbf7f20dfbcc751328b64a924995d47b41e452422"),
hexEncPubkey("057ab3a9e53c7a84b0f3fc586117a525cdd18e313f52a67bf31798d48078e325abe5cfee3f6c2533230cb37d0549289d692a29dd400e899b8552d4b928f6f907"),
hexEncPubkey("0ddf663d308791eb92e6bd88a2f8cb45e4f4f35bb16708a0e6ff7f1362aa6a73fedd0a1b1557fb3365e38e1b79d6918e2fae2788728b70c9ab6b51a3b94a4338"),
hexEncPubkey("f637e07ff50cc1e3731735841c4798411059f2023abcf3885674f3e8032531b0edca50fd715df6feb489b6177c345374d64f4b07d257a7745de393a107b013a5"),
hexEncPubkey("e24ec7c6eec094f63c7b3239f56d311ec5a3e45bc4e622a1095a65b95eea6fe13e29f3b6b7a2cbfe40906e3989f17ac834c3102dd0cadaaa26e16ee06d782b72"),
hexEncPubkey("b76ea1a6fd6506ef6e3506a4f1f60ed6287fff8114af6141b2ff13e61242331b54082b023cfea5b3083354a4fb3f9eb8be01fb4a518f579e731a5d0707291a6b"),
hexEncPubkey("9b53a37950ca8890ee349b325032d7b672cab7eced178d3060137b24ef6b92a43977922d5bdfb4a3409a2d80128e02f795f9dae6d7d99973ad0e23a2afb8442f"),
},
249: {
MustHexID("675ae65567c3c72c50c73bc0fd4f61f202ea5f93346ca57b551de3411ccc614fad61cb9035493af47615311b9d44ee7a161972ee4d77c28fe1ec029d01434e6a"),
MustHexID("8eb81408389da88536ae5800392b16ef5109d7ea132c18e9a82928047ecdb502693f6e4a4cdd18b54296caf561db937185731456c456c98bfe7de0baf0eaa495"),
MustHexID("2adba8b1612a541771cb93a726a38a4b88e97b18eced2593eb7daf82f05a5321ca94a72cc780c306ff21e551a932fc2c6d791e4681907b5ceab7f084c3fa2944"),
MustHexID("b1b4bfbda514d9b8f35b1c28961da5d5216fe50548f4066f69af3b7666a3b2e06eac646735e963e5c8f8138a2fb95af15b13b23ff00c6986eccc0efaa8ee6fb4"),
MustHexID("d2139281b289ad0e4d7b4243c4364f5c51aac8b60f4806135de06b12b5b369c9e43a6eb494eab860d115c15c6fbb8c5a1b0e382972e0e460af395b8385363de7"),
MustHexID("4a693df4b8fc5bdc7cec342c3ed2e228d7c5b4ab7321ddaa6cccbeb45b05a9f1d95766b4002e6d4791c2deacb8a667aadea6a700da28a3eea810a30395701bbc"),
MustHexID("ab41611195ec3c62bb8cd762ee19fb182d194fd141f4a66780efbef4b07ce916246c022b841237a3a6b512a93431157edd221e854ed2a259b72e9c5351f44d0c"),
MustHexID("68e8e26099030d10c3c703ae7045c0a48061fb88058d853b3e67880014c449d4311014da99d617d3150a20f1a3da5e34bf0f14f1c51fe4dd9d58afd222823176"),
MustHexID("3fbcacf546fb129cd70fc48de3b593ba99d3c473798bc309292aca280320e0eacc04442c914cad5c4cf6950345ba79b0d51302df88285d4e83ee3fe41339eee7"),
MustHexID("1d4a623659f7c8f80b6c3939596afdf42e78f892f682c768ad36eb7bfba402dbf97aea3a268f3badd8fe7636be216edf3d67ee1e08789ebbc7be625056bd7109"),
MustHexID("a283c474ab09da02bbc96b16317241d0627646fcc427d1fe790b76a7bf1989ced90f92101a973047ae9940c92720dffbac8eff21df8cae468a50f72f9e159417"),
MustHexID("dbf7e5ad7f87c3dfecae65d87c3039e14ed0bdc56caf00ce81931073e2e16719d746295512ff7937a15c3b03603e7c41a4f9df94fcd37bb200dd8f332767e9cb"),
MustHexID("caaa070a26692f64fc77f30d7b5ae980d419b4393a0f442b1c821ef58c0862898b0d22f74a4f8c5d83069493e3ec0b92f17dc1fe6e4cd437c1ec25039e7ce839"),
MustHexID("874cc8d1213beb65c4e0e1de38ef5d8165235893ac74ab5ea937c885eaab25c8d79dad0456e9fd3e9450626cac7e107b004478fb59842f067857f39a47cee695"),
MustHexID("d94193f236105010972f5df1b7818b55846592a0445b9cdc4eaed811b8c4c0f7c27dc8cc9837a4774656d6b34682d6d329d42b6ebb55da1d475c2474dc3dfdf4"),
MustHexID("edd9af6aded4094e9785637c28fccbd3980cbe28e2eb9a411048a23c2ace4bd6b0b7088a7817997b49a3dd05fc6929ca6c7abbb69438dbdabe65e971d2a794b2"),
hexEncPubkey("675ae65567c3c72c50c73bc0fd4f61f202ea5f93346ca57b551de3411ccc614fad61cb9035493af47615311b9d44ee7a161972ee4d77c28fe1ec029d01434e6a"),
hexEncPubkey("8eb81408389da88536ae5800392b16ef5109d7ea132c18e9a82928047ecdb502693f6e4a4cdd18b54296caf561db937185731456c456c98bfe7de0baf0eaa495"),
hexEncPubkey("2adba8b1612a541771cb93a726a38a4b88e97b18eced2593eb7daf82f05a5321ca94a72cc780c306ff21e551a932fc2c6d791e4681907b5ceab7f084c3fa2944"),
hexEncPubkey("b1b4bfbda514d9b8f35b1c28961da5d5216fe50548f4066f69af3b7666a3b2e06eac646735e963e5c8f8138a2fb95af15b13b23ff00c6986eccc0efaa8ee6fb4"),
hexEncPubkey("d2139281b289ad0e4d7b4243c4364f5c51aac8b60f4806135de06b12b5b369c9e43a6eb494eab860d115c15c6fbb8c5a1b0e382972e0e460af395b8385363de7"),
hexEncPubkey("4a693df4b8fc5bdc7cec342c3ed2e228d7c5b4ab7321ddaa6cccbeb45b05a9f1d95766b4002e6d4791c2deacb8a667aadea6a700da28a3eea810a30395701bbc"),
hexEncPubkey("ab41611195ec3c62bb8cd762ee19fb182d194fd141f4a66780efbef4b07ce916246c022b841237a3a6b512a93431157edd221e854ed2a259b72e9c5351f44d0c"),
hexEncPubkey("68e8e26099030d10c3c703ae7045c0a48061fb88058d853b3e67880014c449d4311014da99d617d3150a20f1a3da5e34bf0f14f1c51fe4dd9d58afd222823176"),
hexEncPubkey("3fbcacf546fb129cd70fc48de3b593ba99d3c473798bc309292aca280320e0eacc04442c914cad5c4cf6950345ba79b0d51302df88285d4e83ee3fe41339eee7"),
hexEncPubkey("1d4a623659f7c8f80b6c3939596afdf42e78f892f682c768ad36eb7bfba402dbf97aea3a268f3badd8fe7636be216edf3d67ee1e08789ebbc7be625056bd7109"),
hexEncPubkey("a283c474ab09da02bbc96b16317241d0627646fcc427d1fe790b76a7bf1989ced90f92101a973047ae9940c92720dffbac8eff21df8cae468a50f72f9e159417"),
hexEncPubkey("dbf7e5ad7f87c3dfecae65d87c3039e14ed0bdc56caf00ce81931073e2e16719d746295512ff7937a15c3b03603e7c41a4f9df94fcd37bb200dd8f332767e9cb"),
hexEncPubkey("caaa070a26692f64fc77f30d7b5ae980d419b4393a0f442b1c821ef58c0862898b0d22f74a4f8c5d83069493e3ec0b92f17dc1fe6e4cd437c1ec25039e7ce839"),
hexEncPubkey("874cc8d1213beb65c4e0e1de38ef5d8165235893ac74ab5ea937c885eaab25c8d79dad0456e9fd3e9450626cac7e107b004478fb59842f067857f39a47cee695"),
hexEncPubkey("d94193f236105010972f5df1b7818b55846592a0445b9cdc4eaed811b8c4c0f7c27dc8cc9837a4774656d6b34682d6d329d42b6ebb55da1d475c2474dc3dfdf4"),
hexEncPubkey("edd9af6aded4094e9785637c28fccbd3980cbe28e2eb9a411048a23c2ace4bd6b0b7088a7817997b49a3dd05fc6929ca6c7abbb69438dbdabe65e971d2a794b2"),
},
250: {
MustHexID("53a5bd1215d4ab709ae8fdc2ced50bba320bced78bd9c5dc92947fb402250c914891786db0978c898c058493f86fc68b1c5de8a5cb36336150ac7a88655b6c39"),
MustHexID("b7f79e3ab59f79262623c9ccefc8f01d682323aee56ffbe295437487e9d5acaf556a9c92e1f1c6a9601f2b9eb6b027ae1aeaebac71d61b9b78e88676efd3e1a3"),
MustHexID("d374bf7e8d7ffff69cc00bebff38ef5bc1dcb0a8d51c1a3d70e61ac6b2e2d6617109254b0ac224354dfbf79009fe4239e09020c483cc60c071e00b9238684f30"),
MustHexID("1e1eac1c9add703eb252eb991594f8f5a173255d526a855fab24ae57dc277e055bc3c7a7ae0b45d437c4f47a72d97eb7b126f2ba344ba6c0e14b2c6f27d4b1e6"),
MustHexID("ae28953f63d4bc4e706712a59319c111f5ff8f312584f65d7436b4cd3d14b217b958f8486bad666b4481fe879019fb1f767cf15b3e3e2711efc33b56d460448a"),
MustHexID("934bb1edf9c7a318b82306aca67feb3d6b434421fa275d694f0b4927afd8b1d3935b727fd4ff6e3d012e0c82f1824385174e8c6450ade59c2a43281a4b3446b6"),
MustHexID("9eef3f28f70ce19637519a0916555bf76d26de31312ac656cf9d3e379899ea44e4dd7ffcce923b4f3563f8a00489a34bd6936db0cbb4c959d32c49f017e07d05"),
MustHexID("82200872e8f871c48f1fad13daec6478298099b591bb3dbc4ef6890aa28ebee5860d07d70be62f4c0af85085a90ae8179ee8f937cf37915c67ea73e704b03ee7"),
MustHexID("6c75a5834a08476b7fc37ff3dc2011dc3ea3b36524bad7a6d319b18878fad813c0ba76d1f4555cacd3890c865438c21f0e0aed1f80e0a157e642124c69f43a11"),
MustHexID("995b873742206cb02b736e73a88580c2aacb0bd4a3c97a647b647bcab3f5e03c0e0736520a8b3600da09edf4248991fb01091ec7ff3ec7cdc8a1beae011e7aae"),
MustHexID("c773a056594b5cdef2e850d30891ff0e927c3b1b9c35cd8e8d53a1017001e237468e1ece3ae33d612ca3e6abb0a9169aa352e9dcda358e5af2ad982b577447db"),
MustHexID("2b46a5f6923f475c6be99ec6d134437a6d11f6bb4b4ac6bcd94572fa1092639d1c08aeefcb51f0912f0a060f71d4f38ee4da70ecc16010b05dd4a674aab14c3a"),
MustHexID("af6ab501366debbaa0d22e20e9688f32ef6b3b644440580fd78de4fe0e99e2a16eb5636bbae0d1c259df8ddda77b35b9a35cbc36137473e9c68fbc9d203ba842"),
MustHexID("c9f6f2dd1a941926f03f770695bda289859e85fabaf94baaae20b93e5015dc014ba41150176a36a1884adb52f405194693e63b0c464a6891cc9cc1c80d450326"),
MustHexID("5b116f0751526868a909b61a30b0c5282c37df6925cc03ddea556ef0d0602a9595fd6c14d371f8ed7d45d89918a032dcd22be4342a8793d88fdbeb3ca3d75bd7"),
MustHexID("50f3222fb6b82481c7c813b2172e1daea43e2710a443b9c2a57a12bd160dd37e20f87aa968c82ad639af6972185609d47036c0d93b4b7269b74ebd7073221c10"),
hexEncPubkey("53a5bd1215d4ab709ae8fdc2ced50bba320bced78bd9c5dc92947fb402250c914891786db0978c898c058493f86fc68b1c5de8a5cb36336150ac7a88655b6c39"),
hexEncPubkey("b7f79e3ab59f79262623c9ccefc8f01d682323aee56ffbe295437487e9d5acaf556a9c92e1f1c6a9601f2b9eb6b027ae1aeaebac71d61b9b78e88676efd3e1a3"),
hexEncPubkey("d374bf7e8d7ffff69cc00bebff38ef5bc1dcb0a8d51c1a3d70e61ac6b2e2d6617109254b0ac224354dfbf79009fe4239e09020c483cc60c071e00b9238684f30"),
hexEncPubkey("1e1eac1c9add703eb252eb991594f8f5a173255d526a855fab24ae57dc277e055bc3c7a7ae0b45d437c4f47a72d97eb7b126f2ba344ba6c0e14b2c6f27d4b1e6"),
hexEncPubkey("ae28953f63d4bc4e706712a59319c111f5ff8f312584f65d7436b4cd3d14b217b958f8486bad666b4481fe879019fb1f767cf15b3e3e2711efc33b56d460448a"),
hexEncPubkey("934bb1edf9c7a318b82306aca67feb3d6b434421fa275d694f0b4927afd8b1d3935b727fd4ff6e3d012e0c82f1824385174e8c6450ade59c2a43281a4b3446b6"),
hexEncPubkey("9eef3f28f70ce19637519a0916555bf76d26de31312ac656cf9d3e379899ea44e4dd7ffcce923b4f3563f8a00489a34bd6936db0cbb4c959d32c49f017e07d05"),
hexEncPubkey("82200872e8f871c48f1fad13daec6478298099b591bb3dbc4ef6890aa28ebee5860d07d70be62f4c0af85085a90ae8179ee8f937cf37915c67ea73e704b03ee7"),
hexEncPubkey("6c75a5834a08476b7fc37ff3dc2011dc3ea3b36524bad7a6d319b18878fad813c0ba76d1f4555cacd3890c865438c21f0e0aed1f80e0a157e642124c69f43a11"),
hexEncPubkey("995b873742206cb02b736e73a88580c2aacb0bd4a3c97a647b647bcab3f5e03c0e0736520a8b3600da09edf4248991fb01091ec7ff3ec7cdc8a1beae011e7aae"),
hexEncPubkey("c773a056594b5cdef2e850d30891ff0e927c3b1b9c35cd8e8d53a1017001e237468e1ece3ae33d612ca3e6abb0a9169aa352e9dcda358e5af2ad982b577447db"),
hexEncPubkey("2b46a5f6923f475c6be99ec6d134437a6d11f6bb4b4ac6bcd94572fa1092639d1c08aeefcb51f0912f0a060f71d4f38ee4da70ecc16010b05dd4a674aab14c3a"),
hexEncPubkey("af6ab501366debbaa0d22e20e9688f32ef6b3b644440580fd78de4fe0e99e2a16eb5636bbae0d1c259df8ddda77b35b9a35cbc36137473e9c68fbc9d203ba842"),
hexEncPubkey("c9f6f2dd1a941926f03f770695bda289859e85fabaf94baaae20b93e5015dc014ba41150176a36a1884adb52f405194693e63b0c464a6891cc9cc1c80d450326"),
hexEncPubkey("5b116f0751526868a909b61a30b0c5282c37df6925cc03ddea556ef0d0602a9595fd6c14d371f8ed7d45d89918a032dcd22be4342a8793d88fdbeb3ca3d75bd7"),
hexEncPubkey("50f3222fb6b82481c7c813b2172e1daea43e2710a443b9c2a57a12bd160dd37e20f87aa968c82ad639af6972185609d47036c0d93b4b7269b74ebd7073221c10"),
},
251: {
MustHexID("9b8f702a62d1bee67bedfeb102eca7f37fa1713e310f0d6651cc0c33ea7c5477575289ccd463e5a2574a00a676a1fdce05658ba447bb9d2827f0ba47b947e894"),
MustHexID("b97532eb83054ed054b4abdf413bb30c00e4205545c93521554dbe77faa3cfaa5bd31ef466a107b0b34a71ec97214c0c83919720142cddac93aa7a3e928d4708"),
MustHexID("2f7a5e952bfb67f2f90b8441b5fadc9ee13b1dcde3afeeb3dd64bf937f86663cc5c55d1fa83952b5422763c7df1b7f2794b751c6be316ebc0beb4942e65ab8c1"),
MustHexID("42c7483781727051a0b3660f14faf39e0d33de5e643702ae933837d036508ab856ce7eec8ec89c4929a4901256e5233a3d847d5d4893f91bcf21835a9a880fee"),
MustHexID("873bae27bf1dc854408fba94046a53ab0c965cebe1e4e12290806fc62b88deb1f4a47f9e18f78fc0e7913a0c6e42ac4d0fc3a20cea6bc65f0c8a0ca90b67521e"),
MustHexID("a7e3a370bbd761d413f8d209e85886f68bf73d5c3089b2dc6fa42aab1ecb5162635497eed95dee2417f3c9c74a3e76319625c48ead2e963c7de877cd4551f347"),
MustHexID("528597534776a40df2addaaea15b6ff832ce36b9748a265768368f657e76d58569d9f30dbb91e91cf0ae7efe8f402f17aa0ae15f5c55051ba03ba830287f4c42"),
MustHexID("461d8bd4f13c3c09031fdb84f104ed737a52f630261463ce0bdb5704259bab4b737dda688285b8444dbecaecad7f50f835190b38684ced5e90c54219e5adf1bc"),
MustHexID("6ec50c0be3fd232737090fc0111caaf0bb6b18f72be453428087a11a97fd6b52db0344acbf789a689bd4f5f50f79017ea784f8fd6fe723ad6ae675b9e3b13e21"),
MustHexID("12fc5e2f77a83fdcc727b79d8ae7fe6a516881138d3011847ee136b400fed7cfba1f53fd7a9730253c7aa4f39abeacd04f138417ba7fcb0f36cccc3514e0dab6"),
MustHexID("4fdbe75914ccd0bce02101606a1ccf3657ec963e3b3c20239d5fec87673fe446d649b4f15f1fe1a40e6cfbd446dda2d31d40bb602b1093b8fcd5f139ba0eb46a"),
MustHexID("3753668a0f6281e425ea69b52cb2d17ab97afbe6eb84cf5d25425bc5e53009388857640668fadd7c110721e6047c9697803bd8a6487b43bb343bfa32ebf24039"),
MustHexID("2e81b16346637dec4410fd88e527346145b9c0a849dbf2628049ac7dae016c8f4305649d5659ec77f1e8a0fac0db457b6080547226f06283598e3740ad94849a"),
MustHexID("802c3cc27f91c89213223d758f8d2ecd41135b357b6d698f24d811cdf113033a81c38e0bdff574a5c005b00a8c193dc2531f8c1fa05fa60acf0ab6f2858af09f"),
MustHexID("fcc9a2e1ac3667026ff16192876d1813bb75abdbf39b929a92863012fe8b1d890badea7a0de36274d5c1eb1e8f975785532c50d80fd44b1a4b692f437303393f"),
MustHexID("6d8b3efb461151dd4f6de809b62726f5b89e9b38e9ba1391967f61cde844f7528fecf821b74049207cee5a527096b31f3ad623928cd3ce51d926fa345a6b2951"),
hexEncPubkey("9b8f702a62d1bee67bedfeb102eca7f37fa1713e310f0d6651cc0c33ea7c5477575289ccd463e5a2574a00a676a1fdce05658ba447bb9d2827f0ba47b947e894"),
hexEncPubkey("b97532eb83054ed054b4abdf413bb30c00e4205545c93521554dbe77faa3cfaa5bd31ef466a107b0b34a71ec97214c0c83919720142cddac93aa7a3e928d4708"),
hexEncPubkey("2f7a5e952bfb67f2f90b8441b5fadc9ee13b1dcde3afeeb3dd64bf937f86663cc5c55d1fa83952b5422763c7df1b7f2794b751c6be316ebc0beb4942e65ab8c1"),
hexEncPubkey("42c7483781727051a0b3660f14faf39e0d33de5e643702ae933837d036508ab856ce7eec8ec89c4929a4901256e5233a3d847d5d4893f91bcf21835a9a880fee"),
hexEncPubkey("873bae27bf1dc854408fba94046a53ab0c965cebe1e4e12290806fc62b88deb1f4a47f9e18f78fc0e7913a0c6e42ac4d0fc3a20cea6bc65f0c8a0ca90b67521e"),
hexEncPubkey("a7e3a370bbd761d413f8d209e85886f68bf73d5c3089b2dc6fa42aab1ecb5162635497eed95dee2417f3c9c74a3e76319625c48ead2e963c7de877cd4551f347"),
hexEncPubkey("528597534776a40df2addaaea15b6ff832ce36b9748a265768368f657e76d58569d9f30dbb91e91cf0ae7efe8f402f17aa0ae15f5c55051ba03ba830287f4c42"),
hexEncPubkey("461d8bd4f13c3c09031fdb84f104ed737a52f630261463ce0bdb5704259bab4b737dda688285b8444dbecaecad7f50f835190b38684ced5e90c54219e5adf1bc"),
hexEncPubkey("6ec50c0be3fd232737090fc0111caaf0bb6b18f72be453428087a11a97fd6b52db0344acbf789a689bd4f5f50f79017ea784f8fd6fe723ad6ae675b9e3b13e21"),
hexEncPubkey("12fc5e2f77a83fdcc727b79d8ae7fe6a516881138d3011847ee136b400fed7cfba1f53fd7a9730253c7aa4f39abeacd04f138417ba7fcb0f36cccc3514e0dab6"),
hexEncPubkey("4fdbe75914ccd0bce02101606a1ccf3657ec963e3b3c20239d5fec87673fe446d649b4f15f1fe1a40e6cfbd446dda2d31d40bb602b1093b8fcd5f139ba0eb46a"),
hexEncPubkey("3753668a0f6281e425ea69b52cb2d17ab97afbe6eb84cf5d25425bc5e53009388857640668fadd7c110721e6047c9697803bd8a6487b43bb343bfa32ebf24039"),
hexEncPubkey("2e81b16346637dec4410fd88e527346145b9c0a849dbf2628049ac7dae016c8f4305649d5659ec77f1e8a0fac0db457b6080547226f06283598e3740ad94849a"),
hexEncPubkey("802c3cc27f91c89213223d758f8d2ecd41135b357b6d698f24d811cdf113033a81c38e0bdff574a5c005b00a8c193dc2531f8c1fa05fa60acf0ab6f2858af09f"),
hexEncPubkey("fcc9a2e1ac3667026ff16192876d1813bb75abdbf39b929a92863012fe8b1d890badea7a0de36274d5c1eb1e8f975785532c50d80fd44b1a4b692f437303393f"),
hexEncPubkey("6d8b3efb461151dd4f6de809b62726f5b89e9b38e9ba1391967f61cde844f7528fecf821b74049207cee5a527096b31f3ad623928cd3ce51d926fa345a6b2951"),
},
252: {
MustHexID("f1ae93157cc48c2075dd5868fbf523e79e06caf4b8198f352f6e526680b78ff4227263de92612f7d63472bd09367bb92a636fff16fe46ccf41614f7a72495c2a"),
MustHexID("587f482d111b239c27c0cb89b51dd5d574db8efd8de14a2e6a1400c54d4567e77c65f89c1da52841212080b91604104768350276b6682f2f961cdaf4039581c7"),
MustHexID("e3f88274d35cefdaabdf205afe0e80e936cc982b8e3e47a84ce664c413b29016a4fb4f3a3ebae0a2f79671f8323661ed462bf4390af94c424dc8ace0c301b90f"),
MustHexID("0ddc736077da9a12ba410dc5ea63cbcbe7659dd08596485b2bff3435221f82c10d263efd9af938e128464be64a178b7cd22e19f400d5802f4c9df54bf89f2619"),
MustHexID("784aa34d833c6ce63fcc1279630113c3272e82c4ae8c126c5a52a88ac461b6baeed4244e607b05dc14e5b2f41c70a273c3804dea237f14f7a1e546f6d1309d14"),
MustHexID("f253a2c354ee0e27cfcae786d726753d4ad24be6516b279a936195a487de4a59dbc296accf20463749ff55293263ed8c1b6365eecb248d44e75e9741c0d18205"),
MustHexID("a1910b80357b3ad9b4593e0628922939614dc9056a5fbf477279c8b2c1d0b4b31d89a0c09d0d41f795271d14d3360ef08a3f821e65e7e1f56c07a36afe49c7c5"),
MustHexID("f1168552c2efe541160f0909b0b4a9d6aeedcf595cdf0e9b165c97e3e197471a1ee6320e93389edfba28af6eaf10de98597ad56e7ab1b504ed762451996c3b98"),
MustHexID("b0c8e5d2c8634a7930e1a6fd082e448c6cf9d2d8b7293558b59238815a4df926c286bf297d2049f14e8296a6eb3256af614ec1812c4f2bbe807673b58bf14c8c"),
MustHexID("0fb346076396a38badc342df3679b55bd7f40a609ab103411fe45082c01f12ea016729e95914b2b5540e987ff5c9b133e85862648e7f36abdfd23100d248d234"),
MustHexID("f736e0cc83417feaa280d9483f5d4d72d1b036cd0c6d9cbdeb8ac35ceb2604780de46dddaa32a378474e1d5ccdf79b373331c30c7911ade2ae32f98832e5de1f"),
MustHexID("8b02991457602f42b38b342d3f2259ae4100c354b3843885f7e4e07bd644f64dab94bb7f38a3915f8b7f11d8e3f81c28e07a0078cf79d7397e38a7b7e0c857e2"),
MustHexID("9221d9f04a8a184993d12baa91116692bb685f887671302999d69300ad103eb2d2c75a09d8979404c6dd28f12362f58a1a43619c493d9108fd47588a23ce5824"),
MustHexID("652797801744dada833fff207d67484742eea6835d695925f3e618d71b68ec3c65bdd85b4302b2cdcb835ad3f94fd00d8da07e570b41bc0d2bcf69a8de1b3284"),
MustHexID("d84f06fe64debc4cd0625e36d19b99014b6218375262cc2209202bdbafd7dffcc4e34ce6398e182e02fd8faeed622c3e175545864902dfd3d1ac57647cddf4c6"),
MustHexID("d0ed87b294f38f1d741eb601020eeec30ac16331d05880fe27868f1e454446de367d7457b41c79e202eaf9525b029e4f1d7e17d85a55f83a557c005c68d7328a"),
hexEncPubkey("f1ae93157cc48c2075dd5868fbf523e79e06caf4b8198f352f6e526680b78ff4227263de92612f7d63472bd09367bb92a636fff16fe46ccf41614f7a72495c2a"),
hexEncPubkey("587f482d111b239c27c0cb89b51dd5d574db8efd8de14a2e6a1400c54d4567e77c65f89c1da52841212080b91604104768350276b6682f2f961cdaf4039581c7"),
hexEncPubkey("e3f88274d35cefdaabdf205afe0e80e936cc982b8e3e47a84ce664c413b29016a4fb4f3a3ebae0a2f79671f8323661ed462bf4390af94c424dc8ace0c301b90f"),
hexEncPubkey("0ddc736077da9a12ba410dc5ea63cbcbe7659dd08596485b2bff3435221f82c10d263efd9af938e128464be64a178b7cd22e19f400d5802f4c9df54bf89f2619"),
hexEncPubkey("784aa34d833c6ce63fcc1279630113c3272e82c4ae8c126c5a52a88ac461b6baeed4244e607b05dc14e5b2f41c70a273c3804dea237f14f7a1e546f6d1309d14"),
hexEncPubkey("f253a2c354ee0e27cfcae786d726753d4ad24be6516b279a936195a487de4a59dbc296accf20463749ff55293263ed8c1b6365eecb248d44e75e9741c0d18205"),
hexEncPubkey("a1910b80357b3ad9b4593e0628922939614dc9056a5fbf477279c8b2c1d0b4b31d89a0c09d0d41f795271d14d3360ef08a3f821e65e7e1f56c07a36afe49c7c5"),
hexEncPubkey("f1168552c2efe541160f0909b0b4a9d6aeedcf595cdf0e9b165c97e3e197471a1ee6320e93389edfba28af6eaf10de98597ad56e7ab1b504ed762451996c3b98"),
hexEncPubkey("b0c8e5d2c8634a7930e1a6fd082e448c6cf9d2d8b7293558b59238815a4df926c286bf297d2049f14e8296a6eb3256af614ec1812c4f2bbe807673b58bf14c8c"),
hexEncPubkey("0fb346076396a38badc342df3679b55bd7f40a609ab103411fe45082c01f12ea016729e95914b2b5540e987ff5c9b133e85862648e7f36abdfd23100d248d234"),
hexEncPubkey("f736e0cc83417feaa280d9483f5d4d72d1b036cd0c6d9cbdeb8ac35ceb2604780de46dddaa32a378474e1d5ccdf79b373331c30c7911ade2ae32f98832e5de1f"),
hexEncPubkey("8b02991457602f42b38b342d3f2259ae4100c354b3843885f7e4e07bd644f64dab94bb7f38a3915f8b7f11d8e3f81c28e07a0078cf79d7397e38a7b7e0c857e2"),
hexEncPubkey("9221d9f04a8a184993d12baa91116692bb685f887671302999d69300ad103eb2d2c75a09d8979404c6dd28f12362f58a1a43619c493d9108fd47588a23ce5824"),
hexEncPubkey("652797801744dada833fff207d67484742eea6835d695925f3e618d71b68ec3c65bdd85b4302b2cdcb835ad3f94fd00d8da07e570b41bc0d2bcf69a8de1b3284"),
hexEncPubkey("d84f06fe64debc4cd0625e36d19b99014b6218375262cc2209202bdbafd7dffcc4e34ce6398e182e02fd8faeed622c3e175545864902dfd3d1ac57647cddf4c6"),
hexEncPubkey("d0ed87b294f38f1d741eb601020eeec30ac16331d05880fe27868f1e454446de367d7457b41c79e202eaf9525b029e4f1d7e17d85a55f83a557c005c68d7328a"),
},
253: {
MustHexID("ad4485e386e3cc7c7310366a7c38fb810b8896c0d52e55944bfd320ca294e7912d6c53c0a0cf85e7ce226e92491d60430e86f8f15cda0161ed71893fb4a9e3a1"),
MustHexID("36d0e7e5b7734f98c6183eeeb8ac5130a85e910a925311a19c4941b1290f945d4fc3996b12ef4966960b6fa0fb29b1604f83a0f81bd5fd6398d2e1a22e46af0c"),
MustHexID("7d307d8acb4a561afa23bdf0bd945d35c90245e26345ec3a1f9f7df354222a7cdcb81339c9ed6744526c27a1a0c8d10857e98df942fa433602facac71ac68a31"),
MustHexID("d97bf55f88c83fae36232661af115d66ca600fc4bd6d1fb35ff9bb4dad674c02cf8c8d05f317525b5522250db58bb1ecafb7157392bf5aa61b178c61f098d995"),
MustHexID("7045d678f1f9eb7a4613764d17bd5698796494d0bf977b16f2dbc272b8a0f7858a60805c022fc3d1fe4f31c37e63cdaca0416c0d053ef48a815f8b19121605e0"),
MustHexID("14e1f21418d445748de2a95cd9a8c3b15b506f86a0acabd8af44bb968ce39885b19c8822af61b3dd58a34d1f265baec30e3ae56149dc7d2aa4a538f7319f69c8"),
MustHexID("b9453d78281b66a4eac95a1546017111eaaa5f92a65d0de10b1122940e92b319728a24edf4dec6acc412321b1c95266d39c7b3a5d265c629c3e49a65fb022c09"),
MustHexID("e8a49248419e3824a00d86af422f22f7366e2d4922b304b7169937616a01d9d6fa5abf5cc01061a352dc866f48e1fa2240dbb453d872b1d7be62bdfc1d5e248c"),
MustHexID("bebcff24b52362f30e0589ee573ce2d86f073d58d18e6852a592fa86ceb1a6c9b96d7fb9ec7ed1ed98a51b6743039e780279f6bb49d0a04327ac7a182d9a56f6"),
MustHexID("d0835e5a4291db249b8d2fca9f503049988180c7d247bedaa2cf3a1bad0a76709360a85d4f9a1423b2cbc82bb4d94b47c0cde20afc430224834c49fe312a9ae3"),
MustHexID("6b087fe2a2da5e4f0b0f4777598a4a7fb66bf77dbd5bfc44e8a7eaa432ab585a6e226891f56a7d4f5ed11a7c57b90f1661bba1059590ca4267a35801c2802913"),
MustHexID("d901e5bde52d1a0f4ddf010a686a53974cdae4ebe5c6551b3c37d6b6d635d38d5b0e5f80bc0186a2c7809dbf3a42870dd09643e68d32db896c6da8ba734579e7"),
MustHexID("96419fb80efae4b674402bb969ebaab86c1274f29a83a311e24516d36cdf148fe21754d46c97688cdd7468f24c08b13e4727c29263393638a3b37b99ff60ebca"),
MustHexID("7b9c1889ae916a5d5abcdfb0aaedcc9c6f9eb1c1a4f68d0c2d034fe79ac610ce917c3abc670744150fa891bfcd8ab14fed6983fca964de920aa393fa7b326748"),
MustHexID("7a369b2b8962cc4c65900be046482fbf7c14f98a135bbbae25152c82ad168fb2097b3d1429197cf46d3ce9fdeb64808f908a489cc6019725db040060fdfe5405"),
MustHexID("47bcae48288da5ecc7f5058dfa07cf14d89d06d6e449cb946e237aa6652ea050d9f5a24a65efdc0013ccf232bf88670979eddef249b054f63f38da9d7796dbd8"),
hexEncPubkey("ad4485e386e3cc7c7310366a7c38fb810b8896c0d52e55944bfd320ca294e7912d6c53c0a0cf85e7ce226e92491d60430e86f8f15cda0161ed71893fb4a9e3a1"),
hexEncPubkey("36d0e7e5b7734f98c6183eeeb8ac5130a85e910a925311a19c4941b1290f945d4fc3996b12ef4966960b6fa0fb29b1604f83a0f81bd5fd6398d2e1a22e46af0c"),
hexEncPubkey("7d307d8acb4a561afa23bdf0bd945d35c90245e26345ec3a1f9f7df354222a7cdcb81339c9ed6744526c27a1a0c8d10857e98df942fa433602facac71ac68a31"),
hexEncPubkey("d97bf55f88c83fae36232661af115d66ca600fc4bd6d1fb35ff9bb4dad674c02cf8c8d05f317525b5522250db58bb1ecafb7157392bf5aa61b178c61f098d995"),
hexEncPubkey("7045d678f1f9eb7a4613764d17bd5698796494d0bf977b16f2dbc272b8a0f7858a60805c022fc3d1fe4f31c37e63cdaca0416c0d053ef48a815f8b19121605e0"),
hexEncPubkey("14e1f21418d445748de2a95cd9a8c3b15b506f86a0acabd8af44bb968ce39885b19c8822af61b3dd58a34d1f265baec30e3ae56149dc7d2aa4a538f7319f69c8"),
hexEncPubkey("b9453d78281b66a4eac95a1546017111eaaa5f92a65d0de10b1122940e92b319728a24edf4dec6acc412321b1c95266d39c7b3a5d265c629c3e49a65fb022c09"),
hexEncPubkey("e8a49248419e3824a00d86af422f22f7366e2d4922b304b7169937616a01d9d6fa5abf5cc01061a352dc866f48e1fa2240dbb453d872b1d7be62bdfc1d5e248c"),
hexEncPubkey("bebcff24b52362f30e0589ee573ce2d86f073d58d18e6852a592fa86ceb1a6c9b96d7fb9ec7ed1ed98a51b6743039e780279f6bb49d0a04327ac7a182d9a56f6"),
hexEncPubkey("d0835e5a4291db249b8d2fca9f503049988180c7d247bedaa2cf3a1bad0a76709360a85d4f9a1423b2cbc82bb4d94b47c0cde20afc430224834c49fe312a9ae3"),
hexEncPubkey("6b087fe2a2da5e4f0b0f4777598a4a7fb66bf77dbd5bfc44e8a7eaa432ab585a6e226891f56a7d4f5ed11a7c57b90f1661bba1059590ca4267a35801c2802913"),
hexEncPubkey("d901e5bde52d1a0f4ddf010a686a53974cdae4ebe5c6551b3c37d6b6d635d38d5b0e5f80bc0186a2c7809dbf3a42870dd09643e68d32db896c6da8ba734579e7"),
hexEncPubkey("96419fb80efae4b674402bb969ebaab86c1274f29a83a311e24516d36cdf148fe21754d46c97688cdd7468f24c08b13e4727c29263393638a3b37b99ff60ebca"),
hexEncPubkey("7b9c1889ae916a5d5abcdfb0aaedcc9c6f9eb1c1a4f68d0c2d034fe79ac610ce917c3abc670744150fa891bfcd8ab14fed6983fca964de920aa393fa7b326748"),
hexEncPubkey("7a369b2b8962cc4c65900be046482fbf7c14f98a135bbbae25152c82ad168fb2097b3d1429197cf46d3ce9fdeb64808f908a489cc6019725db040060fdfe5405"),
hexEncPubkey("47bcae48288da5ecc7f5058dfa07cf14d89d06d6e449cb946e237aa6652ea050d9f5a24a65efdc0013ccf232bf88670979eddef249b054f63f38da9d7796dbd8"),
},
254: {
MustHexID("099739d7abc8abd38ecc7a816c521a1168a4dbd359fa7212a5123ab583ffa1cf485a5fed219575d6475dbcdd541638b2d3631a6c7fce7474e7fe3cba1d4d5853"),
MustHexID("c2b01603b088a7182d0cf7ef29fb2b04c70acb320fccf78526bf9472e10c74ee70b3fcfa6f4b11d167bd7d3bc4d936b660f2c9bff934793d97cb21750e7c3d31"),
MustHexID("20e4d8f45f2f863e94b45548c1ef22a11f7d36f263e4f8623761e05a64c4572379b000a52211751e2561b0f14f4fc92dd4130410c8ccc71eb4f0e95a700d4ca9"),
MustHexID("27f4a16cc085e72d86e25c98bd2eca173eaaee7565c78ec5a52e9e12b2211f35de81b5b45e9195de2ebfe29106742c59112b951a04eb7ae48822911fc1f9389e"),
MustHexID("55db5ee7d98e7f0b1c3b9d5be6f2bc619a1b86c3cdd513160ad4dcf267037a5fffad527ac15d50aeb32c59c13d1d4c1e567ebbf4de0d25236130c8361f9aac63"),
MustHexID("883df308b0130fc928a8559fe50667a0fff80493bc09685d18213b2db241a3ad11310ed86b0ef662b3ce21fc3d9aa7f3fc24b8d9afe17c7407e9afd3345ae548"),
MustHexID("c7af968cc9bc8200c3ee1a387405f7563be1dce6710a3439f42ea40657d0eae9d2b3c16c42d779605351fcdece4da637b9804e60ca08cfb89aec32c197beffa6"),
MustHexID("3e66f2b788e3ff1d04106b80597915cd7afa06c405a7ae026556b6e583dca8e05cfbab5039bb9a1b5d06083ffe8de5780b1775550e7218f5e98624bf7af9a0a8"),
MustHexID("4fc7f53764de3337fdaec0a711d35d3a923e72fa65025444d12230b3552ed43d9b2d1ad08ccb11f2d50c58809e6dd74dde910e195294fca3b47ae5a3967cc479"),
MustHexID("bafdfdcf6ccaa989436752fa97c77477b6baa7deb374b16c095492c529eb133e8e2f99e1977012b64767b9d34b2cf6d2048ed489bd822b5139b523f6a423167b"),
MustHexID("7f5d78008a4312fe059104ce80202c82b8915c2eb4411c6b812b16f7642e57c00f2c9425121f5cbac4257fe0b3e81ef5dea97ea2dbaa98f6a8b6fd4d1e5980bb"),
MustHexID("598c37fe78f922751a052f463aeb0cb0bc7f52b7c2a4cf2da72ec0931c7c32175d4165d0f8998f7320e87324ac3311c03f9382a5385c55f0407b7a66b2acd864"),
MustHexID("f758c4136e1c148777a7f3275a76e2db0b2b04066fd738554ec398c1c6cc9fb47e14a3b4c87bd47deaeab3ffd2110514c3855685a374794daff87b605b27ee2e"),
MustHexID("0307bb9e4fd865a49dcf1fe4333d1b944547db650ab580af0b33e53c4fef6c789531110fac801bbcbce21fc4d6f61b6d5b24abdf5b22e3030646d579f6dca9c2"),
MustHexID("82504b6eb49bb2c0f91a7006ce9cefdbaf6df38706198502c2e06601091fc9dc91e4f15db3410d45c6af355bc270b0f268d3dff560f956985c7332d4b10bd1ed"),
MustHexID("b39b5b677b45944ceebe76e76d1f051de2f2a0ec7b0d650da52135743e66a9a5dba45f638258f9a7545d9a790c7fe6d3fdf82c25425c7887323e45d27d06c057"),
hexEncPubkey("099739d7abc8abd38ecc7a816c521a1168a4dbd359fa7212a5123ab583ffa1cf485a5fed219575d6475dbcdd541638b2d3631a6c7fce7474e7fe3cba1d4d5853"),
hexEncPubkey("c2b01603b088a7182d0cf7ef29fb2b04c70acb320fccf78526bf9472e10c74ee70b3fcfa6f4b11d167bd7d3bc4d936b660f2c9bff934793d97cb21750e7c3d31"),
hexEncPubkey("20e4d8f45f2f863e94b45548c1ef22a11f7d36f263e4f8623761e05a64c4572379b000a52211751e2561b0f14f4fc92dd4130410c8ccc71eb4f0e95a700d4ca9"),
hexEncPubkey("27f4a16cc085e72d86e25c98bd2eca173eaaee7565c78ec5a52e9e12b2211f35de81b5b45e9195de2ebfe29106742c59112b951a04eb7ae48822911fc1f9389e"),
hexEncPubkey("55db5ee7d98e7f0b1c3b9d5be6f2bc619a1b86c3cdd513160ad4dcf267037a5fffad527ac15d50aeb32c59c13d1d4c1e567ebbf4de0d25236130c8361f9aac63"),
hexEncPubkey("883df308b0130fc928a8559fe50667a0fff80493bc09685d18213b2db241a3ad11310ed86b0ef662b3ce21fc3d9aa7f3fc24b8d9afe17c7407e9afd3345ae548"),
hexEncPubkey("c7af968cc9bc8200c3ee1a387405f7563be1dce6710a3439f42ea40657d0eae9d2b3c16c42d779605351fcdece4da637b9804e60ca08cfb89aec32c197beffa6"),
hexEncPubkey("3e66f2b788e3ff1d04106b80597915cd7afa06c405a7ae026556b6e583dca8e05cfbab5039bb9a1b5d06083ffe8de5780b1775550e7218f5e98624bf7af9a0a8"),
hexEncPubkey("4fc7f53764de3337fdaec0a711d35d3a923e72fa65025444d12230b3552ed43d9b2d1ad08ccb11f2d50c58809e6dd74dde910e195294fca3b47ae5a3967cc479"),
hexEncPubkey("bafdfdcf6ccaa989436752fa97c77477b6baa7deb374b16c095492c529eb133e8e2f99e1977012b64767b9d34b2cf6d2048ed489bd822b5139b523f6a423167b"),
hexEncPubkey("7f5d78008a4312fe059104ce80202c82b8915c2eb4411c6b812b16f7642e57c00f2c9425121f5cbac4257fe0b3e81ef5dea97ea2dbaa98f6a8b6fd4d1e5980bb"),
hexEncPubkey("598c37fe78f922751a052f463aeb0cb0bc7f52b7c2a4cf2da72ec0931c7c32175d4165d0f8998f7320e87324ac3311c03f9382a5385c55f0407b7a66b2acd864"),
hexEncPubkey("f758c4136e1c148777a7f3275a76e2db0b2b04066fd738554ec398c1c6cc9fb47e14a3b4c87bd47deaeab3ffd2110514c3855685a374794daff87b605b27ee2e"),
hexEncPubkey("0307bb9e4fd865a49dcf1fe4333d1b944547db650ab580af0b33e53c4fef6c789531110fac801bbcbce21fc4d6f61b6d5b24abdf5b22e3030646d579f6dca9c2"),
hexEncPubkey("82504b6eb49bb2c0f91a7006ce9cefdbaf6df38706198502c2e06601091fc9dc91e4f15db3410d45c6af355bc270b0f268d3dff560f956985c7332d4b10bd1ed"),
hexEncPubkey("b39b5b677b45944ceebe76e76d1f051de2f2a0ec7b0d650da52135743e66a9a5dba45f638258f9a7545d9a790c7fe6d3fdf82c25425c7887323e45d27d06c057"),
},
255: {
MustHexID("5c4d58d46e055dd1f093f81ee60a675e1f02f54da6206720adee4dccef9b67a31efc5c2a2949c31a04ee31beadc79aba10da31440a1f9ff2a24093c63c36d784"),
MustHexID("ea72161ffdd4b1e124c7b93b0684805f4c4b58d617ed498b37a145c670dbc2e04976f8785583d9c805ffbf343c31d492d79f841652bbbd01b61ed85640b23495"),
MustHexID("51caa1d93352d47a8e531692a3612adac1e8ac68d0a200d086c1c57ae1e1a91aa285ab242e8c52ef9d7afe374c9485b122ae815f1707b875569d0433c1c3ce85"),
MustHexID("c08397d5751b47bd3da044b908be0fb0e510d3149574dff7aeab33749b023bb171b5769990fe17469dbebc100bc150e798aeda426a2dcc766699a225fddd75c6"),
MustHexID("0222c1c194b749736e593f937fad67ee348ac57287a15c7e42877aa38a9b87732a408bca370f812efd0eedbff13e6d5b854bf3ba1dec431a796ed47f32552b09"),
MustHexID("03d859cd46ef02d9bfad5268461a6955426845eef4126de6be0fa4e8d7e0727ba2385b78f1a883a8239e95ebb814f2af8379632c7d5b100688eebc5841209582"),
MustHexID("64d5004b7e043c39ff0bd10cb20094c287721d5251715884c280a612b494b3e9e1c64ba6f67614994c7d969a0d0c0295d107d53fc225d47c44c4b82852d6f960"),
MustHexID("b0a5eefb2dab6f786670f35bf9641eefe6dd87fd3f1362bcab4aaa792903500ab23d88fae68411372e0813b057535a601d46e454323745a948017f6063a47b1f"),
MustHexID("0cc6df0a3433d448b5684d2a3ffa9d1a825388177a18f44ad0008c7bd7702f1ec0fc38b83506f7de689c3b6ecb552599927e29699eed6bb867ff08f80068b287"),
MustHexID("50772f7b8c03a4e153355fbbf79c8a80cf32af656ff0c7873c99911099d04a0dae0674706c357e0145ad017a0ade65e6052cb1b0d574fcd6f67da3eee0ace66b"),
MustHexID("1ae37829c9ef41f8b508b82259ebac76b1ed900d7a45c08b7970f25d2d48ddd1829e2f11423a18749940b6dab8598c6e416cef0efd47e46e51f29a0bc65b37cd"),
MustHexID("ba973cab31c2af091fc1644a93527d62b2394999e2b6ccbf158dd5ab9796a43d408786f1803ef4e29debfeb62fce2b6caa5ab2b24d1549c822a11c40c2856665"),
MustHexID("bc413ad270dd6ea25bddba78f3298b03b8ba6f8608ac03d06007d4116fa78ef5a0cfe8c80155089382fc7a193243ee5500082660cb5d7793f60f2d7d18650964"),
MustHexID("5a6a9ef07634d9eec3baa87c997b529b92652afa11473dfee41ef7037d5c06e0ddb9fe842364462d79dd31cff8a59a1b8d5bc2b810dea1d4cbbd3beb80ecec83"),
MustHexID("f492c6ee2696d5f682f7f537757e52744c2ae560f1090a07024609e903d334e9e174fc01609c5a229ddbcac36c9d21adaf6457dab38a25bfd44f2f0ee4277998"),
MustHexID("459e4db99298cb0467a90acee6888b08bb857450deac11015cced5104853be5adce5b69c740968bc7f931495d671a70cad9f48546d7cd203357fe9af0e8d2164"),
hexEncPubkey("5c4d58d46e055dd1f093f81ee60a675e1f02f54da6206720adee4dccef9b67a31efc5c2a2949c31a04ee31beadc79aba10da31440a1f9ff2a24093c63c36d784"),
hexEncPubkey("ea72161ffdd4b1e124c7b93b0684805f4c4b58d617ed498b37a145c670dbc2e04976f8785583d9c805ffbf343c31d492d79f841652bbbd01b61ed85640b23495"),
hexEncPubkey("51caa1d93352d47a8e531692a3612adac1e8ac68d0a200d086c1c57ae1e1a91aa285ab242e8c52ef9d7afe374c9485b122ae815f1707b875569d0433c1c3ce85"),
hexEncPubkey("c08397d5751b47bd3da044b908be0fb0e510d3149574dff7aeab33749b023bb171b5769990fe17469dbebc100bc150e798aeda426a2dcc766699a225fddd75c6"),
hexEncPubkey("0222c1c194b749736e593f937fad67ee348ac57287a15c7e42877aa38a9b87732a408bca370f812efd0eedbff13e6d5b854bf3ba1dec431a796ed47f32552b09"),
hexEncPubkey("03d859cd46ef02d9bfad5268461a6955426845eef4126de6be0fa4e8d7e0727ba2385b78f1a883a8239e95ebb814f2af8379632c7d5b100688eebc5841209582"),
hexEncPubkey("64d5004b7e043c39ff0bd10cb20094c287721d5251715884c280a612b494b3e9e1c64ba6f67614994c7d969a0d0c0295d107d53fc225d47c44c4b82852d6f960"),
hexEncPubkey("b0a5eefb2dab6f786670f35bf9641eefe6dd87fd3f1362bcab4aaa792903500ab23d88fae68411372e0813b057535a601d46e454323745a948017f6063a47b1f"),
hexEncPubkey("0cc6df0a3433d448b5684d2a3ffa9d1a825388177a18f44ad0008c7bd7702f1ec0fc38b83506f7de689c3b6ecb552599927e29699eed6bb867ff08f80068b287"),
hexEncPubkey("50772f7b8c03a4e153355fbbf79c8a80cf32af656ff0c7873c99911099d04a0dae0674706c357e0145ad017a0ade65e6052cb1b0d574fcd6f67da3eee0ace66b"),
hexEncPubkey("1ae37829c9ef41f8b508b82259ebac76b1ed900d7a45c08b7970f25d2d48ddd1829e2f11423a18749940b6dab8598c6e416cef0efd47e46e51f29a0bc65b37cd"),
hexEncPubkey("ba973cab31c2af091fc1644a93527d62b2394999e2b6ccbf158dd5ab9796a43d408786f1803ef4e29debfeb62fce2b6caa5ab2b24d1549c822a11c40c2856665"),
hexEncPubkey("bc413ad270dd6ea25bddba78f3298b03b8ba6f8608ac03d06007d4116fa78ef5a0cfe8c80155089382fc7a193243ee5500082660cb5d7793f60f2d7d18650964"),
hexEncPubkey("5a6a9ef07634d9eec3baa87c997b529b92652afa11473dfee41ef7037d5c06e0ddb9fe842364462d79dd31cff8a59a1b8d5bc2b810dea1d4cbbd3beb80ecec83"),
hexEncPubkey("f492c6ee2696d5f682f7f537757e52744c2ae560f1090a07024609e903d334e9e174fc01609c5a229ddbcac36c9d21adaf6457dab38a25bfd44f2f0ee4277998"),
hexEncPubkey("459e4db99298cb0467a90acee6888b08bb857450deac11015cced5104853be5adce5b69c740968bc7f931495d671a70cad9f48546d7cd203357fe9af0e8d2164"),
},
256: {
MustHexID("a8593af8a4aef7b806b5197612017951bac8845a1917ca9a6a15dd6086d608505144990b245785c4cd2d67a295701c7aac2aa18823fb0033987284b019656268"),
MustHexID("d2eebef914928c3aad77fc1b2a495f52d2294acf5edaa7d8a530b540f094b861a68fe8348a46a7c302f08ab609d85912a4968eacfea0740847b29421b4795d9e"),
MustHexID("b14bfcb31495f32b650b63cf7d08492e3e29071fdc73cf2da0da48d4b191a70ba1a65f42ad8c343206101f00f8a48e8db4b08bf3f622c0853e7323b250835b91"),
MustHexID("7feaee0d818c03eb30e4e0bf03ade0f3c21ca38e938a761aa1781cf70bda8cc5cd631a6cc53dd44f1d4a6d3e2dae6513c6c66ee50cb2f0e9ad6f7e319b309fd9"),
MustHexID("4ca3b657b139311db8d583c25dd5963005e46689e1317620496cc64129c7f3e52870820e0ec7941d28809311df6db8a2867bbd4f235b4248af24d7a9c22d1232"),
MustHexID("1181defb1d16851d42dd951d84424d6bd1479137f587fa184d5a8152be6b6b16ed08bcdb2c2ed8539bcde98c80c432875f9f724737c316a2bd385a39d3cab1d8"),
MustHexID("d9dd818769fa0c3ec9f553c759b92476f082817252a04a47dc1777740b1731d280058c66f982812f173a294acf4944a85ba08346e2de153ba3ba41ce8a62cb64"),
MustHexID("bd7c4f8a9e770aa915c771b15e107ca123d838762da0d3ffc53aa6b53e9cd076cffc534ec4d2e4c334c683f1f5ea72e0e123f6c261915ed5b58ac1b59f003d88"),
MustHexID("3dd5739c73649d510456a70e9d6b46a855864a4a3f744e088fd8c8da11b18e4c9b5f2d7da50b1c147b2bae5ca9609ae01f7a3cdea9dce34f80a91d29cd82f918"),
MustHexID("f0d7df1efc439b4bcc0b762118c1cfa99b2a6143a9f4b10e3c9465125f4c9fca4ab88a2504169bbcad65492cf2f50da9dd5d077c39574a944f94d8246529066b"),
MustHexID("dd598b9ba441448e5fb1a6ec6c5f5aa9605bad6e223297c729b1705d11d05f6bfd3d41988b694681ae69bb03b9a08bff4beab5596503d12a39bffb5cd6e94c7c"),
MustHexID("3fce284ac97e567aebae681b15b7a2b6df9d873945536335883e4bbc26460c064370537f323fd1ada828ea43154992d14ac0cec0940a2bd2a3f42ec156d60c83"),
MustHexID("7c8dfa8c1311cb14fb29a8ac11bca23ecc115e56d9fcf7b7ac1db9066aa4eb39f8b1dabf46e192a65be95ebfb4e839b5ab4533fef414921825e996b210dd53bd"),
MustHexID("cafa6934f82120456620573d7f801390ed5e16ed619613a37e409e44ab355ef755e83565a913b48a9466db786f8d4fbd590bfec474c2524d4a2608d4eafd6abd"),
MustHexID("9d16600d0dd310d77045769fed2cb427f32db88cd57d86e49390c2ba8a9698cfa856f775be2013237226e7bf47b248871cf865d23015937d1edeb20db5e3e760"),
MustHexID("17be6b6ba54199b1d80eff866d348ea11d8a4b341d63ad9a6681d3ef8a43853ac564d153eb2a8737f0afc9ab320f6f95c55aa11aaa13bbb1ff422fd16bdf8188"),
hexEncPubkey("a8593af8a4aef7b806b5197612017951bac8845a1917ca9a6a15dd6086d608505144990b245785c4cd2d67a295701c7aac2aa18823fb0033987284b019656268"),
hexEncPubkey("d2eebef914928c3aad77fc1b2a495f52d2294acf5edaa7d8a530b540f094b861a68fe8348a46a7c302f08ab609d85912a4968eacfea0740847b29421b4795d9e"),
hexEncPubkey("b14bfcb31495f32b650b63cf7d08492e3e29071fdc73cf2da0da48d4b191a70ba1a65f42ad8c343206101f00f8a48e8db4b08bf3f622c0853e7323b250835b91"),
hexEncPubkey("7feaee0d818c03eb30e4e0bf03ade0f3c21ca38e938a761aa1781cf70bda8cc5cd631a6cc53dd44f1d4a6d3e2dae6513c6c66ee50cb2f0e9ad6f7e319b309fd9"),
hexEncPubkey("4ca3b657b139311db8d583c25dd5963005e46689e1317620496cc64129c7f3e52870820e0ec7941d28809311df6db8a2867bbd4f235b4248af24d7a9c22d1232"),
hexEncPubkey("1181defb1d16851d42dd951d84424d6bd1479137f587fa184d5a8152be6b6b16ed08bcdb2c2ed8539bcde98c80c432875f9f724737c316a2bd385a39d3cab1d8"),
hexEncPubkey("d9dd818769fa0c3ec9f553c759b92476f082817252a04a47dc1777740b1731d280058c66f982812f173a294acf4944a85ba08346e2de153ba3ba41ce8a62cb64"),
hexEncPubkey("bd7c4f8a9e770aa915c771b15e107ca123d838762da0d3ffc53aa6b53e9cd076cffc534ec4d2e4c334c683f1f5ea72e0e123f6c261915ed5b58ac1b59f003d88"),
hexEncPubkey("3dd5739c73649d510456a70e9d6b46a855864a4a3f744e088fd8c8da11b18e4c9b5f2d7da50b1c147b2bae5ca9609ae01f7a3cdea9dce34f80a91d29cd82f918"),
hexEncPubkey("f0d7df1efc439b4bcc0b762118c1cfa99b2a6143a9f4b10e3c9465125f4c9fca4ab88a2504169bbcad65492cf2f50da9dd5d077c39574a944f94d8246529066b"),
hexEncPubkey("dd598b9ba441448e5fb1a6ec6c5f5aa9605bad6e223297c729b1705d11d05f6bfd3d41988b694681ae69bb03b9a08bff4beab5596503d12a39bffb5cd6e94c7c"),
hexEncPubkey("3fce284ac97e567aebae681b15b7a2b6df9d873945536335883e4bbc26460c064370537f323fd1ada828ea43154992d14ac0cec0940a2bd2a3f42ec156d60c83"),
hexEncPubkey("7c8dfa8c1311cb14fb29a8ac11bca23ecc115e56d9fcf7b7ac1db9066aa4eb39f8b1dabf46e192a65be95ebfb4e839b5ab4533fef414921825e996b210dd53bd"),
hexEncPubkey("cafa6934f82120456620573d7f801390ed5e16ed619613a37e409e44ab355ef755e83565a913b48a9466db786f8d4fbd590bfec474c2524d4a2608d4eafd6abd"),
hexEncPubkey("9d16600d0dd310d77045769fed2cb427f32db88cd57d86e49390c2ba8a9698cfa856f775be2013237226e7bf47b248871cf865d23015937d1edeb20db5e3e760"),
hexEncPubkey("17be6b6ba54199b1d80eff866d348ea11d8a4b341d63ad9a6681d3ef8a43853ac564d153eb2a8737f0afc9ab320f6f95c55aa11aaa13bbb1ff422fd16bdf8188"),
},
},
}
type preminedTestnet struct {
target NodeID
targetSha common.Hash // sha3(target)
dists [hashBits + 1][]NodeID
target encPubkey
targetSha enode.ID // sha3(target)
dists [hashBits + 1][]encPubkey
}
func (tn *preminedTestnet) findnode(toid NodeID, toaddr *net.UDPAddr, target NodeID) ([]*Node, error) {
func (tn *preminedTestnet) findnode(toid enode.ID, toaddr *net.UDPAddr, target encPubkey) ([]*node, error) {
// current log distance is encoded in port number
// fmt.Println("findnode query at dist", toaddr.Port)
if toaddr.Port == 0 {
panic("query to node at distance 0")
}
next := uint16(toaddr.Port) - 1
var result []*Node
for i, id := range tn.dists[toaddr.Port] {
result = append(result, NewNode(id, net.ParseIP("127.0.0.1"), next, uint16(i)))
next := toaddr.Port - 1
var result []*node
for i, ekey := range tn.dists[toaddr.Port] {
key, _ := decodePubkey(ekey)
node := wrapNode(enode.NewV4(key, net.ParseIP("127.0.0.1"), i, next))
result = append(result, node)
}
return result, nil
}
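The port-encodes-distance trick above lets the lookup tests run without real networking. A hedged, illustrative sketch of how a caller descends one log distance per query, reusing the identifiers from this diff (the premined data only populates the top few distances, so the walk stops once a bucket is empty):

	// Illustrative only: every node returned for distance d advertises d-1 in its UDP port,
	// so repeated queries converge toward the premined target.
	addr := &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 256} // start at the maximum log distance
	for addr.Port > 0 {
		ns, _ := tn.findnode(enode.ID{}, addr, tn.target)
		if len(ns) == 0 {
			break // no premined nodes at this distance
		}
		addr.Port = ns[0].UDP() // "next", i.e. one distance closer
	}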
func (*preminedTestnet) close() {}
func (*preminedTestnet) waitping(from NodeID) error { return nil }
func (*preminedTestnet) ping(toid NodeID, toaddr *net.UDPAddr) error { return nil }
func (*preminedTestnet) waitping(from enode.ID) error { return nil }
func (*preminedTestnet) ping(toid enode.ID, toaddr *net.UDPAddr) error { return nil }
// mine generates a testnet struct literal with nodes at
// various distances to the given target.
func (tn *preminedTestnet) mine(target NodeID) {
func (tn *preminedTestnet) mine(target encPubkey) {
tn.target = target
tn.targetSha = crypto.Keccak256Hash(tn.target[:])
tn.targetSha = tn.target.id()
found := 0
for found < bucketSize*10 {
k := newkey()
id := PubkeyID(&k.PublicKey)
sha := crypto.Keccak256Hash(id[:])
ld := logdist(tn.targetSha, sha)
key := encodePubkey(&k.PublicKey)
ld := enode.LogDist(tn.targetSha, key.id())
if len(tn.dists[ld]) < bucketSize {
tn.dists[ld] = append(tn.dists[ld], id)
tn.dists[ld] = append(tn.dists[ld], key)
fmt.Println("found ID with ld", ld)
found++
}
@ -594,14 +549,14 @@ func (tn *preminedTestnet) mine(target NodeID) {
fmt.Println("&preminedTestnet{")
fmt.Printf(" target: %#v,\n", tn.target)
fmt.Printf(" targetSha: %#v,\n", tn.targetSha)
fmt.Printf(" dists: [%d][]NodeID{\n", len(tn.dists))
fmt.Printf(" dists: [%d][]encPubkey{\n", len(tn.dists))
for ld, ns := range tn.dists {
if len(ns) == 0 {
continue
}
fmt.Printf(" %d: []NodeID{\n", ld)
fmt.Printf(" %d: []encPubkey{\n", ld)
for _, n := range ns {
fmt.Printf(" MustHexID(\"%x\"),\n", n[:])
fmt.Printf(" hexEncPubkey(\"%x\"),\n", n[:])
}
fmt.Println(" },")
}
@ -609,40 +564,6 @@ func (tn *preminedTestnet) mine(target NodeID) {
fmt.Println("}")
}
func hasDuplicates(slice []*Node) bool {
seen := make(map[NodeID]bool)
for i, e := range slice {
if e == nil {
panic(fmt.Sprintf("nil *Node at %d", i))
}
if seen[e.ID] {
return true
}
seen[e.ID] = true
}
return false
}
func sortedByDistanceTo(distbase common.Hash, slice []*Node) bool {
var last common.Hash
for i, e := range slice {
if i > 0 && distcmp(distbase, e.sha, last) < 0 {
return false
}
last = e.sha
}
return true
}
func contains(ns []*Node, id NodeID) bool {
for _, n := range ns {
if n.ID == id {
return true
}
}
return false
}
// gen wraps quick.Value so it's easier to use.
// it generates a random value of the given value's type.
func gen(typ interface{}, rand *rand.Rand) interface{} {
@ -653,6 +574,13 @@ func gen(typ interface{}, rand *rand.Rand) interface{} {
return v.Interface()
}
func quickcfg() *quick.Config {
return &quick.Config{
MaxCount: 5000,
Rand: rand.New(rand.NewSource(time.Now().Unix())),
}
}
func newkey() *ecdsa.PrivateKey {
key, err := crypto.GenerateKey()
if err != nil {



@ -0,0 +1,167 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package discover
import (
"crypto/ecdsa"
"encoding/hex"
"fmt"
"math/rand"
"net"
"sync"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
)
func newTestTable(t transport) (*Table, *enode.DB) {
var r enr.Record
r.Set(enr.IP{0, 0, 0, 0})
n := enode.SignNull(&r, enode.ID{})
db, _ := enode.OpenDB("")
tab, _ := newTable(t, n, db, nil)
return tab, db
}
// nodeAtDistance creates a node for which enode.LogDist(base, n.id) == ld.
func nodeAtDistance(base enode.ID, ld int, ip net.IP) *node {
var r enr.Record
r.Set(enr.IP(ip))
return wrapNode(enode.SignNull(&r, idAtDistance(base, ld)))
}
// idAtDistance returns a random hash such that enode.LogDist(a, b) == n
func idAtDistance(a enode.ID, n int) (b enode.ID) {
if n == 0 {
return a
}
// flip bit at position n, fill the rest with random bits
b = a
pos := len(a) - n/8 - 1
bit := byte(0x01) << (byte(n%8) - 1)
if bit == 0 {
pos++
bit = 0x80
}
b[pos] = a[pos]&^bit | ^a[pos]&bit // TODO: randomize end bits
for i := pos + 1; i < len(a); i++ {
b[i] = byte(rand.Intn(255))
}
return b
}
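A quick sanity check of the invariant idAtDistance provides, expressed with enode.LogDist as used above (sketch only):

	// Sketch: for any base ID and distance d, the generated ID is exactly d away.
	base := enode.ID{} // all-zero ID, arbitrary
	for _, d := range []int{1, 8, 100, 255} {
		if got := enode.LogDist(base, idAtDistance(base, d)); got != d {
			panic(fmt.Sprintf("logdist = %d, want %d", got, d))
		}
	}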
func intIP(i int) net.IP {
return net.IP{byte(i), 0, 2, byte(i)}
}
// fillBucket inserts nodes into the given bucket until it is full.
func fillBucket(tab *Table, n *node) (last *node) {
ld := enode.LogDist(tab.self.ID(), n.ID())
b := tab.bucket(n.ID())
for len(b.entries) < bucketSize {
b.entries = append(b.entries, nodeAtDistance(tab.self.ID(), ld, intIP(ld)))
}
return b.entries[bucketSize-1]
}
type pingRecorder struct {
mu sync.Mutex
dead, pinged map[enode.ID]bool
}
func newPingRecorder() *pingRecorder {
return &pingRecorder{
dead: make(map[enode.ID]bool),
pinged: make(map[enode.ID]bool),
}
}
func (t *pingRecorder) findnode(toid enode.ID, toaddr *net.UDPAddr, target encPubkey) ([]*node, error) {
return nil, nil
}
func (t *pingRecorder) waitping(from enode.ID) error {
return nil // remote always pings
}
func (t *pingRecorder) ping(toid enode.ID, toaddr *net.UDPAddr) error {
t.mu.Lock()
defer t.mu.Unlock()
t.pinged[toid] = true
if t.dead[toid] {
return errTimeout
} else {
return nil
}
}
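How a test might drive this recorder (hedged sketch; direct field access is acceptable because the helpers live in the same package):

	// Sketch: mark one peer dead, then observe ping outcomes.
	rec := newPingRecorder()
	deadID := enode.ID{1}
	rec.dead[deadID] = true
	_ = rec.ping(deadID, &net.UDPAddr{})      // returns errTimeout, the peer is "dead"
	_ = rec.ping(enode.ID{2}, &net.UDPAddr{}) // returns nil, and rec.pinged now records the ID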
func (t *pingRecorder) close() {}
func hasDuplicates(slice []*node) bool {
seen := make(map[enode.ID]bool)
for i, e := range slice {
if e == nil {
panic(fmt.Sprintf("nil *Node at %d", i))
}
if seen[e.ID()] {
return true
}
seen[e.ID()] = true
}
return false
}
func contains(ns []*node, id enode.ID) bool {
for _, n := range ns {
if n.ID() == id {
return true
}
}
return false
}
func sortedByDistanceTo(distbase enode.ID, slice []*node) bool {
var last enode.ID
for i, e := range slice {
if i > 0 && enode.DistCmp(distbase, e.ID(), last) < 0 {
return false
}
last = e.ID()
}
return true
}
func hexEncPubkey(h string) (ret encPubkey) {
b, err := hex.DecodeString(h)
if err != nil {
panic(err)
}
if len(b) != len(ret) {
panic("invalid length")
}
copy(ret[:], b)
return ret
}
func hexPubkey(h string) *ecdsa.PublicKey {
k, err := decodePubkey(hexEncPubkey(h))
if err != nil {
panic(err)
}
return k
}


@ -27,6 +27,7 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/nat"
"github.com/ethereum/go-ethereum/p2p/netutil"
"github.com/ethereum/go-ethereum/rlp"
@ -48,6 +49,7 @@ var (
const (
respTimeout = 500 * time.Millisecond
expiration = 20 * time.Second
bondExpiration = 24 * time.Hour
ntpFailureThreshold = 32 // Continuous timeouts after which to check NTP
ntpWarningCooldown = 10 * time.Minute // Minimum amount of time to pass before repeating NTP warning
@ -87,7 +89,7 @@ type (
// findnode is a query for nodes close to the given target.
findnode struct {
Target NodeID // doesn't need to be an actual public key
Target encPubkey
Expiration uint64
// Ignore additional fields (for forward compatibility).
Rest []rlp.RawValue `rlp:"tail"`
@ -105,7 +107,7 @@ type (
IP net.IP // len 4 for IPv4 or 16 for IPv6
UDP uint16 // for discovery protocol
TCP uint16 // for RLPx protocol
ID NodeID
ID encPubkey
}
rpcEndpoint struct {
@ -123,7 +125,7 @@ func makeEndpoint(addr *net.UDPAddr, tcpPort uint16) rpcEndpoint {
return rpcEndpoint{IP: ip, UDP: uint16(addr.Port), TCP: tcpPort}
}
func (t *udp) nodeFromRPC(sender *net.UDPAddr, rn rpcNode) (*Node, error) {
func (t *udp) nodeFromRPC(sender *net.UDPAddr, rn rpcNode) (*node, error) {
if rn.UDP <= 1024 {
return nil, errors.New("low port")
}
@ -133,17 +135,26 @@ func (t *udp) nodeFromRPC(sender *net.UDPAddr, rn rpcNode) (*Node, error) {
if t.netrestrict != nil && !t.netrestrict.Contains(rn.IP) {
return nil, errors.New("not contained in netrestrict whitelist")
}
n := NewNode(rn.ID, rn.IP, rn.UDP, rn.TCP)
err := n.validateComplete()
key, err := decodePubkey(rn.ID)
if err != nil {
return nil, err
}
n := wrapNode(enode.NewV4(key, rn.IP, int(rn.TCP), int(rn.UDP)))
err = n.ValidateComplete()
return n, err
}
func nodeToRPC(n *Node) rpcNode {
return rpcNode{ID: n.ID, IP: n.IP, UDP: n.UDP, TCP: n.TCP}
func nodeToRPC(n *node) rpcNode {
var key ecdsa.PublicKey
var ekey encPubkey
if err := n.Load((*enode.Secp256k1)(&key)); err == nil {
ekey = encodePubkey(&key)
}
return rpcNode{ID: ekey, IP: n.IP(), UDP: uint16(n.UDP()), TCP: uint16(n.TCP())}
}
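For orientation, a hedged sketch of how a *node is projected into its wire representation; the enode URL is borrowed from the test data later in this diff:

	// Illustrative only: convert a wrapped node into the rpcNode sent in neighbors packets.
	n := wrapNode(enode.MustParseV4("enode://ba85011c70bcc5c04d8607d3a0ed29aa6179c092cbdda10d5d32684fb33ed01bd94f588ca8f91ac48318087dcb02eaf36773a7a453f0eedd6742af668097b29c@10.0.1.16:30303?discport=30304"))
	rn := nodeToRPC(n) // rn.ID carries the encoded public key; rn.IP/UDP/TCP mirror the record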
type packet interface {
handle(t *udp, from *net.UDPAddr, fromID NodeID, mac []byte) error
handle(t *udp, from *net.UDPAddr, fromKey encPubkey, mac []byte) error
name() string
}
@ -181,7 +192,7 @@ type udp struct {
// to all the callback functions for that node.
type pending struct {
// these fields must match in the reply.
from NodeID
from enode.ID
ptype byte
// time when the request must complete
@ -199,7 +210,7 @@ type pending struct {
}
type reply struct {
from NodeID
from enode.ID
ptype byte
data interface{}
// loop indicates whether there was
@ -222,7 +233,7 @@ type Config struct {
AnnounceAddr *net.UDPAddr // local address announced in the DHT
NodeDBPath string // if set, the node database is stored at this filesystem location
NetRestrict *netutil.Netlist // network whitelist
Bootnodes []*Node // list of bootstrap nodes
Bootnodes []*enode.Node // list of bootstrap nodes
Unhandled chan<- ReadPacket // unhandled packets are sent on this channel
}
@ -237,6 +248,16 @@ func ListenUDP(c conn, cfg Config) (*Table, error) {
}
func newUDP(c conn, cfg Config) (*Table, *udp, error) {
realaddr := c.LocalAddr().(*net.UDPAddr)
if cfg.AnnounceAddr != nil {
realaddr = cfg.AnnounceAddr
}
self := enode.NewV4(&cfg.PrivateKey.PublicKey, realaddr.IP, realaddr.Port, realaddr.Port)
db, err := enode.OpenDB(cfg.NodeDBPath)
if err != nil {
return nil, nil, err
}
udp := &udp{
conn: c,
priv: cfg.PrivateKey,
@ -245,13 +266,9 @@ func newUDP(c conn, cfg Config) (*Table, *udp, error) {
gotreply: make(chan reply),
addpending: make(chan *pending),
}
realaddr := c.LocalAddr().(*net.UDPAddr)
if cfg.AnnounceAddr != nil {
realaddr = cfg.AnnounceAddr
}
// TODO: separate TCP port
udp.ourEndpoint = makeEndpoint(realaddr, uint16(realaddr.Port))
tab, err := newTable(udp, PubkeyID(&cfg.PrivateKey.PublicKey), realaddr, cfg.NodeDBPath, cfg.Bootnodes)
tab, err := newTable(udp, self, db, cfg.Bootnodes)
if err != nil {
return nil, nil, err
}
@ -265,17 +282,18 @@ func newUDP(c conn, cfg Config) (*Table, *udp, error) {
func (t *udp) close() {
close(t.closing)
t.conn.Close()
t.db.Close()
// TODO: wait for the loops to end.
}
// ping sends a ping message to the given node and waits for a reply.
func (t *udp) ping(toid NodeID, toaddr *net.UDPAddr) error {
func (t *udp) ping(toid enode.ID, toaddr *net.UDPAddr) error {
return <-t.sendPing(toid, toaddr, nil)
}
// sendPing sends a ping message to the given node and invokes the callback
// when the reply arrives.
func (t *udp) sendPing(toid NodeID, toaddr *net.UDPAddr, callback func()) <-chan error {
func (t *udp) sendPing(toid enode.ID, toaddr *net.UDPAddr, callback func()) <-chan error {
req := &ping{
Version: 4,
From: t.ourEndpoint,
@ -299,21 +317,21 @@ func (t *udp) sendPing(toid NodeID, toaddr *net.UDPAddr, callback func()) <-chan
return errc
}
func (t *udp) waitping(from NodeID) error {
func (t *udp) waitping(from enode.ID) error {
return <-t.pending(from, pingPacket, func(interface{}) bool { return true })
}
// findnode sends a findnode request to the given node and waits until
// the node has sent up to k neighbors.
func (t *udp) findnode(toid NodeID, toaddr *net.UDPAddr, target NodeID) ([]*Node, error) {
func (t *udp) findnode(toid enode.ID, toaddr *net.UDPAddr, target encPubkey) ([]*node, error) {
// If we haven't seen a ping from the destination node for a while, it won't remember
// our endpoint proof and will reject findnode. Solicit a ping first.
if time.Since(t.db.lastPingReceived(toid)) > nodeDBNodeExpiration {
if time.Since(t.db.LastPingReceived(toid)) > bondExpiration {
t.ping(toid, toaddr)
t.waitping(toid)
}
nodes := make([]*Node, 0, bucketSize)
nodes := make([]*node, 0, bucketSize)
nreceived := 0
errc := t.pending(toid, neighborsPacket, func(r interface{}) bool {
reply := r.(*neighbors)
@ -337,7 +355,7 @@ func (t *udp) findnode(toid NodeID, toaddr *net.UDPAddr, target NodeID) ([]*Node
// pending adds a reply callback to the pending reply queue.
// see the documentation of type pending for a detailed explanation.
func (t *udp) pending(id NodeID, ptype byte, callback func(interface{}) bool) <-chan error {
func (t *udp) pending(id enode.ID, ptype byte, callback func(interface{}) bool) <-chan error {
ch := make(chan error, 1)
p := &pending{from: id, ptype: ptype, callback: callback, errc: ch}
select {
@ -349,7 +367,7 @@ func (t *udp) pending(id NodeID, ptype byte, callback func(interface{}) bool) <-
return ch
}
func (t *udp) handleReply(from NodeID, ptype byte, req packet) bool {
func (t *udp) handleReply(from enode.ID, ptype byte, req packet) bool {
matched := make(chan bool, 1)
select {
case t.gotreply <- reply{from, ptype, req, matched}:
@ -563,19 +581,20 @@ func (t *udp) handlePacket(from *net.UDPAddr, buf []byte) error {
return err
}
func decodePacket(buf []byte) (packet, NodeID, []byte, error) {
func decodePacket(buf []byte) (packet, encPubkey, []byte, error) {
if len(buf) < headSize+1 {
return nil, NodeID{}, nil, errPacketTooSmall
return nil, encPubkey{}, nil, errPacketTooSmall
}
hash, sig, sigdata := buf[:macSize], buf[macSize:headSize], buf[headSize:]
shouldhash := crypto.Keccak256(buf[macSize:])
if !bytes.Equal(hash, shouldhash) {
return nil, NodeID{}, nil, errBadHash
return nil, encPubkey{}, nil, errBadHash
}
fromID, err := recoverNodeID(crypto.Keccak256(buf[headSize:]), sig)
fromKey, err := recoverNodeKey(crypto.Keccak256(buf[headSize:]), sig)
if err != nil {
return nil, NodeID{}, hash, err
return nil, fromKey, hash, err
}
var req packet
switch ptype := sigdata[0]; ptype {
case pingPacket:
@ -587,56 +606,59 @@ func decodePacket(buf []byte) (packet, NodeID, []byte, error) {
case neighborsPacket:
req = new(neighbors)
default:
return nil, fromID, hash, fmt.Errorf("unknown type: %d", ptype)
return nil, fromKey, hash, fmt.Errorf("unknown type: %d", ptype)
}
s := rlp.NewStream(bytes.NewReader(sigdata[1:]), 0)
err = s.Decode(req)
return req, fromID, hash, err
return req, fromKey, hash, err
}
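Roughly what handlePacket above does with the result; a hedged sketch, with the v4 framing summarized in the comments (the layout follows the slicing in decodePacket):

	// buf[:macSize]          hash = keccak256(buf[macSize:]), the MAC verified above
	// buf[macSize:headSize]  recoverable secp256k1 signature over keccak256(buf[headSize:])
	// buf[headSize]          packet type byte (ping/pong/findnode/neighbors)
	// buf[headSize+1:]       RLP body; unknown trailing fields are ignored for forward compatibility
	pkt, fromKey, hash, err := decodePacket(buf)
	if err != nil {
		return err
	}
	err = pkt.handle(t, from, fromKey, hash) // hash is echoed back as pong.ReplyTok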
func (req *ping) handle(t *udp, from *net.UDPAddr, fromID NodeID, mac []byte) error {
func (req *ping) handle(t *udp, from *net.UDPAddr, fromKey encPubkey, mac []byte) error {
if expired(req.Expiration) {
return errExpired
}
key, err := decodePubkey(fromKey)
if err != nil {
return fmt.Errorf("invalid public key: %v", err)
}
t.send(from, pongPacket, &pong{
To: makeEndpoint(from, req.From.TCP),
ReplyTok: mac,
Expiration: uint64(time.Now().Add(expiration).Unix()),
})
t.handleReply(fromID, pingPacket, req)
// Add the node to the table. Before doing so, ensure that we have a recent enough pong
// recorded in the database so their findnode requests will be accepted later.
n := NewNode(fromID, from.IP, uint16(from.Port), req.From.TCP)
if time.Since(t.db.lastPongReceived(fromID)) > nodeDBNodeExpiration {
t.sendPing(fromID, from, func() { t.addThroughPing(n) })
n := wrapNode(enode.NewV4(key, from.IP, int(req.From.TCP), from.Port))
t.handleReply(n.ID(), pingPacket, req)
if time.Since(t.db.LastPongReceived(n.ID())) > bondExpiration {
t.sendPing(n.ID(), from, func() { t.addThroughPing(n) })
} else {
t.addThroughPing(n)
}
t.db.updateLastPingReceived(fromID, time.Now())
t.db.UpdateLastPingReceived(n.ID(), time.Now())
return nil
}
func (req *ping) name() string { return "PING/v4" }
func (req *pong) handle(t *udp, from *net.UDPAddr, fromID NodeID, mac []byte) error {
func (req *pong) handle(t *udp, from *net.UDPAddr, fromKey encPubkey, mac []byte) error {
if expired(req.Expiration) {
return errExpired
}
fromID := fromKey.id()
if !t.handleReply(fromID, pongPacket, req) {
return errUnsolicitedReply
}
t.db.updateLastPongReceived(fromID, time.Now())
t.db.UpdateLastPongReceived(fromID, time.Now())
return nil
}
func (req *pong) name() string { return "PONG/v4" }
func (req *findnode) handle(t *udp, from *net.UDPAddr, fromID NodeID, mac []byte) error {
func (req *findnode) handle(t *udp, from *net.UDPAddr, fromKey encPubkey, mac []byte) error {
if expired(req.Expiration) {
return errExpired
}
if !t.db.hasBond(fromID) {
fromID := fromKey.id()
if time.Since(t.db.LastPongReceived(fromID)) > bondExpiration {
// No endpoint proof pong exists, we don't process the packet. This prevents an
// attack vector where the discovery protocol could be used to amplify traffic in a
// DDOS attack. A malicious actor would send a findnode request with the IP address
@ -645,7 +667,7 @@ func (req *findnode) handle(t *udp, from *net.UDPAddr, fromID NodeID, mac []byte
// findnode) to the victim.
return errUnknownNode
}
target := crypto.Keccak256Hash(req.Target[:])
target := enode.ID(crypto.Keccak256Hash(req.Target[:]))
t.mutex.Lock()
closest := t.closest(target, bucketSize).entries
t.mutex.Unlock()
@ -655,7 +677,7 @@ func (req *findnode) handle(t *udp, from *net.UDPAddr, fromID NodeID, mac []byte
// Send neighbors in chunks with at most maxNeighbors per packet
// to stay below the 1280 byte limit.
for _, n := range closest {
if netutil.CheckRelayIP(from.IP, n.IP) == nil {
if netutil.CheckRelayIP(from.IP, n.IP()) == nil {
p.Nodes = append(p.Nodes, nodeToRPC(n))
}
if len(p.Nodes) == maxNeighbors {
@ -672,11 +694,11 @@ func (req *findnode) handle(t *udp, from *net.UDPAddr, fromID NodeID, mac []byte
func (req *findnode) name() string { return "FINDNODE/v4" }
func (req *neighbors) handle(t *udp, from *net.UDPAddr, fromID NodeID, mac []byte) error {
func (req *neighbors) handle(t *udp, from *net.UDPAddr, fromKey encPubkey, mac []byte) error {
if expired(req.Expiration) {
return errExpired
}
if !t.handleReply(fromID, neighborsPacket, req) {
if !t.handleReply(fromKey.id(), neighborsPacket, req) {
return errUnsolicitedReply
}
return nil


@ -36,6 +36,7 @@ import (
"github.com/davecgh/go-spew/spew"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/rlp"
)
@ -46,7 +47,7 @@ func init() {
// shared test variables
var (
futureExp = uint64(time.Now().Add(10 * time.Hour).Unix())
testTarget = NodeID{0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1}
testTarget = encPubkey{0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1}
testRemote = rpcEndpoint{IP: net.ParseIP("1.1.1.1").To4(), UDP: 1, TCP: 2}
testLocalAnnounced = rpcEndpoint{IP: net.ParseIP("2.2.2.2").To4(), UDP: 3, TCP: 4}
testLocal = rpcEndpoint{IP: net.ParseIP("3.3.3.3").To4(), UDP: 5, TCP: 6}
@ -136,7 +137,7 @@ func TestUDP_pingTimeout(t *testing.T) {
defer test.table.Close()
toaddr := &net.UDPAddr{IP: net.ParseIP("1.2.3.4"), Port: 2222}
toid := NodeID{1, 2, 3, 4}
toid := enode.ID{1, 2, 3, 4}
if err := test.udp.ping(toid, toaddr); err != errTimeout {
t.Error("expected timeout error, got", err)
}
@ -220,8 +221,8 @@ func TestUDP_findnodeTimeout(t *testing.T) {
defer test.table.Close()
toaddr := &net.UDPAddr{IP: net.ParseIP("1.2.3.4"), Port: 2222}
toid := NodeID{1, 2, 3, 4}
target := NodeID{4, 5, 6, 7}
toid := enode.ID{1, 2, 3, 4}
target := encPubkey{4, 5, 6, 7}
result, err := test.udp.findnode(toid, toaddr, target)
if err != errTimeout {
t.Error("expected timeout error, got", err)
@ -238,28 +239,30 @@ func TestUDP_findnode(t *testing.T) {
// put a few nodes into the table. their exact
// distribution shouldn't matter much, although we need to
// take care not to overflow any bucket.
targetHash := crypto.Keccak256Hash(testTarget[:])
nodes := &nodesByDistance{target: targetHash}
nodes := &nodesByDistance{target: testTarget.id()}
for i := 0; i < bucketSize; i++ {
nodes.push(nodeAtDistance(test.table.self.sha, i+2), bucketSize)
key := newkey()
n := wrapNode(enode.NewV4(&key.PublicKey, net.IP{10, 13, 0, 1}, 0, i))
nodes.push(n, bucketSize)
}
test.table.stuff(nodes.entries)
// ensure there's a bond with the test node,
// findnode won't be accepted otherwise.
test.table.db.updateLastPongReceived(PubkeyID(&test.remotekey.PublicKey), time.Now())
remoteID := encodePubkey(&test.remotekey.PublicKey).id()
test.table.db.UpdateLastPongReceived(remoteID, time.Now())
// check that closest neighbors are returned.
test.packetIn(nil, findnodePacket, &findnode{Target: testTarget, Expiration: futureExp})
expected := test.table.closest(targetHash, bucketSize)
expected := test.table.closest(testTarget.id(), bucketSize)
waitNeighbors := func(want []*Node) {
waitNeighbors := func(want []*node) {
test.waitPacketOut(func(p *neighbors) {
if len(p.Nodes) != len(want) {
t.Errorf("wrong number of results: got %d, want %d", len(p.Nodes), bucketSize)
}
for i := range p.Nodes {
if p.Nodes[i].ID != want[i].ID {
if p.Nodes[i].ID.id() != want[i].ID() {
t.Errorf("result mismatch at %d:\n got: %v\n want: %v", i, p.Nodes[i], expected.entries[i])
}
}
@ -273,12 +276,13 @@ func TestUDP_findnodeMultiReply(t *testing.T) {
test := newUDPTest(t)
defer test.table.Close()
rid := PubkeyID(&test.remotekey.PublicKey)
test.table.db.updateLastPingReceived(rid, time.Now())
rid := enode.PubkeyToIDV4(&test.remotekey.PublicKey)
test.table.db.UpdateLastPingReceived(rid, time.Now())
// queue a pending findnode request
resultc, errc := make(chan []*Node), make(chan error)
resultc, errc := make(chan []*node), make(chan error)
go func() {
rid := encodePubkey(&test.remotekey.PublicKey).id()
ns, err := test.udp.findnode(rid, test.remoteaddr, testTarget)
if err != nil && len(ns) == 0 {
errc <- err
@ -296,11 +300,11 @@ func TestUDP_findnodeMultiReply(t *testing.T) {
})
// send the reply as two packets.
list := []*Node{
MustParseNode("enode://ba85011c70bcc5c04d8607d3a0ed29aa6179c092cbdda10d5d32684fb33ed01bd94f588ca8f91ac48318087dcb02eaf36773a7a453f0eedd6742af668097b29c@10.0.1.16:30303?discport=30304"),
MustParseNode("enode://81fa361d25f157cd421c60dcc28d8dac5ef6a89476633339c5df30287474520caca09627da18543d9079b5b288698b542d56167aa5c09111e55acdbbdf2ef799@10.0.1.16:30303"),
MustParseNode("enode://9bffefd833d53fac8e652415f4973bee289e8b1a5c6c4cbe70abf817ce8a64cee11b823b66a987f51aaa9fba0d6a91b3e6bf0d5a5d1042de8e9eeea057b217f8@10.0.1.36:30301?discport=17"),
MustParseNode("enode://1b5b4aa662d7cb44a7221bfba67302590b643028197a7d5214790f3bac7aaa4a3241be9e83c09cf1f6c69d007c634faae3dc1b1221793e8446c0b3a09de65960@10.0.1.16:30303"),
list := []*node{
wrapNode(enode.MustParseV4("enode://ba85011c70bcc5c04d8607d3a0ed29aa6179c092cbdda10d5d32684fb33ed01bd94f588ca8f91ac48318087dcb02eaf36773a7a453f0eedd6742af668097b29c@10.0.1.16:30303?discport=30304")),
wrapNode(enode.MustParseV4("enode://81fa361d25f157cd421c60dcc28d8dac5ef6a89476633339c5df30287474520caca09627da18543d9079b5b288698b542d56167aa5c09111e55acdbbdf2ef799@10.0.1.16:30303")),
wrapNode(enode.MustParseV4("enode://9bffefd833d53fac8e652415f4973bee289e8b1a5c6c4cbe70abf817ce8a64cee11b823b66a987f51aaa9fba0d6a91b3e6bf0d5a5d1042de8e9eeea057b217f8@10.0.1.36:30301?discport=17")),
wrapNode(enode.MustParseV4("enode://1b5b4aa662d7cb44a7221bfba67302590b643028197a7d5214790f3bac7aaa4a3241be9e83c09cf1f6c69d007c634faae3dc1b1221793e8446c0b3a09de65960@10.0.1.16:30303")),
}
rpclist := make([]rpcNode, len(list))
for i := range list {
@ -325,8 +329,8 @@ func TestUDP_findnodeMultiReply(t *testing.T) {
func TestUDP_successfulPing(t *testing.T) {
test := newUDPTest(t)
added := make(chan *Node, 1)
test.table.nodeAddedHook = func(n *Node) { added <- n }
added := make(chan *node, 1)
test.table.nodeAddedHook = func(n *node) { added <- n }
defer test.table.Close()
// The remote side sends a ping packet to initiate the exchange.
@ -370,18 +374,18 @@ func TestUDP_successfulPing(t *testing.T) {
// pong packet.
select {
case n := <-added:
rid := PubkeyID(&test.remotekey.PublicKey)
if n.ID != rid {
t.Errorf("node has wrong ID: got %v, want %v", n.ID, rid)
rid := encodePubkey(&test.remotekey.PublicKey).id()
if n.ID() != rid {
t.Errorf("node has wrong ID: got %v, want %v", n.ID(), rid)
}
if !n.IP.Equal(test.remoteaddr.IP) {
t.Errorf("node has wrong IP: got %v, want: %v", n.IP, test.remoteaddr.IP)
if !n.IP().Equal(test.remoteaddr.IP) {
t.Errorf("node has wrong IP: got %v, want: %v", n.IP(), test.remoteaddr.IP)
}
if int(n.UDP) != test.remoteaddr.Port {
t.Errorf("node has wrong UDP port: got %v, want: %v", n.UDP, test.remoteaddr.Port)
if int(n.UDP()) != test.remoteaddr.Port {
t.Errorf("node has wrong UDP port: got %v, want: %v", n.UDP(), test.remoteaddr.Port)
}
if n.TCP != testRemote.TCP {
t.Errorf("node has wrong TCP port: got %v, want: %v", n.TCP, testRemote.TCP)
if n.TCP() != int(testRemote.TCP) {
t.Errorf("node has wrong TCP port: got %v, want: %v", n.TCP(), testRemote.TCP)
}
case <-time.After(2 * time.Second):
t.Errorf("node was not added within 2 seconds")
@ -434,7 +438,7 @@ var testPackets = []struct {
{
input: "c7c44041b9f7c7e41934417ebac9a8e1a4c6298f74553f2fcfdcae6ed6fe53163eb3d2b52e39fe91831b8a927bf4fc222c3902202027e5e9eb812195f95d20061ef5cd31d502e47ecb61183f74a504fe04c51e73df81f25c4d506b26db4517490103f84eb840ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd31387574077f301b421bc84df7266c44e9e6d569fc56be00812904767bf5ccd1fc7f8443b9a35582999983999999280dc62cc8255c73471e0a61da0c89acdc0e035e260add7fc0c04ad9ebf3919644c91cb247affc82b69bd2ca235c71eab8e49737c937a2c396",
wantPacket: &findnode{
Target: MustHexID("ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd31387574077f301b421bc84df7266c44e9e6d569fc56be00812904767bf5ccd1fc7f"),
Target: hexEncPubkey("ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd31387574077f301b421bc84df7266c44e9e6d569fc56be00812904767bf5ccd1fc7f"),
Expiration: 1136239445,
Rest: []rlp.RawValue{{0x82, 0x99, 0x99}, {0x83, 0x99, 0x99, 0x99}},
},
@ -444,25 +448,25 @@ var testPackets = []struct {
wantPacket: &neighbors{
Nodes: []rpcNode{
{
ID: MustHexID("3155e1427f85f10a5c9a7755877748041af1bcd8d474ec065eb33df57a97babf54bfd2103575fa829115d224c523596b401065a97f74010610fce76382c0bf32"),
ID: hexEncPubkey("3155e1427f85f10a5c9a7755877748041af1bcd8d474ec065eb33df57a97babf54bfd2103575fa829115d224c523596b401065a97f74010610fce76382c0bf32"),
IP: net.ParseIP("99.33.22.55").To4(),
UDP: 4444,
TCP: 4445,
},
{
ID: MustHexID("312c55512422cf9b8a4097e9a6ad79402e87a15ae909a4bfefa22398f03d20951933beea1e4dfa6f968212385e829f04c2d314fc2d4e255e0d3bc08792b069db"),
ID: hexEncPubkey("312c55512422cf9b8a4097e9a6ad79402e87a15ae909a4bfefa22398f03d20951933beea1e4dfa6f968212385e829f04c2d314fc2d4e255e0d3bc08792b069db"),
IP: net.ParseIP("1.2.3.4").To4(),
UDP: 1,
TCP: 1,
},
{
ID: MustHexID("38643200b172dcfef857492156971f0e6aa2c538d8b74010f8e140811d53b98c765dd2d96126051913f44582e8c199ad7c6d6819e9a56483f637feaac9448aac"),
ID: hexEncPubkey("38643200b172dcfef857492156971f0e6aa2c538d8b74010f8e140811d53b98c765dd2d96126051913f44582e8c199ad7c6d6819e9a56483f637feaac9448aac"),
IP: net.ParseIP("2001:db8:3c4d:15::abcd:ef12"),
UDP: 3333,
TCP: 3333,
},
{
ID: MustHexID("8dcab8618c3253b558d459da53bd8fa68935a719aff8b811197101a4b2b47dd2d47295286fc00cc081bb542d760717d1bdd6bec2c37cd72eca367d6dd3b9df73"),
ID: hexEncPubkey("8dcab8618c3253b558d459da53bd8fa68935a719aff8b811197101a4b2b47dd2d47295286fc00cc081bb542d760717d1bdd6bec2c37cd72eca367d6dd3b9df73"),
IP: net.ParseIP("2001:db8:85a3:8d3:1319:8a2e:370:7348"),
UDP: 999,
TCP: 1000,
@ -476,14 +480,14 @@ var testPackets = []struct {
func TestForwardCompatibility(t *testing.T) {
testkey, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
wantNodeID := PubkeyID(&testkey.PublicKey)
wantNodeKey := encodePubkey(&testkey.PublicKey)
for _, test := range testPackets {
input, err := hex.DecodeString(test.input)
if err != nil {
t.Fatalf("invalid hex: %s", test.input)
}
packet, nodeid, _, err := decodePacket(input)
packet, nodekey, _, err := decodePacket(input)
if err != nil {
t.Errorf("did not accept packet %s\n%v", test.input, err)
continue
@ -491,8 +495,8 @@ func TestForwardCompatibility(t *testing.T) {
if !reflect.DeepEqual(packet, test.wantPacket) {
t.Errorf("got %s\nwant %s", spew.Sdump(packet), spew.Sdump(test.wantPacket))
}
if nodeid != wantNodeID {
t.Errorf("got id %v\nwant id %v", nodeid, wantNodeID)
if nodekey != wantNodeKey {
t.Errorf("got id %v\nwant id %v", nodekey, wantNodeKey)
}
}
}


@ -173,7 +173,7 @@ var nodeDBSeedQueryNodes = []struct {
),
pong: time.Now().Add(-3 * time.Hour),
},
// This one shouldn't be in in the result set because its
// This one shouldn't be in the result set because its
// nodeID is the local node's ID.
{
node: NewNode(


@ -14,57 +14,38 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package enr
package enode
import (
"crypto/ecdsa"
"fmt"
"sync"
"io"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/ethereum/go-ethereum/rlp"
)
// Registry of known identity schemes.
var schemes sync.Map
// An IdentityScheme is capable of verifying record signatures and
// deriving node addresses.
type IdentityScheme interface {
Verify(r *Record, sig []byte) error
NodeAddr(r *Record) []byte
// List of known secure identity schemes.
var ValidSchemes = enr.SchemeMap{
"v4": V4ID{},
}
// RegisterIdentityScheme adds an identity scheme to the global registry.
func RegisterIdentityScheme(name string, scheme IdentityScheme) {
if _, loaded := schemes.LoadOrStore(name, scheme); loaded {
panic("identity scheme " + name + " already registered")
}
}
// FindIdentityScheme resolves name to an identity scheme in the global registry.
func FindIdentityScheme(name string) IdentityScheme {
s, ok := schemes.Load(name)
if !ok {
return nil
}
return s.(IdentityScheme)
var ValidSchemesForTesting = enr.SchemeMap{
"v4": V4ID{},
"null": NullID{},
}
// v4ID is the "v4" identity scheme.
type v4ID struct{}
func init() {
RegisterIdentityScheme("v4", v4ID{})
}
type V4ID struct{}
// SignV4 signs a record using the v4 scheme.
func SignV4(r *Record, privkey *ecdsa.PrivateKey) error {
func SignV4(r *enr.Record, privkey *ecdsa.PrivateKey) error {
// Copy r to avoid modifying it if signing fails.
cpy := *r
cpy.Set(ID("v4"))
cpy.Set(enr.ID("v4"))
cpy.Set(Secp256k1(privkey.PublicKey))
h := sha3.NewKeccak256()
@ -74,18 +55,13 @@ func SignV4(r *Record, privkey *ecdsa.PrivateKey) error {
return err
}
sig = sig[:len(sig)-1] // remove v
if err = cpy.SetSig("v4", sig); err == nil {
if err = cpy.SetSig(V4ID{}, sig); err == nil {
*r = cpy
}
return err
}
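A minimal sketch, assuming a freshly generated key, of building and signing a record with this scheme:

	key, _ := crypto.GenerateKey()
	var r enr.Record
	r.Set(enr.IP{127, 0, 0, 1})
	r.Set(enr.UDP(30303))
	if err := SignV4(&r, key); err != nil {
		panic(err)
	}
	// r now carries enr.ID("v4") plus the Secp256k1 entry, and
	// ValidSchemes.NodeAddr(&r) returns keccak256 of the uncompressed public key.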
// s256raw is an unparsed secp256k1 public key entry.
type s256raw []byte
func (s256raw) ENRKey() string { return "secp256k1" }
func (v4ID) Verify(r *Record, sig []byte) error {
func (V4ID) Verify(r *enr.Record, sig []byte) error {
var entry s256raw
if err := r.Load(&entry); err != nil {
return err
@ -96,12 +72,12 @@ func (v4ID) Verify(r *Record, sig []byte) error {
h := sha3.NewKeccak256()
rlp.Encode(h, r.AppendElements(nil))
if !crypto.VerifySignature(entry, h.Sum(nil), sig) {
return errInvalidSig
return enr.ErrInvalidSig
}
return nil
}
func (v4ID) NodeAddr(r *Record) []byte {
func (V4ID) NodeAddr(r *enr.Record) []byte {
var pubkey Secp256k1
err := r.Load(&pubkey)
if err != nil {
@ -112,3 +88,73 @@ func (v4ID) NodeAddr(r *Record) []byte {
math.ReadBits(pubkey.Y, buf[32:])
return crypto.Keccak256(buf)
}
// Secp256k1 is the "secp256k1" key, which holds a public key.
type Secp256k1 ecdsa.PublicKey
func (v Secp256k1) ENRKey() string { return "secp256k1" }
// EncodeRLP implements rlp.Encoder.
func (v Secp256k1) EncodeRLP(w io.Writer) error {
return rlp.Encode(w, crypto.CompressPubkey((*ecdsa.PublicKey)(&v)))
}
// DecodeRLP implements rlp.Decoder.
func (v *Secp256k1) DecodeRLP(s *rlp.Stream) error {
buf, err := s.Bytes()
if err != nil {
return err
}
pk, err := crypto.DecompressPubkey(buf)
if err != nil {
return err
}
*v = (Secp256k1)(*pk)
return nil
}
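The entry is stored compressed on the wire (via crypto.CompressPubkey). A hedged round-trip sketch, assuming privkey is any *ecdsa.PrivateKey:

	// Sketch: store and load the secp256k1 entry directly.
	var r enr.Record
	r.Set(Secp256k1(privkey.PublicKey)) // encoded compressed by EncodeRLP above
	var pk Secp256k1
	if err := r.Load(&pk); err == nil {
		_ = (*ecdsa.PublicKey)(&pk) // original key recovered
	}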
// s256raw is an unparsed secp256k1 public key entry.
type s256raw []byte
func (s256raw) ENRKey() string { return "secp256k1" }
// v4CompatID is a weaker and insecure version of the "v4" scheme which only checks for the
// presence of a secp256k1 public key, but doesn't verify the signature.
type v4CompatID struct {
V4ID
}
func (v4CompatID) Verify(r *enr.Record, sig []byte) error {
var pubkey Secp256k1
return r.Load(&pubkey)
}
func signV4Compat(r *enr.Record, pubkey *ecdsa.PublicKey) {
r.Set((*Secp256k1)(pubkey))
if err := r.SetSig(v4CompatID{}, []byte{}); err != nil {
panic(err)
}
}
// NullID is the "null" ENR identity scheme. This scheme stores the node
// ID in the record without any signature.
type NullID struct{}
func (NullID) Verify(r *enr.Record, sig []byte) error {
return nil
}
func (NullID) NodeAddr(r *enr.Record) []byte {
var id ID
r.Load(enr.WithEntry("nulladdr", &id))
return id[:]
}
func SignNull(r *enr.Record, id ID) *Node {
r.Set(enr.ID("null"))
r.Set(enr.WithEntry("nulladdr", id))
if err := r.SetSig(NullID{}, []byte{}); err != nil {
panic(err)
}
return &Node{r: *r, id: id}
}
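A hedged sketch of how the discover test helpers earlier in this diff use the null scheme to fabricate nodes with chosen IDs:

	// Sketch: a throwaway node whose ID is picked directly, no key or signature needed.
	var r enr.Record
	r.Set(enr.IP{0, 0, 0, 0})
	n := SignNull(&r, ID{1})
	_ = n.ID() // == ID{1}; only verifiable against ValidSchemesForTesting, not ValidSchemes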


@ -0,0 +1,74 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package enode
import (
"bytes"
"crypto/ecdsa"
"encoding/hex"
"math/big"
"testing"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/ethereum/go-ethereum/rlp"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
privkey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
pubkey = &privkey.PublicKey
)
func TestEmptyNodeID(t *testing.T) {
var r enr.Record
if addr := ValidSchemes.NodeAddr(&r); addr != nil {
t.Errorf("wrong address on empty record: got %v, want %v", addr, nil)
}
require.NoError(t, SignV4(&r, privkey))
expected := "a448f24c6d18e575453db13171562b71999873db5b286df957af199ec94617f7"
assert.Equal(t, expected, hex.EncodeToString(ValidSchemes.NodeAddr(&r)))
}
// Checks that failure to sign leaves the record unmodified.
func TestSignError(t *testing.T) {
invalidKey := &ecdsa.PrivateKey{D: new(big.Int), PublicKey: *pubkey}
var r enr.Record
emptyEnc, _ := rlp.EncodeToBytes(&r)
if err := SignV4(&r, invalidKey); err == nil {
t.Fatal("expected error from SignV4")
}
newEnc, _ := rlp.EncodeToBytes(&r)
if !bytes.Equal(newEnc, emptyEnc) {
t.Fatal("record modified even though signing failed")
}
}
// TestGetSetSecp256k1 tests encoding/decoding and setting/getting of the Secp256k1 key.
func TestGetSetSecp256k1(t *testing.T) {
var r enr.Record
if err := SignV4(&r, privkey); err != nil {
t.Fatal(err)
}
var pk Secp256k1
require.NoError(t, r.Load(&pk))
assert.EqualValues(t, pubkey, &pk)
}

p2p/enode/node.go (new file, 248 lines)

@ -0,0 +1,248 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package enode
import (
"crypto/ecdsa"
"encoding/hex"
"errors"
"fmt"
"math/bits"
"math/rand"
"net"
"strings"
"github.com/ethereum/go-ethereum/p2p/enr"
)
// Node represents a host on the network.
type Node struct {
r enr.Record
id ID
}
// New wraps a node record. The record must be valid according to the given
// identity scheme.
func New(validSchemes enr.IdentityScheme, r *enr.Record) (*Node, error) {
if err := r.VerifySignature(validSchemes); err != nil {
return nil, err
}
node := &Node{r: *r}
if n := copy(node.id[:], validSchemes.NodeAddr(&node.r)); n != len(ID{}) {
return nil, fmt.Errorf("invalid node ID length %d, need %d", n, len(ID{}))
}
return node, nil
}
// ID returns the node identifier.
func (n *Node) ID() ID {
return n.id
}
// Seq returns the sequence number of the underlying record.
func (n *Node) Seq() uint64 {
return n.r.Seq()
}
// Incomplete returns true for nodes with no IP address.
func (n *Node) Incomplete() bool {
return n.IP() == nil
}
// Load retrieves an entry from the underlying record.
func (n *Node) Load(k enr.Entry) error {
return n.r.Load(k)
}
// IP returns the IP address of the node.
func (n *Node) IP() net.IP {
var ip net.IP
n.Load((*enr.IP)(&ip))
return ip
}
// UDP returns the UDP port of the node.
func (n *Node) UDP() int {
var port enr.UDP
n.Load(&port)
return int(port)
}
// TCP returns the TCP port of the node.
func (n *Node) TCP() int {
var port enr.TCP
n.Load(&port)
return int(port)
}
// Pubkey returns the secp256k1 public key of the node, if present.
func (n *Node) Pubkey() *ecdsa.PublicKey {
var key ecdsa.PublicKey
if n.Load((*Secp256k1)(&key)) != nil {
return nil
}
return &key
}
// ValidateComplete checks whether n is a valid complete node.
func (n *Node) ValidateComplete() error {
if n.Incomplete() {
return errors.New("incomplete node")
}
if n.UDP() == 0 {
return errors.New("missing UDP port")
}
ip := n.IP()
if ip.IsMulticast() || ip.IsUnspecified() {
return errors.New("invalid IP (multicast/unspecified)")
}
// Validate the node key (on curve, etc.).
var key Secp256k1
return n.Load(&key)
}
// The string representation of a Node is a URL.
// Please see ParseV4 for a description of the format.
func (n *Node) String() string {
return n.v4URL()
}
// MarshalText implements encoding.TextMarshaler.
func (n *Node) MarshalText() ([]byte, error) {
return []byte(n.v4URL()), nil
}
// UnmarshalText implements encoding.TextUnmarshaler.
func (n *Node) UnmarshalText(text []byte) error {
dec, err := ParseV4(string(text))
if err == nil {
*n = *dec
}
return err
}
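
As an aside (hypothetical, not part of node.go): constructing a node with NewV4, the compatibility constructor from the same package's URL helpers, and reading the typed fields back through the accessors defined above.

package enode

import (
    "fmt"
    "net"

    "github.com/ethereum/go-ethereum/crypto"
)

func ExampleNode() {
    key, _ := crypto.GenerateKey()
    n := NewV4(&key.PublicKey, net.IP{192, 168, 0, 1}, 30303, 30303)
    fmt.Println(n.IP(), n.TCP(), n.UDP(), n.Incomplete(), n.ValidateComplete() == nil)
    // Output: 192.168.0.1 30303 30303 false true
}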
// ID is a unique identifier for each node.
type ID [32]byte
// Bytes returns a byte slice representation of the ID
func (n ID) Bytes() []byte {
return n[:]
}
// ID prints as a long hexadecimal number.
func (n ID) String() string {
return fmt.Sprintf("%x", n[:])
}
// The Go syntax representation of an ID is a call to HexID.
func (n ID) GoString() string {
return fmt.Sprintf("enode.HexID(\"%x\")", n[:])
}
// TerminalString returns a shortened hex string for terminal logging.
func (n ID) TerminalString() string {
return hex.EncodeToString(n[:8])
}
// MarshalText implements the encoding.TextMarshaler interface.
func (n ID) MarshalText() ([]byte, error) {
return []byte(hex.EncodeToString(n[:])), nil
}
// UnmarshalText implements the encoding.TextUnmarshaler interface.
func (n *ID) UnmarshalText(text []byte) error {
id, err := parseID(string(text))
if err != nil {
return err
}
*n = id
return nil
}
// HexID converts a hex string to an ID.
// The string may be prefixed with 0x.
// It panics if the string is not a valid ID.
func HexID(in string) ID {
id, err := parseID(in)
if err != nil {
panic(err)
}
return id
}
func parseID(in string) (ID, error) {
var id ID
b, err := hex.DecodeString(strings.TrimPrefix(in, "0x"))
if err != nil {
return id, err
} else if len(b) != len(id) {
return id, fmt.Errorf("wrong length, want %d hex chars", len(id)*2)
}
copy(id[:], b)
return id, nil
}
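
A tiny hypothetical example of the ID helpers above; the hex value is the node address that also appears in the tests, and may be given with or without the 0x prefix.

package enode

import "fmt"

func ExampleHexID() {
    id := HexID("0xa448f24c6d18e575453db13171562b71999873db5b286df957af199ec94617f7")
    fmt.Println(id.TerminalString()) // first 8 bytes only, handy for logs
    // Output: a448f24c6d18e575
}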
// DistCmp compares the distances a->target and b->target.
// Returns -1 if a is closer to target, 1 if b is closer to target
// and 0 if they are equal.
func DistCmp(target, a, b ID) int {
for i := range target {
da := a[i] ^ target[i]
db := b[i] ^ target[i]
if da > db {
return 1
} else if da < db {
return -1
}
}
return 0
}
// LogDist returns the logarithmic distance between a and b, log2(a ^ b).
func LogDist(a, b ID) int {
lz := 0
for i := range a {
x := a[i] ^ b[i]
if x == 0 {
lz += 8
} else {
lz += bits.LeadingZeros8(x)
break
}
}
return len(a)*8 - lz
}
// RandomID returns a random ID b such that logdist(a, b) == n.
func RandomID(a ID, n int) (b ID) {
if n == 0 {
return a
}
// flip bit at position n, fill the rest with random bits
b = a
pos := len(a) - n/8 - 1
bit := byte(0x01) << (byte(n%8) - 1)
if bit == 0 {
pos++
bit = 0x80
}
b[pos] = a[pos]&^bit | ^a[pos]&bit // TODO: randomize end bits
for i := pos + 1; i < len(a); i++ {
b[i] = byte(rand.Intn(255))
}
return b
}
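
A worked example (hypothetical, not part of this diff) of the XOR-distance helpers: two IDs that differ only in the most significant bit are at the maximum logarithmic distance of 256, and RandomID produces an ID at exactly the requested distance.

package enode

import "fmt"

func ExampleLogDist() {
    var a, b ID
    b[0] = 0x80 // differs from a only in the highest-order bit
    fmt.Println(LogDist(a, b))
    fmt.Println(DistCmp(a, a, b)) // a is closer to the target (a) than b is
    fmt.Println(LogDist(a, RandomID(a, 10)))
    // Output:
    // 256
    // -1
    // 10
}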

62
p2p/enode/node_test.go Normal file
View File

@ -0,0 +1,62 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package enode
import (
"encoding/hex"
"fmt"
"testing"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/ethereum/go-ethereum/rlp"
"github.com/stretchr/testify/assert"
)
var pyRecord, _ = hex.DecodeString("f884b8407098ad865b00a582051940cb9cf36836572411a47278783077011599ed5cd16b76f2635f4e234738f30813a89eb9137e3e3df5266e3a1f11df72ecf1145ccb9c01826964827634826970847f00000189736563703235366b31a103ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd31388375647082765f")
// TestPythonInterop checks that we can decode and verify a record produced by the Python
// implementation.
func TestPythonInterop(t *testing.T) {
var r enr.Record
if err := rlp.DecodeBytes(pyRecord, &r); err != nil {
t.Fatalf("can't decode: %v", err)
}
n, err := New(ValidSchemes, &r)
if err != nil {
t.Fatalf("can't verify record: %v", err)
}
var (
wantID = HexID("a448f24c6d18e575453db13171562b71999873db5b286df957af199ec94617f7")
wantSeq = uint64(1)
wantIP = enr.IP{127, 0, 0, 1}
wantUDP = enr.UDP(30303)
)
if n.Seq() != wantSeq {
t.Errorf("wrong seq: got %d, want %d", n.Seq(), wantSeq)
}
if n.ID() != wantID {
t.Errorf("wrong id: got %x, want %x", n.ID(), wantID)
}
want := map[enr.Entry]interface{}{new(enr.IP): &wantIP, new(enr.UDP): &wantUDP}
for k, v := range want {
desc := fmt.Sprintf("loading key %q", k.ENRKey())
if assert.NoError(t, n.Load(k), desc) {
assert.Equal(t, k, v, desc)
}
}
}

View File

@ -14,20 +14,17 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Contains the node database, storing previously seen nodes and any collected
// metadata about them for QoS purposes.
package discover
package enode
import (
"bytes"
"crypto/rand"
"encoding/binary"
"fmt"
"os"
"sync"
"time"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/syndtr/goleveldb/leveldb"
@ -39,16 +36,16 @@ import (
)
var (
nodeDBNilNodeID = NodeID{} // Special node ID to use as a nil element.
nodeDBNilID = ID{} // Special node ID to use as a nil element.
nodeDBNodeExpiration = 24 * time.Hour // Time after which an unseen node should be dropped.
nodeDBCleanupCycle = time.Hour // Time period for running the expiration task.
nodeDBVersion = 5
nodeDBVersion = 6
)
// nodeDB stores all nodes we know about.
type nodeDB struct {
// DB is the node database, storing previously seen nodes and any collected metadata about
// them for QoS purposes.
type DB struct {
lvl *leveldb.DB // Interface to the database itself
self NodeID // Own node id to prevent adding it into the database
runner sync.Once // Ensures we can start at most one expirer
quit chan struct{} // Channel to signal the expiring thread to stop
}
@ -64,33 +61,27 @@ var (
nodeDBDiscoverFindFails = nodeDBDiscoverRoot + ":findfail"
)
// newNodeDB creates a new node database for storing and retrieving infos about
// known peers in the network. If no path is given, an in-memory, temporary
// database is constructed.
func newNodeDB(path string, version int, self NodeID) (*nodeDB, error) {
// OpenDB opens a node database for storing and retrieving information about known peers in
// the network. If no path is given, an in-memory, temporary database is constructed.
func OpenDB(path string) (*DB, error) {
if path == "" {
return newMemoryNodeDB(self)
return newMemoryDB()
}
return newPersistentNodeDB(path, version, self)
return newPersistentDB(path)
}
// newMemoryNodeDB creates a new in-memory node database without a persistent
// backend.
func newMemoryNodeDB(self NodeID) (*nodeDB, error) {
// newMemoryDB creates a new in-memory node database without a persistent backend.
func newMemoryDB() (*DB, error) {
db, err := leveldb.Open(storage.NewMemStorage(), nil)
if err != nil {
return nil, err
}
return &nodeDB{
lvl: db,
self: self,
quit: make(chan struct{}),
}, nil
return &DB{lvl: db, quit: make(chan struct{})}, nil
}
// newPersistentDB creates/opens a leveldb-backed persistent node database,
// also flushing its contents in case of a version mismatch.
func newPersistentNodeDB(path string, version int, self NodeID) (*nodeDB, error) {
func newPersistentDB(path string) (*DB, error) {
opts := &opt.Options{OpenFilesCacheCapacity: 5}
db, err := leveldb.OpenFile(path, opts)
if _, iscorrupted := err.(*errors.ErrCorrupted); iscorrupted {
@ -102,7 +93,7 @@ func newPersistentNodeDB(path string, version int, self NodeID) (*nodeDB, error)
// The nodes contained in the cache correspond to a certain protocol version.
// Flush all nodes if the version doesn't match.
currentVer := make([]byte, binary.MaxVarintLen64)
currentVer = currentVer[:binary.PutVarint(currentVer, int64(version))]
currentVer = currentVer[:binary.PutVarint(currentVer, int64(nodeDBVersion))]
blob, err := db.Get(nodeDBVersionKey, nil)
switch err {
@ -120,30 +111,26 @@ func newPersistentNodeDB(path string, version int, self NodeID) (*nodeDB, error)
if err = os.RemoveAll(path); err != nil {
return nil, err
}
return newPersistentNodeDB(path, version, self)
return newPersistentDB(path)
}
}
return &nodeDB{
lvl: db,
self: self,
quit: make(chan struct{}),
}, nil
return &DB{lvl: db, quit: make(chan struct{})}, nil
}
// makeKey generates the leveldb key-blob from a node id and its particular
// field of interest.
func makeKey(id NodeID, field string) []byte {
if bytes.Equal(id[:], nodeDBNilNodeID[:]) {
func makeKey(id ID, field string) []byte {
if bytes.Equal(id[:], nodeDBNilID[:]) {
return []byte(field)
}
return append(nodeDBItemPrefix, append(id[:], field...)...)
}
// splitKey tries to split a database key into a node id and a field part.
func splitKey(key []byte) (id NodeID, field string) {
func splitKey(key []byte) (id ID, field string) {
// If the key is not of a node, return it plainly
if !bytes.HasPrefix(key, nodeDBItemPrefix) {
return NodeID{}, string(key)
return ID{}, string(key)
}
// Otherwise split the id and field
item := key[len(nodeDBItemPrefix):]
@ -155,7 +142,7 @@ func splitKey(key []byte) (id NodeID, field string) {
// fetchInt64 retrieves an integer instance associated with a particular
// database key.
func (db *nodeDB) fetchInt64(key []byte) int64 {
func (db *DB) fetchInt64(key []byte) int64 {
blob, err := db.lvl.Get(key, nil)
if err != nil {
return 0
@ -169,39 +156,43 @@ func (db *nodeDB) fetchInt64(key []byte) int64 {
// storeInt64 stores an int64 value, such as a unix timestamp, under a specific
// database key.
func (db *nodeDB) storeInt64(key []byte, n int64) error {
func (db *DB) storeInt64(key []byte, n int64) error {
blob := make([]byte, binary.MaxVarintLen64)
blob = blob[:binary.PutVarint(blob, n)]
return db.lvl.Put(key, blob, nil)
}
// node retrieves a node with a given id from the database.
func (db *nodeDB) node(id NodeID) *Node {
// Node retrieves a node with a given id from the database.
func (db *DB) Node(id ID) *Node {
blob, err := db.lvl.Get(makeKey(id, nodeDBDiscoverRoot), nil)
if err != nil {
return nil
}
node := new(Node)
if err := rlp.DecodeBytes(blob, node); err != nil {
log.Error("Failed to decode node RLP", "err", err)
return nil
return mustDecodeNode(id[:], blob)
}
node.sha = crypto.Keccak256Hash(node.ID[:])
func mustDecodeNode(id, data []byte) *Node {
node := new(Node)
if err := rlp.DecodeBytes(data, &node.r); err != nil {
panic(fmt.Errorf("p2p/enode: can't decode node %x in DB: %v", id, err))
}
// Restore node id cache.
copy(node.id[:], id)
return node
}
// updateNode inserts - potentially overwriting - a node into the peer database.
func (db *nodeDB) updateNode(node *Node) error {
blob, err := rlp.EncodeToBytes(node)
// UpdateNode inserts - potentially overwriting - a node into the peer database.
func (db *DB) UpdateNode(node *Node) error {
blob, err := rlp.EncodeToBytes(&node.r)
if err != nil {
return err
}
return db.lvl.Put(makeKey(node.ID, nodeDBDiscoverRoot), blob, nil)
return db.lvl.Put(makeKey(node.ID(), nodeDBDiscoverRoot), blob, nil)
}
// deleteNode deletes all information/keys associated with a node.
func (db *nodeDB) deleteNode(id NodeID) error {
// DeleteNode deletes all information/keys associated with a node.
func (db *DB) DeleteNode(id ID) error {
deleter := db.lvl.NewIterator(util.BytesPrefix(makeKey(id, "")), nil)
for deleter.Next() {
if err := db.lvl.Delete(deleter.Key(), nil); err != nil {
@ -220,13 +211,13 @@ func (db *nodeDB) deleteNode(id NodeID) error {
// it would require significant overhead to exactly trace the first successful
// convergence, it's simpler to "ensure" the correct state when an appropriate
// condition occurs (i.e. a successful bonding), and discard further events.
func (db *nodeDB) ensureExpirer() {
func (db *DB) ensureExpirer() {
db.runner.Do(func() { go db.expirer() })
}
// expirer should be started in a goroutine, and is responsible for looping ad
// infinitum and dropping stale data from the database.
func (db *nodeDB) expirer() {
func (db *DB) expirer() {
tick := time.NewTicker(nodeDBCleanupCycle)
defer tick.Stop()
for {
@ -243,7 +234,7 @@ func (db *nodeDB) expirer() {
// expireNodes iterates over the database and deletes all nodes that have not
// been seen (i.e. received a pong from) for some allotted time.
func (db *nodeDB) expireNodes() error {
func (db *DB) expireNodes() error {
threshold := time.Now().Add(-nodeDBNodeExpiration)
// Find discovered nodes that are older than the allowance
@ -257,60 +248,56 @@ func (db *nodeDB) expireNodes() error {
continue
}
// Skip the node if not expired yet (and not self)
if !bytes.Equal(id[:], db.self[:]) {
if seen := db.lastPongReceived(id); seen.After(threshold) {
if seen := db.LastPongReceived(id); seen.After(threshold) {
continue
}
}
// Otherwise delete all associated information
db.deleteNode(id)
db.DeleteNode(id)
}
return nil
}
// lastPingReceived retrieves the time of the last ping packet sent by the remote node.
func (db *nodeDB) lastPingReceived(id NodeID) time.Time {
// LastPingReceived retrieves the time of the last ping packet received from
// a remote node.
func (db *DB) LastPingReceived(id ID) time.Time {
return time.Unix(db.fetchInt64(makeKey(id, nodeDBDiscoverPing)), 0)
}
// updateLastPing updates the last time remote node pinged us.
func (db *nodeDB) updateLastPingReceived(id NodeID, instance time.Time) error {
// UpdateLastPingReceived updates the last time we received a ping from a remote node.
func (db *DB) UpdateLastPingReceived(id ID, instance time.Time) error {
return db.storeInt64(makeKey(id, nodeDBDiscoverPing), instance.Unix())
}
// lastPongReceived retrieves the time of the last successful pong from remote node.
func (db *nodeDB) lastPongReceived(id NodeID) time.Time {
// LastPongReceived retrieves the time of the last successful pong from remote node.
func (db *DB) LastPongReceived(id ID) time.Time {
// Launch expirer
db.ensureExpirer()
return time.Unix(db.fetchInt64(makeKey(id, nodeDBDiscoverPong)), 0)
}
// hasBond reports whether the given node is considered bonded.
func (db *nodeDB) hasBond(id NodeID) bool {
return time.Since(db.lastPongReceived(id)) < nodeDBNodeExpiration
}
// updateLastPongReceived updates the last pong time of a node.
func (db *nodeDB) updateLastPongReceived(id NodeID, instance time.Time) error {
// UpdateLastPongReceived updates the last pong time of a node.
func (db *DB) UpdateLastPongReceived(id ID, instance time.Time) error {
return db.storeInt64(makeKey(id, nodeDBDiscoverPong), instance.Unix())
}
// findFails retrieves the number of findnode failures since bonding.
func (db *nodeDB) findFails(id NodeID) int {
// FindFails retrieves the number of findnode failures since bonding.
func (db *DB) FindFails(id ID) int {
return int(db.fetchInt64(makeKey(id, nodeDBDiscoverFindFails)))
}
// updateFindFails updates the number of findnode failures since bonding.
func (db *nodeDB) updateFindFails(id NodeID, fails int) error {
// UpdateFindFails updates the number of findnode failures since bonding.
func (db *DB) UpdateFindFails(id ID, fails int) error {
return db.storeInt64(makeKey(id, nodeDBDiscoverFindFails), int64(fails))
}
// querySeeds retrieves random nodes to be used as potential seed nodes
// QuerySeeds retrieves random nodes to be used as potential seed nodes
// for bootstrapping.
func (db *nodeDB) querySeeds(n int, maxAge time.Duration) []*Node {
func (db *DB) QuerySeeds(n int, maxAge time.Duration) []*Node {
var (
now = time.Now()
nodes = make([]*Node, 0, n)
it = db.lvl.NewIterator(nil, nil)
id NodeID
id ID
)
defer it.Release()
@ -329,14 +316,11 @@ seek:
id[0] = 0
continue seek // iterator exhausted
}
if n.ID == db.self {
continue seek
}
if now.Sub(db.lastPongReceived(n.ID)) > maxAge {
if now.Sub(db.LastPongReceived(n.ID())) > maxAge {
continue seek
}
for i := range nodes {
if nodes[i].ID == n.ID {
if nodes[i].ID() == n.ID() {
continue seek // duplicate
}
}
@ -353,18 +337,13 @@ func nextNode(it iterator.Iterator) *Node {
if field != nodeDBDiscoverRoot {
continue
}
var n Node
if err := rlp.DecodeBytes(it.Value(), &n); err != nil {
log.Warn("Failed to decode node RLP", "id", id, "err", err)
continue
}
return &n
return mustDecodeNode(id[:], it.Value())
}
return nil
}
// Close flushes and closes the database files.
func (db *nodeDB) close() {
func (db *DB) Close() {
close(db.quit)
db.lvl.Close()
}
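
A hedged usage sketch (not part of this diff) tying the exported DB methods together, written as a hypothetical example test inside package enode; it only touches an in-memory database.

package enode

import (
    "fmt"
    "net"
    "time"

    "github.com/ethereum/go-ethereum/crypto"
)

func ExampleDB() {
    db, _ := OpenDB("") // empty path: in-memory, nothing touches disk
    defer db.Close()

    key, _ := crypto.GenerateKey()
    n := NewV4(&key.PublicKey, net.IP{127, 0, 0, 1}, 30303, 30303)
    if err := db.UpdateNode(n); err != nil {
        panic(err)
    }
    db.UpdateLastPongReceived(n.ID(), time.Now())

    // QuerySeeds seeks at random positions and may miss nodes in a tiny
    // database (see TestDBSeedQuery), so its result is not asserted here.
    _ = db.QuerySeeds(10, time.Hour)
    fmt.Println(db.Node(n.ID()) != nil, db.FindFails(n.ID()))
    // Output: true 0
}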

View File

@ -14,10 +14,11 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package discover
package enode
import (
"bytes"
"fmt"
"io/ioutil"
"net"
"os"
@ -28,24 +29,21 @@ import (
)
var nodeDBKeyTests = []struct {
id NodeID
id ID
field string
key []byte
}{
{
id: NodeID{},
id: ID{},
field: "version",
key: []byte{0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e}, // field
},
{
id: MustHexID("0x1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
id: HexID("51232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
field: ":discover",
key: []byte{0x6e, 0x3a, // prefix
0x1d, 0xd9, 0xd6, 0x5c, 0x45, 0x52, 0xb5, 0xeb, // node id
0x43, 0xd5, 0xad, 0x55, 0xa2, 0xee, 0x3f, 0x56, //
0xc6, 0xcb, 0xc1, 0xc6, 0x4a, 0x5c, 0x8d, 0x65, //
0x9f, 0x51, 0xfc, 0xd5, 0x1b, 0xac, 0xe2, 0x43, //
0x51, 0x23, 0x2b, 0x8d, 0x78, 0x21, 0x61, 0x7d, //
key: []byte{
0x6e, 0x3a, // prefix
0x51, 0x23, 0x2b, 0x8d, 0x78, 0x21, 0x61, 0x7d, // node id
0x2b, 0x29, 0xb5, 0x4b, 0x81, 0xcd, 0xef, 0xb9, //
0xb3, 0xe9, 0xc3, 0x7d, 0x7f, 0xd5, 0xf6, 0x32, //
0x70, 0xbc, 0xc9, 0xe1, 0xa6, 0xf6, 0xa4, 0x39, //
@ -54,7 +52,7 @@ var nodeDBKeyTests = []struct {
},
}
func TestNodeDBKeys(t *testing.T) {
func TestDBKeys(t *testing.T) {
for i, tt := range nodeDBKeyTests {
if key := makeKey(tt.id, tt.field); !bytes.Equal(key, tt.key) {
t.Errorf("make test %d: key mismatch: have 0x%x, want 0x%x", i, key, tt.key)
@ -78,9 +76,9 @@ var nodeDBInt64Tests = []struct {
{key: []byte{0x03}, value: 3},
}
func TestNodeDBInt64(t *testing.T) {
db, _ := newNodeDB("", nodeDBVersion, NodeID{})
defer db.close()
func TestDBInt64(t *testing.T) {
db, _ := OpenDB("")
defer db.Close()
tests := nodeDBInt64Tests
for i := 0; i < len(tests); i++ {
@ -101,9 +99,9 @@ func TestNodeDBInt64(t *testing.T) {
}
}
func TestNodeDBFetchStore(t *testing.T) {
node := NewNode(
MustHexID("0x1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
func TestDBFetchStore(t *testing.T) {
node := NewV4(
hexPubkey("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
net.IP{192, 168, 0, 1},
30303,
30303,
@ -111,47 +109,47 @@ func TestNodeDBFetchStore(t *testing.T) {
inst := time.Now()
num := 314
db, _ := newNodeDB("", nodeDBVersion, NodeID{})
defer db.close()
db, _ := OpenDB("")
defer db.Close()
// Check fetch/store operations on a node ping object
if stored := db.lastPingReceived(node.ID); stored.Unix() != 0 {
if stored := db.LastPingReceived(node.ID()); stored.Unix() != 0 {
t.Errorf("ping: non-existing object: %v", stored)
}
if err := db.updateLastPingReceived(node.ID, inst); err != nil {
if err := db.UpdateLastPingReceived(node.ID(), inst); err != nil {
t.Errorf("ping: failed to update: %v", err)
}
if stored := db.lastPingReceived(node.ID); stored.Unix() != inst.Unix() {
if stored := db.LastPingReceived(node.ID()); stored.Unix() != inst.Unix() {
t.Errorf("ping: value mismatch: have %v, want %v", stored, inst)
}
// Check fetch/store operations on a node pong object
if stored := db.lastPongReceived(node.ID); stored.Unix() != 0 {
if stored := db.LastPongReceived(node.ID()); stored.Unix() != 0 {
t.Errorf("pong: non-existing object: %v", stored)
}
if err := db.updateLastPongReceived(node.ID, inst); err != nil {
if err := db.UpdateLastPongReceived(node.ID(), inst); err != nil {
t.Errorf("pong: failed to update: %v", err)
}
if stored := db.lastPongReceived(node.ID); stored.Unix() != inst.Unix() {
if stored := db.LastPongReceived(node.ID()); stored.Unix() != inst.Unix() {
t.Errorf("pong: value mismatch: have %v, want %v", stored, inst)
}
// Check fetch/store operations on a node findnode-failure object
if stored := db.findFails(node.ID); stored != 0 {
if stored := db.FindFails(node.ID()); stored != 0 {
t.Errorf("find-node fails: non-existing object: %v", stored)
}
if err := db.updateFindFails(node.ID, num); err != nil {
if err := db.UpdateFindFails(node.ID(), num); err != nil {
t.Errorf("find-node fails: failed to update: %v", err)
}
if stored := db.findFails(node.ID); stored != num {
if stored := db.FindFails(node.ID()); stored != num {
t.Errorf("find-node fails: value mismatch: have %v, want %v", stored, num)
}
// Check fetch/store operations on an actual node object
if stored := db.node(node.ID); stored != nil {
if stored := db.Node(node.ID()); stored != nil {
t.Errorf("node: non-existing object: %v", stored)
}
if err := db.updateNode(node); err != nil {
if err := db.UpdateNode(node); err != nil {
t.Errorf("node: failed to update: %v", err)
}
if stored := db.node(node.ID); stored == nil {
if stored := db.Node(node.ID()); stored == nil {
t.Errorf("node: not found")
} else if !reflect.DeepEqual(stored, node) {
t.Errorf("node: data mismatch: have %v, want %v", stored, node)
@ -165,19 +163,19 @@ var nodeDBSeedQueryNodes = []struct {
// This one should not be in the result set because its last
// pong time is too far in the past.
{
node: NewNode(
MustHexID("0x84d9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
node: NewV4(
hexPubkey("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
net.IP{127, 0, 0, 3},
30303,
30303,
),
pong: time.Now().Add(-3 * time.Hour),
},
// This one shouldn't be in in the result set because its
// This one used to be excluded as the local node; the database no longer
// tracks a local node ID, so it is now expected in the result set.
{
node: NewNode(
MustHexID("0x57d9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
node: NewV4(
hexPubkey("ff93ff820abacd4351b0f14e47b324bc82ff014c226f3f66a53535734a3c150e7e38ca03ef0964ba55acddc768f5e99cd59dea95ddd4defbab1339c92fa319b2"),
net.IP{127, 0, 0, 3},
30303,
30303,
@ -187,8 +185,8 @@ var nodeDBSeedQueryNodes = []struct {
// These should be in the result set.
{
node: NewNode(
MustHexID("0x22d9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
node: NewV4(
hexPubkey("c2b5eb3f5dde05f815b63777809ee3e7e0cbb20035a6b00ce327191e6eaa8f26a8d461c9112b7ab94698e7361fa19fd647e603e73239002946d76085b6f928d6"),
net.IP{127, 0, 0, 1},
30303,
30303,
@ -196,8 +194,8 @@ var nodeDBSeedQueryNodes = []struct {
pong: time.Now().Add(-2 * time.Second),
},
{
node: NewNode(
MustHexID("0x44d9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
node: NewV4(
hexPubkey("6ca1d400c8ddf8acc94bcb0dd254911ad71a57bed5e0ae5aa205beed59b28c2339908e97990c493499613cff8ecf6c3dc7112a8ead220cdcd00d8847ca3db755"),
net.IP{127, 0, 0, 2},
30303,
30303,
@ -205,56 +203,91 @@ var nodeDBSeedQueryNodes = []struct {
pong: time.Now().Add(-3 * time.Second),
},
{
node: NewNode(
MustHexID("0xe2d9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
node: NewV4(
hexPubkey("234dc63fe4d131212b38236c4c3411288d7bec61cbf7b120ff12c43dc60c96182882f4291d209db66f8a38e986c9c010ff59231a67f9515c7d1668b86b221a47"),
net.IP{127, 0, 0, 3},
30303,
30303,
),
pong: time.Now().Add(-1 * time.Second),
},
{
node: NewV4(
hexPubkey("c013a50b4d1ebce5c377d8af8cb7114fd933ffc9627f96ad56d90fef5b7253ec736fd07ef9a81dc2955a997e54b7bf50afd0aa9f110595e2bec5bb7ce1657004"),
net.IP{127, 0, 0, 3},
30303,
30303,
),
pong: time.Now().Add(-2 * time.Second),
},
{
node: NewV4(
hexPubkey("f141087e3e08af1aeec261ff75f48b5b1637f594ea9ad670e50051646b0416daa3b134c28788cbe98af26992a47652889cd8577ccc108ac02c6a664db2dc1283"),
net.IP{127, 0, 0, 3},
30303,
30303,
),
pong: time.Now().Add(-2 * time.Second),
},
}
func TestNodeDBSeedQuery(t *testing.T) {
db, _ := newNodeDB("", nodeDBVersion, nodeDBSeedQueryNodes[1].node.ID)
defer db.close()
func TestDBSeedQuery(t *testing.T) {
// Querying seeds uses seeks and might not find all nodes
// every time when the database is small. Run the test multiple
// times to avoid flakes.
const attempts = 15
var err error
for i := 0; i < attempts; i++ {
if err = testSeedQuery(); err == nil {
return
}
}
if err != nil {
t.Errorf("no successful run in %d attempts: %v", attempts, err)
}
}
func testSeedQuery() error {
db, _ := OpenDB("")
defer db.Close()
// Insert a batch of nodes for querying
for i, seed := range nodeDBSeedQueryNodes {
if err := db.updateNode(seed.node); err != nil {
t.Fatalf("node %d: failed to insert: %v", i, err)
if err := db.UpdateNode(seed.node); err != nil {
return fmt.Errorf("node %d: failed to insert: %v", i, err)
}
if err := db.updateLastPongReceived(seed.node.ID, seed.pong); err != nil {
t.Fatalf("node %d: failed to insert bondTime: %v", i, err)
if err := db.UpdateLastPongReceived(seed.node.ID(), seed.pong); err != nil {
return fmt.Errorf("node %d: failed to insert bondTime: %v", i, err)
}
}
// Retrieve the entire batch and check for duplicates
seeds := db.querySeeds(len(nodeDBSeedQueryNodes)*2, time.Hour)
have := make(map[NodeID]struct{})
seeds := db.QuerySeeds(len(nodeDBSeedQueryNodes)*2, time.Hour)
have := make(map[ID]struct{})
for _, seed := range seeds {
have[seed.ID] = struct{}{}
have[seed.ID()] = struct{}{}
}
want := make(map[NodeID]struct{})
for _, seed := range nodeDBSeedQueryNodes[2:] {
want[seed.node.ID] = struct{}{}
want := make(map[ID]struct{})
for _, seed := range nodeDBSeedQueryNodes[1:] {
want[seed.node.ID()] = struct{}{}
}
if len(seeds) != len(want) {
t.Errorf("seed count mismatch: have %v, want %v", len(seeds), len(want))
return fmt.Errorf("seed count mismatch: have %v, want %v", len(seeds), len(want))
}
for id := range have {
if _, ok := want[id]; !ok {
t.Errorf("extra seed: %v", id)
return fmt.Errorf("extra seed: %v", id)
}
}
for id := range want {
if _, ok := have[id]; !ok {
t.Errorf("missing seed: %v", id)
return fmt.Errorf("missing seed: %v", id)
}
}
return nil
}
func TestNodeDBPersistency(t *testing.T) {
func TestDBPersistency(t *testing.T) {
root, err := ioutil.TempDir("", "nodedb-")
if err != nil {
t.Fatalf("failed to create temporary data folder: %v", err)
@ -267,34 +300,24 @@ func TestNodeDBPersistency(t *testing.T) {
)
// Create a persistent database and store some values
db, err := newNodeDB(filepath.Join(root, "database"), nodeDBVersion, NodeID{})
db, err := OpenDB(filepath.Join(root, "database"))
if err != nil {
t.Fatalf("failed to create persistent database: %v", err)
}
if err := db.storeInt64(testKey, testInt); err != nil {
t.Fatalf("failed to store value: %v.", err)
}
db.close()
db.Close()
// Reopen the database and check the value
db, err = newNodeDB(filepath.Join(root, "database"), nodeDBVersion, NodeID{})
db, err = OpenDB(filepath.Join(root, "database"))
if err != nil {
t.Fatalf("failed to open persistent database: %v", err)
}
if val := db.fetchInt64(testKey); val != testInt {
t.Fatalf("value mismatch: have %v, want %v", val, testInt)
}
db.close()
// Change the database version and check flush
db, err = newNodeDB(filepath.Join(root, "database"), nodeDBVersion+1, NodeID{})
if err != nil {
t.Fatalf("failed to open persistent database: %v", err)
}
if val := db.fetchInt64(testKey); val != 0 {
t.Fatalf("value mismatch: have %v, want %v", val, 0)
}
db.close()
db.Close()
}
var nodeDBExpirationNodes = []struct {
@ -303,8 +326,8 @@ var nodeDBExpirationNodes = []struct {
exp bool
}{
{
node: NewNode(
MustHexID("0x01d9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
node: NewV4(
hexPubkey("8d110e2ed4b446d9b5fb50f117e5f37fb7597af455e1dab0e6f045a6eeaa786a6781141659020d38bdc5e698ed3d4d2bafa8b5061810dfa63e8ac038db2e9b67"),
net.IP{127, 0, 0, 1},
30303,
30303,
@ -312,8 +335,8 @@ var nodeDBExpirationNodes = []struct {
pong: time.Now().Add(-nodeDBNodeExpiration + time.Minute),
exp: false,
}, {
node: NewNode(
MustHexID("0x02d9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
node: NewV4(
hexPubkey("913a205579c32425b220dfba999d215066e5bdbf900226b11da1907eae5e93eb40616d47412cf819664e9eacbdfcca6b0c6e07e09847a38472d4be46ab0c3672"),
net.IP{127, 0, 0, 2},
30303,
30303,
@ -323,16 +346,16 @@ var nodeDBExpirationNodes = []struct {
},
}
func TestNodeDBExpiration(t *testing.T) {
db, _ := newNodeDB("", nodeDBVersion, NodeID{})
defer db.close()
func TestDBExpiration(t *testing.T) {
db, _ := OpenDB("")
defer db.Close()
// Add all the test nodes and set their last pong time
for i, seed := range nodeDBExpirationNodes {
if err := db.updateNode(seed.node); err != nil {
if err := db.UpdateNode(seed.node); err != nil {
t.Fatalf("node %d: failed to insert: %v", i, err)
}
if err := db.updateLastPongReceived(seed.node.ID, seed.pong); err != nil {
if err := db.UpdateLastPongReceived(seed.node.ID(), seed.pong); err != nil {
t.Fatalf("node %d: failed to update bondTime: %v", i, err)
}
}
@ -341,40 +364,9 @@ func TestNodeDBExpiration(t *testing.T) {
t.Fatalf("failed to expire nodes: %v", err)
}
for i, seed := range nodeDBExpirationNodes {
node := db.node(seed.node.ID)
node := db.Node(seed.node.ID())
if (node == nil && !seed.exp) || (node != nil && seed.exp) {
t.Errorf("node %d: expiration mismatch: have %v, want %v", i, node, seed.exp)
}
}
}
func TestNodeDBSelfExpiration(t *testing.T) {
// Find a node in the tests that shouldn't expire, and assign it as self
var self NodeID
for _, node := range nodeDBExpirationNodes {
if !node.exp {
self = node.node.ID
break
}
}
db, _ := newNodeDB("", nodeDBVersion, self)
defer db.close()
// Add all the test nodes and set their last pong time
for i, seed := range nodeDBExpirationNodes {
if err := db.updateNode(seed.node); err != nil {
t.Fatalf("node %d: failed to insert: %v", i, err)
}
if err := db.updateLastPongReceived(seed.node.ID, seed.pong); err != nil {
t.Fatalf("node %d: failed to update bondTime: %v", i, err)
}
}
// Expire the nodes and make sure self has been evacuated too
if err := db.expireNodes(); err != nil {
t.Fatalf("failed to expire nodes: %v", err)
}
node := db.node(self)
if node != nil {
t.Errorf("self not evacuated")
}
}

Some files were not shown because too many files have changed in this diff.