swarm, cmd/swarm: Merge branch 'master' into multiple-ens-endpoints

commit 6a9730edaa

.mailmap: 14 lines changed
@@ -65,7 +65,8 @@ Enrique Fynn <enriquefynn@gmail.com>
Vincent G <caktux@gmail.com>
RJ Catalano <rj@erisindustries.com>
RJ Catalano <catalanor0220@gmail.com>
RJ Catalano <catalanor0220@gmail.com> <rj@erisindustries.com>
Nchinda Nchinda <nchinda2@gmail.com>

@@ -109,3 +110,14 @@ Frank Wang <eternnoir@gmail.com>
Gary Rong <garyrong0905@gmail.com>
Guillaume Nicolas <guin56@gmail.com>
Sorin Neacsu <sorin.neacsu@gmail.com>
Sorin Neacsu <sorin.neacsu@gmail.com> <sorin@users.noreply.github.com>
Valentin Wüstholz <wuestholz@gmail.com>
Valentin Wüstholz <wuestholz@gmail.com> <wuestholz@users.noreply.github.com>
Armin Braun <me@obrown.io>
Ernesto del Toro <ernesto.deltoro@gmail.com>
Ernesto del Toro <ernesto.deltoro@gmail.com> <ernestodeltoro@users.noreply.github.com>
@@ -185,6 +185,8 @@ matrix:
    - xctool -version
    - xcrun simctl list

    # Workaround for https://github.com/golang/go/issues/23749
    - export CGO_CFLAGS_ALLOW='-fmodules|-fblocks|-fobjc-arc'
    - go run build/ci.go xcode -signer IOS_SIGNING_KEY -deploy trunk -upload gethstore/builds

# This builder does the Azure archive purges to avoid accumulating junk
AUTHORS: 92 lines changed

@@ -1,85 +1,173 @@
# This is the official list of go-ethereum authors for copyright purposes.

Afri Schoedon <5chdn@users.noreply.github.com>
Agustin Armellini Fischer <armellini13@gmail.com>
Airead <fgh1987168@gmail.com>
Alan Chen <alanchchen@users.noreply.github.com>
Alejandro Isaza <alejandro.isaza@gmail.com>
Ales Katona <ales@coinbase.com>
Alex Leverington <alex@ethdev.com>
Alex Wu <wuyiding@gmail.com>
Alexandre Van de Sande <alex.vandesande@ethdev.com>
Ali Hajimirza <Ali92hm@users.noreply.github.com>
Anton Evangelatov <anton.evangelatov@gmail.com>
Arba Sasmoyo <arba.sasmoyo@gmail.com>
Armani Ferrante <armaniferrante@berkeley.edu>
Armin Braun <me@obrown.io>
Aron Fischer <github@aron.guru>
Bas van Kervel <bas@ethdev.com>
Benjamin Brent <benjamin@benjaminbrent.com>
Benoit Verkindt <benoit.verkindt@gmail.com>
Bo <bohende@gmail.com>
Bo Ye <boy.e.computer.1982@outlook.com>
Bob Glickstein <bobg@users.noreply.github.com>
Brian Schroeder <bts@gmail.com>
Casey Detrio <cdetrio@gmail.com>
Chase Wright <mysticryuujin@gmail.com>
Christoph Jentzsch <jentzsch.software@gmail.com>
Daniel A. Nagy <nagy.da@gmail.com>
Daniel Sloof <goapsychadelic@gmail.com>
Darrel Herbst <dherbst@gmail.com>
Dave Appleton <calistralabs@gmail.com>
Diego Siqueira <DiSiqueira@users.noreply.github.com>
Dmitry Shulyak <yashulyak@gmail.com>
Egon Elbre <egonelbre@gmail.com>
Elias Naur <elias.naur@gmail.com>
Elliot Shepherd <elliot@identitii.com>
Enrique Fynn <enriquefynn@gmail.com>
Ernesto del Toro <ernesto.deltoro@gmail.com>
Ethan Buchman <ethan@coinculture.info>
Eugene Valeyev <evgen.povt@gmail.com>
Evangelos Pappas <epappas@evalonlabs.com>
Evgeny Danilenko <6655321@bk.ru>
Fabian Vogelsteller <fabian@frozeman.de>
Fabio Barone <fabio.barone.co@gmail.com>
Fabio Berger <fabioberger1991@gmail.com>
FaceHo <facehoshi@gmail.com>
Felix Lange <fjl@twurst.com>
Fiisio <liangcszzu@163.com>
Frank Wang <eternnoir@gmail.com>
Furkan KAMACI <furkankamaci@gmail.com>
Gary Rong <garyrong0905@gmail.com>
George Ornbo <george@shapeshed.com>
Gregg Dourgarian <greggd@tempworks.com>
Guillaume Ballet <gballet@gmail.com>
Guillaume Nicolas <guin56@gmail.com>
Gustav Simonsson <gustav.simonsson@gmail.com>
Hao Bryan Cheng <haobcheng@gmail.com>
Henning Diedrich <hd@eonblast.com>
Isidoro Ghezzi <isidoro.ghezzi@icloud.com>
Ivan Daniluk <ivan.daniluk@gmail.com>
Jae Kwon <jkwon.work@gmail.com>
Jamie Pitts <james.pitts@gmail.com>
Janoš Guljaš <janos@users.noreply.github.com>
Jason Carver <jacarver@linkedin.com>
Jay Guo <guojiannan1101@gmail.com>
Jeff R. Allen <jra@nella.org>
Jeffrey Wilcke <jeffrey@ethereum.org>
Jens Agerberg <github@agerberg.me>
Jia Chenhui <jiachenhui1989@gmail.com>
Jim McDonald <Jim@mcdee.net>
Joel Burget <joelburget@gmail.com>
Jonathan Brown <jbrown@bluedroplet.com>
Joseph Chow <ethereum@outlook.com>
Justin Clark-Casey <justincc@justincc.org>
Justin Drake <drakefjustin@gmail.com>
Kenji Siu <kenji@isuntv.com>
Kobi Gurkan <kobigurk@gmail.com>
Konrad Feldmeier <konrad@brainbot.com>
Kurkó Mihály <kurkomisi@users.noreply.github.com>
Kyuntae Ethan Kim <ethan.kyuntae.kim@gmail.com>
Lefteris Karapetsas <lefteris@refu.co>
Leif Jurvetson <leijurv@gmail.com>
Leo Shklovskii <leo@thermopylae.net>
Lewis Marshall <lewis@lmars.net>
Lio李欧 <lionello@users.noreply.github.com>
Louis Holbrook <dev@holbrook.no>
Luca Zeug <luclu@users.noreply.github.com>
Magicking <s@6120.eu>
Maran Hidskes <maran.hidskes@gmail.com>
Marek Kotewicz <marek.kotewicz@gmail.com>
Mark <markya0616@gmail.com>
Martin Holst Swende <martin@swende.se>
Matthew Di Ferrante <mattdf@users.noreply.github.com>
Matthew Wampler-Doty <matthew.wampler.doty@gmail.com>
Maximilian Meister <mmeister@suse.de>
Micah Zoltu <micah@zoltu.net>
Michael Ruminer <michael.ruminer+github@gmail.com>
Miguel Mota <miguelmota2@gmail.com>
Miya Chen <miyatlchen@gmail.com>
Nchinda Nchinda <nchinda2@gmail.com>
Nick Dodson <silentcicero@outlook.com>
Nick Johnson <arachnid@notdot.net>
Nicolas Guillaume <gunicolas@sqli.com>
Noman <noman@noman.land>
Oli Bye <olibye@users.noreply.github.com>
Paul Litvak <litvakpol@012.net.il>
Paulo L F Casaretto <pcasaretto@gmail.com>
Paweł Bylica <chfast@gmail.com>
Peter Pratscher <pratscher@gmail.com>
Petr Mikusek <petr@mikusek.info>
Péter Szilágyi <peterke@gmail.com>
RJ Catalano <rj@erisindustries.com>
RJ Catalano <catalanor0220@gmail.com>
Ramesh Nair <ram@hiddentao.com>
Ricardo Catalinas Jiménez <r@untroubled.be>
Ricardo Domingos <ricardohsd@gmail.com>
Richard Hart <richardhart92@gmail.com>
Rob <robert@rojotek.com>
Robert Zaremba <robert.zaremba@scale-it.pl>
Russ Cox <rsc@golang.org>
Rémy Roy <remyroy@remyroy.com>
S. Matthew English <s-matthew-english@users.noreply.github.com>
Shintaro Kaneko <kaneshin0120@gmail.com>
Sorin Neacsu <sorin.neacsu@gmail.com>
Stein Dekker <dekker.stein@gmail.com>
Steve Waldman <swaldman@mchange.com>
Steven Roose <stevenroose@gmail.com>
Taylor Gerring <taylor.gerring@gmail.com>
Thomas Bocek <tom@tomp2p.net>
Ti Zhou <tizhou1986@gmail.com>
Tosh Camille <tochecamille@gmail.com>
Valentin Wüstholz <wuestholz@users.noreply.github.com>
Valentin Wüstholz <wuestholz@gmail.com>
Victor Farazdagi <simple.square@gmail.com>
Victor Tran <vu.tran54@gmail.com>
Viktor Trón <viktor.tron@gmail.com>
Ville Sundell <github@solarius.fi>
Vincent G <caktux@gmail.com>
Vitalik Buterin <v@buterin.com>
Vitaly V <vvelikodny@gmail.com>
Vivek Anand <vivekanand1101@users.noreply.github.com>
Vlad Gluhovsky <gluk256@users.noreply.github.com>
Yohann Léon <sybiload@gmail.com>
Yoichi Hirai <i@yoichihirai.com>
Yondon Fu <yondon.fu@gmail.com>
Zach <zach.ramsay@gmail.com>
Zahoor Mohamed <zahoor@zahoor.in>
Zoe Nolan <github@zoenolan.org>
Zsolt Felföldi <zsfelfoldi@gmail.com>
am2rican5 <am2rican5@gmail.com>
ayeowch <ayeowch@gmail.com>
b00ris <b00ris@mail.ru>
bailantaotao <Edwin@maicoin.com>
baizhenxuan <nkbai@163.com>
bloonfield <bloonfield@163.com>
changhong <changhong.yu@shanbay.com>
evgk <evgeniy.kamyshev@gmail.com>
ferhat elmas <elmas.ferhat@gmail.com>
holisticode <holistic.computing@gmail.com>
jtakalai <juuso.takalainen@streamr.com>
ken10100147 <sunhongping@kanjian.com>
ligi <ligi@ligi.de>
mark.lin <mark@maicoin.com>
necaremus <necaremus@gmail.com>
njupt-moon <1015041018@njupt.edu.cn>
nkbai <nkbai@163.com>
rhaps107 <dod-source@yandex.ru>
slumber1122 <slumber1122@gmail.com>
sunxiaojun2014 <sunxiaojun-xy@360.cn>
terasum <terasum@163.com>
tsarpaul <Litvakpol@012.net.il>
xiekeyang <xiekeyang@users.noreply.github.com>
yoza <yoza.is12s@gmail.com>
ΞTHΞЯSPHΞЯΞ <{viktor.tron,nagydani,zsfelfoldi}@gmail.com>
Максим Чусовлянов <mchusovlianov@gmail.com>
README.md: 30 lines changed

@@ -5,6 +5,7 @@ Official golang implementation of the Ethereum protocol.
[![API Reference](
https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667
)](https://godoc.org/github.com/ethereum/go-ethereum)
[![Go Report Card](https://goreportcard.com/badge/github.com/ethereum/go-ethereum)](https://goreportcard.com/report/github.com/ethereum/go-ethereum)
[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/ethereum/go-ethereum?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)

Automated builds are available for stable releases and the unstable master branch.

@@ -56,16 +57,14 @@ the user doesn't care about years-old historical data, so we can fast-sync quick
state of the network. To do so:

```
$ geth --fast --cache=512 console
$ geth console
```

This command will:

* Start geth in fast sync mode (`--fast`), causing it to download more data in exchange for avoiding
processing the entire history of the Ethereum network, which is very CPU intensive.
* Bump the memory allowance of the database to 512MB (`--cache=512`), which can help significantly in
sync times especially for HDD users. This flag is optional and you can set it as high or as low as
you'd like, though we'd recommend the 512MB - 2GB range.
* Start geth in fast sync mode (default, can be changed with the `--syncmode` flag), causing it to
download more data in exchange for avoiding processing the entire history of the Ethereum network,
which is very CPU intensive.
* Start up Geth's built-in interactive [JavaScript console](https://github.com/ethereum/go-ethereum/wiki/JavaScript-Console),
(via the trailing `console` subcommand) through which you can invoke all official [`web3` methods](https://github.com/ethereum/wiki/wiki/JavaScript-API)
as well as Geth's own [management APIs](https://github.com/ethereum/go-ethereum/wiki/Management-APIs).

@@ -80,12 +79,11 @@ entire system. In other words, instead of attaching to the main network, you wan
network with your node, which is fully equivalent to the main network, but with play-Ether only.

```
$ geth --testnet --fast --cache=512 console
$ geth --testnet console
```

The `--fast`, `--cache` flags and `console` subcommand have the exact same meaning as above and they
are equally useful on the testnet too. Please see above for their explanations if you've skipped to
here.
The `console` subcommand have the exact same meaning as above and they are equally useful on the
testnet too. Please see above for their explanations if you've skipped to here.

Specifying the `--testnet` flag however will reconfigure your Geth instance a bit:

@@ -102,6 +100,14 @@ over between the main network and test network, you should make sure to always u
for play-money and real-money. Unless you manually move accounts, Geth will by default correctly
separate the two networks and will not make any accounts available between them.*

### Full node on the Rinkeby test network

The above test network is a cross client one based on the ethash proof-of-work consensus algorithm. As such, it has certain extra overhead and is more susceptible to reorganization attacks due to the network's low difficulty / security. Go Ethereum also supports connecting to a proof-of-authority based test network called [*Rinkeby*](https://www.rinkeby.io) (operated by members of the community). This network is lighter, more secure, but is only supported by go-ethereum.

```
$ geth --rinkeby console
```

### Configuration

As an alternative to passing the numerous flags to the `geth` binary, you can also pass a configuration file via:

@@ -125,10 +131,10 @@ One of the quickest ways to get Ethereum up and running on your machine is by us

```
docker run -d --name ethereum-node -v /Users/alice/ethereum:/root \
           -p 8545:8545 -p 30303:30303 \
           ethereum/client-go --fast --cache=512
           ethereum/client-go
```

This will start geth in fast sync mode with a DB memory allowance of 512MB just as the above command does. It will also create a persistent volume in your home directory for saving your blockchain as well as map the default ports. There is also an `alpine` tag available for a slim version of the image.
This will start geth in fast-sync mode with a DB memory allowance of 1GB just as the above command does. It will also create a persistent volume in your home directory for saving your blockchain as well as map the default ports. There is also an `alpine` tag available for a slim version of the image.

Do not forget `--rpcaddr 0.0.0.0`, if you want to access RPC from other containers and/or hosts. By default, `geth` binds to the local interface and RPC endpoints is not accessible from the outside.
@@ -136,11 +136,11 @@ func (abi *ABI) UnmarshalJSON(data []byte) error {

// MethodById looks up a method by the 4-byte id
// returns nil if none found
func (abi *ABI) MethodById(sigdata []byte) *Method {
func (abi *ABI) MethodById(sigdata []byte) (*Method, error) {
for _, method := range abi.Methods {
if bytes.Equal(method.Id(), sigdata[:4]) {
return &method
return &method, nil
}
}
return nil
return nil, fmt.Errorf("no method with id: %#x", sigdata[:4])
}
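Note on the `MethodById` change above: selector lookups now return `(*Method, error)` instead of a nil-able pointer, so call sites have to check the error. A minimal sketch of the new call pattern (the ABI definition below is made up purely for illustration):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// Hypothetical one-method ABI; only the call pattern matters here.
	const def = `[{"name": "transfer", "type": "function", "inputs": [
		{"name": "to", "type": "address"}, {"name": "value", "type": "uint256"}]}]`

	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	selector := parsed.Methods["transfer"].Id() // 4-byte selector of the method

	// Before: a nil *Method signalled "not found". After: the error names the selector.
	method, err := parsed.MethodById(selector)
	if err != nil {
		fmt.Println("unknown selector:", err)
		return
	}
	fmt.Println("resolved method:", method.Name)
}
```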
@@ -689,7 +689,11 @@ func TestABI_MethodById(t *testing.T) {
}
for name, m := range abi.Methods {
a := fmt.Sprintf("%v", m)
b := fmt.Sprintf("%v", abi.MethodById(m.Id()))
m2, err := abi.MethodById(m.Id())
if err != nil {
t.Fatalf("Failed to look up ABI method: %v", err)
}
b := fmt.Sprintf("%v", m2)
if a != b {
t.Errorf("Method %v (id %v) not 'findable' by id in ABI", name, common.ToHex(m.Id()))
}
@@ -67,6 +67,17 @@ func (arguments Arguments) LengthNonIndexed() int {
return out
}

// NonIndexed returns the arguments with indexed arguments filtered out
func (arguments Arguments) NonIndexed() Arguments {
var ret []Argument
for _, arg := range arguments {
if !arg.Indexed {
ret = append(ret, arg)
}
}
return ret
}

// isTuple returns true for non-atomic constructs, like (uint,uint) or uint[]
func (arguments Arguments) isTuple() bool {
return len(arguments) > 1

@@ -74,21 +85,25 @@ func (arguments Arguments) isTuple() bool {

// Unpack performs the operation hexdata -> Go format
func (arguments Arguments) Unpack(v interface{}, data []byte) error {
if arguments.isTuple() {
return arguments.unpackTuple(v, data)
}
return arguments.unpackAtomic(v, data)
}

func (arguments Arguments) unpackTuple(v interface{}, output []byte) error {
// make sure the passed value is arguments pointer
valueOf := reflect.ValueOf(v)
if reflect.Ptr != valueOf.Kind() {
if reflect.Ptr != reflect.ValueOf(v).Kind() {
return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
}
marshalledValues, err := arguments.UnpackValues(data)
if err != nil {
return err
}
if arguments.isTuple() {
return arguments.unpackTuple(v, marshalledValues)
}
return arguments.unpackAtomic(v, marshalledValues)
}

func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interface{}) error {

var (
value = valueOf.Elem()
value = reflect.ValueOf(v).Elem()
typ = value.Type()
kind = value.Kind()
)

@@ -110,30 +125,9 @@ func (arguments Arguments) unpackTuple(v interface{}, output []byte) error {
exists[field] = true
}
}
// `i` counts the nonindexed arguments.
// `j` counts the number of complex types.
// both `i` and `j` are used to to correctly compute `data` offset.
for i, arg := range arguments.NonIndexed() {

i, j := -1, 0
for _, arg := range arguments {

if arg.Indexed {
// can't read, continue
continue
}
i++
marshalledValue, err := toGoType((i+j)*32, arg.Type, output)
if err != nil {
return err
}

if arg.Type.T == ArrayTy {
// combined index ('i' + 'j') need to be adjusted only by size of array, thus
// we need to decrement 'j' because 'i' was incremented
j += arg.Type.Size - 1
}

reflectValue := reflect.ValueOf(marshalledValue)
reflectValue := reflect.ValueOf(marshalledValues[i])

switch kind {
case reflect.Struct:

@@ -166,34 +160,52 @@ func (arguments Arguments) unpackTuple(v interface{}, output []byte) error {
}

// unpackAtomic unpacks ( hexdata -> go ) a single value
func (arguments Arguments) unpackAtomic(v interface{}, output []byte) error {
// make sure the passed value is arguments pointer
valueOf := reflect.ValueOf(v)
if reflect.Ptr != valueOf.Kind() {
return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
func (arguments Arguments) unpackAtomic(v interface{}, marshalledValues []interface{}) error {
if len(marshalledValues) != 1 {
return fmt.Errorf("abi: wrong length, expected single value, got %d", len(marshalledValues))
}
arg := arguments[0]
if arg.Indexed {
return fmt.Errorf("abi: attempting to unpack indexed variable into element.")
elem := reflect.ValueOf(v).Elem()
reflectValue := reflect.ValueOf(marshalledValues[0])
return set(elem, reflectValue, arguments.NonIndexed()[0])
}

value := valueOf.Elem()
// UnpackValues can be used to unpack ABI-encoded hexdata according to the ABI-specification,
// without supplying a struct to unpack into. Instead, this method returns a list containing the
// values. An atomic argument will be a list with one element.
func (arguments Arguments) UnpackValues(data []byte) ([]interface{}, error) {
retval := make([]interface{}, 0, arguments.LengthNonIndexed())
virtualArgs := 0
for index, arg := range arguments.NonIndexed() {
marshalledValue, err := toGoType((index+virtualArgs)*32, arg.Type, data)
if arg.Type.T == ArrayTy {
// If we have a static array, like [3]uint256, these are coded as
// just like uint256,uint256,uint256.
// This means that we need to add two 'virtual' arguments when
// we count the index from now on

marshalledValue, err := toGoType(0, arg.Type, output)
virtualArgs += arg.Type.Size - 1
}
if err != nil {
return err
return nil, err
}
return set(value, reflect.ValueOf(marshalledValue), arg)
retval = append(retval, marshalledValue)
}
return retval, nil
}

// Unpack performs the operation Go format -> Hexdata
// PackValues performs the operation Go format -> Hexdata
// It is the semantic opposite of UnpackValues
func (arguments Arguments) PackValues(args []interface{}) ([]byte, error) {
return arguments.Pack(args...)
}

// Pack performs the operation Go format -> Hexdata
func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
// Make sure arguments match up and pack them
abiArgs := arguments
if len(args) != len(abiArgs) {
return nil, fmt.Errorf("argument count mismatch: %d for %d", len(args), len(abiArgs))
}

// variable input is the output appended at the end of packed
// output. This is used for strings and bytes types input.
var variableInput []byte

@@ -207,7 +219,6 @@ func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
inputOffset += 32
}
}

var ret []byte
for i, a := range args {
input := abiArgs[i]

@@ -216,7 +227,6 @@ func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
if err != nil {
return nil, err
}

// check for a slice type (string, bytes, slice)
if input.Type.requiresLengthPrefix() {
// calculate the offset
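The new `UnpackValues` above decodes ABI return data into a plain `[]interface{}` in declaration order, with no target struct required (and `PackValues` is its inverse). A rough usage sketch; the method definition and the helper name are illustrative, not part of the change:

```go
package main

import (
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

// decodeGetOutputs unpacks the raw return data of a hypothetical `get()` call
// without declaring a Go struct for the result.
func decodeGetOutputs(ret []byte) (common.Address, *big.Int, error) {
	const def = `[{"name": "get", "type": "function", "outputs": [
		{"name": "owner", "type": "address"},
		{"name": "count", "type": "uint256"}]}]`

	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		return common.Address{}, nil, err
	}
	values, err := parsed.Methods["get"].Outputs.UnpackValues(ret)
	if err != nil {
		return common.Address{}, nil, err
	}
	// Values arrive in declaration order; an atomic output would be a one-element slice.
	return values[0].(common.Address), values[1].(*big.Int), nil
}
```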
@@ -1,4 +1,4 @@
// Copyright 2015 The go-ethereum Authors
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify

@@ -1,4 +1,4 @@
// Copyright 2015 The go-ethereum Authors
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify

@@ -95,6 +95,9 @@ func readFixedBytes(t Type, word []byte) (interface{}, error) {

// iteratively unpack elements
func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error) {
if size < 0 {
return nil, fmt.Errorf("cannot marshal input to array, size is negative (%d)", size)
}
if start+32*size > len(output) {
return nil, fmt.Errorf("abi: cannot marshal in to go array: offset %d would go over slice boundary (len=%d)", len(output), start+32*size)
}

@@ -181,16 +184,32 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {

// interprets a 32 byte slice as an offset and then determines which indice to look to decode the type.
func lengthPrefixPointsTo(index int, output []byte) (start int, length int, err error) {
offset := int(binary.BigEndian.Uint64(output[index+24 : index+32]))
if offset+32 > len(output) {
return 0, 0, fmt.Errorf("abi: cannot marshal in to go slice: offset %d would go over slice boundary (len=%d)", len(output), offset+32)
}
length = int(binary.BigEndian.Uint64(output[offset+24 : offset+32]))
if offset+32+length > len(output) {
return 0, 0, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %d require %d", len(output), offset+32+length)
}
start = offset + 32
bigOffsetEnd := big.NewInt(0).SetBytes(output[index : index+32])
bigOffsetEnd.Add(bigOffsetEnd, common.Big32)
outputLength := big.NewInt(int64(len(output)))

//fmt.Printf("LENGTH PREFIX INFO: \nsize: %v\noffset: %v\nstart: %v\n", length, offset, start)
if bigOffsetEnd.Cmp(outputLength) > 0 {
return 0, 0, fmt.Errorf("abi: cannot marshal in to go slice: offset %v would go over slice boundary (len=%v)", bigOffsetEnd, outputLength)
}

if bigOffsetEnd.BitLen() > 63 {
return 0, 0, fmt.Errorf("abi offset larger than int64: %v", bigOffsetEnd)
}

offsetEnd := int(bigOffsetEnd.Uint64())
lengthBig := big.NewInt(0).SetBytes(output[offsetEnd-32 : offsetEnd])

totalSize := big.NewInt(0)
totalSize.Add(totalSize, bigOffsetEnd)
totalSize.Add(totalSize, lengthBig)
if totalSize.BitLen() > 63 {
return 0, 0, fmt.Errorf("abi length larger than int64: %v", totalSize)
}

if totalSize.Cmp(outputLength) > 0 {
return 0, 0, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %v require %v", outputLength, totalSize)
}
start = int(bigOffsetEnd.Uint64())
length = int(lengthBig.Uint64())
return
}
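The rewritten `lengthPrefixPointsTo` above performs all offset/length arithmetic in `big.Int` and rejects anything that would not fit in 63 bits before converting to `int`, which is what closes the overflow cases exercised by the new tests below. A compact standalone sketch of the same guard (assuming `fmt` and `math/big` are imported; this is not the library code itself):

```go
// checkDynamicHeader mirrors the overflow-safe bounds check: read the offset and
// length words as arbitrary-precision integers, validate them against len(output),
// and only then convert to machine ints.
func checkDynamicHeader(index int, output []byte) (start, length int, err error) {
	offsetEnd := new(big.Int).SetBytes(output[index : index+32])
	offsetEnd.Add(offsetEnd, big.NewInt(32)) // end of the length word
	outLen := big.NewInt(int64(len(output)))

	if offsetEnd.BitLen() > 63 || offsetEnd.Cmp(outLen) > 0 {
		return 0, 0, fmt.Errorf("offset %v out of range (len=%v)", offsetEnd, outLen)
	}
	end := int(offsetEnd.Uint64())
	size := new(big.Int).SetBytes(output[end-32 : end])

	if total := new(big.Int).Add(offsetEnd, size); total.BitLen() > 63 || total.Cmp(outLen) > 0 {
		return 0, 0, fmt.Errorf("content %v out of range (len=%v)", total, outLen)
	}
	return end, int(size.Uint64()), nil
}
```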
@@ -1,4 +1,4 @@
// Copyright 2015 The go-ethereum Authors
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify

@@ -130,7 +130,7 @@ var unpackTests = []unpackTest{
{
def: `[{"type": "bytes32"}]`,
enc: "0100000000000000000000000000000000000000000000000000000000000000",
want: common.HexToHash("0100000000000000000000000000000000000000000000000000000000000000"),
want: [32]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
},
{
def: `[{"type": "function"}]`,

@@ -683,3 +683,73 @@ func TestUnmarshal(t *testing.T) {
t.Fatal("expected error:", err)
}
}

func TestOOMMaliciousInput(t *testing.T) {
oomTests := []unpackTest{
{
def: `[{"type": "uint8[]"}]`,
enc: "0000000000000000000000000000000000000000000000000000000000000020" + // offset
"0000000000000000000000000000000000000000000000000000000000000003" + // num elems
"0000000000000000000000000000000000000000000000000000000000000001" + // elem 1
"0000000000000000000000000000000000000000000000000000000000000002", // elem 2
},
{ // Length larger than 64 bits
def: `[{"type": "uint8[]"}]`,
enc: "0000000000000000000000000000000000000000000000000000000000000020" + // offset
"00ffffffffffffffffffffffffffffffffffffffffffffff0000000000000002" + // num elems
"0000000000000000000000000000000000000000000000000000000000000001" + // elem 1
"0000000000000000000000000000000000000000000000000000000000000002", // elem 2
},
{ // Offset very large (over 64 bits)
def: `[{"type": "uint8[]"}]`,
enc: "00ffffffffffffffffffffffffffffffffffffffffffffff0000000000000020" + // offset
"0000000000000000000000000000000000000000000000000000000000000002" + // num elems
"0000000000000000000000000000000000000000000000000000000000000001" + // elem 1
"0000000000000000000000000000000000000000000000000000000000000002", // elem 2
},
{ // Offset very large (below 64 bits)
def: `[{"type": "uint8[]"}]`,
enc: "0000000000000000000000000000000000000000000000007ffffffffff00020" + // offset
"0000000000000000000000000000000000000000000000000000000000000002" + // num elems
"0000000000000000000000000000000000000000000000000000000000000001" + // elem 1
"0000000000000000000000000000000000000000000000000000000000000002", // elem 2
},
{ // Offset negative (as 64 bit)
def: `[{"type": "uint8[]"}]`,
enc: "000000000000000000000000000000000000000000000000f000000000000020" + // offset
"0000000000000000000000000000000000000000000000000000000000000002" + // num elems
"0000000000000000000000000000000000000000000000000000000000000001" + // elem 1
"0000000000000000000000000000000000000000000000000000000000000002", // elem 2
},

{ // Negative length
def: `[{"type": "uint8[]"}]`,
enc: "0000000000000000000000000000000000000000000000000000000000000020" + // offset
"000000000000000000000000000000000000000000000000f000000000000002" + // num elems
"0000000000000000000000000000000000000000000000000000000000000001" + // elem 1
"0000000000000000000000000000000000000000000000000000000000000002", // elem 2
},
{ // Very large length
def: `[{"type": "uint8[]"}]`,
enc: "0000000000000000000000000000000000000000000000000000000000000020" + // offset
"0000000000000000000000000000000000000000000000007fffffffff000002" + // num elems
"0000000000000000000000000000000000000000000000000000000000000001" + // elem 1
"0000000000000000000000000000000000000000000000000000000000000002", // elem 2
},
}
for i, test := range oomTests {
def := fmt.Sprintf(`[{ "name" : "method", "outputs": %s}]`, test.def)
abi, err := JSON(strings.NewReader(def))
if err != nil {
t.Fatalf("invalid ABI definition %s: %v", def, err)
}
encb, err := hex.DecodeString(test.enc)
if err != nil {
t.Fatalf("invalid hex: %s" + test.enc)
}
_, err = abi.Methods["method"].Outputs.UnpackValues(encb)
if err == nil {
t.Fatalf("Expected error on malicious input, test %d", i)
}
}
}
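For intuition on why the `big.Int` guards matter: the oversized words in the tests above decode to numbers no real payload could ever contain, and a naive fixed-width read silently drops the high bytes. A tiny standalone illustration (not part of the test file):

```go
package main

import (
	"encoding/hex"
	"fmt"
	"math/big"
)

func main() {
	// The "length larger than 64 bits" word from the test case above.
	word, err := hex.DecodeString("00ffffffffffffffffffffffffffffffffffffffffffffff0000000000000002")
	if err != nil {
		panic(err)
	}
	n := new(big.Int).SetBytes(word)
	fmt.Println(n.BitLen()) // 248 bits: the value can never index a real payload
	fmt.Println(n.Uint64()) // 2: what an 8-byte read would see, silently dropping the high bytes
}
```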
@@ -121,7 +121,8 @@ var (
// Note: vivid is unsupported because there is no golang-1.6 package for it.
// Note: wily is unsupported because it was officially deprecated on lanchpad.
// Note: yakkety is unsupported because it was officially deprecated on lanchpad.
debDistros = []string{"trusty", "xenial", "zesty", "artful"}
// Note: zesty is unsupported because it was officially deprecated on lanchpad.
debDistros = []string{"trusty", "xenial", "artful", "bionic"}
)

var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))

@@ -55,10 +55,9 @@ var (
"crypto/sha3/",
"internal/jsre/deps",
"log/",
"common/bitutil/bitutil",
// don't license generated files
"contracts/chequebook/contract/",
"contracts/ens/contract/",
"contracts/release/contract.go",
"contracts/chequebook/contract/code.go",
}

// paths with this prefix are licensed as GPL. all other files are LGPL.
@@ -122,7 +122,12 @@ func main() {
utils.Fatalf("%v", err)
}
} else {
if _, err := discover.ListenUDP(nodeKey, conn, realaddr, nil, "", restrictList); err != nil {
cfg := discover.Config{
PrivateKey: nodeKey,
AnnounceAddr: realaddr,
NetRestrict: restrictList,
}
if _, err := discover.ListenUDP(conn, cfg); err != nil {
utils.Fatalf("%v", err)
}
}
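The bootnode's discovery setup now goes through a `discover.Config` value rather than a long positional argument list. For reference, a hedged sketch of the call shape; identifiers such as `nodeKey`, `conn`, `realaddr` and `restrictList` are assumed to be prepared earlier in `main`, as in the hunk above:

```go
// Sketch: start the discv4 listener with the struct-based configuration.
cfg := discover.Config{
	PrivateKey:   nodeKey,      // *ecdsa.PrivateKey identifying this bootnode
	AnnounceAddr: realaddr,     // externally reachable UDP address (e.g. behind NAT)
	NetRestrict:  restrictList, // optional netutil.Netlist restricting accepted peers
}
if _, err := discover.ListenUDP(conn, cfg); err != nil {
	utils.Fatalf("%v", err)
}
```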
@@ -1,3 +1,19 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (

@@ -1,4 +1,4 @@
// Copyright 2017 The go-ethereum Authors
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify

@@ -1,4 +1,4 @@
// Copyright 2017 The go-ethereum Authors
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify

@@ -1,18 +1,18 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
// This file is part of go-ethereum.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main
@@ -686,8 +686,6 @@ func authTwitter(url string) (string, string, common.Address, error) {
if len(parts) < 4 || parts[len(parts)-2] != "status" {
return "", "", common.Address{}, errors.New("Invalid Twitter status URL")
}
username := parts[len(parts)-3]

// Twitter's API isn't really friendly with direct links. Still, we don't
// want to do ask read permissions from users, so just load the public posts and
// scrape it for the Ethereum address and profile URL.

@@ -697,6 +695,13 @@ func authTwitter(url string) (string, string, common.Address, error) {
}
defer res.Body.Close()

// Resolve the username from the final redirect, no intermediate junk
parts = strings.Split(res.Request.URL.String(), "/")
if len(parts) < 4 || parts[len(parts)-2] != "status" {
return "", "", common.Address{}, errors.New("Invalid Twitter status URL")
}
username := parts[len(parts)-3]

body, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", "", common.Address{}, err
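The faucet change above takes the Twitter username from the URL of the final redirect (`res.Request.URL`) rather than the user-supplied link, so shortened or mobile URLs still resolve to the canonical `/status/` path. A standalone sketch of the same idea with `net/http` (an illustrative helper, not the faucet code; assumes `net/http`, `strings` and `errors` are imported):

```go
// finalStatusUser follows redirects and extracts <user> from the final
// https://twitter.com/<user>/status/<id> URL.
func finalStatusUser(link string) (string, error) {
	res, err := http.Get(link) // the default client follows redirects
	if err != nil {
		return "", err
	}
	defer res.Body.Close()

	// res.Request is the last request in the redirect chain.
	parts := strings.Split(res.Request.URL.String(), "/")
	if len(parts) < 4 || parts[len(parts)-2] != "status" {
		return "", errors.New("invalid Twitter status URL")
	}
	return parts[len(parts)-3], nil
}
```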
@@ -67,6 +67,9 @@ It expects the genesis file as argument.`,
utils.DataDirFlag,
utils.CacheFlag,
utils.LightModeFlag,
utils.GCModeFlag,
utils.CacheDatabaseFlag,
utils.CacheGCFlag,
},
Category: "BLOCKCHAIN COMMANDS",
Description: `
@@ -22,6 +22,7 @@ import (
"os/signal"
"path/filepath"
"strings"
"syscall"

"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/console"

@@ -207,7 +208,7 @@ func ephemeralConsole(ctx *cli.Context) error {
}
// Wait for pending callbacks, but stop for Ctrl-C.
abort := make(chan os.Signal, 1)
signal.Notify(abort, os.Interrupt)
signal.Notify(abort, syscall.SIGINT, syscall.SIGTERM)

go func() {
<-abort
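The console abort channel now reacts to SIGTERM as well as Ctrl-C, so a plain `kill` or `docker stop` follows the same graceful shutdown path. The general pattern, as a generic standalone sketch (not geth-specific):

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	// Buffered channel so a signal arriving before the receive isn't lost.
	abort := make(chan os.Signal, 1)
	signal.Notify(abort, syscall.SIGINT, syscall.SIGTERM)

	fmt.Println("waiting for SIGINT or SIGTERM...")
	sig := <-abort
	fmt.Println("shutting down on", sig)
}
```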
@@ -114,6 +114,7 @@ var (
utils.VMEnableDebugFlag,
utils.NetworkIdFlag,
utils.RPCCORSDomainFlag,
utils.RPCVirtualHostsFlag,
utils.EthStatsURLFlag,
utils.MetricsEnabledFlag,
utils.FakePoWFlag,

@@ -156,6 +156,7 @@ var AppHelpFlagGroups = []flagGroup{
utils.IPCDisabledFlag,
utils.IPCPathFlag,
utils.RPCCORSDomainFlag,
utils.RPCVirtualHostsFlag,
utils.JSpathFlag,
utils.ExecFlag,
utils.PreloadJSFlag,
@@ -1,3 +1,19 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

// p2psim provides a command-line client for a simulation HTTP API.
//
// Here is an example of creating a 2 node network with the first node
@@ -117,7 +117,7 @@ var dashboardContent = `
<br/>
<p>To run an archive node, download <a href="/{{.GethGenesis}}"><code>{{.GethGenesis}}</code></a> and start Geth with:
<pre>geth --datadir=$HOME/.{{.Network}} init {{.GethGenesis}}</pre>
<pre>geth --networkid={{.NetworkID}} --datadir=$HOME/.{{.Network}} --cache=1024 --syncmode=full{{if .Ethstats}} --ethstats='{{.Ethstats}}'{{end}} --bootnodes={{.BootnodesFullFlat}}</pre>
<pre>geth --networkid={{.NetworkID}} --datadir=$HOME/.{{.Network}} --cache=1024 --syncmode=full{{if .Ethstats}} --ethstats='{{.Ethstats}}'{{end}} --bootnodes={{.BootnodesFlat}}</pre>
</p>
<br/>
<p>You can download Geth from <a href="https://geth.ethereum.org/downloads/" target="about:blank">https://geth.ethereum.org/downloads/</a>.</p>

@@ -136,7 +136,7 @@ var dashboardContent = `
<br/>
<p>To run a full node, download <a href="/{{.GethGenesis}}"><code>{{.GethGenesis}}</code></a> and start Geth with:
<pre>geth --datadir=$HOME/.{{.Network}} init {{.GethGenesis}}</pre>
<pre>geth --networkid={{.NetworkID}} --datadir=$HOME/.{{.Network}} --cache=512{{if .Ethstats}} --ethstats='{{.Ethstats}}'{{end}} --bootnodes={{.BootnodesFullFlat}}</pre>
<pre>geth --networkid={{.NetworkID}} --datadir=$HOME/.{{.Network}} --cache=512{{if .Ethstats}} --ethstats='{{.Ethstats}}'{{end}} --bootnodes={{.BootnodesFlat}}</pre>
</p>
<br/>
<p>You can download Geth from <a href="https://geth.ethereum.org/downloads/" target="about:blank">https://geth.ethereum.org/downloads/</a>.</p>

@@ -158,7 +158,7 @@ var dashboardContent = `
<br/>
<p>To run a light node, download <a href="/{{.GethGenesis}}"><code>{{.GethGenesis}}</code></a> and start Geth with:
<pre>geth --datadir=$HOME/.{{.Network}} init {{.GethGenesis}}</pre>
<pre>geth --networkid={{.NetworkID}} --datadir=$HOME/.{{.Network}} --syncmode=light{{if .Ethstats}} --ethstats='{{.Ethstats}}'{{end}} --bootnodes={{.BootnodesLightFlat}}</pre>
<pre>geth --networkid={{.NetworkID}} --datadir=$HOME/.{{.Network}} --syncmode=light{{if .Ethstats}} --ethstats='{{.Ethstats}}'{{end}} --bootnodes={{.BootnodesFlat}}</pre>
</p>
<br/>
<p>You can download Geth from <a href="https://geth.ethereum.org/downloads/" target="about:blank">https://geth.ethereum.org/downloads/</a>.</p>

@@ -177,7 +177,7 @@ var dashboardContent = `
<br/>
<p>To run an embedded node, download <a href="/{{.GethGenesis}}"><code>{{.GethGenesis}}</code></a> and start Geth with:
<pre>geth --datadir=$HOME/.{{.Network}} init {{.GethGenesis}}</pre>
<pre>geth --networkid={{.NetworkID}} --datadir=$HOME/.{{.Network}} --cache=16 --ethash.cachesinmem=1 --syncmode=light{{if .Ethstats}} --ethstats='{{.Ethstats}}'{{end}} --bootnodes={{.BootnodesLightFlat}}</pre>
<pre>geth --networkid={{.NetworkID}} --datadir=$HOME/.{{.Network}} --cache=16 --ethash.cachesinmem=1 --syncmode=light{{if .Ethstats}} --ethstats='{{.Ethstats}}'{{end}} --bootnodes={{.BootnodesFlat}}</pre>
</p>
<br/>
<p>You can download Geth from <a href="https://geth.ethereum.org/downloads/" target="about:blank">https://geth.ethereum.org/downloads/</a>.</p>

@@ -208,7 +208,7 @@ var dashboardContent = `
<pre>geth --datadir=$HOME/.{{.Network}} init {{.GethGenesis}}</pre>
</p>
<p>With your local chain initialized, you can start the Ethereum Wallet:
<pre>ethereumwallet --rpc $HOME/.{{.Network}}/geth.ipc --node-networkid={{.NetworkID}} --node-datadir=$HOME/.{{.Network}}{{if .Ethstats}} --node-ethstats='{{.Ethstats}}'{{end}} --node-bootnodes={{.BootnodesFullFlat}}</pre>
<pre>ethereumwallet --rpc $HOME/.{{.Network}}/geth.ipc --node-networkid={{.NetworkID}} --node-datadir=$HOME/.{{.Network}}{{if .Ethstats}} --node-ethstats='{{.Ethstats}}'{{end}} --node-bootnodes={{.BootnodesFlat}}</pre>
<p>
<br/>
<p>You can download the Ethereum Wallet from <a href="https://github.com/ethereum/mist/releases" target="about:blank">https://github.com/ethereum/mist/releases</a>.</p>

@@ -229,7 +229,7 @@ var dashboardContent = `
<pre>geth --datadir=$HOME/.{{.Network}} init {{.GethGenesis}}</pre>
</p>
<p>With your local chain initialized, you can start Mist:
<pre>mist --rpc $HOME/.{{.Network}}/geth.ipc --node-networkid={{.NetworkID}} --node-datadir=$HOME/.{{.Network}}{{if .Ethstats}} --node-ethstats='{{.Ethstats}}'{{end}} --node-bootnodes={{.BootnodesFullFlat}}</pre>
<pre>mist --rpc $HOME/.{{.Network}}/geth.ipc --node-networkid={{.NetworkID}} --node-datadir=$HOME/.{{.Network}}{{if .Ethstats}} --node-ethstats='{{.Ethstats}}'{{end}} --node-bootnodes={{.BootnodesFlat}}</pre>
<p>
<br/>
<p>You can download the Mist browser from <a href="https://github.com/ethereum/mist/releases" target="about:blank">https://github.com/ethereum/mist/releases</a>.</p>

@@ -261,7 +261,7 @@ var dashboardContent = `
<p>Inside your Java code you can now import the geth archive and connect to Ethereum:
<pre>import org.ethereum.geth.*;</pre>
<pre>
Enodes bootnodes = new Enodes();{{range .BootnodesLight}}
Enodes bootnodes = new Enodes();{{range .Bootnodes}}
bootnodes.append(new Enode("{{.}}"));{{end}}

NodeConfig config = new NodeConfig();

@@ -294,7 +294,7 @@ node.start();
<pre>
var error: NSError?

let bootnodes = GethNewEnodesEmpty(){{range .BootnodesLight}}
let bootnodes = GethNewEnodesEmpty(){{range .Bootnodes}}
bootnodes?.append(GethNewEnode("{{.}}", &error)){{end}}

let config = GethNewNodeConfig()

@@ -595,16 +595,16 @@ func deployDashboard(client *sshClient, network string, conf *config, config *da
statsLogin = ""
}
indexfile := new(bytes.Buffer)
bootCpp := make([]string, len(conf.bootFull))
for i, boot := range conf.bootFull {
bootCpp := make([]string, len(conf.bootnodes))
for i, boot := range conf.bootnodes {
bootCpp[i] = "required:" + strings.TrimPrefix(boot, "enode://")
}
bootHarmony := make([]string, len(conf.bootFull))
for i, boot := range conf.bootFull {
bootHarmony := make([]string, len(conf.bootnodes))
for i, boot := range conf.bootnodes {
bootHarmony[i] = fmt.Sprintf("-Dpeer.active.%d.url=%s", i, boot)
}
bootPython := make([]string, len(conf.bootFull))
for i, boot := range conf.bootFull {
bootPython := make([]string, len(conf.bootnodes))
for i, boot := range conf.bootnodes {
bootPython[i] = "'" + boot + "'"
}
template.Must(template.New("").Parse(dashboardContent)).Execute(indexfile, map[string]interface{}{

@@ -616,10 +616,8 @@ func deployDashboard(client *sshClient, network string, conf *config, config *da
"WalletPage": config.wallet,
"FaucetPage": config.faucet,
"GethGenesis": network + ".json",
"BootnodesFull": conf.bootFull,
"BootnodesLight": conf.bootLight,
"BootnodesFullFlat": strings.Join(conf.bootFull, ","),
"BootnodesLightFlat": strings.Join(conf.bootLight, ","),
"Bootnodes": conf.bootnodes,
"BootnodesFlat": strings.Join(conf.bootnodes, ","),
"Ethstats": statsLogin,
"Ethash": conf.Genesis.Config.Ethash != nil,
"CppGenesis": network + "-cpp.json",

@@ -651,7 +649,7 @@ func deployDashboard(client *sshClient, network string, conf *config, config *da
harmonySpecJSON, _ := conf.Genesis.MarshalJSON()
files[filepath.Join(workdir, network+"-harmony.json")] = harmonySpecJSON

paritySpec, err := newParityChainSpec(network, conf.Genesis, conf.bootFull)
paritySpec, err := newParityChainSpec(network, conf.Genesis, conf.bootnodes)
if err != nil {
return nil, err
}
@@ -93,7 +93,7 @@ func deployFaucet(client *sshClient, network string, bootnodes []string, config
"NetworkID": config.node.network,
"Bootnodes": strings.Join(bootnodes, ","),
"Ethstats": config.node.ethstats,
"EthPort": config.node.portFull,
"EthPort": config.node.port,
"CaptchaToken": config.captchaToken,
"CaptchaSecret": config.captchaSecret,
"FaucetName": strings.Title(network),

@@ -110,7 +110,7 @@ func deployFaucet(client *sshClient, network string, bootnodes []string, config
"Datadir": config.node.datadir,
"VHost": config.host,
"ApiPort": config.port,
"EthPort": config.node.portFull,
"EthPort": config.node.port,
"EthName": config.node.ethstats[:strings.Index(config.node.ethstats, ":")],
"CaptchaToken": config.captchaToken,
"CaptchaSecret": config.captchaSecret,

@@ -158,7 +158,7 @@ func (info *faucetInfos) Report() map[string]string {
report := map[string]string{
"Website address": info.host,
"Website listener port": strconv.Itoa(info.port),
"Ethereum listener port": strconv.Itoa(info.node.portFull),
"Ethereum listener port": strconv.Itoa(info.node.port),
"Funding amount (base tier)": fmt.Sprintf("%d Ethers", info.amount),
"Funding cooldown (base tier)": fmt.Sprintf("%d mins", info.minutes),
"Funding tiers": strconv.Itoa(info.tiers),

@@ -228,7 +228,7 @@ func checkFaucet(client *sshClient, network string) (*faucetInfos, error) {
return &faucetInfos{
node: &nodeInfos{
datadir: infos.volumes["/root/.faucet"],
portFull: infos.portmap[infos.envvars["ETH_PORT"]+"/tcp"],
port: infos.portmap[infos.envvars["ETH_PORT"]+"/tcp"],
ethstats: infos.envvars["ETH_NAME"],
keyJSON: keyJSON,
keyPass: keyPass,
@@ -42,7 +42,7 @@ ADD genesis.json /genesis.json
RUN \
echo 'geth --cache 512 init /genesis.json' > geth.sh && \{{if .Unlock}}
echo 'mkdir -p /root/.ethereum/keystore/ && cp /signer.json /root/.ethereum/keystore/' >> geth.sh && \{{end}}
echo $'geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .BootV4}}--bootnodesv4 {{.BootV4}}{{end}} {{if .BootV5}}--bootnodesv5 {{.BootV5}}{{end}} {{if .Etherbase}}--etherbase {{.Etherbase}} --mine --minerthreads 1{{end}} {{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}} --targetgaslimit {{.GasTarget}} --gasprice {{.GasPrice}}' >> geth.sh
echo $'geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .Bootnodes}}--bootnodes {{.Bootnodes}}{{end}} {{if .Etherbase}}--etherbase {{.Etherbase}} --mine --minerthreads 1{{end}} {{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}} --targetgaslimit {{.GasTarget}} --gasprice {{.GasPrice}}' >> geth.sh

ENTRYPOINT ["/bin/sh", "geth.sh"]
`

@@ -56,15 +56,13 @@ services:
build: .
image: {{.Network}}/{{.Type}}
ports:
- "{{.FullPort}}:{{.FullPort}}"
- "{{.FullPort}}:{{.FullPort}}/udp"{{if .Light}}
- "{{.LightPort}}:{{.LightPort}}/udp"{{end}}
- "{{.Port}}:{{.Port}}"
- "{{.Port}}:{{.Port}}/udp"
volumes:
- {{.Datadir}}:/root/.ethereum{{if .Ethashdir}}
- {{.Ethashdir}}:/root/.ethash{{end}}
environment:
- FULL_PORT={{.FullPort}}/tcp
- LIGHT_PORT={{.LightPort}}/udp
- PORT={{.Port}}/tcp
- TOTAL_PEERS={{.TotalPeers}}
- LIGHT_PEERS={{.LightPeers}}
- STATS_NAME={{.Ethstats}}

@@ -82,12 +80,11 @@ services:
// deployNode deploys a new Ethereum node container to a remote machine via SSH,
// docker and docker-compose. If an instance with the specified network name
// already exists there, it will be overwritten!
func deployNode(client *sshClient, network string, bootv4, bootv5 []string, config *nodeInfos, nocache bool) ([]byte, error) {
func deployNode(client *sshClient, network string, bootnodes []string, config *nodeInfos, nocache bool) ([]byte, error) {
kind := "sealnode"
if config.keyJSON == "" && config.etherbase == "" {
kind = "bootnode"
bootv4 = make([]string, 0)
bootv5 = make([]string, 0)
bootnodes = make([]string, 0)
}
// Generate the content to upload to the server
workdir := fmt.Sprintf("%d", rand.Int63())

@@ -100,11 +97,10 @@ func deployNode(client *sshClient, network string, bootv4, bootv5 []string, conf
dockerfile := new(bytes.Buffer)
template.Must(template.New("").Parse(nodeDockerfile)).Execute(dockerfile, map[string]interface{}{
"NetworkID": config.network,
"Port": config.portFull,
"Port": config.port,
"Peers": config.peersTotal,
"LightFlag": lightFlag,
"BootV4": strings.Join(bootv4, ","),
"BootV5": strings.Join(bootv5, ","),
"Bootnodes": strings.Join(bootnodes, ","),
"Ethstats": config.ethstats,
"Etherbase": config.etherbase,
"GasTarget": uint64(1000000 * config.gasTarget),

@@ -119,10 +115,9 @@ func deployNode(client *sshClient, network string, bootv4, bootv5 []string, conf
"Datadir": config.datadir,
"Ethashdir": config.ethashdir,
"Network": network,
"FullPort": config.portFull,
"Port": config.port,
"TotalPeers": config.peersTotal,
"Light": config.peersLight > 0,
"LightPort": config.portFull + 1,
"LightPeers": config.peersLight,
"Ethstats": config.ethstats[:strings.Index(config.ethstats, ":")],
"Etherbase": config.etherbase,

@@ -157,10 +152,8 @@ type nodeInfos struct {
datadir string
ethashdir string
ethstats string
portFull int
portLight int
enodeFull string
enodeLight string
port int
enode string
peersTotal int
peersLight int
etherbase string

@@ -175,15 +168,11 @@ type nodeInfos struct {
func (info *nodeInfos) Report() map[string]string {
report := map[string]string{
"Data directory": info.datadir,
"Listener port (full nodes)": strconv.Itoa(info.portFull),
"Listener port": strconv.Itoa(info.port),
"Peer count (all total)": strconv.Itoa(info.peersTotal),
"Peer count (light nodes)": strconv.Itoa(info.peersLight),
"Ethstats username": info.ethstats,
}
if info.peersLight > 0 {
// Light server enabled
report["Listener port (light nodes)"] = strconv.Itoa(info.portLight)
}
if info.gasTarget > 0 {
// Miner or signer node
report["Gas limit (baseline target)"] = fmt.Sprintf("%0.3f MGas", info.gasTarget)

@@ -250,7 +239,7 @@ func checkNode(client *sshClient, network string, boot bool) (*nodeInfos, error)
keyPass = string(bytes.TrimSpace(out))
}
// Run a sanity check to see if the devp2p is reachable
port := infos.portmap[infos.envvars["FULL_PORT"]]
port := infos.portmap[infos.envvars["PORT"]]
if err = checkPort(client.server, port); err != nil {
log.Warn(fmt.Sprintf("%s devp2p port seems unreachable", strings.Title(kind)), "server", client.server, "port", port, "err", err)
}

@@ -259,8 +248,7 @@ func checkNode(client *sshClient, network string, boot bool) (*nodeInfos, error)
genesis: genesis,
datadir: infos.volumes["/root/.ethereum"],
ethashdir: infos.volumes["/root/.ethash"],
portFull: infos.portmap[infos.envvars["FULL_PORT"]],
portLight: infos.portmap[infos.envvars["LIGHT_PORT"]],
port: port,
peersTotal: totalPeers,
peersLight: lightPeers,
ethstats: infos.envvars["STATS_NAME"],

@@ -270,9 +258,7 @@ func checkNode(client *sshClient, network string, boot bool) (*nodeInfos, error)
gasTarget: gasTarget,
gasPrice: gasPrice,
}
stats.enodeFull = fmt.Sprintf("enode://%s@%s:%d", id, client.address, stats.portFull)
if stats.portLight != 0 {
stats.enodeLight = fmt.Sprintf("enode://%s@%s:%d?discport=%d", id, client.address, stats.portFull, stats.portLight)
}
stats.enode = fmt.Sprintf("enode://%s@%s:%d", id, client.address, stats.port)

return stats, nil
}
@@ -40,8 +40,7 @@ import (
// between sessions.
type config struct {
path string // File containing the configuration values
bootFull []string // Bootnodes to always connect to by full nodes
bootLight []string // Bootnodes to always connect to by light nodes
bootnodes []string // Bootnodes to always connect to by all nodes
ethstats string // Ethstats settings to cache for node deploys

Genesis *core.Genesis `json:"genesis,omitempty"` // Genesis block to cache for node deploys
@@ -55,7 +55,7 @@ func (w *wizard) deployExplorer() {
}
existed := err == nil

chainspec, err := newParityChainSpec(w.network, w.conf.Genesis, w.conf.bootFull)
chainspec, err := newParityChainSpec(w.network, w.conf.Genesis, w.conf.bootnodes)
if err != nil {
log.Error("Failed to create chain spec for explorer", "err", err)
return
@@ -38,7 +38,7 @@ func (w *wizard) deployFaucet() {
infos, err := checkFaucet(client, w.network)
if err != nil {
infos = &faucetInfos{
node: &nodeInfos{portFull: 30303, peersTotal: 25},
node: &nodeInfos{port: 30303, peersTotal: 25},
port: 80,
host: client.server,
amount: 1,

@@ -113,8 +113,8 @@ func (w *wizard) deployFaucet() {
}
// Figure out which port to listen on
fmt.Println()
fmt.Printf("Which TCP/UDP port should the light client listen on? (default = %d)\n", infos.node.portFull)
infos.node.portFull = w.readDefaultInt(infos.node.portFull)
fmt.Printf("Which TCP/UDP port should the light client listen on? (default = %d)\n", infos.node.port)
infos.node.port = w.readDefaultInt(infos.node.port)

// Set a proper name to report on the stats page
fmt.Println()

@@ -168,7 +168,7 @@ func (w *wizard) deployFaucet() {
fmt.Printf("Should the faucet be built from scratch (y/n)? (default = no)\n")
nocache = w.readDefaultString("n") != "n"
}
if out, err := deployFaucet(client, w.network, w.conf.bootLight, infos, nocache); err != nil {
if out, err := deployFaucet(client, w.network, w.conf.bootnodes, infos, nocache); err != nil {
log.Error("Failed to deploy faucet container", "err", err)
if len(out) > 0 {
fmt.Printf("%s\n", out)
@ -59,15 +59,16 @@ func (w *wizard) run() {
fmt.Println()

// Make sure we have a good network name to work with fmt.Println()
// Docker accepts hyphens in image names, but doesn't like it for container names
if w.network == "" {
fmt.Println("Please specify a network name to administer (no spaces, please)")
fmt.Println("Please specify a network name to administer (no spaces or hyphens, please)")
for {
w.network = w.readString()
if !strings.Contains(w.network, " ") {
if !strings.Contains(w.network, " ") && !strings.Contains(w.network, "-") {
fmt.Printf("\nSweet, you can set this via --network=%s next time!\n\n", w.network)
break
}
log.Error("I also like to live dangerously, still no spaces")
log.Error("I also like to live dangerously, still no spaces or hyphens")
}
}
log.Info("Administering Ethereum network", "name", w.network)
|
||||
|
@ -37,8 +37,7 @@ func (w *wizard) networkStats() {
|
||||
}
|
||||
// Clear out some previous configs to refill from current scan
|
||||
w.conf.ethstats = ""
|
||||
w.conf.bootFull = w.conf.bootFull[:0]
|
||||
w.conf.bootLight = w.conf.bootLight[:0]
|
||||
w.conf.bootnodes = w.conf.bootnodes[:0]
|
||||
|
||||
// Iterate over all the specified hosts and check their status
|
||||
var pend sync.WaitGroup
|
||||
@ -76,8 +75,7 @@ func (w *wizard) gatherStats(server string, pubkey []byte, client *sshClient) *s
|
||||
var (
|
||||
genesis string
|
||||
ethstats string
|
||||
bootFull []string
|
||||
bootLight []string
|
||||
bootnodes []string
|
||||
)
|
||||
// Ensure a valid SSH connection to the remote server
|
||||
logger := log.New("server", server)
|
||||
@ -123,10 +121,7 @@ func (w *wizard) gatherStats(server string, pubkey []byte, client *sshClient) *s
|
||||
stat.services["bootnode"] = infos.Report()
|
||||
|
||||
genesis = string(infos.genesis)
|
||||
bootFull = append(bootFull, infos.enodeFull)
|
||||
if infos.enodeLight != "" {
|
||||
bootLight = append(bootLight, infos.enodeLight)
|
||||
}
|
||||
bootnodes = append(bootnodes, infos.enode)
|
||||
}
|
||||
logger.Debug("Checking for sealnode availability")
|
||||
if infos, err := checkNode(client, w.network, false); err != nil {
|
||||
@ -184,8 +179,7 @@ func (w *wizard) gatherStats(server string, pubkey []byte, client *sshClient) *s
|
||||
if ethstats != "" {
|
||||
w.conf.ethstats = ethstats
|
||||
}
|
||||
w.conf.bootFull = append(w.conf.bootFull, bootFull...)
|
||||
w.conf.bootLight = append(w.conf.bootLight, bootLight...)
|
||||
w.conf.bootnodes = append(w.conf.bootnodes, bootnodes...)
|
||||
|
||||
return stat
|
||||
}
|
||||
|
@ -48,9 +48,9 @@ func (w *wizard) deployNode(boot bool) {
|
||||
infos, err := checkNode(client, w.network, boot)
|
||||
if err != nil {
|
||||
if boot {
|
||||
infos = &nodeInfos{portFull: 30303, peersTotal: 512, peersLight: 256}
|
||||
infos = &nodeInfos{port: 30303, peersTotal: 512, peersLight: 256}
|
||||
} else {
|
||||
infos = &nodeInfos{portFull: 30303, peersTotal: 50, peersLight: 0, gasTarget: 4.7, gasPrice: 18}
|
||||
infos = &nodeInfos{port: 30303, peersTotal: 50, peersLight: 0, gasTarget: 4.7, gasPrice: 18}
|
||||
}
|
||||
}
|
||||
existed := err == nil
|
||||
@ -79,8 +79,8 @@ func (w *wizard) deployNode(boot bool) {
|
||||
}
|
||||
// Figure out which port to listen on
|
||||
fmt.Println()
|
||||
fmt.Printf("Which TCP/UDP port to listen on? (default = %d)\n", infos.portFull)
|
||||
infos.portFull = w.readDefaultInt(infos.portFull)
|
||||
fmt.Printf("Which TCP/UDP port to listen on? (default = %d)\n", infos.port)
|
||||
infos.port = w.readDefaultInt(infos.port)
|
||||
|
||||
// Figure out how many peers to allow (different based on node type)
|
||||
fmt.Println()
|
||||
@ -163,7 +163,7 @@ func (w *wizard) deployNode(boot bool) {
|
||||
fmt.Printf("Should the node be built from scratch (y/n)? (default = no)\n")
|
||||
nocache = w.readDefaultString("n") != "n"
|
||||
}
|
||||
if out, err := deployNode(client, w.network, w.conf.bootFull, w.conf.bootLight, infos, nocache); err != nil {
|
||||
if out, err := deployNode(client, w.network, w.conf.bootnodes, infos, nocache); err != nil {
|
||||
log.Error("Failed to deploy Ethereum node container", "err", err)
|
||||
if len(out) > 0 {
|
||||
fmt.Printf("%s\n", out)
|
||||
|
@ -98,7 +98,7 @@ func (w *wizard) deployWallet() {
|
||||
fmt.Printf("Should the wallet be built from scratch (y/n)? (default = no)\n")
|
||||
nocache = w.readDefaultString("n") != "n"
|
||||
}
|
||||
if out, err := deployWallet(client, w.network, w.conf.bootFull, infos, nocache); err != nil {
|
||||
if out, err := deployWallet(client, w.network, w.conf.bootnodes, infos, nocache); err != nil {
|
||||
log.Error("Failed to deploy wallet container", "err", err)
|
||||
if len(out) > 0 {
|
||||
fmt.Printf("%s\n", out)
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
|
@ -25,6 +25,7 @@ import (
|
||||
"os/signal"
|
||||
"runtime"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
@ -64,7 +65,7 @@ func StartNode(stack *node.Node) {
}
go func() {
sigc := make(chan os.Signal, 1)
signal.Notify(sigc, os.Interrupt)
signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM)
defer signal.Stop(sigc)
<-sigc
log.Info("Got interrupt, shutting down...")
@ -85,7 +86,7 @@ func ImportChain(chain *core.BlockChain, fn string) error {
// If a signal is received, the import will stop at the next batch.
interrupt := make(chan os.Signal, 1)
stop := make(chan struct{})
signal.Notify(interrupt, os.Interrupt)
signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
defer signal.Stop(interrupt)
defer close(interrupt)
go func() {
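Both StartNode and ImportChain now listen for SIGTERM in addition to SIGINT, so the process also shuts down cleanly when a service manager or container runtime stops it rather than only on Ctrl-C. A standalone sketch of the pattern, assuming nothing beyond the standard library:

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	sigc := make(chan os.Signal, 1)
	// Catch both Ctrl-C (SIGINT) and the polite kill used by systemd/Docker (SIGTERM).
	signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(sigc)

	fmt.Println("running, send SIGINT or SIGTERM to stop")
	sig := <-sigc
	fmt.Println("got", sig, "- shutting down")
}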
|
||||
|
@ -397,6 +397,11 @@ var (
Usage: "Comma separated list of domains from which to accept cross origin requests (browser enforced)",
Value: "",
}
RPCVirtualHostsFlag = cli.StringFlag{
Name: "rpcvhosts",
Usage: "Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard.",
Value: "localhost",
}
RPCApiFlag = cli.StringFlag{
Name: "rpcapi",
Usage: "API's offered over the HTTP-RPC interface",
@ -690,6 +695,8 @@ func setHTTP(ctx *cli.Context, cfg *node.Config) {
if ctx.GlobalIsSet(RPCApiFlag.Name) {
cfg.HTTPModules = splitAndTrim(ctx.GlobalString(RPCApiFlag.Name))
}

cfg.HTTPVirtualHosts = splitAndTrim(ctx.GlobalString(RPCVirtualHostsFlag.Name))
}

// setWS creates the WebSocket RPC listener interface string from the set
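The new --rpcvhosts flag is parsed with the same splitAndTrim helper already used for the API list: a comma-separated string becomes a slice of trimmed host names. A sketch of an equivalent helper follows; the body is an assumption of ours (only the call sites above are from the diff), and the name is deliberately different:

package main

import (
	"fmt"
	"strings"
)

// splitAndTrimmed turns "localhost, example.org ,*" into ["localhost" "example.org" "*"].
// It is a stand-in for the splitAndTrim helper referenced in setHTTP above.
func splitAndTrimmed(input string) []string {
	result := []string{}
	for _, part := range strings.Split(input, ",") {
		if trimmed := strings.TrimSpace(part); trimmed != "" {
			result = append(result, trimmed)
		}
	}
	return result
}

func main() {
	fmt.Println(splitAndTrimmed("localhost, example.org ,*"))
}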
|
||||
|
@ -43,7 +43,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/nat"
|
||||
"github.com/ethereum/go-ethereum/whisper/mailserver"
|
||||
whisper "github.com/ethereum/go-ethereum/whisper/whisperv5"
|
||||
whisper "github.com/ethereum/go-ethereum/whisper/whisperv6"
|
||||
"golang.org/x/crypto/pbkdf2"
|
||||
)
|
||||
|
||||
@ -66,8 +66,10 @@ var (
|
||||
asymKey *ecdsa.PrivateKey
|
||||
nodeid *ecdsa.PrivateKey
|
||||
topic whisper.TopicType
|
||||
|
||||
asymKeyID string
|
||||
filterID string
|
||||
asymFilterID string
|
||||
symFilterID string
|
||||
symPass string
|
||||
msPassword string
|
||||
)
|
||||
@ -263,7 +265,7 @@ func initialize() {
|
||||
Config: p2p.Config{
|
||||
PrivateKey: nodeid,
|
||||
MaxPeers: maxPeers,
|
||||
Name: common.MakeName("wnode", "5.0"),
|
||||
Name: common.MakeName("wnode", "6.0"),
|
||||
Protocols: shh.Protocols(),
|
||||
ListenAddr: *argIP,
|
||||
NAT: nat.Any(),
|
||||
@ -363,13 +365,22 @@ func configureNode() {
|
||||
}
|
||||
}
|
||||
|
||||
filter := whisper.Filter{
|
||||
symFilter := whisper.Filter{
|
||||
KeySym: symKey,
|
||||
Topics: [][]byte{topic[:]},
|
||||
AllowP2P: p2pAccept,
|
||||
}
|
||||
symFilterID, err = shh.Subscribe(&symFilter)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to install filter: %s", err)
|
||||
}
|
||||
|
||||
asymFilter := whisper.Filter{
|
||||
KeyAsym: asymKey,
|
||||
Topics: [][]byte{topic[:]},
|
||||
AllowP2P: p2pAccept,
|
||||
}
|
||||
filterID, err = shh.Subscribe(&filter)
|
||||
asymFilterID, err = shh.Subscribe(&asymFilter)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to install filter: %s", err)
|
||||
}
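With the move to whisperv6 the node installs two independent filters, one keyed with the symmetric key and one on the node's asymmetric key, and later polls both in messageLoop. A condensed sketch of the subscription step, using only the fields and calls visible in the diff; shh, symKey, asymKey, topic and p2pAccept are assumed to be initialized as in the surrounding wnode code:

import (
	"crypto/ecdsa"

	whisper "github.com/ethereum/go-ethereum/whisper/whisperv6"
)

// installFilters subscribes a symmetric and an asymmetric filter and returns their IDs.
// Sketch only: it mirrors the configureNode hunk above rather than adding new behaviour.
func installFilters(shh *whisper.Whisper, symKey []byte, asymKey *ecdsa.PrivateKey,
	topic whisper.TopicType, p2pAccept bool) (symID, asymID string, err error) {

	symFilter := whisper.Filter{
		KeySym:   symKey,
		Topics:   [][]byte{topic[:]},
		AllowP2P: p2pAccept,
	}
	if symID, err = shh.Subscribe(&symFilter); err != nil {
		return "", "", err
	}
	asymFilter := whisper.Filter{
		KeyAsym:  asymKey,
		Topics:   [][]byte{topic[:]},
		AllowP2P: p2pAccept,
	}
	if asymID, err = shh.Subscribe(&asymFilter); err != nil {
		return "", "", err
	}
	return symID, asymID, nil
}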
|
||||
@ -522,9 +533,14 @@ func sendMsg(payload []byte) common.Hash {
|
||||
}
|
||||
|
||||
func messageLoop() {
|
||||
f := shh.GetFilter(filterID)
|
||||
if f == nil {
|
||||
utils.Fatalf("filter is not installed")
|
||||
sf := shh.GetFilter(symFilterID)
|
||||
if sf == nil {
|
||||
utils.Fatalf("symmetric filter is not installed")
|
||||
}
|
||||
|
||||
af := shh.GetFilter(asymFilterID)
|
||||
if af == nil {
|
||||
utils.Fatalf("asymmetric filter is not installed")
|
||||
}
|
||||
|
||||
ticker := time.NewTicker(time.Millisecond * 50)
|
||||
@ -532,7 +548,16 @@ func messageLoop() {
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
messages := f.Retrieve()
|
||||
messages := sf.Retrieve()
|
||||
for _, msg := range messages {
|
||||
if *fileExMode || len(msg.Payload) > 2048 {
|
||||
writeMessageToFile(*argSaveDir, msg)
|
||||
} else {
|
||||
printMessageInfo(msg)
|
||||
}
|
||||
}
|
||||
|
||||
messages = af.Retrieve()
|
||||
for _, msg := range messages {
|
||||
if *fileExMode || len(msg.Payload) > 2048 {
|
||||
writeMessageToFile(*argSaveDir, msg)
|
||||
@ -631,7 +656,7 @@ func requestExpiredMessagesLoop() {
|
||||
params.PoW = *argServerPoW
|
||||
params.Payload = data
|
||||
params.KeySym = key
|
||||
params.Src = nodeid
|
||||
params.Src = asymKey
|
||||
params.WorkTime = 5
|
||||
|
||||
msg, err := whisper.NewSentMessage(¶ms)
|
||||
|
@ -1,18 +1,18 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// +build freebsd
|
||||
|
||||
|
@ -1,18 +1,18 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package fdlimit
|
||||
|
||||
|
@ -1,18 +1,18 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// +build linux darwin netbsd openbsd solaris
|
||||
|
||||
|
@ -1,18 +1,18 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package fdlimit
|
||||
|
||||
|
@ -26,6 +26,7 @@ import (
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/ethereum/go-ethereum/internal/jsre"
|
||||
"github.com/ethereum/go-ethereum/internal/web3ext"
|
||||
@ -332,7 +333,7 @@ func (c *Console) Interactive() {
|
||||
}()
|
||||
// Monitor Ctrl-C too in case the input is empty and we need to bail
|
||||
abort := make(chan os.Signal, 1)
|
||||
signal.Notify(abort, os.Interrupt)
|
||||
signal.Notify(abort, syscall.SIGINT, syscall.SIGTERM)
|
||||
|
||||
// Start sending prompts to the user and reading back inputs
|
||||
for {
|
||||
|
@ -2,7 +2,7 @@ FROM alpine:3.7
|
||||
|
||||
RUN \
|
||||
apk add --update go git make gcc musl-dev linux-headers ca-certificates && \
|
||||
git clone --depth 1 --branch release/1.7 https://github.com/ethereum/go-ethereum && \
|
||||
git clone --depth 1 --branch release/1.8 https://github.com/ethereum/go-ethereum && \
|
||||
(cd go-ethereum && make geth) && \
|
||||
cp go-ethereum/build/bin/geth /geth && \
|
||||
apk del go git make gcc musl-dev linux-headers && \
|
||||
|
@ -5,7 +5,7 @@ ENV PATH=/usr/lib/go-1.9/bin:$PATH
|
||||
RUN \
|
||||
apt-get update && apt-get upgrade -q -y && \
|
||||
apt-get install -y --no-install-recommends golang-1.9 git make gcc libc-dev ca-certificates && \
|
||||
git clone --depth 1 --branch release/1.7 https://github.com/ethereum/go-ethereum && \
|
||||
git clone --depth 1 --branch release/1.8 https://github.com/ethereum/go-ethereum && \
|
||||
(cd go-ethereum && make geth) && \
|
||||
cp go-ethereum/build/bin/geth /geth && \
|
||||
apt-get remove -y golang-1.9 git make gcc libc-dev && apt autoremove -y && apt-get clean && \
|
||||
|
@ -926,13 +926,9 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
|
||||
if chosen < lastWrite+triesInMemory {
|
||||
switch {
|
||||
case size >= 2*limit:
|
||||
log.Error("Trie memory critical, forcing to disk", "size", size, "limit", limit, "optimum", float64(chosen-lastWrite)/triesInMemory)
|
||||
log.Warn("State memory usage too high, committing", "size", size, "limit", limit, "optimum", float64(chosen-lastWrite)/triesInMemory)
|
||||
case bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit:
|
||||
log.Error("Trie timing critical, forcing to disk", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory)
|
||||
case size > limit:
|
||||
log.Warn("Trie memory at dangerous levels", "size", size, "limit", limit, "optimum", float64(chosen-lastWrite)/triesInMemory)
|
||||
case bc.gcproc > bc.cacheConfig.TrieTimeLimit:
|
||||
log.Warn("Trie timing at dangerous levels", "time", bc.gcproc, "limit", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory)
|
||||
log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory)
|
||||
}
|
||||
}
|
||||
// If optimum or critical limits reached, write to disk
|
||||
@ -1070,8 +1066,12 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
|
||||
}
|
||||
switch {
|
||||
case err == ErrKnownBlock:
|
||||
// Block and state both already known. However if the current block is below
|
||||
// this number we did a rollback and we should reimport it nonetheless.
|
||||
if bc.CurrentBlock().NumberU64() >= block.NumberU64() {
|
||||
stats.ignored++
|
||||
continue
|
||||
}
|
||||
|
||||
case err == consensus.ErrFutureBlock:
|
||||
// Allow up to MaxFuture second in the future blocks. If this limit is exceeded
|
||||
|
@ -215,6 +215,9 @@ func (st *StateTransition) TransitionDb() (ret []byte, usedGas uint64, failed bo
|
||||
|
||||
// Pay intrinsic gas
|
||||
gas, err := IntrinsicGas(st.data, contractCreation, homestead)
|
||||
if err != nil {
|
||||
return nil, 0, false, err
|
||||
}
|
||||
if err = st.useGas(gas); err != nil {
|
||||
return nil, 0, false, err
|
||||
}
|
||||
|
@ -1,3 +1,19 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package vm
|
||||
|
||||
import (
|
||||
|
@ -1,3 +1,19 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package vm
|
||||
|
||||
import (
|
||||
|
@ -20,9 +20,7 @@ import (
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/math"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
)
|
||||
|
||||
@ -123,11 +121,6 @@ func (in *Interpreter) Run(contract *Contract, input []byte) (ret []byte, err er
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
codehash := contract.CodeHash // codehash is used when doing jump dest caching
|
||||
if codehash == (common.Hash{}) {
|
||||
codehash = crypto.Keccak256Hash(contract.Code)
|
||||
}
|
||||
|
||||
var (
|
||||
op OpCode // current opcode
|
||||
mem = NewMemory() // bound memory
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
|
@ -173,8 +173,8 @@ type LightChain interface {
|
||||
type BlockChain interface {
|
||||
LightChain
|
||||
|
||||
// HasBlockAndState verifies block and associated states' presence in the local chain.
|
||||
HasBlockAndState(common.Hash, uint64) bool
|
||||
// HasBlock verifies a block's presence in the local chain.
|
||||
HasBlock(common.Hash, uint64) bool
|
||||
|
||||
// GetBlockByHash retrieves a block from the local chain.
|
||||
GetBlockByHash(common.Hash) *types.Block
|
||||
@ -266,7 +266,6 @@ func (d *Downloader) Synchronising() bool {
|
||||
// RegisterPeer injects a new download peer into the set of block source to be
|
||||
// used for fetching hashes and blocks from.
|
||||
func (d *Downloader) RegisterPeer(id string, version int, peer Peer) error {
|
||||
|
||||
logger := log.New("peer", id)
|
||||
logger.Trace("Registering sync peer")
|
||||
if err := d.peers.Register(newPeerConnection(id, version, peer, logger)); err != nil {
|
||||
@ -583,7 +582,6 @@ func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, err
|
||||
// Figure out the valid ancestor range to prevent rewrite attacks
|
||||
floor, ceil := int64(-1), d.lightchain.CurrentHeader().Number.Uint64()
|
||||
|
||||
p.log.Debug("Looking for common ancestor", "local", ceil, "remote", height)
|
||||
if d.mode == FullSync {
|
||||
ceil = d.blockchain.CurrentBlock().NumberU64()
|
||||
} else if d.mode == FastSync {
|
||||
@ -592,6 +590,8 @@ func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, err
|
||||
if ceil >= MaxForkAncestry {
|
||||
floor = int64(ceil - MaxForkAncestry)
|
||||
}
|
||||
p.log.Debug("Looking for common ancestor", "local", ceil, "remote", height)
|
||||
|
||||
// Request the topmost blocks to short circuit binary ancestor lookup
|
||||
head := ceil
|
||||
if head > height {
|
||||
@ -647,7 +647,7 @@ func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, err
|
||||
continue
|
||||
}
|
||||
// Otherwise check if we already know the header or not
|
||||
if (d.mode == FullSync && d.blockchain.HasBlockAndState(headers[i].Hash(), headers[i].Number.Uint64())) || (d.mode != FullSync && d.lightchain.HasHeader(headers[i].Hash(), headers[i].Number.Uint64())) {
|
||||
if (d.mode == FullSync && d.blockchain.HasBlock(headers[i].Hash(), headers[i].Number.Uint64())) || (d.mode != FullSync && d.lightchain.HasHeader(headers[i].Hash(), headers[i].Number.Uint64())) {
|
||||
number, hash = headers[i].Number.Uint64(), headers[i].Hash()
|
||||
|
||||
// If every header is known, even future ones, the peer straight out lied about its head
|
||||
@ -712,7 +712,7 @@ func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, err
|
||||
arrived = true
|
||||
|
||||
// Modify the search interval based on the response
|
||||
if (d.mode == FullSync && !d.blockchain.HasBlockAndState(headers[0].Hash(), headers[0].Number.Uint64())) || (d.mode != FullSync && !d.lightchain.HasHeader(headers[0].Hash(), headers[0].Number.Uint64())) {
|
||||
if (d.mode == FullSync && !d.blockchain.HasBlock(headers[0].Hash(), headers[0].Number.Uint64())) || (d.mode != FullSync && !d.lightchain.HasHeader(headers[0].Hash(), headers[0].Number.Uint64())) {
|
||||
end = check
|
||||
break
|
||||
}
|
||||
|
@ -221,14 +221,9 @@ func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool {
|
||||
return dl.GetHeaderByHash(hash) != nil
|
||||
}
|
||||
|
||||
// HasBlockAndState checks if a block and associated state is present in the testers canonical chain.
|
||||
func (dl *downloadTester) HasBlockAndState(hash common.Hash, number uint64) bool {
|
||||
block := dl.GetBlockByHash(hash)
|
||||
if block == nil {
|
||||
return false
|
||||
}
|
||||
_, err := dl.stateDb.Get(block.Root().Bytes())
|
||||
return err == nil
|
||||
// HasBlock checks if a block is present in the testers canonical chain.
|
||||
func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool {
|
||||
return dl.GetBlockByHash(hash) != nil
|
||||
}
|
||||
|
||||
// GetHeader retrieves a header from the testers canonical chain.
|
||||
|
11
eth/sync.go
@ -189,18 +189,13 @@ func (pm *ProtocolManager) synchronise(peer *peer) {
|
||||
mode = downloader.FastSync
|
||||
}
|
||||
// Run the sync cycle, and disable fast sync if we've went past the pivot block
|
||||
err := pm.downloader.Synchronise(peer.id, pHead, pTd, mode)
|
||||
|
||||
if err := pm.downloader.Synchronise(peer.id, pHead, pTd, mode); err != nil {
|
||||
return
|
||||
}
|
||||
if atomic.LoadUint32(&pm.fastSync) == 1 {
|
||||
// Disable fast sync if we indeed have something in our chain
|
||||
if pm.blockchain.CurrentBlock().NumberU64() > 0 {
|
||||
log.Info("Fast sync complete, auto disabling")
|
||||
atomic.StoreUint32(&pm.fastSync, 0)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
atomic.StoreUint32(&pm.acceptTxs, 1) // Mark initial sync done
|
||||
if head := pm.blockchain.CurrentBlock(); head.NumberU64() > 0 {
|
||||
// We've completed a sync cycle, notify all peers of new state. This path is
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
|
@ -94,7 +94,7 @@ func LocalEnv() Environment {
}
if env.Branch == "" {
if head != "HEAD" {
env.Branch = strings.TrimLeft(head, "refs/heads/")
env.Branch = strings.TrimPrefix(head, "refs/heads/")
}
}
if info, err := os.Stat(".git/objects"); err == nil && info.IsDir() && env.Tag == "" {
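The switch from strings.TrimLeft to strings.TrimPrefix is a real bug fix: TrimLeft treats its second argument as a set of characters, so any branch whose name starts with characters drawn from "refs/heads/" gets mangled, while TrimPrefix removes the literal prefix exactly once. A small example of the difference:

package main

import (
	"fmt"
	"strings"
)

func main() {
	head := "refs/heads/release/1.8"
	// TrimLeft strips characters from the cutset, eating the leading "re" of "release".
	fmt.Println(strings.TrimLeft(head, "refs/heads/"))   // "lease/1.8"
	// TrimPrefix strips the literal prefix once and leaves the rest alone.
	fmt.Println(strings.TrimPrefix(head, "refs/heads/")) // "release/1.8"
}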
|
||||
|
@ -1,18 +1,18 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmdtest
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2015 The go-ethereum Authors
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
|
@ -1135,6 +1135,18 @@ func (args *SendTxArgs) setDefaults(ctx context.Context, b Backend) error {
|
||||
if args.Data != nil && args.Input != nil && !bytes.Equal(*args.Data, *args.Input) {
|
||||
return errors.New(`Both "data" and "input" are set and not equal. Please use "input" to pass transaction call data.`)
|
||||
}
|
||||
if args.To == nil {
|
||||
// Contract creation
|
||||
var input []byte
|
||||
if args.Data != nil {
|
||||
input = *args.Data
|
||||
} else if args.Input != nil {
|
||||
input = *args.Input
|
||||
}
|
||||
if len(input) == 0 {
|
||||
return errors.New(`contract creation without any data provided`)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -36,24 +36,26 @@ const (
|
||||
maxNodeCount = 20 // maximum number of fetcherTreeNode entries remembered for each peer
|
||||
)
|
||||
|
||||
// lightFetcher
|
||||
// lightFetcher implements retrieval of newly announced headers. It also provides a peerHasBlock function for the
|
||||
// ODR system to ensure that we only request data related to a certain block from peers who have already processed
|
||||
// and announced that block.
|
||||
type lightFetcher struct {
|
||||
pm *ProtocolManager
|
||||
odr *LesOdr
|
||||
chain *light.LightChain
|
||||
|
||||
lock sync.Mutex // lock protects access to the fetcher's internal state variables except sent requests
|
||||
maxConfirmedTd *big.Int
|
||||
peers map[*peer]*fetcherPeerInfo
|
||||
lastUpdateStats *updateStatsEntry
|
||||
|
||||
lock sync.Mutex // qwerqwerqwe
|
||||
deliverChn chan fetchResponse
|
||||
reqMu sync.RWMutex
|
||||
requested map[uint64]fetchRequest
|
||||
timeoutChn chan uint64
|
||||
requestChn chan bool // true if initiated from outside
|
||||
syncing bool
|
||||
syncDone chan *peer
|
||||
|
||||
reqMu sync.RWMutex // reqMu protects access to sent header fetch requests
|
||||
requested map[uint64]fetchRequest
|
||||
deliverChn chan fetchResponse
|
||||
timeoutChn chan uint64
|
||||
requestChn chan bool // true if initiated from outside
|
||||
}
|
||||
|
||||
// fetcherPeerInfo holds fetcher-specific information about each active peer
|
||||
@ -425,6 +427,9 @@ func (f *lightFetcher) nextRequest() (*distReq, uint64) {
|
||||
},
|
||||
canSend: func(dp distPeer) bool {
|
||||
p := dp.(*peer)
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
|
||||
fp := f.peers[p]
|
||||
return fp != nil && fp.nodeByHash[bestHash] != nil
|
||||
},
|
||||
@ -557,8 +562,13 @@ func (f *lightFetcher) checkAnnouncedHeaders(fp *fetcherPeerInfo, headers []*typ
|
||||
return true
|
||||
}
|
||||
// we ran out of recently delivered headers but have not reached a node known by this peer yet, continue matching
|
||||
td = f.chain.GetTd(header.ParentHash, header.Number.Uint64()-1)
|
||||
header = f.chain.GetHeader(header.ParentHash, header.Number.Uint64()-1)
|
||||
hash, number := header.ParentHash, header.Number.Uint64()-1
|
||||
td = f.chain.GetTd(hash, number)
|
||||
header = f.chain.GetHeader(hash, number)
|
||||
if header == nil || td == nil {
|
||||
log.Error("Missing parent of validated header", "hash", hash, "number", number)
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
header = headers[i]
|
||||
td = tds[i]
|
||||
@ -642,13 +652,18 @@ func (f *lightFetcher) checkKnownNode(p *peer, n *fetcherTreeNode) bool {
|
||||
if td == nil {
|
||||
return false
|
||||
}
|
||||
header := f.chain.GetHeader(n.hash, n.number)
|
||||
// check the availability of both header and td because reads are not protected by chain db mutex
|
||||
// Note: returning false is always safe here
|
||||
if header == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
fp := f.peers[p]
|
||||
if fp == nil {
|
||||
p.Log().Debug("Unknown peer to check known nodes")
|
||||
return false
|
||||
}
|
||||
header := f.chain.GetHeader(n.hash, n.number)
|
||||
if !f.checkAnnouncedHeaders(fp, []*types.Header{header}, []*big.Int{td}) {
|
||||
p.Log().Debug("Inconsistent announcement")
|
||||
go f.pm.removePeer(p.id)
|
||||
|
@ -790,10 +790,9 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
||||
break
|
||||
}
|
||||
}
|
||||
proofs := nodes.NodeList()
|
||||
bv, rcost := p.fcClient.RequestProcessed(costs.baseCost + uint64(reqCnt)*costs.reqCost)
|
||||
pm.server.fcCostStats.update(msg.Code, uint64(reqCnt), rcost)
|
||||
return p.SendProofsV2(req.ReqID, bv, proofs)
|
||||
return p.SendProofsV2(req.ReqID, bv, nodes.NodeList())
|
||||
|
||||
case ProofsV1Msg:
|
||||
if pm.odr == nil {
|
||||
@ -856,15 +855,12 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
||||
if reject(uint64(reqCnt), MaxHelperTrieProofsFetch) {
|
||||
return errResp(ErrRequestRejected, "")
|
||||
}
|
||||
trieDb := trie.NewDatabase(ethdb.NewTable(pm.chainDb, light.ChtTablePrefix))
|
||||
for _, req := range req.Reqs {
|
||||
if header := pm.blockchain.GetHeaderByNumber(req.BlockNum); header != nil {
|
||||
sectionHead := core.GetCanonicalHash(pm.chainDb, req.ChtNum*light.ChtV1Frequency-1)
|
||||
sectionHead := core.GetCanonicalHash(pm.chainDb, req.ChtNum*light.CHTFrequencyServer-1)
|
||||
if root := light.GetChtRoot(pm.chainDb, req.ChtNum-1, sectionHead); root != (common.Hash{}) {
|
||||
statedb, err := pm.blockchain.State()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
trie, err := statedb.Database().OpenTrie(root)
|
||||
trie, err := trie.New(root, trieDb)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
@ -878,7 +874,6 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
||||
if bytes += proof.DataSize() + estHeaderRlpSize; bytes >= softResponseLimit {
|
||||
break
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -910,20 +905,16 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
||||
lastIdx uint64
|
||||
lastType uint
|
||||
root common.Hash
|
||||
statedb *state.StateDB
|
||||
trie state.Trie
|
||||
auxTrie *trie.Trie
|
||||
)
|
||||
|
||||
nodes := light.NewNodeSet()
|
||||
|
||||
for _, req := range req.Reqs {
|
||||
if trie == nil || req.HelperTrieType != lastType || req.TrieIdx != lastIdx {
|
||||
statedb, trie, lastType, lastIdx = nil, nil, req.HelperTrieType, req.TrieIdx
|
||||
if auxTrie == nil || req.Type != lastType || req.TrieIdx != lastIdx {
|
||||
auxTrie, lastType, lastIdx = nil, req.Type, req.TrieIdx
|
||||
|
||||
if root, _ = pm.getHelperTrie(req.HelperTrieType, req.TrieIdx); root != (common.Hash{}) {
|
||||
if statedb, _ = pm.blockchain.State(); statedb != nil {
|
||||
trie, _ = statedb.Database().OpenTrie(root)
|
||||
}
|
||||
var prefix string
|
||||
if root, prefix = pm.getHelperTrie(req.Type, req.TrieIdx); root != (common.Hash{}) {
|
||||
auxTrie, _ = trie.New(root, trie.NewDatabase(ethdb.NewTable(pm.chainDb, prefix)))
|
||||
}
|
||||
}
|
||||
if req.AuxReq == auxRoot {
|
||||
@ -934,8 +925,8 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
||||
auxData = append(auxData, data)
|
||||
auxBytes += len(data)
|
||||
} else {
|
||||
if trie != nil {
|
||||
trie.Prove(req.Key, req.FromLevel, nodes)
|
||||
if auxTrie != nil {
|
||||
auxTrie.Prove(req.Key, req.FromLevel, nodes)
|
||||
}
|
||||
if req.AuxReq != 0 {
|
||||
data := pm.getHelperTrieAuxData(req)
|
||||
@ -947,10 +938,9 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
||||
break
|
||||
}
|
||||
}
|
||||
proofs := nodes.NodeList()
|
||||
bv, rcost := p.fcClient.RequestProcessed(costs.baseCost + uint64(reqCnt)*costs.reqCost)
|
||||
pm.server.fcCostStats.update(msg.Code, uint64(reqCnt), rcost)
|
||||
return p.SendHelperTrieProofs(req.ReqID, bv, HelperTrieResps{Proofs: proofs, AuxData: auxData})
|
||||
return p.SendHelperTrieProofs(req.ReqID, bv, HelperTrieResps{Proofs: nodes.NodeList(), AuxData: auxData})
|
||||
|
||||
case HeaderProofsMsg:
|
||||
if pm.odr == nil {
|
||||
@ -1123,7 +1113,7 @@ func (pm *ProtocolManager) getAccount(statedb *state.StateDB, root, hash common.
|
||||
func (pm *ProtocolManager) getHelperTrie(id uint, idx uint64) (common.Hash, string) {
|
||||
switch id {
|
||||
case htCanonical:
|
||||
sectionHead := core.GetCanonicalHash(pm.chainDb, (idx+1)*light.ChtFrequency-1)
|
||||
sectionHead := core.GetCanonicalHash(pm.chainDb, (idx+1)*light.CHTFrequencyClient-1)
|
||||
return light.GetChtV2Root(pm.chainDb, idx, sectionHead), light.ChtTablePrefix
|
||||
case htBloomBits:
|
||||
sectionHead := core.GetCanonicalHash(pm.chainDb, (idx+1)*light.BloomTrieFrequency-1)
|
||||
@ -1134,10 +1124,8 @@ func (pm *ProtocolManager) getHelperTrie(id uint, idx uint64) (common.Hash, stri

// getHelperTrieAuxData returns requested auxiliary data for the given HelperTrie request
func (pm *ProtocolManager) getHelperTrieAuxData(req HelperTrieReq) []byte {
if req.HelperTrieType == htCanonical && req.AuxReq == auxHeader {
if len(req.Key) != 8 {
return nil
}
switch {
case req.Type == htCanonical && req.AuxReq == auxHeader && len(req.Key) == 8:
blockNum := binary.BigEndian.Uint64(req.Key)
hash := core.GetCanonicalHash(pm.chainDb, blockNum)
return core.GetHeaderRLP(pm.chainDb, hash, blockNum)
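Instead of opening the CHT and bloom tries through the state database, the handler hunks above now build each helper trie directly on top of a table-prefixed database and prove keys into a light.NodeSet. A sketch of that pattern, using only calls that appear in the diff; db, prefix, root and key are assumed inputs, and proof failures are ignored just as in the handler:

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/light"
	"github.com/ethereum/go-ethereum/trie"
)

// proveHelperTrieKey opens the helper trie over its own table prefix and collects
// the merkle proof for one key into a shared node set.
func proveHelperTrieKey(db ethdb.Database, prefix string, root common.Hash, key []byte) (*light.NodeSet, error) {
	nodes := light.NewNodeSet()

	t, err := trie.New(root, trie.NewDatabase(ethdb.NewTable(db, prefix)))
	if err != nil {
		return nil, err
	}
	t.Prove(key, 0, nodes)
	return nodes, nil
}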
|
||||
|
@ -17,7 +17,7 @@
|
||||
package les
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"math/big"
|
||||
"math/rand"
|
||||
"testing"
|
||||
@ -45,27 +45,8 @@ func expectResponse(r p2p.MsgReader, msgcode, reqID, bv uint64, data interface{}
|
||||
return p2p.ExpectMsg(r, msgcode, resp{reqID, bv, data})
|
||||
}
|
||||
|
||||
func testCheckProof(t *testing.T, exp *light.NodeSet, got light.NodeList) {
|
||||
if exp.KeyCount() > len(got) {
|
||||
t.Errorf("proof has fewer nodes than expected")
|
||||
return
|
||||
}
|
||||
if exp.KeyCount() < len(got) {
|
||||
t.Errorf("proof has more nodes than expected")
|
||||
return
|
||||
}
|
||||
for _, node := range got {
|
||||
n, _ := exp.Get(crypto.Keccak256(node))
|
||||
if !bytes.Equal(n, node) {
|
||||
t.Errorf("proof contents mismatch")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that block headers can be retrieved from a remote chain based on user queries.
|
||||
func TestGetBlockHeadersLes1(t *testing.T) { testGetBlockHeaders(t, 1) }
|
||||
|
||||
func TestGetBlockHeadersLes2(t *testing.T) { testGetBlockHeaders(t, 2) }
|
||||
|
||||
func testGetBlockHeaders(t *testing.T, protocol int) {
|
||||
@ -196,7 +177,6 @@ func testGetBlockHeaders(t *testing.T, protocol int) {
|
||||
|
||||
// Tests that block contents can be retrieved from a remote chain based on their hashes.
|
||||
func TestGetBlockBodiesLes1(t *testing.T) { testGetBlockBodies(t, 1) }
|
||||
|
||||
func TestGetBlockBodiesLes2(t *testing.T) { testGetBlockBodies(t, 2) }
|
||||
|
||||
func testGetBlockBodies(t *testing.T, protocol int) {
|
||||
@ -274,7 +254,6 @@ func testGetBlockBodies(t *testing.T, protocol int) {
|
||||
|
||||
// Tests that the contract codes can be retrieved based on account addresses.
|
||||
func TestGetCodeLes1(t *testing.T) { testGetCode(t, 1) }
|
||||
|
||||
func TestGetCodeLes2(t *testing.T) { testGetCode(t, 2) }
|
||||
|
||||
func testGetCode(t *testing.T, protocol int) {
|
||||
@ -309,7 +288,6 @@ func testGetCode(t *testing.T, protocol int) {
|
||||
|
||||
// Tests that the transaction receipts can be retrieved based on hashes.
|
||||
func TestGetReceiptLes1(t *testing.T) { testGetReceipt(t, 1) }
|
||||
|
||||
func TestGetReceiptLes2(t *testing.T) { testGetReceipt(t, 2) }
|
||||
|
||||
func testGetReceipt(t *testing.T, protocol int) {
|
||||
@ -338,7 +316,6 @@ func testGetReceipt(t *testing.T, protocol int) {
|
||||
|
||||
// Tests that trie merkle proofs can be retrieved
|
||||
func TestGetProofsLes1(t *testing.T) { testGetProofs(t, 1) }
|
||||
|
||||
func TestGetProofsLes2(t *testing.T) { testGetProofs(t, 2) }
|
||||
|
||||
func testGetProofs(t *testing.T, protocol int) {
|
||||
@ -389,27 +366,126 @@ func testGetProofs(t *testing.T, protocol int) {
|
||||
case 2:
|
||||
cost := peer.GetRequestCost(GetProofsV2Msg, len(proofreqs))
|
||||
sendRequest(peer.app, GetProofsV2Msg, 42, cost, proofreqs)
|
||||
msg, err := peer.app.ReadMsg()
|
||||
if err != nil {
|
||||
t.Errorf("Message read error: %v", err)
|
||||
if err := expectResponse(peer.app, ProofsV2Msg, 42, testBufLimit, proofsV2.NodeList()); err != nil {
|
||||
t.Errorf("proofs mismatch: %v", err)
|
||||
}
|
||||
var resp struct {
|
||||
ReqID, BV uint64
|
||||
Data light.NodeList
|
||||
}
|
||||
if err := msg.Decode(&resp); err != nil {
|
||||
t.Errorf("reply decode error: %v", err)
|
||||
}
|
||||
if msg.Code != ProofsV2Msg {
|
||||
t.Errorf("Message code mismatch")
|
||||
|
||||
// Tests that CHT proofs can be correctly retrieved.
|
||||
func TestGetCHTProofsLes1(t *testing.T) { testGetCHTProofs(t, 1) }
|
||||
func TestGetCHTProofsLes2(t *testing.T) { testGetCHTProofs(t, 2) }
|
||||
|
||||
func testGetCHTProofs(t *testing.T, protocol int) {
|
||||
// Figure out the client's CHT frequency
|
||||
frequency := uint64(light.CHTFrequencyClient)
|
||||
if protocol == 1 {
|
||||
frequency = uint64(light.CHTFrequencyServer)
|
||||
}
|
||||
if resp.ReqID != 42 {
|
||||
t.Errorf("ReqID mismatch")
|
||||
// Assemble the test environment
|
||||
db, _ := ethdb.NewMemDatabase()
|
||||
pm := newTestProtocolManagerMust(t, false, int(frequency)+light.HelperTrieProcessConfirmations, testChainGen, nil, nil, db)
|
||||
bc := pm.blockchain.(*core.BlockChain)
|
||||
peer, _ := newTestPeer(t, "peer", protocol, pm, true)
|
||||
defer peer.close()
|
||||
|
||||
// Wait a while for the CHT indexer to process the new headers
|
||||
time.Sleep(100 * time.Millisecond * time.Duration(frequency/light.CHTFrequencyServer)) // Chain indexer throttling
|
||||
time.Sleep(250 * time.Millisecond) // CI tester slack
|
||||
|
||||
// Assemble the proofs from the different protocols
|
||||
header := bc.GetHeaderByNumber(frequency)
|
||||
rlp, _ := rlp.EncodeToBytes(header)
|
||||
|
||||
key := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(key, frequency)
|
||||
|
||||
proofsV1 := []ChtResp{{
|
||||
Header: header,
|
||||
}}
|
||||
proofsV2 := HelperTrieResps{
|
||||
AuxData: [][]byte{rlp},
|
||||
}
|
||||
if resp.BV != testBufLimit {
|
||||
t.Errorf("BV mismatch")
|
||||
switch protocol {
|
||||
case 1:
|
||||
root := light.GetChtRoot(db, 0, bc.GetHeaderByNumber(frequency-1).Hash())
|
||||
trie, _ := trie.New(root, trie.NewDatabase(ethdb.NewTable(db, light.ChtTablePrefix)))
|
||||
|
||||
var proof light.NodeList
|
||||
trie.Prove(key, 0, &proof)
|
||||
proofsV1[0].Proof = proof
|
||||
|
||||
case 2:
|
||||
root := light.GetChtV2Root(db, 0, bc.GetHeaderByNumber(frequency-1).Hash())
|
||||
trie, _ := trie.New(root, trie.NewDatabase(ethdb.NewTable(db, light.ChtTablePrefix)))
|
||||
trie.Prove(key, 0, &proofsV2.Proofs)
|
||||
}
|
||||
// Assemble the requests for the different protocols
|
||||
requestsV1 := []ChtReq{{
|
||||
ChtNum: 1,
|
||||
BlockNum: frequency,
|
||||
}}
|
||||
requestsV2 := []HelperTrieReq{{
|
||||
Type: htCanonical,
|
||||
TrieIdx: 0,
|
||||
Key: key,
|
||||
AuxReq: auxHeader,
|
||||
}}
|
||||
// Send the proof request and verify the response
|
||||
switch protocol {
|
||||
case 1:
|
||||
cost := peer.GetRequestCost(GetHeaderProofsMsg, len(requestsV1))
|
||||
sendRequest(peer.app, GetHeaderProofsMsg, 42, cost, requestsV1)
|
||||
if err := expectResponse(peer.app, HeaderProofsMsg, 42, testBufLimit, proofsV1); err != nil {
|
||||
t.Errorf("proofs mismatch: %v", err)
|
||||
}
|
||||
case 2:
|
||||
cost := peer.GetRequestCost(GetHelperTrieProofsMsg, len(requestsV2))
|
||||
sendRequest(peer.app, GetHelperTrieProofsMsg, 42, cost, requestsV2)
|
||||
if err := expectResponse(peer.app, HelperTrieProofsMsg, 42, testBufLimit, proofsV2); err != nil {
|
||||
t.Errorf("proofs mismatch: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that bloombits proofs can be correctly retrieved.
|
||||
func TestGetBloombitsProofs(t *testing.T) {
|
||||
// Assemble the test environment
|
||||
db, _ := ethdb.NewMemDatabase()
|
||||
pm := newTestProtocolManagerMust(t, false, light.BloomTrieFrequency+256, testChainGen, nil, nil, db)
|
||||
bc := pm.blockchain.(*core.BlockChain)
|
||||
peer, _ := newTestPeer(t, "peer", 2, pm, true)
|
||||
defer peer.close()
|
||||
|
||||
// Wait a while for the bloombits indexer to process the new headers
|
||||
time.Sleep(100 * time.Millisecond * time.Duration(light.BloomTrieFrequency/4096)) // Chain indexer throttling
|
||||
time.Sleep(250 * time.Millisecond) // CI tester slack
|
||||
|
||||
// Request and verify each bit of the bloom bits proofs
|
||||
for bit := 0; bit < 2048; bit++ {
|
||||
// Assemble the request and proofs for the bloombits
|
||||
key := make([]byte, 10)
|
||||
|
||||
binary.BigEndian.PutUint16(key[:2], uint16(bit))
|
||||
binary.BigEndian.PutUint64(key[2:], uint64(light.BloomTrieFrequency))
|
||||
|
||||
requests := []HelperTrieReq{{
|
||||
Type: htBloomBits,
|
||||
TrieIdx: 0,
|
||||
Key: key,
|
||||
}}
|
||||
var proofs HelperTrieResps
|
||||
|
||||
root := light.GetBloomTrieRoot(db, 0, bc.GetHeaderByNumber(light.BloomTrieFrequency-1).Hash())
|
||||
trie, _ := trie.New(root, trie.NewDatabase(ethdb.NewTable(db, light.BloomTrieTablePrefix)))
|
||||
trie.Prove(key, 0, &proofs.Proofs)
|
||||
|
||||
// Send the proof request and verify the response
|
||||
cost := peer.GetRequestCost(GetHelperTrieProofsMsg, len(requests))
|
||||
sendRequest(peer.app, GetHelperTrieProofsMsg, 42, cost, requests)
|
||||
if err := expectResponse(peer.app, HelperTrieProofsMsg, 42, testBufLimit, proofs); err != nil {
|
||||
t.Errorf("bit %d: proofs mismatch: %v", bit, err)
|
||||
}
|
||||
testCheckProof(t, proofsV2, resp.Data)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -31,6 +31,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/eth"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/ethereum/go-ethereum/les/flowcontrol"
|
||||
@ -55,6 +56,9 @@ var (
|
||||
testContractCodeDeployed = testContractCode[16:]
|
||||
testContractDeployed = uint64(2)
|
||||
|
||||
testEventEmitterCode = common.Hex2Bytes("60606040523415600e57600080fd5b7f57050ab73f6b9ebdd9f76b8d4997793f48cf956e965ee070551b9ca0bb71584e60405160405180910390a160358060476000396000f3006060604052600080fd00a165627a7a723058203f727efcad8b5811f8cb1fc2620ce5e8c63570d697aef968172de296ea3994140029")
|
||||
testEventEmitterAddr common.Address
|
||||
|
||||
testBufLimit = uint64(100)
|
||||
)
|
||||
|
||||
@ -85,15 +89,19 @@ func testChainGen(i int, block *core.BlockGen) {
|
||||
// In block 2, the test bank sends some more ether to account #1.
|
||||
// acc1Addr passes it on to account #2.
|
||||
// acc1Addr creates a test contract.
|
||||
tx1, _ := types.SignTx(types.NewTransaction(block.TxNonce(testBankAddress), acc1Addr, big.NewInt(1000), params.TxGas, nil, nil), signer, testBankKey)
|
||||
// acc1Addr creates a test event.
|
||||
nonce := block.TxNonce(acc1Addr)
|
||||
|
||||
tx1, _ := types.SignTx(types.NewTransaction(block.TxNonce(testBankAddress), acc1Addr, big.NewInt(1000), params.TxGas, nil, nil), signer, testBankKey)
|
||||
tx2, _ := types.SignTx(types.NewTransaction(nonce, acc2Addr, big.NewInt(1000), params.TxGas, nil, nil), signer, acc1Key)
|
||||
nonce++
|
||||
tx3, _ := types.SignTx(types.NewContractCreation(nonce, big.NewInt(0), 200000, big.NewInt(0), testContractCode), signer, acc1Key)
|
||||
testContractAddr = crypto.CreateAddress(acc1Addr, nonce)
|
||||
tx3, _ := types.SignTx(types.NewContractCreation(nonce+1, big.NewInt(0), 200000, big.NewInt(0), testContractCode), signer, acc1Key)
|
||||
testContractAddr = crypto.CreateAddress(acc1Addr, nonce+1)
|
||||
tx4, _ := types.SignTx(types.NewContractCreation(nonce+2, big.NewInt(0), 200000, big.NewInt(0), testEventEmitterCode), signer, acc1Key)
|
||||
testEventEmitterAddr = crypto.CreateAddress(acc1Addr, nonce+2)
|
||||
block.AddTx(tx1)
|
||||
block.AddTx(tx2)
|
||||
block.AddTx(tx3)
|
||||
block.AddTx(tx4)
|
||||
case 2:
|
||||
// Block 3 is empty but was mined by account #2.
|
||||
block.SetCoinbase(acc2Addr)
|
||||
@ -147,6 +155,16 @@ func newTestProtocolManager(lightSync bool, blocks int, generator func(int, *cor
|
||||
chain, _ = light.NewLightChain(odr, gspec.Config, engine)
|
||||
} else {
|
||||
blockchain, _ := core.NewBlockChain(db, nil, gspec.Config, engine, vm.Config{})
|
||||
|
||||
chtIndexer := light.NewChtIndexer(db, false)
|
||||
chtIndexer.Start(blockchain)
|
||||
|
||||
bbtIndexer := light.NewBloomTrieIndexer(db, false)
|
||||
|
||||
bloomIndexer := eth.NewBloomIndexer(db, params.BloomBitsBlocks)
|
||||
bloomIndexer.AddChildIndexer(bbtIndexer)
|
||||
bloomIndexer.Start(blockchain)
|
||||
|
||||
gchain, _ := core.GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, blocks, generator)
|
||||
if _, err := blockchain.InsertChain(gchain); err != nil {
|
||||
panic(err)
|
||||
|
@ -321,7 +321,7 @@ const (
|
||||
)
|
||||
|
||||
type HelperTrieReq struct {
|
||||
HelperTrieType uint
|
||||
Type uint
|
||||
TrieIdx uint64
|
||||
Key []byte
|
||||
FromLevel, AuxReq uint
|
||||
@ -365,7 +365,7 @@ func (r *ChtRequest) CanSend(peer *peer) bool {
|
||||
peer.lock.RLock()
|
||||
defer peer.lock.RUnlock()
|
||||
|
||||
return peer.headInfo.Number >= light.HelperTrieConfirmations && r.ChtNum <= (peer.headInfo.Number-light.HelperTrieConfirmations)/light.ChtFrequency
|
||||
return peer.headInfo.Number >= light.HelperTrieConfirmations && r.ChtNum <= (peer.headInfo.Number-light.HelperTrieConfirmations)/light.CHTFrequencyClient
|
||||
}
|
||||
|
||||
// Request sends an ODR request to the LES network (implementation of LesOdrRequest)
|
||||
@ -374,7 +374,7 @@ func (r *ChtRequest) Request(reqID uint64, peer *peer) error {
|
||||
var encNum [8]byte
|
||||
binary.BigEndian.PutUint64(encNum[:], r.BlockNum)
|
||||
req := HelperTrieReq{
|
||||
HelperTrieType: htCanonical,
|
||||
Type: htCanonical,
|
||||
TrieIdx: r.ChtNum,
|
||||
Key: encNum[:],
|
||||
AuxReq: auxHeader,
|
||||
@ -493,12 +493,12 @@ func (r *BloomRequest) Request(reqID uint64, peer *peer) error {
|
||||
reqs := make([]HelperTrieReq, len(r.SectionIdxList))
|
||||
|
||||
var encNumber [10]byte
|
||||
binary.BigEndian.PutUint16(encNumber[0:2], uint16(r.BitIdx))
|
||||
binary.BigEndian.PutUint16(encNumber[:2], uint16(r.BitIdx))
|
||||
|
||||
for i, sectionIdx := range r.SectionIdxList {
|
||||
binary.BigEndian.PutUint64(encNumber[2:10], sectionIdx)
|
||||
binary.BigEndian.PutUint64(encNumber[2:], sectionIdx)
|
||||
reqs[i] = HelperTrieReq{
|
||||
HelperTrieType: htBloomBits,
|
||||
Type: htBloomBits,
|
||||
TrieIdx: r.BloomTrieNum,
|
||||
Key: common.CopyBytes(encNumber[:]),
|
||||
}
|
||||
@ -525,10 +525,10 @@ func (r *BloomRequest) Validate(db ethdb.Database, msg *Msg) error {
|
||||
|
||||
// Verify the proofs
|
||||
var encNumber [10]byte
|
||||
binary.BigEndian.PutUint16(encNumber[0:2], uint16(r.BitIdx))
|
||||
binary.BigEndian.PutUint16(encNumber[:2], uint16(r.BitIdx))
|
||||
|
||||
for i, idx := range r.SectionIdxList {
|
||||
binary.BigEndian.PutUint64(encNumber[2:10], idx)
|
||||
binary.BigEndian.PutUint64(encNumber[2:], idx)
|
||||
value, err, _ := trie.VerifyProof(r.BloomTrieRoot, encNumber[:], reads)
|
||||
if err != nil {
|
||||
return err
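Each bloom trie lookup key is ten bytes: a big-endian uint16 bit index followed by a big-endian uint64 section index, which is why the request and validation hunks above slice encNumber as [:2] and [2:]. A tiny worked example of the encoding:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	var key [10]byte
	bitIdx := uint16(37)    // which bloom bit column
	sectionIdx := uint64(4) // which section of the bloom trie

	binary.BigEndian.PutUint16(key[:2], bitIdx)
	binary.BigEndian.PutUint64(key[2:], sectionIdx)

	fmt.Printf("% x\n", key) // 00 25 00 00 00 00 00 00 00 04
}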
|
||||
|
@ -281,7 +281,6 @@ func (p *peer) RequestProofs(reqID, cost uint64, reqs []ProofReq) error {
|
||||
default:
|
||||
panic(nil)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// RequestHelperTrieProofs fetches a batch of HelperTrie merkle proofs from a remote node.
|
||||
@ -291,12 +290,12 @@ func (p *peer) RequestHelperTrieProofs(reqID, cost uint64, reqs []HelperTrieReq)
|
||||
case lpv1:
|
||||
reqsV1 := make([]ChtReq, len(reqs))
|
||||
for i, req := range reqs {
|
||||
if req.HelperTrieType != htCanonical || req.AuxReq != auxHeader || len(req.Key) != 8 {
|
||||
if req.Type != htCanonical || req.AuxReq != auxHeader || len(req.Key) != 8 {
|
||||
return fmt.Errorf("Request invalid in LES/1 mode")
|
||||
}
|
||||
blockNum := binary.BigEndian.Uint64(req.Key)
|
||||
// convert HelperTrie request to old CHT request
|
||||
reqsV1[i] = ChtReq{ChtNum: (req.TrieIdx + 1) * (light.ChtFrequency / light.ChtV1Frequency), BlockNum: blockNum, FromLevel: req.FromLevel}
|
||||
reqsV1[i] = ChtReq{ChtNum: (req.TrieIdx + 1) * (light.CHTFrequencyClient / light.CHTFrequencyServer), BlockNum: blockNum, FromLevel: req.FromLevel}
|
||||
}
|
||||
return sendRequest(p.rw, GetHeaderProofsMsg, reqID, cost, reqsV1)
|
||||
case lpv2:
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
|
@ -20,7 +20,6 @@ package les
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"math"
|
||||
"sync"
|
||||
|
||||
@ -73,23 +72,22 @@ func NewLesServer(eth *eth.Ethereum, config *eth.Config) (*LesServer, error) {
|
||||
logger := log.New()
|
||||
|
||||
chtV1SectionCount, _, _ := srv.chtIndexer.Sections() // indexer still uses LES/1 4k section size for backwards server compatibility
|
||||
chtV2SectionCount := chtV1SectionCount / (light.ChtFrequency / light.ChtV1Frequency)
|
||||
chtV2SectionCount := chtV1SectionCount / (light.CHTFrequencyClient / light.CHTFrequencyServer)
|
||||
if chtV2SectionCount != 0 {
|
||||
// convert to LES/2 section
|
||||
chtLastSection := chtV2SectionCount - 1
|
||||
// convert last LES/2 section index back to LES/1 index for chtIndexer.SectionHead
|
||||
chtLastSectionV1 := (chtLastSection+1)*(light.ChtFrequency/light.ChtV1Frequency) - 1
|
||||
chtLastSectionV1 := (chtLastSection+1)*(light.CHTFrequencyClient/light.CHTFrequencyServer) - 1
|
||||
chtSectionHead := srv.chtIndexer.SectionHead(chtLastSectionV1)
|
||||
chtRoot := light.GetChtV2Root(pm.chainDb, chtLastSection, chtSectionHead)
|
||||
logger.Info("CHT", "section", chtLastSection, "sectionHead", fmt.Sprintf("%064x", chtSectionHead), "root", fmt.Sprintf("%064x", chtRoot))
|
||||
logger.Info("Loaded CHT", "section", chtLastSection, "head", chtSectionHead, "root", chtRoot)
|
||||
}
|
||||
|
||||
bloomTrieSectionCount, _, _ := srv.bloomTrieIndexer.Sections()
|
||||
if bloomTrieSectionCount != 0 {
|
||||
bloomTrieLastSection := bloomTrieSectionCount - 1
|
||||
bloomTrieSectionHead := srv.bloomTrieIndexer.SectionHead(bloomTrieLastSection)
|
||||
bloomTrieRoot := light.GetBloomTrieRoot(pm.chainDb, bloomTrieLastSection, bloomTrieSectionHead)
|
||||
logger.Info("BloomTrie", "section", bloomTrieLastSection, "sectionHead", fmt.Sprintf("%064x", bloomTrieSectionHead), "root", fmt.Sprintf("%064x", bloomTrieRoot))
|
||||
logger.Info("Loaded bloom trie", "section", bloomTrieLastSection, "head", bloomTrieSectionHead, "root", bloomTrieRoot)
|
||||
}
|
||||
|
||||
srv.chtIndexer.Start(eth.BlockChain())
|
||||
@ -111,6 +109,7 @@ func (s *LesServer) Protocols() []p2p.Protocol {
|
||||
// Start starts the LES server
|
||||
func (s *LesServer) Start(srvr *p2p.Server) {
|
||||
s.protocolManager.Start(s.config.LightPeers)
|
||||
if srvr.DiscV5 != nil {
|
||||
for _, topic := range s.lesTopics {
|
||||
topic := topic
|
||||
go func() {
|
||||
@ -121,6 +120,7 @@ func (s *LesServer) Start(srvr *p2p.Server) {
|
||||
srvr.DiscV5.RegisterTopic(topic, s.quitSync)
|
||||
}()
|
||||
}
|
||||
}
|
||||
s.privateKey = srvr.PrivateKey
|
||||
s.protocolManager.blockLoop()
|
||||
}
|
||||
|
@ -100,7 +100,6 @@ func NewLightChain(odr OdrBackend, config *params.ChainConfig, engine consensus.
|
||||
if cp, ok := trustedCheckpoints[bc.genesisBlock.Hash()]; ok {
|
||||
bc.addTrustedCheckpoint(cp)
|
||||
}
|
||||
|
||||
if err := bc.loadLastState(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -128,7 +127,7 @@ func (self *LightChain) addTrustedCheckpoint(cp trustedCheckpoint) {
|
||||
if self.odr.BloomIndexer() != nil {
|
||||
self.odr.BloomIndexer().AddKnownSectionHead(cp.sectionIdx, cp.sectionHead)
|
||||
}
|
||||
log.Info("Added trusted checkpoint", "chain name", cp.name)
|
||||
log.Info("Added trusted checkpoint", "chain", cp.name, "block", (cp.sectionIdx+1)*CHTFrequencyClient-1, "hash", cp.sectionHead)
|
||||
}
|
||||
|
||||
func (self *LightChain) getProcInterrupt() bool {
|
||||
@ -454,8 +453,8 @@ func (self *LightChain) SyncCht(ctx context.Context) bool {
|
||||
}
|
||||
headNum := self.CurrentHeader().Number.Uint64()
|
||||
chtCount, _, _ := self.odr.ChtIndexer().Sections()
|
||||
if headNum+1 < chtCount*ChtFrequency {
|
||||
num := chtCount*ChtFrequency - 1
|
||||
if headNum+1 < chtCount*CHTFrequencyClient {
|
||||
num := chtCount*CHTFrequencyClient - 1
|
||||
header, err := GetHeaderByNumber(ctx, self.odr, num)
|
||||
if header != nil && err == nil {
|
||||
self.mu.Lock()
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2014 The go-ethereum Authors
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
@ -29,7 +29,9 @@ import (
// NodeSet stores a set of trie nodes. It implements trie.Database and can also
// act as a cache for another trie.Database.
type NodeSet struct {
db map[string][]byte
nodes map[string][]byte
order []string

dataSize int
lock sync.RWMutex
}
@ -37,7 +39,7 @@ type NodeSet struct {
|
||||
// NewNodeSet creates an empty node set
|
||||
func NewNodeSet() *NodeSet {
|
||||
return &NodeSet{
|
||||
db: make(map[string][]byte),
|
||||
nodes: make(map[string][]byte),
|
||||
}
|
||||
}
|
||||
|
||||
@ -46,10 +48,15 @@ func (db *NodeSet) Put(key []byte, value []byte) error {
|
||||
db.lock.Lock()
|
||||
defer db.lock.Unlock()
|
||||
|
||||
if _, ok := db.db[string(key)]; !ok {
|
||||
db.db[string(key)] = common.CopyBytes(value)
|
||||
db.dataSize += len(value)
|
||||
if _, ok := db.nodes[string(key)]; ok {
|
||||
return nil
|
||||
}
|
||||
keystr := string(key)
|
||||
|
||||
db.nodes[keystr] = common.CopyBytes(value)
|
||||
db.order = append(db.order, keystr)
|
||||
db.dataSize += len(value)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -58,7 +65,7 @@ func (db *NodeSet) Get(key []byte) ([]byte, error) {
|
||||
db.lock.RLock()
|
||||
defer db.lock.RUnlock()
|
||||
|
||||
if entry, ok := db.db[string(key)]; ok {
|
||||
if entry, ok := db.nodes[string(key)]; ok {
|
||||
return entry, nil
|
||||
}
|
||||
return nil, errors.New("not found")
|
||||
@ -75,7 +82,7 @@ func (db *NodeSet) KeyCount() int {
|
||||
db.lock.RLock()
|
||||
defer db.lock.RUnlock()
|
||||
|
||||
return len(db.db)
|
||||
return len(db.nodes)
|
||||
}
|
||||
|
||||
// DataSize returns the aggregated data size of nodes in the set
|
||||
@ -92,8 +99,8 @@ func (db *NodeSet) NodeList() NodeList {
defer db.lock.RUnlock()

var values NodeList
for _, value := range db.db {
values = append(values, value)
for _, key := range db.order {
values = append(values, db.nodes[key])
}
return values
}
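Ranging over db.order instead of the map makes NodeList deterministic: Go randomizes map iteration order, so the previous loop could emit the stored trie nodes in a different order on every call, while the new loop replays insertion order (presumably so proofs serialize in the order they were recorded). A tiny stand-alone illustration of the difference, assuming nothing beyond the standard library:

package main

import "fmt"

func main() {
	m := map[string][]byte{}
	var order []string
	for _, k := range []string{"c", "a", "b"} {
		m[k] = []byte(k)
		order = append(order, k)
	}
	// Ranging over the map may print the keys in any order from run to run.
	for k := range m {
		fmt.Print(k, " ")
	}
	fmt.Println()
	// Ranging over the recorded insertion order is stable: always "c a b".
	for _, k := range order {
		fmt.Print(k, " ")
	}
	fmt.Println()
}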
@ -103,7 +110,7 @@ func (db *NodeSet) Store(target ethdb.Putter) {
|
||||
db.lock.RLock()
|
||||
defer db.lock.RUnlock()
|
||||
|
||||
for key, value := range db.db {
|
||||
for key, value := range db.nodes {
|
||||
target.Put([]byte(key), value)
|
||||
}
|
||||
}
|
||||
|
@ -52,23 +52,20 @@ func GetHeaderByNumber(ctx context.Context, odr OdrBackend, number uint64) (*typ
|
||||
for chtCount > 0 && canonicalHash != sectionHead && canonicalHash != (common.Hash{}) {
|
||||
chtCount--
|
||||
if chtCount > 0 {
|
||||
sectionHeadNum = chtCount*ChtFrequency - 1
|
||||
sectionHeadNum = chtCount*CHTFrequencyClient - 1
|
||||
sectionHead = odr.ChtIndexer().SectionHead(chtCount - 1)
|
||||
canonicalHash = core.GetCanonicalHash(db, sectionHeadNum)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if number >= chtCount*ChtFrequency {
|
||||
if number >= chtCount*CHTFrequencyClient {
|
||||
return nil, ErrNoTrustedCht
|
||||
}
|
||||
|
||||
r := &ChtRequest{ChtRoot: GetChtRoot(db, chtCount-1, sectionHead), ChtNum: chtCount - 1, BlockNum: number}
|
||||
if err := odr.Retrieve(ctx, r); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return r.Header, nil
|
||||
}
|
||||
return r.Header, nil
|
||||
}
|
||||
|
||||
func GetCanonicalHash(ctx context.Context, odr OdrBackend, number uint64) (common.Hash, error) {
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
@ -19,7 +19,6 @@ package light
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"time"
|
||||
|
||||
@ -35,8 +34,14 @@ import (
)

const (
ChtFrequency = 32768
ChtV1Frequency = 4096 // as long as we want to retain LES/1 compatibility, servers generate CHTs with the old, higher frequency
// CHTFrequencyClient is the block frequency for creating CHTs on the client side.
CHTFrequencyClient = 32768

// CHTFrequencyServer is the block frequency for creating CHTs on the server side.
// Eventually this can be merged back with the client version, but that requires a
// full database upgrade, so that should be left for a suitable moment.
CHTFrequencyServer = 4096

HelperTrieConfirmations = 2048 // number of confirmations before a server is expected to have the given HelperTrie available
HelperTrieProcessConfirmations = 256 // number of confirmations before a HelperTrie is generated
)
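The two frequencies above fix an 8:1 ratio between client-side (LES/2) and server-side (LES/1) CHT sections, and the rest of this change repeatedly converts between the two index spaces. A minimal, self-contained sketch of that conversion, using local stand-ins for light.CHTFrequencyClient and light.CHTFrequencyServer:

package main

import "fmt"

const (
	chtFrequencyClient = 32768 // stand-in for light.CHTFrequencyClient (LES/2 section size)
	chtFrequencyServer = 4096  // stand-in for light.CHTFrequencyServer (LES/1 section size)
)

// clientToServerSection maps a LES/2 (client) CHT section index to the last
// LES/1 (server) section index it covers, mirroring the
// (idx+1)*(CHTFrequencyClient/CHTFrequencyServer)-1 formula used in this change.
func clientToServerSection(clientIdx uint64) uint64 {
	return (clientIdx+1)*(chtFrequencyClient/chtFrequencyServer) - 1
}

func main() {
	fmt.Println(clientToServerSection(0)) // 7: the first client section spans server sections 0..7
	fmt.Println(clientToServerSection(2)) // 23
}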
@ -52,19 +57,19 @@ type trustedCheckpoint struct {
|
||||
|
||||
var (
|
||||
mainnetCheckpoint = trustedCheckpoint{
|
||||
name: "ETH mainnet",
|
||||
sectionIdx: 150,
|
||||
sectionHead: common.HexToHash("1e2e67f289565cbe7bd4367f7960dbd73a3f7c53439e1047cd7ba331c8109e39"),
|
||||
chtRoot: common.HexToHash("f2a6c9ca143d647b44523cc249f1072c8912358ab873a77a5fdc792b8df99e80"),
|
||||
bloomTrieRoot: common.HexToHash("c018952fa1513c97857e79fbb9a37acaf8432d5b85e52a78eca7dff5fd5900ee"),
|
||||
name: "mainnet",
|
||||
sectionIdx: 153,
|
||||
sectionHead: common.HexToHash("04c2114a8cbe49ba5c37a03cc4b4b8d3adfc0bd2c78e0e726405dd84afca1d63"),
|
||||
chtRoot: common.HexToHash("d7ec603e5d30b567a6e894ee7704e4603232f206d3e5a589794cec0c57bf318e"),
|
||||
bloomTrieRoot: common.HexToHash("0b139b8fb692e21f663ff200da287192201c28ef5813c1ac6ba02a0a4799eef9"),
|
||||
}
|
||||
|
||||
ropstenCheckpoint = trustedCheckpoint{
|
||||
name: "Ropsten testnet",
|
||||
sectionIdx: 75,
|
||||
sectionHead: common.HexToHash("12e68324f4578ea3e8e7fb3968167686729396c9279287fa1f1a8b51bb2d05b4"),
|
||||
chtRoot: common.HexToHash("3e51dc095c69fa654a4cac766e0afff7357515b4b3c3a379c675f810363e54be"),
|
||||
bloomTrieRoot: common.HexToHash("33e3a70b33c1d73aa698d496a80615e98ed31fa8f56969876180553b32333339"),
|
||||
name: "ropsten",
|
||||
sectionIdx: 79,
|
||||
sectionHead: common.HexToHash("1b1ba890510e06411fdee9bb64ca7705c56a1a4ce3559ddb34b3680c526cb419"),
|
||||
chtRoot: common.HexToHash("71d60207af74e5a22a3e1cfbfc89f9944f91b49aa980c86fba94d568369eaf44"),
|
||||
bloomTrieRoot: common.HexToHash("70aca4b3b6d08dde8704c95cedb1420394453c1aec390947751e69ff8c436360"),
|
||||
}
|
||||
)
|
||||
|
||||
@ -100,7 +105,7 @@ func GetChtRoot(db ethdb.Database, sectionIdx uint64, sectionHead common.Hash) c
|
||||
// GetChtV2Root reads the CHT root associated with the given section from the database
|
||||
// Note that sectionIdx is specified according to LES/2 CHT section size
|
||||
func GetChtV2Root(db ethdb.Database, sectionIdx uint64, sectionHead common.Hash) common.Hash {
|
||||
return GetChtRoot(db, (sectionIdx+1)*(ChtFrequency/ChtV1Frequency)-1, sectionHead)
|
||||
return GetChtRoot(db, (sectionIdx+1)*(CHTFrequencyClient/CHTFrequencyServer)-1, sectionHead)
|
||||
}
|
||||
|
||||
// StoreChtRoot writes the CHT root associated with the given section into the database
|
||||
@ -124,10 +129,10 @@ type ChtIndexerBackend struct {
|
||||
func NewChtIndexer(db ethdb.Database, clientMode bool) *core.ChainIndexer {
|
||||
var sectionSize, confirmReq uint64
|
||||
if clientMode {
|
||||
sectionSize = ChtFrequency
|
||||
sectionSize = CHTFrequencyClient
|
||||
confirmReq = HelperTrieConfirmations
|
||||
} else {
|
||||
sectionSize = ChtV1Frequency
|
||||
sectionSize = CHTFrequencyServer
|
||||
confirmReq = HelperTrieProcessConfirmations
|
||||
}
|
||||
idb := ethdb.NewTable(db, "chtIndex-")
|
||||
@ -174,8 +179,8 @@ func (c *ChtIndexerBackend) Commit() error {
|
||||
}
|
||||
c.triedb.Commit(root, false)
|
||||
|
||||
if ((c.section+1)*c.sectionSize)%ChtFrequency == 0 {
|
||||
log.Info("Storing CHT", "idx", c.section*c.sectionSize/ChtFrequency, "sectionHead", fmt.Sprintf("%064x", c.lastHash), "root", fmt.Sprintf("%064x", root))
|
||||
if ((c.section+1)*c.sectionSize)%CHTFrequencyClient == 0 {
|
||||
log.Info("Storing CHT", "section", c.section*c.sectionSize/CHTFrequencyClient, "head", c.lastHash, "root", root)
|
||||
}
|
||||
StoreChtRoot(c.diskdb, c.section, c.lastHash, root)
|
||||
return nil
|
||||
@ -294,7 +299,7 @@ func (b *BloomTrieIndexerBackend) Commit() error {
|
||||
b.triedb.Commit(root, false)
|
||||
|
||||
sectionHead := b.sectionHeads[b.bloomTrieRatio-1]
|
||||
log.Info("Storing BloomTrie", "section", b.section, "sectionHead", fmt.Sprintf("%064x", sectionHead), "root", fmt.Sprintf("%064x", root), "compression ratio", float64(compSize)/float64(decompSize))
|
||||
log.Info("Storing bloom trie", "section", b.section, "head", sectionHead, "root", root, "compression", float64(compSize)/float64(decompSize))
|
||||
StoreBloomTrieRoot(b.diskdb, b.section, sectionHead, root)
|
||||
|
||||
return nil
|
||||
|
@ -154,12 +154,20 @@ func (c *BoundContract) GetDeployer() *Transaction {
|
||||
// Call invokes the (constant) contract method with params as input values and
|
||||
// sets the output to result.
|
||||
func (c *BoundContract) Call(opts *CallOpts, out *Interfaces, method string, args *Interfaces) error {
|
||||
if len(out.objects) == 1 {
|
||||
result := out.objects[0]
|
||||
if err := c.contract.Call(&opts.opts, result, method, args.objects...); err != nil {
|
||||
return err
|
||||
}
|
||||
out.objects[0] = result
|
||||
} else {
|
||||
results := make([]interface{}, len(out.objects))
|
||||
copy(results, out.objects)
|
||||
if err := c.contract.Call(&opts.opts, &results, method, args.objects...); err != nil {
|
||||
return err
|
||||
}
|
||||
copy(out.objects, results)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
12
node/api.go
@ -114,7 +114,7 @@ func (api *PrivateAdminAPI) PeerEvents(ctx context.Context) (*rpc.Subscription,
|
||||
}
|
||||
|
||||
// StartRPC starts the HTTP RPC API server.
|
||||
func (api *PrivateAdminAPI) StartRPC(host *string, port *int, cors *string, apis *string) (bool, error) {
|
||||
func (api *PrivateAdminAPI) StartRPC(host *string, port *int, cors *string, apis *string, vhosts *string) (bool, error) {
|
||||
api.node.lock.Lock()
|
||||
defer api.node.lock.Unlock()
|
||||
|
||||
@ -141,6 +141,14 @@ func (api *PrivateAdminAPI) StartRPC(host *string, port *int, cors *string, apis
|
||||
}
|
||||
}
|
||||
|
||||
allowedVHosts := api.node.config.HTTPVirtualHosts
|
||||
if vhosts != nil {
|
||||
allowedVHosts = nil
|
||||
for _, vhost := range strings.Split(*host, ",") {
|
||||
allowedVHosts = append(allowedVHosts, strings.TrimSpace(vhost))
|
||||
}
|
||||
}
|
||||
|
||||
modules := api.node.httpWhitelist
|
||||
if apis != nil {
|
||||
modules = nil
|
||||
@ -149,7 +157,7 @@ func (api *PrivateAdminAPI) StartRPC(host *string, port *int, cors *string, apis
|
||||
}
|
||||
}
|
||||
|
||||
if err := api.node.startHTTP(fmt.Sprintf("%s:%d", *host, *port), api.node.rpcAPIs, modules, allowedOrigins); err != nil {
|
||||
if err := api.node.startHTTP(fmt.Sprintf("%s:%d", *host, *port), api.node.rpcAPIs, modules, allowedOrigins, allowedVHosts); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
|
@ -105,6 +105,15 @@ type Config struct {
// useless for custom HTTP clients.
HTTPCors []string `toml:",omitempty"`

// HTTPVirtualHosts is the list of virtual hostnames which are allowed on incoming requests.
// This is by default {'localhost'}. Using this prevents attacks like
// DNS rebinding, which bypasses SOP by simply masquerading as being within the same
// origin. These attacks do not utilize CORS, since they are not cross-domain.
// By explicitly checking the Host-header, the server will not allow requests
// made against the server with a malicious host domain.
// Requests using ip address directly are not affected
HTTPVirtualHosts []string `toml:",omitempty"`

// HTTPModules is a list of API modules to expose via the HTTP RPC interface.
// If the module list is empty, all RPC API endpoints designated public will be
// exposed.
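The HTTPVirtualHosts comment above describes the attack this field defends against: DNS rebinding points an attacker-controlled hostname at the local node, so the RPC server must reject requests whose Host header is not on the whitelist. A rough sketch of such a check, assuming nothing beyond the standard library (hostAllowed is an illustrative helper, not the rpc package's actual implementation):

package main

import (
	"fmt"
	"net"
	"strings"
)

// hostAllowed reports whether the HTTP Host header matches one of the
// configured virtual hosts. "*" allows everything; bare IP addresses are
// always accepted, matching the behaviour described in the config comment.
func hostAllowed(hostHeader string, vhosts []string) bool {
	host := hostHeader
	if h, _, err := net.SplitHostPort(hostHeader); err == nil {
		host = h // strip an optional :port
	}
	if ip := net.ParseIP(host); ip != nil {
		return true // requests using an IP address directly are not affected
	}
	for _, allowed := range vhosts {
		if allowed == "*" || strings.EqualFold(allowed, host) {
			return true
		}
	}
	return false
}

func main() {
	vhosts := []string{"localhost"}
	fmt.Println(hostAllowed("localhost:8545", vhosts))    // true
	fmt.Println(hostAllowed("evil.example.com", vhosts))  // false
	fmt.Println(hostAllowed("127.0.0.1:8545", vhosts))    // true
}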
@ -137,7 +146,7 @@ type Config struct {
|
||||
WSExposeAll bool `toml:",omitempty"`
|
||||
|
||||
// Logger is a custom logger to use with the p2p.Server.
|
||||
Logger log.Logger
|
||||
Logger log.Logger `toml:",omitempty"`
|
||||
}
|
||||
|
||||
// IPCEndpoint resolves an IPC endpoint based on a configured value, taking into
|
||||
|
29
node/node.go
@ -263,7 +263,7 @@ func (n *Node) startRPC(services map[reflect.Type]Service) error {
|
||||
n.stopInProc()
|
||||
return err
|
||||
}
|
||||
if err := n.startHTTP(n.httpEndpoint, apis, n.config.HTTPModules, n.config.HTTPCors); err != nil {
|
||||
if err := n.startHTTP(n.httpEndpoint, apis, n.config.HTTPModules, n.config.HTTPCors, n.config.HTTPVirtualHosts); err != nil {
|
||||
n.stopIPC()
|
||||
n.stopInProc()
|
||||
return err
|
||||
@ -287,7 +287,7 @@ func (n *Node) startInProc(apis []rpc.API) error {
|
||||
if err := handler.RegisterName(api.Namespace, api.Service); err != nil {
|
||||
return err
|
||||
}
|
||||
n.log.Debug(fmt.Sprintf("InProc registered %T under '%s'", api.Service, api.Namespace))
|
||||
n.log.Debug("InProc registered", "service", api.Service, "namespace", api.Namespace)
|
||||
}
|
||||
n.inprocHandler = handler
|
||||
return nil
|
||||
@ -313,7 +313,7 @@ func (n *Node) startIPC(apis []rpc.API) error {
|
||||
if err := handler.RegisterName(api.Namespace, api.Service); err != nil {
|
||||
return err
|
||||
}
|
||||
n.log.Debug(fmt.Sprintf("IPC registered %T under '%s'", api.Service, api.Namespace))
|
||||
n.log.Debug("IPC registered", "service", api.Service, "namespace", api.Namespace)
|
||||
}
|
||||
// All APIs registered, start the IPC listener
|
||||
var (
|
||||
@ -324,7 +324,7 @@ func (n *Node) startIPC(apis []rpc.API) error {
|
||||
return err
|
||||
}
|
||||
go func() {
|
||||
n.log.Info(fmt.Sprintf("IPC endpoint opened: %s", n.ipcEndpoint))
|
||||
n.log.Info("IPC endpoint opened", "url", n.ipcEndpoint)
|
||||
|
||||
for {
|
||||
conn, err := listener.Accept()
|
||||
@ -337,7 +337,7 @@ func (n *Node) startIPC(apis []rpc.API) error {
|
||||
return
|
||||
}
|
||||
// Not closed, just some error; report and continue
|
||||
n.log.Error(fmt.Sprintf("IPC accept failed: %v", err))
|
||||
n.log.Error("IPC accept failed", "err", err)
|
||||
continue
|
||||
}
|
||||
go handler.ServeCodec(rpc.NewJSONCodec(conn), rpc.OptionMethodInvocation|rpc.OptionSubscriptions)
|
||||
@ -356,7 +356,7 @@ func (n *Node) stopIPC() {
|
||||
n.ipcListener.Close()
|
||||
n.ipcListener = nil
|
||||
|
||||
n.log.Info(fmt.Sprintf("IPC endpoint closed: %s", n.ipcEndpoint))
|
||||
n.log.Info("IPC endpoint closed", "endpoint", n.ipcEndpoint)
|
||||
}
|
||||
if n.ipcHandler != nil {
|
||||
n.ipcHandler.Stop()
|
||||
@ -365,7 +365,7 @@ func (n *Node) stopIPC() {
|
||||
}
|
||||
|
||||
// startHTTP initializes and starts the HTTP RPC endpoint.
|
||||
func (n *Node) startHTTP(endpoint string, apis []rpc.API, modules []string, cors []string) error {
|
||||
func (n *Node) startHTTP(endpoint string, apis []rpc.API, modules []string, cors []string, vhosts []string) error {
|
||||
// Short circuit if the HTTP endpoint isn't being exposed
|
||||
if endpoint == "" {
|
||||
return nil
|
||||
@ -382,7 +382,7 @@ func (n *Node) startHTTP(endpoint string, apis []rpc.API, modules []string, cors
|
||||
if err := handler.RegisterName(api.Namespace, api.Service); err != nil {
|
||||
return err
|
||||
}
|
||||
n.log.Debug(fmt.Sprintf("HTTP registered %T under '%s'", api.Service, api.Namespace))
|
||||
n.log.Debug("HTTP registered", "service", api.Service, "namespace", api.Namespace)
|
||||
}
|
||||
}
|
||||
// All APIs registered, start the HTTP listener
|
||||
@ -393,9 +393,8 @@ func (n *Node) startHTTP(endpoint string, apis []rpc.API, modules []string, cors
|
||||
if listener, err = net.Listen("tcp", endpoint); err != nil {
|
||||
return err
|
||||
}
|
||||
go rpc.NewHTTPServer(cors, handler).Serve(listener)
|
||||
n.log.Info(fmt.Sprintf("HTTP endpoint opened: http://%s", endpoint))
|
||||
|
||||
go rpc.NewHTTPServer(cors, vhosts, handler).Serve(listener)
|
||||
n.log.Info("HTTP endpoint opened", "url", fmt.Sprintf("http://%s", endpoint), "cors", strings.Join(cors, ","), "vhosts", strings.Join(vhosts, ","))
|
||||
// All listeners booted successfully
|
||||
n.httpEndpoint = endpoint
|
||||
n.httpListener = listener
|
||||
@ -410,7 +409,7 @@ func (n *Node) stopHTTP() {
|
||||
n.httpListener.Close()
|
||||
n.httpListener = nil
|
||||
|
||||
n.log.Info(fmt.Sprintf("HTTP endpoint closed: http://%s", n.httpEndpoint))
|
||||
n.log.Info("HTTP endpoint closed", "url", fmt.Sprintf("http://%s", n.httpEndpoint))
|
||||
}
|
||||
if n.httpHandler != nil {
|
||||
n.httpHandler.Stop()
|
||||
@ -436,7 +435,7 @@ func (n *Node) startWS(endpoint string, apis []rpc.API, modules []string, wsOrig
|
||||
if err := handler.RegisterName(api.Namespace, api.Service); err != nil {
|
||||
return err
|
||||
}
|
||||
n.log.Debug(fmt.Sprintf("WebSocket registered %T under '%s'", api.Service, api.Namespace))
|
||||
n.log.Debug("WebSocket registered", "service", api.Service, "namespace", api.Namespace)
|
||||
}
|
||||
}
|
||||
// All APIs registered, start the HTTP listener
|
||||
@ -448,7 +447,7 @@ func (n *Node) startWS(endpoint string, apis []rpc.API, modules []string, wsOrig
|
||||
return err
|
||||
}
|
||||
go rpc.NewWSServer(wsOrigins, handler).Serve(listener)
|
||||
n.log.Info(fmt.Sprintf("WebSocket endpoint opened: ws://%s", listener.Addr()))
|
||||
n.log.Info("WebSocket endpoint opened", "url", fmt.Sprintf("ws://%s", listener.Addr()))
|
||||
|
||||
// All listeners booted successfully
|
||||
n.wsEndpoint = endpoint
|
||||
@ -464,7 +463,7 @@ func (n *Node) stopWS() {
|
||||
n.wsListener.Close()
|
||||
n.wsListener = nil
|
||||
|
||||
n.log.Info(fmt.Sprintf("WebSocket endpoint closed: ws://%s", n.wsEndpoint))
|
||||
n.log.Info("WebSocket endpoint closed", "url", fmt.Sprintf("ws://%s", n.wsEndpoint))
|
||||
}
|
||||
if n.wsHandler != nil {
|
||||
n.wsHandler.Stop()
|
||||
|
13
p2p/dial.go
@ -154,6 +154,9 @@ func (s *dialstate) addStatic(n *discover.Node) {
|
||||
func (s *dialstate) removeStatic(n *discover.Node) {
|
||||
// This removes a task so future attempts to connect will not be made.
|
||||
delete(s.static, n.ID)
|
||||
// This removes a previous dial timestamp so that the application
// can force a server to reconnect with a chosen peer immediately.
|
||||
s.hist.remove(n.ID)
|
||||
}
|
||||
|
||||
func (s *dialstate) newTasks(nRunning int, peers map[discover.NodeID]*Peer, now time.Time) []task {
|
||||
@ -390,6 +393,16 @@ func (h dialHistory) min() pastDial {
|
||||
}
|
||||
func (h *dialHistory) add(id discover.NodeID, exp time.Time) {
|
||||
heap.Push(h, pastDial{id, exp})
|
||||
|
||||
}
|
||||
func (h *dialHistory) remove(id discover.NodeID) bool {
|
||||
for i, v := range *h {
|
||||
if v.id == id {
|
||||
heap.Remove(h, i)
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
func (h dialHistory) contains(id discover.NodeID) bool {
|
||||
for _, v := range h {
|
||||
|
@ -515,6 +515,50 @@ func TestDialStateStaticDial(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
// This test checks that static peers will be redialed immediately if they were re-added to a static list.
|
||||
func TestDialStaticAfterReset(t *testing.T) {
|
||||
wantStatic := []*discover.Node{
|
||||
{ID: uintID(1)},
|
||||
{ID: uintID(2)},
|
||||
}
|
||||
|
||||
rounds := []round{
|
||||
// Static dials are launched for the nodes that aren't yet connected.
|
||||
{
|
||||
peers: nil,
|
||||
new: []task{
|
||||
&dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(1)}},
|
||||
&dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(2)}},
|
||||
},
|
||||
},
|
||||
// No new dial tasks, all peers are connected.
|
||||
{
|
||||
peers: []*Peer{
|
||||
{rw: &conn{flags: staticDialedConn, id: uintID(1)}},
|
||||
{rw: &conn{flags: staticDialedConn, id: uintID(2)}},
|
||||
},
|
||||
done: []task{
|
||||
&dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(1)}},
|
||||
&dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(2)}},
|
||||
},
|
||||
new: []task{
|
||||
&waitExpireTask{Duration: 30 * time.Second},
|
||||
},
|
||||
},
|
||||
}
|
||||
dTest := dialtest{
|
||||
init: newDialState(wantStatic, nil, fakeTable{}, 0, nil),
|
||||
rounds: rounds,
|
||||
}
|
||||
runDialTest(t, dTest)
|
||||
for _, n := range wantStatic {
|
||||
dTest.init.removeStatic(n)
|
||||
dTest.init.addStatic(n)
|
||||
}
|
||||
// without removing peers they will be considered recently dialed
|
||||
runDialTest(t, dTest)
|
||||
}
|
||||
|
||||
// This test checks that past dials are not retried for some time.
|
||||
func TestDialStateCache(t *testing.T) {
|
||||
wantStatic := []*discover.Node{
|
||||
|
@ -257,7 +257,7 @@ func (db *nodeDB) expireNodes() error {
|
||||
}
|
||||
// Skip the node if not expired yet (and not self)
|
||||
if !bytes.Equal(id[:], db.self[:]) {
|
||||
if seen := db.lastPong(id); seen.After(threshold) {
|
||||
if seen := db.bondTime(id); seen.After(threshold) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
@ -278,13 +278,18 @@ func (db *nodeDB) updateLastPing(id NodeID, instance time.Time) error {
return db.storeInt64(makeKey(id, nodeDBDiscoverPing), instance.Unix())
}

// lastPong retrieves the time of the last successful contact from remote node.
func (db *nodeDB) lastPong(id NodeID) time.Time {
// bondTime retrieves the time of the last successful pong from remote node.
func (db *nodeDB) bondTime(id NodeID) time.Time {
return time.Unix(db.fetchInt64(makeKey(id, nodeDBDiscoverPong)), 0)
}

// updateLastPong updates the last time a remote node successfully contacted.
func (db *nodeDB) updateLastPong(id NodeID, instance time.Time) error {
// hasBond reports whether the given node is considered bonded.
func (db *nodeDB) hasBond(id NodeID) bool {
return time.Since(db.bondTime(id)) < nodeDBNodeExpiration
}

// updateBondTime updates the last pong time of a node.
func (db *nodeDB) updateBondTime(id NodeID, instance time.Time) error {
return db.storeInt64(makeKey(id, nodeDBDiscoverPong), instance.Unix())
}
@ -327,7 +332,7 @@ seek:
|
||||
if n.ID == db.self {
|
||||
continue seek
|
||||
}
|
||||
if now.Sub(db.lastPong(n.ID)) > maxAge {
|
||||
if now.Sub(db.bondTime(n.ID)) > maxAge {
|
||||
continue seek
|
||||
}
|
||||
for i := range nodes {
|
||||
|
@ -125,13 +125,13 @@ func TestNodeDBFetchStore(t *testing.T) {
|
||||
t.Errorf("ping: value mismatch: have %v, want %v", stored, inst)
|
||||
}
|
||||
// Check fetch/store operations on a node pong object
|
||||
if stored := db.lastPong(node.ID); stored.Unix() != 0 {
|
||||
if stored := db.bondTime(node.ID); stored.Unix() != 0 {
|
||||
t.Errorf("pong: non-existing object: %v", stored)
|
||||
}
|
||||
if err := db.updateLastPong(node.ID, inst); err != nil {
|
||||
if err := db.updateBondTime(node.ID, inst); err != nil {
|
||||
t.Errorf("pong: failed to update: %v", err)
|
||||
}
|
||||
if stored := db.lastPong(node.ID); stored.Unix() != inst.Unix() {
|
||||
if stored := db.bondTime(node.ID); stored.Unix() != inst.Unix() {
|
||||
t.Errorf("pong: value mismatch: have %v, want %v", stored, inst)
|
||||
}
|
||||
// Check fetch/store operations on a node findnode-failure object
|
||||
@ -224,8 +224,8 @@ func TestNodeDBSeedQuery(t *testing.T) {
|
||||
if err := db.updateNode(seed.node); err != nil {
|
||||
t.Fatalf("node %d: failed to insert: %v", i, err)
|
||||
}
|
||||
if err := db.updateLastPong(seed.node.ID, seed.pong); err != nil {
|
||||
t.Fatalf("node %d: failed to insert lastPong: %v", i, err)
|
||||
if err := db.updateBondTime(seed.node.ID, seed.pong); err != nil {
|
||||
t.Fatalf("node %d: failed to insert bondTime: %v", i, err)
|
||||
}
|
||||
}
|
||||
|
||||
@ -332,8 +332,8 @@ func TestNodeDBExpiration(t *testing.T) {
|
||||
if err := db.updateNode(seed.node); err != nil {
|
||||
t.Fatalf("node %d: failed to insert: %v", i, err)
|
||||
}
|
||||
if err := db.updateLastPong(seed.node.ID, seed.pong); err != nil {
|
||||
t.Fatalf("node %d: failed to update pong: %v", i, err)
|
||||
if err := db.updateBondTime(seed.node.ID, seed.pong); err != nil {
|
||||
t.Fatalf("node %d: failed to update bondTime: %v", i, err)
|
||||
}
|
||||
}
|
||||
// Expire some of them, and check the rest
|
||||
@ -365,8 +365,8 @@ func TestNodeDBSelfExpiration(t *testing.T) {
|
||||
if err := db.updateNode(seed.node); err != nil {
|
||||
t.Fatalf("node %d: failed to insert: %v", i, err)
|
||||
}
|
||||
if err := db.updateLastPong(seed.node.ID, seed.pong); err != nil {
|
||||
t.Fatalf("node %d: failed to update pong: %v", i, err)
|
||||
if err := db.updateBondTime(seed.node.ID, seed.pong); err != nil {
|
||||
t.Fatalf("node %d: failed to update bondTime: %v", i, err)
|
||||
}
|
||||
}
|
||||
// Expire the nodes and make sure self has been evacuated too
|
||||
|
@ -29,6 +29,7 @@ import (
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
@ -51,9 +52,8 @@ type Node struct {
|
||||
// with ID.
|
||||
sha common.Hash
|
||||
|
||||
// whether this node is currently being pinged in order to replace
|
||||
// it in a bucket
|
||||
contested bool
|
||||
// Time when the node was added to the table.
|
||||
addedAt time.Time
|
||||
}
|
||||
|
||||
// NewNode creates a new node. It is mostly meant to be used for
|
||||
|
@ -23,10 +23,11 @@
|
||||
package discover
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
crand "crypto/rand"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
mrand "math/rand"
|
||||
"net"
|
||||
"sort"
|
||||
"sync"
|
||||
@ -35,29 +36,45 @@ import (
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/p2p/netutil"
|
||||
)
|
||||
|
||||
const (
|
||||
alpha = 3 // Kademlia concurrency factor
|
||||
bucketSize = 16 // Kademlia bucket size
|
||||
maxReplacements = 10 // Size of per-bucket replacement list
|
||||
|
||||
// We keep buckets for the upper 1/15 of distances because
|
||||
// it's very unlikely we'll ever encounter a node that's closer.
|
||||
hashBits = len(common.Hash{}) * 8
|
||||
nBuckets = hashBits + 1 // Number of buckets
|
||||
nBuckets = hashBits / 15 // Number of buckets
|
||||
bucketMinDistance = hashBits - nBuckets // Log distance of closest bucket
|
||||
|
||||
maxBondingPingPongs = 16
|
||||
maxFindnodeFailures = 5
|
||||
// IP address limits.
|
||||
bucketIPLimit, bucketSubnet = 2, 24 // at most 2 addresses from the same /24
|
||||
tableIPLimit, tableSubnet = 10, 24
|
||||
|
||||
autoRefreshInterval = 1 * time.Hour
|
||||
maxBondingPingPongs = 16 // Limit on the number of concurrent ping/pong interactions
|
||||
maxFindnodeFailures = 5 // Nodes exceeding this limit are dropped
|
||||
|
||||
refreshInterval = 30 * time.Minute
|
||||
revalidateInterval = 10 * time.Second
|
||||
copyNodesInterval = 30 * time.Second
|
||||
seedMinTableTime = 5 * time.Minute
|
||||
seedCount = 30
|
||||
seedMaxAge = 5 * 24 * time.Hour
|
||||
)
|
||||
|
||||
type Table struct {
|
||||
mutex sync.Mutex // protects buckets, their content, and nursery
|
||||
mutex sync.Mutex // protects buckets, bucket content, nursery, rand
|
||||
buckets [nBuckets]*bucket // index of known nodes by distance
|
||||
nursery []*Node // bootstrap nodes
|
||||
db *nodeDB // database of known nodes
|
||||
rand *mrand.Rand // source of randomness, periodically reseeded
|
||||
ips netutil.DistinctNetSet
|
||||
|
||||
db *nodeDB // database of known nodes
|
||||
refreshReq chan chan struct{}
|
||||
initDone chan struct{}
|
||||
closeReq chan struct{}
|
||||
closed chan struct{}
|
||||
|
||||
@ -89,9 +106,13 @@ type transport interface {
|
||||
|
||||
// bucket contains nodes, ordered by their last activity. the entry
|
||||
// that was most recently active is the first element in entries.
|
||||
type bucket struct{ entries []*Node }
|
||||
type bucket struct {
|
||||
entries []*Node // live entries, sorted by time of last contact
|
||||
replacements []*Node // recently seen nodes to be used if revalidation fails
|
||||
ips netutil.DistinctNetSet
|
||||
}
|
||||
|
||||
func newTable(t transport, ourID NodeID, ourAddr *net.UDPAddr, nodeDBPath string) (*Table, error) {
|
||||
func newTable(t transport, ourID NodeID, ourAddr *net.UDPAddr, nodeDBPath string, bootnodes []*Node) (*Table, error) {
|
||||
// If no node database was given, use an in-memory one
|
||||
db, err := newNodeDB(nodeDBPath, Version, ourID)
|
||||
if err != nil {
|
||||
@ -104,19 +125,42 @@ func newTable(t transport, ourID NodeID, ourAddr *net.UDPAddr, nodeDBPath string
|
||||
bonding: make(map[NodeID]*bondproc),
|
||||
bondslots: make(chan struct{}, maxBondingPingPongs),
|
||||
refreshReq: make(chan chan struct{}),
|
||||
initDone: make(chan struct{}),
|
||||
closeReq: make(chan struct{}),
|
||||
closed: make(chan struct{}),
|
||||
rand: mrand.New(mrand.NewSource(0)),
|
||||
ips: netutil.DistinctNetSet{Subnet: tableSubnet, Limit: tableIPLimit},
|
||||
}
|
||||
if err := tab.setFallbackNodes(bootnodes); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for i := 0; i < cap(tab.bondslots); i++ {
|
||||
tab.bondslots <- struct{}{}
|
||||
}
|
||||
for i := range tab.buckets {
|
||||
tab.buckets[i] = new(bucket)
|
||||
tab.buckets[i] = &bucket{
|
||||
ips: netutil.DistinctNetSet{Subnet: bucketSubnet, Limit: bucketIPLimit},
|
||||
}
|
||||
go tab.refreshLoop()
|
||||
}
|
||||
tab.seedRand()
|
||||
tab.loadSeedNodes(false)
|
||||
// Start the background expiration goroutine after loading seeds so that the search for
|
||||
// seed nodes also considers older nodes that would otherwise be removed by the
|
||||
// expiration.
|
||||
tab.db.ensureExpirer()
|
||||
go tab.loop()
|
||||
return tab, nil
|
||||
}
|
||||
|
||||
func (tab *Table) seedRand() {
|
||||
var b [8]byte
|
||||
crand.Read(b[:])
|
||||
|
||||
tab.mutex.Lock()
|
||||
tab.rand.Seed(int64(binary.BigEndian.Uint64(b[:])))
|
||||
tab.mutex.Unlock()
|
||||
}
|
||||
|
||||
// Self returns the local node.
|
||||
// The returned node should not be modified by the caller.
|
||||
func (tab *Table) Self() *Node {
|
||||
@ -127,9 +171,12 @@ func (tab *Table) Self() *Node {
|
||||
// table. It will not write the same node more than once. The nodes in
|
||||
// the slice are copies and can be modified by the caller.
|
||||
func (tab *Table) ReadRandomNodes(buf []*Node) (n int) {
|
||||
if !tab.isInitDone() {
|
||||
return 0
|
||||
}
|
||||
tab.mutex.Lock()
|
||||
defer tab.mutex.Unlock()
|
||||
// TODO: tree-based buckets would help here
|
||||
|
||||
// Find all non-empty buckets and get a fresh slice of their entries.
|
||||
var buckets [][]*Node
|
||||
for _, b := range tab.buckets {
|
||||
@ -141,8 +188,8 @@ func (tab *Table) ReadRandomNodes(buf []*Node) (n int) {
|
||||
return 0
|
||||
}
|
||||
// Shuffle the buckets.
|
||||
for i := uint32(len(buckets)) - 1; i > 0; i-- {
|
||||
j := randUint(i)
|
||||
for i := len(buckets) - 1; i > 0; i-- {
|
||||
j := tab.rand.Intn(len(buckets))
|
||||
buckets[i], buckets[j] = buckets[j], buckets[i]
|
||||
}
|
||||
// Move head of each bucket into buf, removing buckets that become empty.
|
||||
@ -161,15 +208,6 @@ func (tab *Table) ReadRandomNodes(buf []*Node) (n int) {
|
||||
return i + 1
|
||||
}
|
||||
|
||||
func randUint(max uint32) uint32 {
|
||||
if max == 0 {
|
||||
return 0
|
||||
}
|
||||
var b [4]byte
|
||||
rand.Read(b[:])
|
||||
return binary.BigEndian.Uint32(b[:]) % max
|
||||
}
|
||||
|
||||
// Close terminates the network listener and flushes the node database.
|
||||
func (tab *Table) Close() {
|
||||
select {
|
||||
@ -180,16 +218,15 @@ func (tab *Table) Close() {
|
||||
}
|
||||
}
|
||||
|
||||
// SetFallbackNodes sets the initial points of contact. These nodes
|
||||
// setFallbackNodes sets the initial points of contact. These nodes
|
||||
// are used to connect to the network if the table is empty and there
|
||||
// are no known nodes in the database.
|
||||
func (tab *Table) SetFallbackNodes(nodes []*Node) error {
|
||||
func (tab *Table) setFallbackNodes(nodes []*Node) error {
|
||||
for _, n := range nodes {
|
||||
if err := n.validateComplete(); err != nil {
|
||||
return fmt.Errorf("bad bootstrap/fallback node %q (%v)", n, err)
|
||||
}
|
||||
}
|
||||
tab.mutex.Lock()
|
||||
tab.nursery = make([]*Node, 0, len(nodes))
|
||||
for _, n := range nodes {
|
||||
cpy := *n
|
||||
@ -198,11 +235,19 @@ func (tab *Table) SetFallbackNodes(nodes []*Node) error {
|
||||
cpy.sha = crypto.Keccak256Hash(n.ID[:])
|
||||
tab.nursery = append(tab.nursery, &cpy)
|
||||
}
|
||||
tab.mutex.Unlock()
|
||||
tab.refresh()
|
||||
return nil
|
||||
}
|
||||
|
||||
// isInitDone returns whether the table's initial seeding procedure has completed.
|
||||
func (tab *Table) isInitDone() bool {
|
||||
select {
|
||||
case <-tab.initDone:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Resolve searches for a specific node with the given ID.
|
||||
// It returns nil if the node could not be found.
|
||||
func (tab *Table) Resolve(targetID NodeID) *Node {
|
||||
@ -314,33 +359,49 @@ func (tab *Table) refresh() <-chan struct{} {
|
||||
return done
|
||||
}
|
||||
|
||||
// refreshLoop schedules doRefresh runs and coordinates shutdown.
|
||||
func (tab *Table) refreshLoop() {
|
||||
// loop schedules refresh, revalidate runs and coordinates shutdown.
|
||||
func (tab *Table) loop() {
|
||||
var (
|
||||
timer = time.NewTicker(autoRefreshInterval)
|
||||
waiting []chan struct{} // accumulates waiting callers while doRefresh runs
|
||||
done chan struct{} // where doRefresh reports completion
|
||||
revalidate = time.NewTimer(tab.nextRevalidateTime())
|
||||
refresh = time.NewTicker(refreshInterval)
|
||||
copyNodes = time.NewTicker(copyNodesInterval)
|
||||
revalidateDone = make(chan struct{})
|
||||
refreshDone = make(chan struct{}) // where doRefresh reports completion
|
||||
waiting = []chan struct{}{tab.initDone} // holds waiting callers while doRefresh runs
|
||||
)
|
||||
defer refresh.Stop()
|
||||
defer revalidate.Stop()
|
||||
defer copyNodes.Stop()
|
||||
|
||||
// Start initial refresh.
|
||||
go tab.doRefresh(refreshDone)
|
||||
|
||||
loop:
|
||||
for {
|
||||
select {
|
||||
case <-timer.C:
|
||||
if done == nil {
|
||||
done = make(chan struct{})
|
||||
go tab.doRefresh(done)
|
||||
case <-refresh.C:
|
||||
tab.seedRand()
|
||||
if refreshDone == nil {
|
||||
refreshDone = make(chan struct{})
|
||||
go tab.doRefresh(refreshDone)
|
||||
}
|
||||
case req := <-tab.refreshReq:
|
||||
waiting = append(waiting, req)
|
||||
if done == nil {
|
||||
done = make(chan struct{})
|
||||
go tab.doRefresh(done)
|
||||
if refreshDone == nil {
|
||||
refreshDone = make(chan struct{})
|
||||
go tab.doRefresh(refreshDone)
|
||||
}
|
||||
case <-done:
|
||||
case <-refreshDone:
|
||||
for _, ch := range waiting {
|
||||
close(ch)
|
||||
}
|
||||
waiting = nil
|
||||
done = nil
|
||||
waiting, refreshDone = nil, nil
|
||||
case <-revalidate.C:
|
||||
go tab.doRevalidate(revalidateDone)
|
||||
case <-revalidateDone:
|
||||
revalidate.Reset(tab.nextRevalidateTime())
|
||||
case <-copyNodes.C:
|
||||
go tab.copyBondedNodes()
|
||||
case <-tab.closeReq:
|
||||
break loop
|
||||
}
|
||||
@ -349,8 +410,8 @@ loop:
|
||||
if tab.net != nil {
|
||||
tab.net.close()
|
||||
}
|
||||
if done != nil {
|
||||
<-done
|
||||
if refreshDone != nil {
|
||||
<-refreshDone
|
||||
}
|
||||
for _, ch := range waiting {
|
||||
close(ch)
|
||||
@ -365,38 +426,109 @@ loop:
|
||||
func (tab *Table) doRefresh(done chan struct{}) {
|
||||
defer close(done)
|
||||
|
||||
// Load nodes from the database and insert
|
||||
// them. This should yield a few previously seen nodes that are
|
||||
// (hopefully) still alive.
|
||||
tab.loadSeedNodes(true)
|
||||
|
||||
// Run self lookup to discover new neighbor nodes.
|
||||
tab.lookup(tab.self.ID, false)
|
||||
|
||||
// The Kademlia paper specifies that the bucket refresh should
|
||||
// perform a lookup in the least recently used bucket. We cannot
|
||||
// adhere to this because the findnode target is a 512bit value
|
||||
// (not hash-sized) and it is not easily possible to generate a
|
||||
// sha3 preimage that falls into a chosen bucket.
|
||||
// We perform a lookup with a random target instead.
|
||||
// We perform a few lookups with a random target instead.
|
||||
for i := 0; i < 3; i++ {
|
||||
var target NodeID
|
||||
rand.Read(target[:])
|
||||
result := tab.lookup(target, false)
|
||||
if len(result) > 0 {
|
||||
crand.Read(target[:])
|
||||
tab.lookup(target, false)
|
||||
}
|
||||
}
|
||||
|
||||
func (tab *Table) loadSeedNodes(bond bool) {
|
||||
seeds := tab.db.querySeeds(seedCount, seedMaxAge)
|
||||
seeds = append(seeds, tab.nursery...)
|
||||
if bond {
|
||||
seeds = tab.bondall(seeds)
|
||||
}
|
||||
for i := range seeds {
|
||||
seed := seeds[i]
|
||||
age := log.Lazy{Fn: func() interface{} { return time.Since(tab.db.bondTime(seed.ID)) }}
|
||||
log.Debug("Found seed node in database", "id", seed.ID, "addr", seed.addr(), "age", age)
|
||||
tab.add(seed)
|
||||
}
|
||||
}
|
||||
|
||||
// doRevalidate checks that the last node in a random bucket is still live
|
||||
// and replaces or deletes the node if it isn't.
|
||||
func (tab *Table) doRevalidate(done chan<- struct{}) {
|
||||
defer func() { done <- struct{}{} }()
|
||||
|
||||
last, bi := tab.nodeToRevalidate()
|
||||
if last == nil {
|
||||
// No non-empty bucket found.
|
||||
return
|
||||
}
|
||||
|
||||
// The table is empty. Load nodes from the database and insert
|
||||
// them. This should yield a few previously seen nodes that are
|
||||
// (hopefully) still alive.
|
||||
seeds := tab.db.querySeeds(seedCount, seedMaxAge)
|
||||
seeds = tab.bondall(append(seeds, tab.nursery...))
|
||||
// Ping the selected node and wait for a pong.
|
||||
err := tab.ping(last.ID, last.addr())
|
||||
|
||||
if len(seeds) == 0 {
|
||||
log.Debug("No discv4 seed nodes found")
|
||||
}
|
||||
for _, n := range seeds {
|
||||
age := log.Lazy{Fn: func() time.Duration { return time.Since(tab.db.lastPong(n.ID)) }}
|
||||
log.Trace("Found seed node in database", "id", n.ID, "addr", n.addr(), "age", age)
|
||||
}
|
||||
tab.mutex.Lock()
|
||||
tab.stuff(seeds)
|
||||
tab.mutex.Unlock()
|
||||
defer tab.mutex.Unlock()
|
||||
b := tab.buckets[bi]
|
||||
if err == nil {
|
||||
// The node responded, move it to the front.
|
||||
log.Debug("Revalidated node", "b", bi, "id", last.ID)
|
||||
b.bump(last)
|
||||
return
|
||||
}
|
||||
// No reply received, pick a replacement or delete the node if there aren't
|
||||
// any replacements.
|
||||
if r := tab.replace(b, last); r != nil {
|
||||
log.Debug("Replaced dead node", "b", bi, "id", last.ID, "ip", last.IP, "r", r.ID, "rip", r.IP)
|
||||
} else {
|
||||
log.Debug("Removed dead node", "b", bi, "id", last.ID, "ip", last.IP)
|
||||
}
|
||||
}
|
||||
|
||||
// Finally, do a self lookup to fill up the buckets.
|
||||
tab.lookup(tab.self.ID, false)
|
||||
// nodeToRevalidate returns the last node in a random, non-empty bucket.
|
||||
func (tab *Table) nodeToRevalidate() (n *Node, bi int) {
|
||||
tab.mutex.Lock()
|
||||
defer tab.mutex.Unlock()
|
||||
|
||||
for _, bi = range tab.rand.Perm(len(tab.buckets)) {
|
||||
b := tab.buckets[bi]
|
||||
if len(b.entries) > 0 {
|
||||
last := b.entries[len(b.entries)-1]
|
||||
return last, bi
|
||||
}
|
||||
}
|
||||
return nil, 0
|
||||
}
|
||||
|
||||
func (tab *Table) nextRevalidateTime() time.Duration {
|
||||
tab.mutex.Lock()
|
||||
defer tab.mutex.Unlock()
|
||||
|
||||
return time.Duration(tab.rand.Int63n(int64(revalidateInterval)))
|
||||
}
|
||||
|
||||
// copyBondedNodes adds nodes from the table to the database if they have been in the table
|
||||
// longer than minTableTime.
|
||||
func (tab *Table) copyBondedNodes() {
|
||||
tab.mutex.Lock()
|
||||
defer tab.mutex.Unlock()
|
||||
|
||||
now := time.Now()
|
||||
for _, b := range tab.buckets {
|
||||
for _, n := range b.entries {
|
||||
if now.Sub(n.addedAt) >= seedMinTableTime {
|
||||
tab.db.updateNode(n)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// closest returns the n nodes in the table that are closest to the
|
||||
@ -459,15 +591,14 @@ func (tab *Table) bond(pinged bool, id NodeID, addr *net.UDPAddr, tcpPort uint16
|
||||
if id == tab.self.ID {
|
||||
return nil, errors.New("is self")
|
||||
}
|
||||
// Retrieve a previously known node and any recent findnode failures
|
||||
node, fails := tab.db.node(id), 0
|
||||
if node != nil {
|
||||
fails = tab.db.findFails(id)
|
||||
if pinged && !tab.isInitDone() {
|
||||
return nil, errors.New("still initializing")
|
||||
}
|
||||
// If the node is unknown (non-bonded) or failed (remotely unknown), bond from scratch
|
||||
// Start bonding if we haven't seen this node for a while or if it failed findnode too often.
|
||||
node, fails := tab.db.node(id), tab.db.findFails(id)
|
||||
age := time.Since(tab.db.bondTime(id))
|
||||
var result error
|
||||
age := time.Since(tab.db.lastPong(id))
|
||||
if node == nil || fails > 0 || age > nodeDBNodeExpiration {
|
||||
if fails > 0 || age > nodeDBNodeExpiration {
|
||||
log.Trace("Starting bonding ping/pong", "id", id, "known", node != nil, "failcount", fails, "age", age)
|
||||
|
||||
tab.bondmu.Lock()
|
||||
@ -494,10 +625,10 @@ func (tab *Table) bond(pinged bool, id NodeID, addr *net.UDPAddr, tcpPort uint16
|
||||
node = w.n
|
||||
}
|
||||
}
|
||||
if node != nil {
|
||||
// Add the node to the table even if the bonding ping/pong
|
||||
// fails. It will be replaced quickly if it continues to be
|
||||
// unresponsive.
|
||||
if node != nil {
|
||||
tab.add(node)
|
||||
tab.db.updateFindFails(id, 0)
|
||||
}
|
||||
@ -522,7 +653,6 @@ func (tab *Table) pingpong(w *bondproc, pinged bool, id NodeID, addr *net.UDPAdd
|
||||
}
|
||||
// Bonding succeeded, update the node database.
|
||||
w.n = NewNode(id, addr.IP, uint16(addr.Port), tcpPort)
|
||||
tab.db.updateNode(w.n)
|
||||
close(w.done)
|
||||
}
|
||||
|
||||
@ -533,17 +663,19 @@ func (tab *Table) ping(id NodeID, addr *net.UDPAddr) error {
|
||||
if err := tab.net.ping(id, addr); err != nil {
|
||||
return err
|
||||
}
|
||||
tab.db.updateLastPong(id, time.Now())
|
||||
|
||||
// Start the background expiration goroutine after the first
|
||||
// successful communication. Subsequent calls have no effect if it
|
||||
// is already running. We do this here instead of somewhere else
|
||||
// so that the search for seed nodes also considers older nodes
|
||||
// that would otherwise be removed by the expiration.
|
||||
tab.db.ensureExpirer()
|
||||
tab.db.updateBondTime(id, time.Now())
|
||||
return nil
|
||||
}
|
||||
|
||||
// bucket returns the bucket for the given node ID hash.
func (tab *Table) bucket(sha common.Hash) *bucket {
d := logdist(tab.self.sha, sha)
if d <= bucketMinDistance {
return tab.buckets[0]
}
return tab.buckets[d-bucketMinDistance-1]
}

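Given nBuckets = hashBits/15 and bucketMinDistance = hashBits - nBuckets from the constants earlier in this diff, the function above folds every short log-distance into bucket 0 and spreads only the largest distances over the remaining buckets. A small stand-alone sketch of that mapping, assuming the same 256-bit hashes (bucketIndex is illustrative, not part of the package):

package main

import "fmt"

const (
	hashBits          = 32 * 8        // 256-bit node ID hashes
	nBuckets          = hashBits / 15 // 17 buckets
	bucketMinDistance = hashBits - nBuckets
)

// bucketIndex maps a Kademlia log-distance to a bucket index the same way
// Table.bucket does: everything at or below bucketMinDistance shares bucket 0.
func bucketIndex(logdist int) int {
	if logdist <= bucketMinDistance {
		return 0
	}
	return logdist - bucketMinDistance - 1
}

func main() {
	fmt.Println(nBuckets, bucketMinDistance)                          // 17 239
	fmt.Println(bucketIndex(200), bucketIndex(240), bucketIndex(256)) // 0 0 16
}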
// add attempts to add the given node to its corresponding bucket. If the
|
||||
// bucket has space available, adding the node succeeds immediately.
|
||||
// Otherwise, the node is added if the least recently active node in
|
||||
@ -551,57 +683,29 @@ func (tab *Table) ping(id NodeID, addr *net.UDPAddr) error {
|
||||
//
|
||||
// The caller must not hold tab.mutex.
|
||||
func (tab *Table) add(new *Node) {
|
||||
b := tab.buckets[logdist(tab.self.sha, new.sha)]
|
||||
tab.mutex.Lock()
|
||||
defer tab.mutex.Unlock()
|
||||
if b.bump(new) {
|
||||
return
|
||||
}
|
||||
var oldest *Node
|
||||
if len(b.entries) == bucketSize {
|
||||
oldest = b.entries[bucketSize-1]
|
||||
if oldest.contested {
|
||||
// The node is already being replaced, don't attempt
|
||||
// to replace it.
|
||||
return
|
||||
}
|
||||
oldest.contested = true
|
||||
// Let go of the mutex so other goroutines can access
|
||||
// the table while we ping the least recently active node.
|
||||
tab.mutex.Unlock()
|
||||
err := tab.ping(oldest.ID, oldest.addr())
|
||||
tab.mutex.Lock()
|
||||
oldest.contested = false
|
||||
if err == nil {
|
||||
// The node responded, don't replace it.
|
||||
return
|
||||
}
|
||||
}
|
||||
added := b.replace(new, oldest)
|
||||
if added && tab.nodeAddedHook != nil {
|
||||
tab.nodeAddedHook(new)
|
||||
|
||||
b := tab.bucket(new.sha)
|
||||
if !tab.bumpOrAdd(b, new) {
|
||||
// Node is not in table. Add it to the replacement list.
|
||||
tab.addReplacement(b, new)
|
||||
}
|
||||
}
|
||||
|
||||
// stuff adds nodes to the end of their corresponding bucket
|
||||
// if the bucket is not full. The caller must hold tab.mutex.
|
||||
// if the bucket is not full. The caller must not hold tab.mutex.
|
||||
func (tab *Table) stuff(nodes []*Node) {
|
||||
outer:
|
||||
tab.mutex.Lock()
|
||||
defer tab.mutex.Unlock()
|
||||
|
||||
for _, n := range nodes {
|
||||
if n.ID == tab.self.ID {
|
||||
continue // don't add self
|
||||
}
|
||||
bucket := tab.buckets[logdist(tab.self.sha, n.sha)]
|
||||
for i := range bucket.entries {
|
||||
if bucket.entries[i].ID == n.ID {
|
||||
continue outer // already in bucket
|
||||
}
|
||||
}
|
||||
if len(bucket.entries) < bucketSize {
|
||||
bucket.entries = append(bucket.entries, n)
|
||||
if tab.nodeAddedHook != nil {
|
||||
tab.nodeAddedHook(n)
|
||||
}
|
||||
b := tab.bucket(n.sha)
|
||||
if len(b.entries) < bucketSize {
|
||||
tab.bumpOrAdd(b, n)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -611,36 +715,72 @@ outer:
|
||||
func (tab *Table) delete(node *Node) {
|
||||
tab.mutex.Lock()
|
||||
defer tab.mutex.Unlock()
|
||||
bucket := tab.buckets[logdist(tab.self.sha, node.sha)]
|
||||
for i := range bucket.entries {
|
||||
if bucket.entries[i].ID == node.ID {
|
||||
bucket.entries = append(bucket.entries[:i], bucket.entries[i+1:]...)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
tab.deleteInBucket(tab.bucket(node.sha), node)
|
||||
}
|
||||
|
||||
func (b *bucket) replace(n *Node, last *Node) bool {
|
||||
// Don't add if b already contains n.
|
||||
for i := range b.entries {
|
||||
if b.entries[i].ID == n.ID {
|
||||
func (tab *Table) addIP(b *bucket, ip net.IP) bool {
|
||||
if netutil.IsLAN(ip) {
|
||||
return true
|
||||
}
|
||||
if !tab.ips.Add(ip) {
|
||||
log.Debug("IP exceeds table limit", "ip", ip)
|
||||
return false
|
||||
}
|
||||
}
|
||||
// Replace last if it is still the last entry or just add n if b
|
||||
// isn't full. If it is no longer the last entry, it has either been
|
||||
// replaced with someone else or became active.
|
||||
if len(b.entries) == bucketSize && (last == nil || b.entries[bucketSize-1].ID != last.ID) {
|
||||
if !b.ips.Add(ip) {
|
||||
log.Debug("IP exceeds bucket limit", "ip", ip)
|
||||
tab.ips.Remove(ip)
|
||||
return false
|
||||
}
|
||||
if len(b.entries) < bucketSize {
|
||||
b.entries = append(b.entries, nil)
|
||||
}
|
||||
copy(b.entries[1:], b.entries)
|
||||
b.entries[0] = n
|
||||
return true
|
||||
}
|
||||
|
||||
func (tab *Table) removeIP(b *bucket, ip net.IP) {
|
||||
if netutil.IsLAN(ip) {
|
||||
return
|
||||
}
|
||||
tab.ips.Remove(ip)
|
||||
b.ips.Remove(ip)
|
||||
}
|
||||
|
||||
func (tab *Table) addReplacement(b *bucket, n *Node) {
|
||||
for _, e := range b.replacements {
|
||||
if e.ID == n.ID {
|
||||
return // already in list
|
||||
}
|
||||
}
|
||||
if !tab.addIP(b, n.IP) {
|
||||
return
|
||||
}
|
||||
var removed *Node
|
||||
b.replacements, removed = pushNode(b.replacements, n, maxReplacements)
|
||||
if removed != nil {
|
||||
tab.removeIP(b, removed.IP)
|
||||
}
|
||||
}
|
||||
|
||||
// replace removes n from the replacement list and replaces 'last' with it if it is the
|
||||
// last entry in the bucket. If 'last' isn't the last entry, it has either been replaced
|
||||
// with someone else or became active.
|
||||
func (tab *Table) replace(b *bucket, last *Node) *Node {
|
||||
if len(b.entries) == 0 || b.entries[len(b.entries)-1].ID != last.ID {
|
||||
// Entry has moved, don't replace it.
|
||||
return nil
|
||||
}
|
||||
// Still the last entry.
|
||||
if len(b.replacements) == 0 {
|
||||
tab.deleteInBucket(b, last)
|
||||
return nil
|
||||
}
|
||||
r := b.replacements[tab.rand.Intn(len(b.replacements))]
|
||||
b.replacements = deleteNode(b.replacements, r)
|
||||
b.entries[len(b.entries)-1] = r
|
||||
tab.removeIP(b, last.IP)
|
||||
return r
|
||||
}
|
||||
|
||||
// bump moves the given node to the front of the bucket entry list
|
||||
// if it is contained in that list.
|
||||
func (b *bucket) bump(n *Node) bool {
|
||||
for i := range b.entries {
|
||||
if b.entries[i].ID == n.ID {
|
||||
@ -653,6 +793,50 @@ func (b *bucket) bump(n *Node) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// bumpOrAdd moves n to the front of the bucket entry list or adds it if the list isn't
|
||||
// full. The return value is true if n is in the bucket.
|
||||
func (tab *Table) bumpOrAdd(b *bucket, n *Node) bool {
|
||||
if b.bump(n) {
|
||||
return true
|
||||
}
|
||||
if len(b.entries) >= bucketSize || !tab.addIP(b, n.IP) {
|
||||
return false
|
||||
}
|
||||
b.entries, _ = pushNode(b.entries, n, bucketSize)
|
||||
b.replacements = deleteNode(b.replacements, n)
|
||||
n.addedAt = time.Now()
|
||||
if tab.nodeAddedHook != nil {
|
||||
tab.nodeAddedHook(n)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (tab *Table) deleteInBucket(b *bucket, n *Node) {
|
||||
b.entries = deleteNode(b.entries, n)
|
||||
tab.removeIP(b, n.IP)
|
||||
}
|
||||
|
||||
// pushNode adds n to the front of list, keeping at most max items.
|
||||
func pushNode(list []*Node, n *Node, max int) ([]*Node, *Node) {
|
||||
if len(list) < max {
|
||||
list = append(list, nil)
|
||||
}
|
||||
removed := list[len(list)-1]
|
||||
copy(list[1:], list)
|
||||
list[0] = n
|
||||
return list, removed
|
||||
}
|
||||
|
||||
// deleteNode removes n from list.
|
||||
func deleteNode(list []*Node, n *Node) []*Node {
|
||||
for i := range list {
|
||||
if list[i].ID == n.ID {
|
||||
return append(list[:i], list[i+1:]...)
|
||||
}
|
||||
}
|
||||
return list
|
||||
}
|
||||
|
||||
// nodesByDistance is a list of nodes, ordered by
|
||||
// distance to target.
|
||||
type nodesByDistance struct {
|
||||
|
@ -20,6 +20,7 @@ import (
|
||||
"crypto/ecdsa"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"sync"
|
||||
|
||||
"net"
|
||||
"reflect"
|
||||
@ -32,60 +33,65 @@ import (
|
||||
)
|
||||
|
||||
func TestTable_pingReplace(t *testing.T) {
|
||||
doit := func(newNodeIsResponding, lastInBucketIsResponding bool) {
|
||||
run := func(newNodeResponding, lastInBucketResponding bool) {
|
||||
name := fmt.Sprintf("newNodeResponding=%t/lastInBucketResponding=%t", newNodeResponding, lastInBucketResponding)
|
||||
t.Run(name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
testPingReplace(t, newNodeResponding, lastInBucketResponding)
|
||||
})
|
||||
}
|
||||
|
||||
run(true, true)
|
||||
run(false, true)
|
||||
run(true, false)
|
||||
run(false, false)
|
||||
}
|
||||
|
||||
func testPingReplace(t *testing.T, newNodeIsResponding, lastInBucketIsResponding bool) {
|
||||
transport := newPingRecorder()
|
||||
tab, _ := newTable(transport, NodeID{}, &net.UDPAddr{}, "")
|
||||
tab, _ := newTable(transport, NodeID{}, &net.UDPAddr{}, "", nil)
|
||||
defer tab.Close()
|
||||
pingSender := NewNode(MustHexID("a502af0f59b2aab7746995408c79e9ca312d2793cc997e44fc55eda62f0150bbb8c59a6f9269ba3a081518b62699ee807c7c19c20125ddfccca872608af9e370"), net.IP{}, 99, 99)
|
||||
|
||||
// Wait for init so bond is accepted.
|
||||
<-tab.initDone
|
||||
|
||||
// fill up the sender's bucket.
|
||||
last := fillBucket(tab, 253)
|
||||
pingSender := NewNode(MustHexID("a502af0f59b2aab7746995408c79e9ca312d2793cc997e44fc55eda62f0150bbb8c59a6f9269ba3a081518b62699ee807c7c19c20125ddfccca872608af9e370"), net.IP{}, 99, 99)
|
||||
last := fillBucket(tab, pingSender)
|
||||
|
||||
// this call to bond should replace the last node
|
||||
// in its bucket if the node is not responding.
|
||||
transport.responding[last.ID] = lastInBucketIsResponding
|
||||
transport.responding[pingSender.ID] = newNodeIsResponding
|
||||
transport.dead[last.ID] = !lastInBucketIsResponding
|
||||
transport.dead[pingSender.ID] = !newNodeIsResponding
|
||||
tab.bond(true, pingSender.ID, &net.UDPAddr{}, 0)
|
||||
tab.doRevalidate(make(chan struct{}, 1))
|
||||
|
||||
// first ping goes to sender (bonding pingback)
|
||||
if !transport.pinged[pingSender.ID] {
|
||||
t.Error("table did not ping back sender")
|
||||
}
|
||||
if newNodeIsResponding {
|
||||
if !transport.pinged[last.ID] {
|
||||
// second ping goes to oldest node in bucket
|
||||
// to see whether it is still alive.
|
||||
if !transport.pinged[last.ID] {
|
||||
t.Error("table did not ping last node in bucket")
|
||||
}
|
||||
}
|
||||
|
||||
tab.mutex.Lock()
|
||||
defer tab.mutex.Unlock()
|
||||
if l := len(tab.buckets[253].entries); l != bucketSize {
|
||||
t.Errorf("wrong bucket size after bond: got %d, want %d", l, bucketSize)
|
||||
wantSize := bucketSize
|
||||
if !lastInBucketIsResponding && !newNodeIsResponding {
|
||||
wantSize--
|
||||
}
|
||||
|
||||
if lastInBucketIsResponding || !newNodeIsResponding {
|
||||
if !contains(tab.buckets[253].entries, last.ID) {
|
||||
t.Error("last entry was removed")
|
||||
if l := len(tab.bucket(pingSender.sha).entries); l != wantSize {
|
||||
t.Errorf("wrong bucket size after bond: got %d, want %d", l, wantSize)
|
||||
}
|
||||
if contains(tab.buckets[253].entries, pingSender.ID) {
|
||||
t.Error("new entry was added")
|
||||
if found := contains(tab.bucket(pingSender.sha).entries, last.ID); found != lastInBucketIsResponding {
|
||||
t.Errorf("last entry found: %t, want: %t", found, lastInBucketIsResponding)
|
||||
}
|
||||
} else {
|
||||
if contains(tab.buckets[253].entries, last.ID) {
|
||||
t.Error("last entry was not removed")
|
||||
wantNewEntry := newNodeIsResponding && !lastInBucketIsResponding
|
||||
if found := contains(tab.bucket(pingSender.sha).entries, pingSender.ID); found != wantNewEntry {
|
||||
t.Errorf("new entry found: %t, want: %t", found, wantNewEntry)
|
||||
}
|
||||
if !contains(tab.buckets[253].entries, pingSender.ID) {
|
||||
t.Error("new entry was not added")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
doit(true, true)
|
||||
doit(false, true)
|
||||
doit(true, false)
|
||||
doit(false, false)
|
||||
}
|
||||
|
||||
func TestBucket_bumpNoDuplicates(t *testing.T) {
|
||||
@ -130,11 +136,45 @@ func TestBucket_bumpNoDuplicates(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// This checks that the table-wide IP limit is applied correctly.
func TestTable_IPLimit(t *testing.T) {
	transport := newPingRecorder()
	tab, _ := newTable(transport, NodeID{}, &net.UDPAddr{}, "", nil)
	defer tab.Close()

	for i := 0; i < tableIPLimit+1; i++ {
		n := nodeAtDistance(tab.self.sha, i)
		n.IP = net.IP{172, 0, 1, byte(i)}
		tab.add(n)
	}
	if tab.len() > tableIPLimit {
		t.Errorf("too many nodes in table")
	}
}

// This checks that the per-bucket IP limit is applied correctly.
func TestTable_BucketIPLimit(t *testing.T) {
	transport := newPingRecorder()
	tab, _ := newTable(transport, NodeID{}, &net.UDPAddr{}, "", nil)
	defer tab.Close()

	d := 3
	for i := 0; i < bucketIPLimit+1; i++ {
		n := nodeAtDistance(tab.self.sha, d)
		n.IP = net.IP{172, 0, 1, byte(i)}
		tab.add(n)
	}
	if tab.len() > bucketIPLimit {
		t.Errorf("too many nodes in table")
	}
}

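These tests lean on the DistinctNetSet helper added to p2p/netutil later in this change. As a rough sketch of the kind of gate the table can apply before admitting a node (the function shape and the Subnet/Limit values here are illustrative, not taken from the diff):

package main

import (
	"fmt"
	"net"

	"github.com/ethereum/go-ethereum/p2p/netutil"
)

// addIP sketches a two-level admission check: reserve a slot in the
// table-wide set first, then in the bucket's set, rolling the table-wide
// reservation back if the bucket's subnet is already full.
func addIP(tableIPs, bucketIPs *netutil.DistinctNetSet, ip net.IP) bool {
	if !tableIPs.Add(ip) {
		return false
	}
	if !bucketIPs.Add(ip) {
		tableIPs.Remove(ip)
		return false
	}
	return true
}

func main() {
	tableIPs := &netutil.DistinctNetSet{Subnet: 24, Limit: 10} // illustrative limits
	bucketIPs := &netutil.DistinctNetSet{Subnet: 24, Limit: 2}
	fmt.Println(addIP(tableIPs, bucketIPs, net.IP{172, 0, 1, 1})) // true
	fmt.Println(addIP(tableIPs, bucketIPs, net.IP{172, 0, 1, 2})) // true
	fmt.Println(addIP(tableIPs, bucketIPs, net.IP{172, 0, 1, 3})) // false: the /24 is full for this bucket
}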
// fillBucket inserts nodes into the given bucket until
|
||||
// it is full. The nodes' IDs don't correspond to their
|
||||
// hashes.
|
||||
func fillBucket(tab *Table, ld int) (last *Node) {
|
||||
b := tab.buckets[ld]
|
||||
func fillBucket(tab *Table, n *Node) (last *Node) {
|
||||
ld := logdist(tab.self.sha, n.sha)
|
||||
b := tab.bucket(n.sha)
|
||||
for len(b.entries) < bucketSize {
|
||||
b.entries = append(b.entries, nodeAtDistance(tab.self.sha, ld))
|
||||
}
|
||||
@ -146,30 +186,39 @@ func fillBucket(tab *Table, ld int) (last *Node) {
|
||||
func nodeAtDistance(base common.Hash, ld int) (n *Node) {
|
||||
n = new(Node)
|
||||
n.sha = hashAtDistance(base, ld)
|
||||
n.IP = net.IP{10, 0, 2, byte(ld)}
|
||||
n.IP = net.IP{byte(ld), 0, 2, byte(ld)}
|
||||
copy(n.ID[:], n.sha[:]) // ensure the node still has a unique ID
|
||||
return n
|
||||
}
|
||||
|
||||
type pingRecorder struct{ responding, pinged map[NodeID]bool }
|
||||
type pingRecorder struct {
|
||||
mu sync.Mutex
|
||||
dead, pinged map[NodeID]bool
|
||||
}
|
||||
|
||||
func newPingRecorder() *pingRecorder {
|
||||
return &pingRecorder{make(map[NodeID]bool), make(map[NodeID]bool)}
|
||||
return &pingRecorder{
|
||||
dead: make(map[NodeID]bool),
|
||||
pinged: make(map[NodeID]bool),
|
||||
}
|
||||
}
|
||||
|
||||
func (t *pingRecorder) findnode(toid NodeID, toaddr *net.UDPAddr, target NodeID) ([]*Node, error) {
|
||||
panic("findnode called on pingRecorder")
|
||||
return nil, nil
|
||||
}
|
||||
func (t *pingRecorder) close() {}
|
||||
func (t *pingRecorder) waitping(from NodeID) error {
|
||||
return nil // remote always pings
|
||||
}
|
||||
func (t *pingRecorder) ping(toid NodeID, toaddr *net.UDPAddr) error {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
|
||||
t.pinged[toid] = true
|
||||
if t.responding[toid] {
|
||||
return nil
|
||||
} else {
|
||||
if t.dead[toid] {
|
||||
return errTimeout
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
@ -178,7 +227,8 @@ func TestTable_closest(t *testing.T) {
|
||||
|
||||
test := func(test *closeTest) bool {
|
||||
// for any node table, Target and N
|
||||
tab, _ := newTable(nil, test.Self, &net.UDPAddr{}, "")
|
||||
transport := newPingRecorder()
|
||||
tab, _ := newTable(transport, test.Self, &net.UDPAddr{}, "", nil)
|
||||
defer tab.Close()
|
||||
tab.stuff(test.All)
|
||||
|
||||
@ -237,8 +287,11 @@ func TestTable_ReadRandomNodesGetAll(t *testing.T) {
|
||||
},
|
||||
}
|
||||
test := func(buf []*Node) bool {
|
||||
tab, _ := newTable(nil, NodeID{}, &net.UDPAddr{}, "")
|
||||
transport := newPingRecorder()
|
||||
tab, _ := newTable(transport, NodeID{}, &net.UDPAddr{}, "", nil)
|
||||
defer tab.Close()
|
||||
<-tab.initDone
|
||||
|
||||
for i := 0; i < len(buf); i++ {
|
||||
ld := cfg.Rand.Intn(len(tab.buckets))
|
||||
tab.stuff([]*Node{nodeAtDistance(tab.self.sha, ld)})
|
||||
@ -280,7 +333,7 @@ func (*closeTest) Generate(rand *rand.Rand, size int) reflect.Value {
|
||||
|
||||
func TestTable_Lookup(t *testing.T) {
|
||||
self := nodeAtDistance(common.Hash{}, 0)
|
||||
tab, _ := newTable(lookupTestnet, self.ID, &net.UDPAddr{}, "")
|
||||
tab, _ := newTable(lookupTestnet, self.ID, &net.UDPAddr{}, "", nil)
|
||||
defer tab.Close()
|
||||
|
||||
// lookup on empty table returns no nodes
|
||||
|
@ -216,9 +216,22 @@ type ReadPacket struct {
Addr *net.UDPAddr
}

// Config holds Table-related settings.
type Config struct {
	// These settings are required and configure the UDP listener:
	PrivateKey *ecdsa.PrivateKey

	// These settings are optional:
	AnnounceAddr *net.UDPAddr      // local address announced in the DHT
	NodeDBPath   string            // if set, the node database is stored at this filesystem location
	NetRestrict  *netutil.Netlist  // network whitelist
	Bootnodes    []*Node           // list of bootstrap nodes
	Unhandled    chan<- ReadPacket // unhandled packets are sent on this channel
}

// ListenUDP returns a new table that listens for UDP packets on laddr.
|
||||
func ListenUDP(priv *ecdsa.PrivateKey, conn conn, realaddr *net.UDPAddr, unhandled chan ReadPacket, nodeDBPath string, netrestrict *netutil.Netlist) (*Table, error) {
|
||||
tab, _, err := newUDP(priv, conn, realaddr, unhandled, nodeDBPath, netrestrict)
|
||||
func ListenUDP(c conn, cfg Config) (*Table, error) {
|
||||
tab, _, err := newUDP(c, cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -226,25 +239,29 @@ func ListenUDP(priv *ecdsa.PrivateKey, conn conn, realaddr *net.UDPAddr, unhandl
|
||||
return tab, nil
|
||||
}
|
||||
|
||||
func newUDP(priv *ecdsa.PrivateKey, c conn, realaddr *net.UDPAddr, unhandled chan ReadPacket, nodeDBPath string, netrestrict *netutil.Netlist) (*Table, *udp, error) {
|
||||
func newUDP(c conn, cfg Config) (*Table, *udp, error) {
|
||||
udp := &udp{
|
||||
conn: c,
|
||||
priv: priv,
|
||||
netrestrict: netrestrict,
|
||||
priv: cfg.PrivateKey,
|
||||
netrestrict: cfg.NetRestrict,
|
||||
closing: make(chan struct{}),
|
||||
gotreply: make(chan reply),
|
||||
addpending: make(chan *pending),
|
||||
}
|
||||
realaddr := c.LocalAddr().(*net.UDPAddr)
|
||||
if cfg.AnnounceAddr != nil {
|
||||
realaddr = cfg.AnnounceAddr
|
||||
}
|
||||
// TODO: separate TCP port
|
||||
udp.ourEndpoint = makeEndpoint(realaddr, uint16(realaddr.Port))
|
||||
tab, err := newTable(udp, PubkeyID(&priv.PublicKey), realaddr, nodeDBPath)
|
||||
tab, err := newTable(udp, PubkeyID(&cfg.PrivateKey.PublicKey), realaddr, cfg.NodeDBPath, cfg.Bootnodes)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
udp.Table = tab
|
||||
|
||||
go udp.loop()
|
||||
go udp.readLoop(unhandled)
|
||||
go udp.readLoop(cfg.Unhandled)
|
||||
return udp.Table, udp, nil
|
||||
}
|
||||
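Callers now pass a single Config value instead of a long positional argument list. A minimal sketch of the new call style, mirroring how the p2p server wires it up later in this change (the key generation and the localhost address are assumptions for illustration; only PrivateKey is required, the remaining fields may stay at their zero values):

package main

import (
	"log"
	"net"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p/discover"
)

func main() {
	key, err := crypto.GenerateKey()
	if err != nil {
		log.Fatal(err)
	}
	addr, _ := net.ResolveUDPAddr("udp", "127.0.0.1:30303")
	conn, err := net.ListenUDP("udp", addr)
	if err != nil {
		log.Fatal(err)
	}
	tab, err := discover.ListenUDP(conn, discover.Config{
		PrivateKey:   key,
		AnnounceAddr: conn.LocalAddr().(*net.UDPAddr),
	})
	if err != nil {
		log.Fatal(err)
	}
	defer tab.Close()
}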
|
||||
@ -256,14 +273,20 @@ func (t *udp) close() {
|
||||
|
||||
// ping sends a ping message to the given node and waits for a reply.
|
||||
func (t *udp) ping(toid NodeID, toaddr *net.UDPAddr) error {
|
||||
// TODO: maybe check for ReplyTo field in callback to measure RTT
|
||||
errc := t.pending(toid, pongPacket, func(interface{}) bool { return true })
|
||||
t.send(toaddr, pingPacket, &ping{
|
||||
req := &ping{
|
||||
Version: Version,
|
||||
From: t.ourEndpoint,
|
||||
To: makeEndpoint(toaddr, 0), // TODO: maybe use known TCP port from DB
|
||||
Expiration: uint64(time.Now().Add(expiration).Unix()),
|
||||
}
|
||||
packet, hash, err := encodePacket(t.priv, pingPacket, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
errc := t.pending(toid, pongPacket, func(p interface{}) bool {
|
||||
return bytes.Equal(p.(*pong).ReplyTok, hash)
|
||||
})
|
||||
t.write(toaddr, req.name(), packet)
|
||||
return <-errc
|
||||
}
|
||||
|
||||
@ -447,40 +470,45 @@ func init() {
|
||||
}
|
||||
}
|
||||
|
||||
func (t *udp) send(toaddr *net.UDPAddr, ptype byte, req packet) error {
|
||||
packet, err := encodePacket(t.priv, ptype, req)
|
||||
func (t *udp) send(toaddr *net.UDPAddr, ptype byte, req packet) ([]byte, error) {
|
||||
packet, hash, err := encodePacket(t.priv, ptype, req)
|
||||
if err != nil {
|
||||
return err
|
||||
return hash, err
|
||||
}
|
||||
_, err = t.conn.WriteToUDP(packet, toaddr)
|
||||
log.Trace(">> "+req.name(), "addr", toaddr, "err", err)
|
||||
return hash, t.write(toaddr, req.name(), packet)
|
||||
}
|
||||
|
||||
func (t *udp) write(toaddr *net.UDPAddr, what string, packet []byte) error {
|
||||
_, err := t.conn.WriteToUDP(packet, toaddr)
|
||||
log.Trace(">> "+what, "addr", toaddr, "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
func encodePacket(priv *ecdsa.PrivateKey, ptype byte, req interface{}) ([]byte, error) {
|
||||
func encodePacket(priv *ecdsa.PrivateKey, ptype byte, req interface{}) (packet, hash []byte, err error) {
|
||||
b := new(bytes.Buffer)
|
||||
b.Write(headSpace)
|
||||
b.WriteByte(ptype)
|
||||
if err := rlp.Encode(b, req); err != nil {
|
||||
log.Error("Can't encode discv4 packet", "err", err)
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
packet := b.Bytes()
|
||||
packet = b.Bytes()
|
||||
sig, err := crypto.Sign(crypto.Keccak256(packet[headSize:]), priv)
|
||||
if err != nil {
|
||||
log.Error("Can't sign discv4 packet", "err", err)
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
copy(packet[macSize:], sig)
|
||||
// add the hash to the front. Note: this doesn't protect the
|
||||
// packet in any way. Our public key will be part of this hash in
|
||||
// the future.
|
||||
copy(packet, crypto.Keccak256(packet[macSize:]))
|
||||
return packet, nil
|
||||
hash = crypto.Keccak256(packet[macSize:])
|
||||
copy(packet, hash)
|
||||
return packet, hash, nil
|
||||
}
|
||||
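encodePacket now also returns the Keccak256 hash that it copies into the first macSize bytes of the packet; ping keeps this hash and only accepts a pong whose ReplyTok matches it. For orientation, a sketch of the corresponding check on the receiving side, written as if it sat alongside the code above in udp.go (decodePacket already performs the equivalent comparison; macSize, headSize, bytes and crypto are names used by this file):

// verifyPacketHash recomputes the prefix hash of a raw discovery packet and
// compares it to the stored one, mirroring what decodePacket does.
func verifyPacketHash(packet []byte) bool {
	if len(packet) < headSize+1 {
		return false // too short to hold hash, signature and packet type byte
	}
	want := crypto.Keccak256(packet[macSize:])
	return bytes.Equal(packet[:macSize], want)
}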
|
||||
// readLoop runs in its own goroutine. it handles incoming UDP packets.
|
||||
func (t *udp) readLoop(unhandled chan ReadPacket) {
|
||||
func (t *udp) readLoop(unhandled chan<- ReadPacket) {
|
||||
defer t.conn.Close()
|
||||
if unhandled != nil {
|
||||
defer close(unhandled)
|
||||
@ -585,7 +613,7 @@ func (req *findnode) handle(t *udp, from *net.UDPAddr, fromID NodeID, mac []byte
|
||||
if expired(req.Expiration) {
|
||||
return errExpired
|
||||
}
|
||||
if t.db.node(fromID) == nil {
|
||||
if !t.db.hasBond(fromID) {
|
||||
// No bond exists, we don't process the packet. This prevents
|
||||
// an attack vector where the discovery protocol could be used
|
||||
// to amplify traffic in a DDOS attack. A malicious actor
|
||||
@ -601,18 +629,22 @@ func (req *findnode) handle(t *udp, from *net.UDPAddr, fromID NodeID, mac []byte
|
||||
t.mutex.Unlock()
|
||||
|
||||
p := neighbors{Expiration: uint64(time.Now().Add(expiration).Unix())}
|
||||
var sent bool
|
||||
// Send neighbors in chunks with at most maxNeighbors per packet
|
||||
// to stay below the 1280 byte limit.
|
||||
for i, n := range closest {
|
||||
if netutil.CheckRelayIP(from.IP, n.IP) != nil {
|
||||
continue
|
||||
}
|
||||
for _, n := range closest {
|
||||
if netutil.CheckRelayIP(from.IP, n.IP) == nil {
|
||||
p.Nodes = append(p.Nodes, nodeToRPC(n))
|
||||
if len(p.Nodes) == maxNeighbors || i == len(closest)-1 {
|
||||
}
|
||||
if len(p.Nodes) == maxNeighbors {
|
||||
t.send(from, neighborsPacket, &p)
|
||||
p.Nodes = p.Nodes[:0]
|
||||
sent = true
|
||||
}
|
||||
}
|
||||
if len(p.Nodes) > 0 || !sent {
|
||||
t.send(from, neighborsPacket, &p)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
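The rewritten loop flushes a reply every time maxNeighbors entries have been collected, then sends one final packet for the remainder, or an empty one if nothing was sent at all, so the asker always receives an answer. The same pattern in isolation, as a generic sketch (the chunk size and send callback are stand-ins; send is expected to consume the chunk before returning, as t.send does):

// sendInChunks delivers items in slices of at most chunkSize elements and
// always calls send at least once, even for an empty input.
func sendInChunks(items []string, chunkSize int, send func(chunk []string)) {
	var chunk []string
	sent := false
	for _, it := range items {
		chunk = append(chunk, it)
		if len(chunk) == chunkSize {
			send(chunk)
			chunk = nil
			sent = true
		}
	}
	if len(chunk) > 0 || !sent {
		send(chunk)
	}
}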
|
||||
|
@ -70,14 +70,15 @@ func newUDPTest(t *testing.T) *udpTest {
|
||||
remotekey: newkey(),
|
||||
remoteaddr: &net.UDPAddr{IP: net.IP{10, 0, 1, 99}, Port: 30303},
|
||||
}
|
||||
realaddr := test.pipe.LocalAddr().(*net.UDPAddr)
|
||||
test.table, test.udp, _ = newUDP(test.localkey, test.pipe, realaddr, nil, "", nil)
|
||||
test.table, test.udp, _ = newUDP(test.pipe, Config{PrivateKey: test.localkey})
|
||||
// Wait for initial refresh so the table doesn't send unexpected findnode.
|
||||
<-test.table.initDone
|
||||
return test
|
||||
}
|
||||
|
||||
// handles a packet as if it had been sent to the transport.
|
||||
func (test *udpTest) packetIn(wantError error, ptype byte, data packet) error {
|
||||
enc, err := encodePacket(test.remotekey, ptype, data)
|
||||
enc, _, err := encodePacket(test.remotekey, ptype, data)
|
||||
if err != nil {
|
||||
return test.errorf("packet (%d) encode error: %v", ptype, err)
|
||||
}
|
||||
@ -90,19 +91,19 @@ func (test *udpTest) packetIn(wantError error, ptype byte, data packet) error {
|
||||
|
||||
// waits for a packet to be sent by the transport.
|
||||
// validate should have type func(*udpTest, X) error, where X is a packet type.
|
||||
func (test *udpTest) waitPacketOut(validate interface{}) error {
|
||||
func (test *udpTest) waitPacketOut(validate interface{}) ([]byte, error) {
|
||||
dgram := test.pipe.waitPacketOut()
|
||||
p, _, _, err := decodePacket(dgram)
|
||||
p, _, hash, err := decodePacket(dgram)
|
||||
if err != nil {
|
||||
return test.errorf("sent packet decode error: %v", err)
|
||||
return hash, test.errorf("sent packet decode error: %v", err)
|
||||
}
|
||||
fn := reflect.ValueOf(validate)
|
||||
exptype := fn.Type().In(0)
|
||||
if reflect.TypeOf(p) != exptype {
|
||||
return test.errorf("sent packet type mismatch, got: %v, want: %v", reflect.TypeOf(p), exptype)
|
||||
return hash, test.errorf("sent packet type mismatch, got: %v, want: %v", reflect.TypeOf(p), exptype)
|
||||
}
|
||||
fn.Call([]reflect.Value{reflect.ValueOf(p)})
|
||||
return nil
|
||||
return hash, nil
|
||||
}
|
||||
|
||||
func (test *udpTest) errorf(format string, args ...interface{}) error {
|
||||
@ -246,12 +247,8 @@ func TestUDP_findnode(t *testing.T) {
|
||||
|
||||
// ensure there's a bond with the test node,
|
||||
// findnode won't be accepted otherwise.
|
||||
test.table.db.updateNode(NewNode(
|
||||
PubkeyID(&test.remotekey.PublicKey),
|
||||
test.remoteaddr.IP,
|
||||
uint16(test.remoteaddr.Port),
|
||||
99,
|
||||
))
|
||||
test.table.db.updateBondTime(PubkeyID(&test.remotekey.PublicKey), time.Now())
|
||||
|
||||
// check that closest neighbors are returned.
|
||||
test.packetIn(nil, findnodePacket, &findnode{Target: testTarget, Expiration: futureExp})
|
||||
expected := test.table.closest(targetHash, bucketSize)
|
||||
@ -351,7 +348,7 @@ func TestUDP_successfulPing(t *testing.T) {
|
||||
})
|
||||
|
||||
// remote is unknown, the table pings back.
|
||||
test.waitPacketOut(func(p *ping) error {
|
||||
hash, _ := test.waitPacketOut(func(p *ping) error {
|
||||
if !reflect.DeepEqual(p.From, test.udp.ourEndpoint) {
|
||||
t.Errorf("got ping.From %v, want %v", p.From, test.udp.ourEndpoint)
|
||||
}
|
||||
@ -365,7 +362,7 @@ func TestUDP_successfulPing(t *testing.T) {
|
||||
}
|
||||
return nil
|
||||
})
|
||||
test.packetIn(nil, pongPacket, &pong{Expiration: futureExp})
|
||||
test.packetIn(nil, pongPacket, &pong{ReplyTok: hash, Expiration: futureExp})
|
||||
|
||||
// the node should be added to the table shortly after getting the
|
||||
// pong packet.
|
||||
|
@ -565,11 +565,8 @@ loop:
|
||||
if lookupChn := searchInfo[res.target.topic].lookupChn; lookupChn != nil {
|
||||
lookupChn <- net.ticketStore.radius[res.target.topic].converged
|
||||
}
|
||||
net.ticketStore.searchLookupDone(res.target, res.nodes, func(n *Node) []byte {
|
||||
net.ping(n, n.addr())
|
||||
return n.pingEcho
|
||||
}, func(n *Node, topic Topic) []byte {
|
||||
if n.state == known {
|
||||
net.ticketStore.searchLookupDone(res.target, res.nodes, func(n *Node, topic Topic) []byte {
|
||||
if n.state != nil && n.state.canQuery {
|
||||
return net.conn.send(n, topicQueryPacket, topicQuery{Topic: topic}) // TODO: set expiration
|
||||
} else {
|
||||
if n.state == unknown {
|
||||
@ -633,7 +630,8 @@ loop:
|
||||
}
|
||||
net.refreshResp <- refreshDone
|
||||
case <-refreshDone:
|
||||
log.Trace("<-net.refreshDone")
|
||||
log.Trace("<-net.refreshDone", "table size", net.tab.count)
|
||||
if net.tab.count != 0 {
|
||||
refreshDone = nil
|
||||
list := searchReqWhenRefreshDone
|
||||
searchReqWhenRefreshDone = nil
|
||||
@ -642,6 +640,10 @@ loop:
|
||||
net.topicSearchReq <- req
|
||||
}
|
||||
}()
|
||||
} else {
|
||||
refreshDone = make(chan struct{})
|
||||
net.refresh(refreshDone)
|
||||
}
|
||||
}
|
||||
}
|
||||
log.Trace("loop stopped")
|
||||
@ -751,7 +753,15 @@ func (net *Network) internNodeFromNeighbours(sender *net.UDPAddr, rn rpcNode) (n
|
||||
return n, err
|
||||
}
|
||||
if !n.IP.Equal(rn.IP) || n.UDP != rn.UDP || n.TCP != rn.TCP {
|
||||
if n.state == known {
|
||||
// reject address change if node is known by us
|
||||
err = fmt.Errorf("metadata mismatch: got %v, want %v", rn, n)
|
||||
} else {
|
||||
// accept otherwise; this will be handled nicer with signed ENRs
|
||||
n.IP = rn.IP
|
||||
n.UDP = rn.UDP
|
||||
n.TCP = rn.TCP
|
||||
}
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
@ -494,13 +494,13 @@ func (s *ticketStore) registerLookupDone(lookup lookupInfo, nodes []*Node, ping
|
||||
}
|
||||
}
|
||||
|
||||
func (s *ticketStore) searchLookupDone(lookup lookupInfo, nodes []*Node, ping func(n *Node) []byte, query func(n *Node, topic Topic) []byte) {
|
||||
func (s *ticketStore) searchLookupDone(lookup lookupInfo, nodes []*Node, query func(n *Node, topic Topic) []byte) {
|
||||
now := mclock.Now()
|
||||
for i, n := range nodes {
|
||||
if i == 0 || (binary.BigEndian.Uint64(n.sha[:8])^binary.BigEndian.Uint64(lookup.target[:8])) < s.radius[lookup.topic].minRadius {
|
||||
if lookup.radiusLookup {
|
||||
if lastReq, ok := s.nodeLastReq[n]; !ok || time.Duration(now-lastReq.time) > radiusTC {
|
||||
s.nodeLastReq[n] = reqInfo{pingHash: ping(n), lookup: lookup, time: now}
|
||||
s.nodeLastReq[n] = reqInfo{pingHash: nil, lookup: lookup, time: now}
|
||||
}
|
||||
} // else {
|
||||
if s.canQueryTopic(n, lookup.topic) {
|
||||
|
@ -49,7 +49,7 @@ var (
|
||||
// Timeouts
|
||||
const (
|
||||
respTimeout = 500 * time.Millisecond
|
||||
sendTimeout = 500 * time.Millisecond
|
||||
queryDelay = 1000 * time.Millisecond
|
||||
expiration = 20 * time.Second
|
||||
|
||||
ntpFailureThreshold = 32 // Continuous timeouts after which to check NTP
|
||||
@ -318,20 +318,20 @@ func (t *udp) sendTopicRegister(remote *Node, topics []Topic, idx int, pong []by
|
||||
|
||||
func (t *udp) sendTopicNodes(remote *Node, queryHash common.Hash, nodes []*Node) {
|
||||
p := topicNodes{Echo: queryHash}
|
||||
if len(nodes) == 0 {
|
||||
t.sendPacket(remote.ID, remote.addr(), byte(topicNodesPacket), p)
|
||||
return
|
||||
}
|
||||
for i, result := range nodes {
|
||||
if netutil.CheckRelayIP(remote.IP, result.IP) != nil {
|
||||
continue
|
||||
}
|
||||
var sent bool
|
||||
for _, result := range nodes {
|
||||
if result.IP.Equal(t.net.tab.self.IP) || netutil.CheckRelayIP(remote.IP, result.IP) == nil {
|
||||
p.Nodes = append(p.Nodes, nodeToRPC(result))
|
||||
if len(p.Nodes) == maxTopicNodes || i == len(nodes)-1 {
|
||||
}
|
||||
if len(p.Nodes) == maxTopicNodes {
|
||||
t.sendPacket(remote.ID, remote.addr(), byte(topicNodesPacket), p)
|
||||
p.Nodes = p.Nodes[:0]
|
||||
sent = true
|
||||
}
|
||||
}
|
||||
if !sent || len(p.Nodes) > 0 {
|
||||
t.sendPacket(remote.ID, remote.addr(), byte(topicNodesPacket), p)
|
||||
}
|
||||
}
|
||||
|
||||
func (t *udp) sendPacket(toid NodeID, toaddr *net.UDPAddr, ptype byte, req interface{}) (hash []byte, err error) {
|
||||
|
@ -18,8 +18,11 @@
|
||||
package netutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
@ -189,3 +192,131 @@ func CheckRelayIP(sender, addr net.IP) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SameNet reports whether two IP addresses have an equal prefix of the given bit length.
func SameNet(bits uint, ip, other net.IP) bool {
	ip4, other4 := ip.To4(), other.To4()
	switch {
	case (ip4 == nil) != (other4 == nil):
		return false
	case ip4 != nil:
		return sameNet(bits, ip4, other4)
	default:
		return sameNet(bits, ip.To16(), other.To16())
	}
}

func sameNet(bits uint, ip, other net.IP) bool {
	nb := int(bits / 8)
	mask := ^byte(0xFF >> (bits % 8))
	if mask != 0 && nb < len(ip) && ip[nb]&mask != other[nb]&mask {
		return false
	}
	return nb <= len(ip) && bytes.Equal(ip[:nb], other[:nb])
}
|
||||
|
||||
// DistinctNetSet tracks IPs, ensuring that at most N of them
|
||||
// fall into the same network range.
|
||||
type DistinctNetSet struct {
|
||||
Subnet uint // number of common prefix bits
|
||||
Limit uint // maximum number of IPs in each subnet
|
||||
|
||||
members map[string]uint
|
||||
buf net.IP
|
||||
}
|
||||
|
||||
// Add adds an IP address to the set. It returns false (and doesn't add the IP) if the
|
||||
// number of existing IPs in the defined range exceeds the limit.
|
||||
func (s *DistinctNetSet) Add(ip net.IP) bool {
|
||||
key := s.key(ip)
|
||||
n := s.members[string(key)]
|
||||
if n < s.Limit {
|
||||
s.members[string(key)] = n + 1
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Remove removes an IP from the set.
|
||||
func (s *DistinctNetSet) Remove(ip net.IP) {
|
||||
key := s.key(ip)
|
||||
if n, ok := s.members[string(key)]; ok {
|
||||
if n == 1 {
|
||||
delete(s.members, string(key))
|
||||
} else {
|
||||
s.members[string(key)] = n - 1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Contains reports whether the given IP is contained in the set.
|
||||
func (s DistinctNetSet) Contains(ip net.IP) bool {
|
||||
key := s.key(ip)
|
||||
_, ok := s.members[string(key)]
|
||||
return ok
|
||||
}
|
||||
|
||||
// Len returns the number of tracked IPs.
|
||||
func (s DistinctNetSet) Len() int {
|
||||
n := uint(0)
|
||||
for _, i := range s.members {
|
||||
n += i
|
||||
}
|
||||
return int(n)
|
||||
}
|
||||
|
||||
// key encodes the map key for an address into a temporary buffer.
|
||||
//
|
||||
// The first byte of key is '4' or '6' to distinguish IPv4/IPv6 address types.
|
||||
// The remainder of the key is the IP, truncated to the number of bits.
|
||||
func (s *DistinctNetSet) key(ip net.IP) net.IP {
|
||||
// Lazily initialize storage.
|
||||
if s.members == nil {
|
||||
s.members = make(map[string]uint)
|
||||
s.buf = make(net.IP, 17)
|
||||
}
|
||||
// Canonicalize ip and bits.
|
||||
typ := byte('6')
|
||||
if ip4 := ip.To4(); ip4 != nil {
|
||||
typ, ip = '4', ip4
|
||||
}
|
||||
bits := s.Subnet
|
||||
if bits > uint(len(ip)*8) {
|
||||
bits = uint(len(ip) * 8)
|
||||
}
|
||||
// Encode the prefix into s.buf.
|
||||
nb := int(bits / 8)
|
||||
mask := ^byte(0xFF >> (bits % 8))
|
||||
s.buf[0] = typ
|
||||
buf := append(s.buf[:1], ip[:nb]...)
|
||||
if nb < len(ip) && mask != 0 {
|
||||
buf = append(buf, ip[nb]&mask)
|
||||
}
|
||||
return buf
|
||||
}
|
||||
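Worked example of the key encoding: with Subnet = 15 (as in the test below), nb = 1 and mask = 0xFE, so an IPv4 key consists of the type byte '4', the first octet, and the second octet with its lowest bit cleared. 127.32.0.1 and 127.33.0.1 therefore collapse to the same key ('4', 127, 32), which is why 127.33.0.1 is rejected once two 127.32.x.x addresses are present, while 127.34.0.1 (key '4', 127, 34) still fits.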
|
||||
// String implements fmt.Stringer
|
||||
func (s DistinctNetSet) String() string {
|
||||
var buf bytes.Buffer
|
||||
buf.WriteString("{")
|
||||
keys := make([]string, 0, len(s.members))
|
||||
for k := range s.members {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
for i, k := range keys {
|
||||
var ip net.IP
|
||||
if k[0] == '4' {
|
||||
ip = make(net.IP, 4)
|
||||
} else {
|
||||
ip = make(net.IP, 16)
|
||||
}
|
||||
copy(ip, k[1:])
|
||||
fmt.Fprintf(&buf, "%v×%d", ip, s.members[k])
|
||||
if i != len(keys)-1 {
|
||||
buf.WriteString(" ")
|
||||
}
|
||||
}
|
||||
buf.WriteString("}")
|
||||
return buf.String()
|
||||
}
|
||||
|
@ -17,9 +17,11 @@
|
||||
package netutil
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"reflect"
|
||||
"testing"
|
||||
"testing/quick"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
)
|
||||
@ -171,3 +173,90 @@ func BenchmarkCheckRelayIP(b *testing.B) {
|
||||
CheckRelayIP(sender, addr)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSameNet(t *testing.T) {
|
||||
tests := []struct {
|
||||
ip, other string
|
||||
bits uint
|
||||
want bool
|
||||
}{
|
||||
{"0.0.0.0", "0.0.0.0", 32, true},
|
||||
{"0.0.0.0", "0.0.0.1", 0, true},
|
||||
{"0.0.0.0", "0.0.0.1", 31, true},
|
||||
{"0.0.0.0", "0.0.0.1", 32, false},
|
||||
{"0.33.0.1", "0.34.0.2", 8, true},
|
||||
{"0.33.0.1", "0.34.0.2", 13, true},
|
||||
{"0.33.0.1", "0.34.0.2", 15, false},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
if ok := SameNet(test.bits, parseIP(test.ip), parseIP(test.other)); ok != test.want {
|
||||
t.Errorf("SameNet(%d, %s, %s) == %t, want %t", test.bits, test.ip, test.other, ok, test.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleSameNet() {
|
||||
// This returns true because the IPs are in the same /24 network:
|
||||
fmt.Println(SameNet(24, net.IP{127, 0, 0, 1}, net.IP{127, 0, 0, 3}))
|
||||
// This call returns false:
|
||||
fmt.Println(SameNet(24, net.IP{127, 3, 0, 1}, net.IP{127, 5, 0, 3}))
|
||||
// Output:
|
||||
// true
|
||||
// false
|
||||
}
|
||||
|
||||
func TestDistinctNetSet(t *testing.T) {
|
||||
ops := []struct {
|
||||
add, remove string
|
||||
fails bool
|
||||
}{
|
||||
{add: "127.0.0.1"},
|
||||
{add: "127.0.0.2"},
|
||||
{add: "127.0.0.3", fails: true},
|
||||
{add: "127.32.0.1"},
|
||||
{add: "127.32.0.2"},
|
||||
{add: "127.32.0.3", fails: true},
|
||||
{add: "127.33.0.1", fails: true},
|
||||
{add: "127.34.0.1"},
|
||||
{add: "127.34.0.2"},
|
||||
{add: "127.34.0.3", fails: true},
|
||||
// Make room for an address, then add again.
|
||||
{remove: "127.0.0.1"},
|
||||
{add: "127.0.0.3"},
|
||||
{add: "127.0.0.3", fails: true},
|
||||
}
|
||||
|
||||
set := DistinctNetSet{Subnet: 15, Limit: 2}
|
||||
for _, op := range ops {
|
||||
var desc string
|
||||
if op.add != "" {
|
||||
desc = fmt.Sprintf("Add(%s)", op.add)
|
||||
if ok := set.Add(parseIP(op.add)); ok != !op.fails {
|
||||
t.Errorf("%s == %t, want %t", desc, ok, !op.fails)
|
||||
}
|
||||
} else {
|
||||
desc = fmt.Sprintf("Remove(%s)", op.remove)
|
||||
set.Remove(parseIP(op.remove))
|
||||
}
|
||||
t.Logf("%s: %v", desc, set)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDistinctNetSetAddRemove(t *testing.T) {
|
||||
cfg := &quick.Config{}
|
||||
fn := func(ips []net.IP) bool {
|
||||
s := DistinctNetSet{Limit: 3, Subnet: 2}
|
||||
for _, ip := range ips {
|
||||
s.Add(ip)
|
||||
}
|
||||
for _, ip := range ips {
|
||||
s.Remove(ip)
|
||||
}
|
||||
return s.Len() == 0
|
||||
}
|
||||
|
||||
if err := quick.Check(fn, cfg); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
@ -419,6 +419,9 @@ type PeerInfo struct {
|
||||
Network struct {
|
||||
LocalAddress string `json:"localAddress"` // Local endpoint of the TCP data connection
|
||||
RemoteAddress string `json:"remoteAddress"` // Remote endpoint of the TCP data connection
|
||||
Inbound bool `json:"inbound"`
|
||||
Trusted bool `json:"trusted"`
|
||||
Static bool `json:"static"`
|
||||
} `json:"network"`
|
||||
Protocols map[string]interface{} `json:"protocols"` // Sub-protocol specific metadata fields
|
||||
}
|
||||
@ -439,6 +442,9 @@ func (p *Peer) Info() *PeerInfo {
|
||||
}
|
||||
info.Network.LocalAddress = p.LocalAddr().String()
|
||||
info.Network.RemoteAddress = p.RemoteAddr().String()
|
||||
info.Network.Inbound = p.rw.is(inboundConn)
|
||||
info.Network.Trusted = p.rw.is(trustedConn)
|
||||
info.Network.Static = p.rw.is(staticDialedConn)
|
||||
|
||||
// Gather all the running protocol infos
|
||||
for _, proto := range p.running {
|
||||
|
311
p2p/protocols/protocol.go
Normal file
@ -0,0 +1,311 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
/*
Package protocols is an extension to p2p. It offers a user-friendly, simple way to define
devp2p subprotocols by abstracting away code that is otherwise shared by all protocols.

* automates assignment of code indexes to messages
* automates RLP decoding/encoding based on reflection
* provides the forever loop to read incoming messages
* standardises error handling related to communication
* standardises handshake negotiation
* TODO: automatic generation of wire protocol specification for peers

*/
|
||||
package protocols
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sync"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
)
|
||||
|
||||
// error codes used by this protocol scheme
|
||||
const (
|
||||
ErrMsgTooLong = iota
|
||||
ErrDecode
|
||||
ErrWrite
|
||||
ErrInvalidMsgCode
|
||||
ErrInvalidMsgType
|
||||
ErrHandshake
|
||||
ErrNoHandler
|
||||
ErrHandler
|
||||
)
|
||||
|
||||
// error description strings associated with the codes
|
||||
var errorToString = map[int]string{
|
||||
ErrMsgTooLong: "Message too long",
|
||||
ErrDecode: "Invalid message (RLP error)",
|
||||
ErrWrite: "Error sending message",
|
||||
ErrInvalidMsgCode: "Invalid message code",
|
||||
ErrInvalidMsgType: "Invalid message type",
|
||||
ErrHandshake: "Handshake error",
|
||||
ErrNoHandler: "No handler registered error",
|
||||
ErrHandler: "Message handler error",
|
||||
}
|
||||
|
||||
/*
|
||||
Error implements the standard go error interface.
|
||||
Use:
|
||||
|
||||
errorf(code, format, params ...interface{})
|
||||
|
||||
Prints as:
|
||||
|
||||
<description>: <details>
|
||||
|
||||
where description is given by code in errorToString
|
||||
and details is fmt.Sprintf(format, params...)
|
||||
|
||||
exported field Code can be checked
|
||||
*/
|
||||
type Error struct {
|
||||
Code int
|
||||
message string
|
||||
format string
|
||||
params []interface{}
|
||||
}
|
||||
|
||||
func (e Error) Error() (message string) {
|
||||
if len(e.message) == 0 {
|
||||
name, ok := errorToString[e.Code]
|
||||
if !ok {
|
||||
panic("invalid message code")
|
||||
}
|
||||
e.message = name
|
||||
if e.format != "" {
|
||||
e.message += ": " + fmt.Sprintf(e.format, e.params...)
|
||||
}
|
||||
}
|
||||
return e.message
|
||||
}
|
||||
|
||||
func errorf(code int, format string, params ...interface{}) *Error {
|
||||
return &Error{
|
||||
Code: code,
|
||||
format: format,
|
||||
params: params,
|
||||
}
|
||||
}
|
||||
|
||||
// Spec is a protocol specification including its name and version as well as
|
||||
// the types of messages which are exchanged
|
||||
type Spec struct {
|
||||
// Name is the name of the protocol, often a three-letter word
|
||||
Name string
|
||||
|
||||
// Version is the version number of the protocol
|
||||
Version uint
|
||||
|
||||
// MaxMsgSize is the maximum accepted length of the message payload
|
||||
MaxMsgSize uint32
|
||||
|
||||
// Messages is a list of message data types which this protocol uses, with
|
||||
// each message type being sent with its array index as the code (so
|
||||
// [&foo{}, &bar{}, &baz{}] would send foo, bar and baz with codes
|
||||
// 0, 1 and 2 respectively)
|
||||
// each message must have a single unique data type
|
||||
Messages []interface{}
|
||||
|
||||
initOnce sync.Once
|
||||
codes map[reflect.Type]uint64
|
||||
types map[uint64]reflect.Type
|
||||
}
|
||||
|
||||
func (s *Spec) init() {
|
||||
s.initOnce.Do(func() {
|
||||
s.codes = make(map[reflect.Type]uint64, len(s.Messages))
|
||||
s.types = make(map[uint64]reflect.Type, len(s.Messages))
|
||||
for i, msg := range s.Messages {
|
||||
code := uint64(i)
|
||||
typ := reflect.TypeOf(msg)
|
||||
if typ.Kind() == reflect.Ptr {
|
||||
typ = typ.Elem()
|
||||
}
|
||||
s.codes[typ] = code
|
||||
s.types[code] = typ
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Length returns the number of message types in the protocol
|
||||
func (s *Spec) Length() uint64 {
|
||||
return uint64(len(s.Messages))
|
||||
}
|
||||
|
||||
// GetCode returns the message code of a type, and boolean second argument is
|
||||
// false if the message type is not found
|
||||
func (s *Spec) GetCode(msg interface{}) (uint64, bool) {
|
||||
s.init()
|
||||
typ := reflect.TypeOf(msg)
|
||||
if typ.Kind() == reflect.Ptr {
|
||||
typ = typ.Elem()
|
||||
}
|
||||
code, ok := s.codes[typ]
|
||||
return code, ok
|
||||
}
|
||||
|
||||
// NewMsg constructs a new message type given the code
|
||||
func (s *Spec) NewMsg(code uint64) (interface{}, bool) {
|
||||
s.init()
|
||||
typ, ok := s.types[code]
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
return reflect.New(typ).Interface(), true
|
||||
}
|
||||
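A minimal sketch of how a Spec is declared and queried (the message types here are invented for illustration; the test file further down defines a very similar spec):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/p2p/protocols"
)

type ping struct{ Seq uint }
type pong struct{ Seq uint }

func main() {
	spec := &protocols.Spec{
		Name:       "demo",
		Version:    1,
		MaxMsgSize: 1024,
		Messages:   []interface{}{ping{}, pong{}}, // codes 0 and 1, assigned by position
	}
	code, ok := spec.GetCode(&pong{})
	fmt.Println(code, ok) // 1 true
	msg, ok := spec.NewMsg(0)
	fmt.Printf("%T %v\n", msg, ok) // *main.ping true
}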
|
||||
// Peer represents a remote peer or protocol instance that is running on a peer connection with
|
||||
// a remote peer
|
||||
type Peer struct {
|
||||
*p2p.Peer // the p2p.Peer object representing the remote
|
||||
rw p2p.MsgReadWriter // p2p.MsgReadWriter to send messages to and read messages from
|
||||
spec *Spec
|
||||
}
|
||||
|
||||
// NewPeer constructs a new peer
|
||||
// this constructor is called by the p2p.Protocol#Run function
|
||||
// the first two arguments are the arguments passed to p2p.Protocol.Run function
|
||||
// the third argument is the Spec describing the protocol
|
||||
func NewPeer(p *p2p.Peer, rw p2p.MsgReadWriter, spec *Spec) *Peer {
|
||||
return &Peer{
|
||||
Peer: p,
|
||||
rw: rw,
|
||||
spec: spec,
|
||||
}
|
||||
}
|
||||
|
||||
// Run starts the forever loop that handles incoming messages
|
||||
// called within the p2p.Protocol#Run function
|
||||
// the handler argument is a function which is called for each message received
|
||||
// from the remote peer; a returned error causes the loop to exit
|
||||
// resulting in disconnection
|
||||
func (p *Peer) Run(handler func(msg interface{}) error) error {
|
||||
for {
|
||||
if err := p.handleIncoming(handler); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Drop disconnects a peer.
|
||||
// TODO: may need to implement protocol drop only? don't want to kick off the peer
|
||||
// if they are useful for other protocols
|
||||
func (p *Peer) Drop(err error) {
|
||||
p.Disconnect(p2p.DiscSubprotocolError)
|
||||
}
|
||||
|
||||
// Send takes a message, encodes it in RLP, finds the right message code and sends the
|
||||
// message off to the peer
|
||||
// this low level call will be wrapped by libraries providing routed or broadcast sends
|
||||
// but often just used to forward and push messages to directly connected peers
|
||||
func (p *Peer) Send(msg interface{}) error {
|
||||
code, found := p.spec.GetCode(msg)
|
||||
if !found {
|
||||
return errorf(ErrInvalidMsgType, "%v", code)
|
||||
}
|
||||
return p2p.Send(p.rw, code, msg)
|
||||
}
|
||||
|
||||
// handleIncoming(code)
|
||||
// is called each cycle of the main forever loop that dispatches incoming messages
|
||||
// if this returns an error the loop returns and the peer is disconnected with the error
|
||||
// this generic handler
|
||||
// * checks message size,
|
||||
// * checks for out-of-range message codes,
|
||||
// * handles decoding with reflection,
|
||||
// * call handlers as callbacks
|
||||
func (p *Peer) handleIncoming(handle func(msg interface{}) error) error {
|
||||
msg, err := p.rw.ReadMsg()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// make sure that the payload has been fully consumed
|
||||
defer msg.Discard()
|
||||
|
||||
if msg.Size > p.spec.MaxMsgSize {
|
||||
return errorf(ErrMsgTooLong, "%v > %v", msg.Size, p.spec.MaxMsgSize)
|
||||
}
|
||||
|
||||
val, ok := p.spec.NewMsg(msg.Code)
|
||||
if !ok {
|
||||
return errorf(ErrInvalidMsgCode, "%v", msg.Code)
|
||||
}
|
||||
if err := msg.Decode(val); err != nil {
|
||||
return errorf(ErrDecode, "<= %v: %v", msg, err)
|
||||
}
|
||||
|
||||
// call the registered handler callbacks
|
||||
// a registered callback takes the decoded message as an argument, as an interface
|
||||
// which the handler is supposed to cast to the appropriate type
|
||||
// it is entirely safe not to check the cast in the handler since the handler is
|
||||
// chosen based on the proper type in the first place
|
||||
if err := handle(val); err != nil {
|
||||
return errorf(ErrHandler, "(msg code %v): %v", msg.Code, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Handshake negotiates a handshake on the peer connection
|
||||
// * arguments
|
||||
// * context
|
||||
// * the local handshake to be sent to the remote peer
|
||||
// * function to be called on the remote handshake (can be nil)
|
||||
// * expects a remote handshake back of the same type
|
||||
// * the dialing peer needs to send the handshake first and then waits for remote
|
||||
// * the listening peer waits for the remote handshake and then sends it
|
||||
// returns the remote handshake and an error
|
||||
func (p *Peer) Handshake(ctx context.Context, hs interface{}, verify func(interface{}) error) (rhs interface{}, err error) {
|
||||
if _, ok := p.spec.GetCode(hs); !ok {
|
||||
return nil, errorf(ErrHandshake, "unknown handshake message type: %T", hs)
|
||||
}
|
||||
errc := make(chan error, 2)
|
||||
handle := func(msg interface{}) error {
|
||||
rhs = msg
|
||||
if verify != nil {
|
||||
return verify(rhs)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
send := func() { errc <- p.Send(hs) }
|
||||
receive := func() { errc <- p.handleIncoming(handle) }
|
||||
|
||||
go func() {
|
||||
if p.Inbound() {
|
||||
receive()
|
||||
send()
|
||||
} else {
|
||||
send()
|
||||
receive()
|
||||
}
|
||||
}()
|
||||
|
||||
for i := 0; i < 2; i++ {
|
||||
select {
|
||||
case err = <-errc:
|
||||
case <-ctx.Done():
|
||||
err = ctx.Err()
|
||||
}
|
||||
if err != nil {
|
||||
return nil, errorf(ErrHandshake, err.Error())
|
||||
}
|
||||
}
|
||||
return rhs, nil
|
||||
}
|
389
p2p/protocols/protocol_test.go
Normal file
@ -0,0 +1,389 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package protocols
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
|
||||
p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
|
||||
)
|
||||
|
||||
// handshake message type
|
||||
type hs0 struct {
|
||||
C uint
|
||||
}
|
||||
|
||||
// message to kill/drop the peer with nodeID
|
||||
type kill struct {
|
||||
C discover.NodeID
|
||||
}
|
||||
|
||||
// message to drop connection
|
||||
type drop struct {
|
||||
}
|
||||
|
||||
// protoHandshake represents module-independent aspects of the protocol and is
// the first message peers send and receive as part of the initial exchange
|
||||
type protoHandshake struct {
|
||||
Version uint // local and remote peer should have identical version
|
||||
NetworkID string // local and remote peer should have identical network id
|
||||
}
|
||||
|
||||
// checkProtoHandshake verifies local and remote protoHandshakes match
|
||||
func checkProtoHandshake(testVersion uint, testNetworkID string) func(interface{}) error {
|
||||
return func(rhs interface{}) error {
|
||||
remote := rhs.(*protoHandshake)
|
||||
if remote.NetworkID != testNetworkID {
|
||||
return fmt.Errorf("%s (!= %s)", remote.NetworkID, testNetworkID)
|
||||
}
|
||||
|
||||
if remote.Version != testVersion {
|
||||
return fmt.Errorf("%d (!= %d)", remote.Version, testVersion)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// newProtocol sets up a protocol
|
||||
// the run function here demonstrates a typical protocol using peerPool, handshake
|
||||
// and messages registered to handlers
|
||||
func newProtocol(pp *p2ptest.TestPeerPool) func(*p2p.Peer, p2p.MsgReadWriter) error {
|
||||
spec := &Spec{
|
||||
Name: "test",
|
||||
Version: 42,
|
||||
MaxMsgSize: 10 * 1024,
|
||||
Messages: []interface{}{
|
||||
protoHandshake{},
|
||||
hs0{},
|
||||
kill{},
|
||||
drop{},
|
||||
},
|
||||
}
|
||||
return func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
|
||||
peer := NewPeer(p, rw, spec)
|
||||
|
||||
// initiate one-off protohandshake and check validity
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||
defer cancel()
|
||||
phs := &protoHandshake{42, "420"}
|
||||
hsCheck := checkProtoHandshake(phs.Version, phs.NetworkID)
|
||||
_, err := peer.Handshake(ctx, phs, hsCheck)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
lhs := &hs0{42}
|
||||
// module handshake demonstrating a simple repeatable exchange of same-type message
|
||||
hs, err := peer.Handshake(ctx, lhs, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if rmhs := hs.(*hs0); rmhs.C > lhs.C {
|
||||
return fmt.Errorf("handshake mismatch remote %v > local %v", rmhs.C, lhs.C)
|
||||
}
|
||||
|
||||
handle := func(msg interface{}) error {
|
||||
switch msg := msg.(type) {
|
||||
|
||||
case *protoHandshake:
|
||||
return errors.New("duplicate handshake")
|
||||
|
||||
case *hs0:
|
||||
rhs := msg
|
||||
if rhs.C > lhs.C {
|
||||
return fmt.Errorf("handshake mismatch remote %v > local %v", rhs.C, lhs.C)
|
||||
}
|
||||
lhs.C += rhs.C
|
||||
return peer.Send(lhs)
|
||||
|
||||
case *kill:
|
||||
// demonstrates use of peerPool, killing another peer connection as a response to a message
|
||||
id := msg.C
|
||||
pp.Get(id).Drop(errors.New("killed"))
|
||||
return nil
|
||||
|
||||
case *drop:
|
||||
// for testing we can trigger self induced disconnect upon receiving drop message
|
||||
return errors.New("dropped")
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unknown message type: %T", msg)
|
||||
}
|
||||
}
|
||||
|
||||
pp.Add(peer)
|
||||
defer pp.Remove(peer)
|
||||
return peer.Run(handle)
|
||||
}
|
||||
}
|
||||
|
||||
func protocolTester(t *testing.T, pp *p2ptest.TestPeerPool) *p2ptest.ProtocolTester {
|
||||
conf := adapters.RandomNodeConfig()
|
||||
return p2ptest.NewProtocolTester(t, conf.ID, 2, newProtocol(pp))
|
||||
}
|
||||
|
||||
func protoHandshakeExchange(id discover.NodeID, proto *protoHandshake) []p2ptest.Exchange {
|
||||
|
||||
return []p2ptest.Exchange{
|
||||
{
|
||||
Expects: []p2ptest.Expect{
|
||||
{
|
||||
Code: 0,
|
||||
Msg: &protoHandshake{42, "420"},
|
||||
Peer: id,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Triggers: []p2ptest.Trigger{
|
||||
{
|
||||
Code: 0,
|
||||
Msg: proto,
|
||||
Peer: id,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func runProtoHandshake(t *testing.T, proto *protoHandshake, errs ...error) {
|
||||
pp := p2ptest.NewTestPeerPool()
|
||||
s := protocolTester(t, pp)
|
||||
// TODO: make this more than one handshake
|
||||
id := s.IDs[0]
|
||||
if err := s.TestExchanges(protoHandshakeExchange(id, proto)...); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var disconnects []*p2ptest.Disconnect
|
||||
for i, err := range errs {
|
||||
disconnects = append(disconnects, &p2ptest.Disconnect{Peer: s.IDs[i], Error: err})
|
||||
}
|
||||
if err := s.TestDisconnected(disconnects...); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestProtoHandshakeVersionMismatch(t *testing.T) {
|
||||
runProtoHandshake(t, &protoHandshake{41, "420"}, errorf(ErrHandshake, errorf(ErrHandler, "(msg code 0): 41 (!= 42)").Error()))
|
||||
}
|
||||
|
||||
func TestProtoHandshakeNetworkIDMismatch(t *testing.T) {
|
||||
runProtoHandshake(t, &protoHandshake{42, "421"}, errorf(ErrHandshake, errorf(ErrHandler, "(msg code 0): 421 (!= 420)").Error()))
|
||||
}
|
||||
|
||||
func TestProtoHandshakeSuccess(t *testing.T) {
|
||||
runProtoHandshake(t, &protoHandshake{42, "420"})
|
||||
}
|
||||
|
||||
func moduleHandshakeExchange(id discover.NodeID, resp uint) []p2ptest.Exchange {
|
||||
|
||||
return []p2ptest.Exchange{
|
||||
{
|
||||
Expects: []p2ptest.Expect{
|
||||
{
|
||||
Code: 1,
|
||||
Msg: &hs0{42},
|
||||
Peer: id,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Triggers: []p2ptest.Trigger{
|
||||
{
|
||||
Code: 1,
|
||||
Msg: &hs0{resp},
|
||||
Peer: id,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func runModuleHandshake(t *testing.T, resp uint, errs ...error) {
|
||||
pp := p2ptest.NewTestPeerPool()
|
||||
s := protocolTester(t, pp)
|
||||
id := s.IDs[0]
|
||||
if err := s.TestExchanges(protoHandshakeExchange(id, &protoHandshake{42, "420"})...); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.TestExchanges(moduleHandshakeExchange(id, resp)...); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var disconnects []*p2ptest.Disconnect
|
||||
for i, err := range errs {
|
||||
disconnects = append(disconnects, &p2ptest.Disconnect{Peer: s.IDs[i], Error: err})
|
||||
}
|
||||
if err := s.TestDisconnected(disconnects...); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestModuleHandshakeError(t *testing.T) {
|
||||
runModuleHandshake(t, 43, fmt.Errorf("handshake mismatch remote 43 > local 42"))
|
||||
}
|
||||
|
||||
func TestModuleHandshakeSuccess(t *testing.T) {
|
||||
runModuleHandshake(t, 42)
|
||||
}
|
||||
|
||||
// testing complex interactions over multiple peers, relaying, dropping
|
||||
func testMultiPeerSetup(a, b discover.NodeID) []p2ptest.Exchange {
|
||||
|
||||
return []p2ptest.Exchange{
|
||||
{
|
||||
Label: "primary handshake",
|
||||
Expects: []p2ptest.Expect{
|
||||
{
|
||||
Code: 0,
|
||||
Msg: &protoHandshake{42, "420"},
|
||||
Peer: a,
|
||||
},
|
||||
{
|
||||
Code: 0,
|
||||
Msg: &protoHandshake{42, "420"},
|
||||
Peer: b,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Label: "module handshake",
|
||||
Triggers: []p2ptest.Trigger{
|
||||
{
|
||||
Code: 0,
|
||||
Msg: &protoHandshake{42, "420"},
|
||||
Peer: a,
|
||||
},
|
||||
{
|
||||
Code: 0,
|
||||
Msg: &protoHandshake{42, "420"},
|
||||
Peer: b,
|
||||
},
|
||||
},
|
||||
Expects: []p2ptest.Expect{
|
||||
{
|
||||
Code: 1,
|
||||
Msg: &hs0{42},
|
||||
Peer: a,
|
||||
},
|
||||
{
|
||||
Code: 1,
|
||||
Msg: &hs0{42},
|
||||
Peer: b,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
{Label: "alternative module handshake", Triggers: []p2ptest.Trigger{{Code: 1, Msg: &hs0{41}, Peer: a},
|
||||
{Code: 1, Msg: &hs0{41}, Peer: b}}},
|
||||
{Label: "repeated module handshake", Triggers: []p2ptest.Trigger{{Code: 1, Msg: &hs0{1}, Peer: a}}},
|
||||
{Label: "receiving repeated module handshake", Expects: []p2ptest.Expect{{Code: 1, Msg: &hs0{43}, Peer: a}}}}
|
||||
}
|
||||
|
||||
func runMultiplePeers(t *testing.T, peer int, errs ...error) {
|
||||
pp := p2ptest.NewTestPeerPool()
|
||||
s := protocolTester(t, pp)
|
||||
|
||||
if err := s.TestExchanges(testMultiPeerSetup(s.IDs[0], s.IDs[1])...); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// after some exchanges of messages, we can test state changes
|
||||
// here this is simply demonstrated by the peerPool
|
||||
// after the handshake negotiations peers must be added to the pool
|
||||
// time.Sleep(1)
|
||||
tick := time.NewTicker(10 * time.Millisecond)
|
||||
timeout := time.NewTimer(1 * time.Second)
|
||||
WAIT:
|
||||
for {
|
||||
select {
|
||||
case <-tick.C:
|
||||
if pp.Has(s.IDs[0]) {
|
||||
break WAIT
|
||||
}
|
||||
case <-timeout.C:
|
||||
t.Fatal("timeout")
|
||||
}
|
||||
}
|
||||
if !pp.Has(s.IDs[1]) {
|
||||
t.Fatalf("missing peer test-1: %v (%v)", pp, s.IDs)
|
||||
}
|
||||
|
||||
// peer 0 sends kill request for peer with index <peer>
|
||||
err := s.TestExchanges(p2ptest.Exchange{
|
||||
Triggers: []p2ptest.Trigger{
|
||||
{
|
||||
Code: 2,
|
||||
Msg: &kill{s.IDs[peer]},
|
||||
Peer: s.IDs[0],
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// the peer not killed sends a drop request
|
||||
err = s.TestExchanges(p2ptest.Exchange{
|
||||
Triggers: []p2ptest.Trigger{
|
||||
{
|
||||
Code: 3,
|
||||
Msg: &drop{},
|
||||
Peer: s.IDs[(peer+1)%2],
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// check the actual disconnect errors on the individual peers
|
||||
var disconnects []*p2ptest.Disconnect
|
||||
for i, err := range errs {
|
||||
disconnects = append(disconnects, &p2ptest.Disconnect{Peer: s.IDs[i], Error: err})
|
||||
}
|
||||
if err := s.TestDisconnected(disconnects...); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// test if disconnected peers have been removed from peerPool
|
||||
if pp.Has(s.IDs[peer]) {
|
||||
t.Fatalf("peer test-%v not dropped: %v (%v)", peer, pp, s.IDs)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestMultiplePeersDropSelf(t *testing.T) {
|
||||
runMultiplePeers(t, 0,
|
||||
fmt.Errorf("subprotocol error"),
|
||||
fmt.Errorf("Message handler error: (msg code 3): dropped"),
|
||||
)
|
||||
}
|
||||
|
||||
func TestMultiplePeersDropOther(t *testing.T) {
|
||||
runMultiplePeers(t, 1,
|
||||
fmt.Errorf("Message handler error: (msg code 3): dropped"),
|
||||
fmt.Errorf("subprotocol error"),
|
||||
)
|
||||
}
|
@ -108,10 +108,16 @@ func (t *rlpx) close(err error) {
|
||||
// Tell the remote end why we're disconnecting if possible.
|
||||
if t.rw != nil {
|
||||
if r, ok := err.(DiscReason); ok && r != DiscNetworkError {
|
||||
t.fd.SetWriteDeadline(time.Now().Add(discWriteTimeout))
|
||||
// rlpx tries to send DiscReason to disconnected peer
|
||||
// if the connection is net.Pipe (in-memory simulation)
|
||||
// it hangs forever, since net.Pipe does not implement
|
||||
// a write deadline. Because of this only try to send
|
||||
// the disconnect reason message if there is no error.
|
||||
if err := t.fd.SetWriteDeadline(time.Now().Add(discWriteTimeout)); err == nil {
|
||||
SendItems(t.rw, discMsg, r)
|
||||
}
|
||||
}
|
||||
}
|
||||
t.fd.Close()
|
||||
}
|
||||
|
||||
|
@ -156,14 +156,18 @@ func TestProtocolHandshake(t *testing.T) {
|
||||
node1 = &discover.Node{ID: discover.PubkeyID(&prv1.PublicKey), IP: net.IP{5, 6, 7, 8}, TCP: 44}
|
||||
hs1 = &protoHandshake{Version: 3, ID: node1.ID, Caps: []Cap{{"c", 1}, {"d", 3}}}
|
||||
|
||||
fd0, fd1 = net.Pipe()
|
||||
wg sync.WaitGroup
|
||||
)
|
||||
|
||||
fd0, fd1, err := tcpPipe()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
wg.Add(2)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
defer fd1.Close()
|
||||
defer fd0.Close()
|
||||
rlpx := newRLPX(fd0)
|
||||
remid, err := rlpx.doEncHandshake(prv0, node1)
|
||||
if err != nil {
|
||||
@ -597,3 +601,31 @@ func TestHandshakeForwardCompatibility(t *testing.T) {
|
||||
t.Errorf("ingress-mac('foo') mismatch:\ngot %x\nwant %x", fooIngressHash, wantFooIngressHash)
|
||||
}
|
||||
}
|
||||
|
||||
// tcpPipe creates an in-process, full-duplex pipe based on a localhost TCP socket
|
||||
func tcpPipe() (net.Conn, net.Conn, error) {
|
||||
l, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
defer l.Close()
|
||||
|
||||
var aconn net.Conn
|
||||
aerr := make(chan error, 1)
|
||||
go func() {
|
||||
var err error
|
||||
aconn, err = l.Accept()
|
||||
aerr <- err
|
||||
}()
|
||||
|
||||
dconn, err := net.Dial("tcp", l.Addr().String())
|
||||
if err != nil {
|
||||
<-aerr
|
||||
return nil, nil, err
|
||||
}
|
||||
if err := <-aerr; err != nil {
|
||||
dconn.Close()
|
||||
return nil, nil, err
|
||||
}
|
||||
return aconn, dconn, nil
|
||||
}
|
||||
|
@ -40,11 +40,10 @@ const (
|
||||
refreshPeersInterval = 30 * time.Second
|
||||
staticPeerCheckInterval = 15 * time.Second
|
||||
|
||||
// Maximum number of concurrently handshaking inbound connections.
|
||||
maxAcceptConns = 50
|
||||
|
||||
// Maximum number of concurrently dialing outbound connections.
|
||||
// Connectivity defaults.
|
||||
maxActiveDialTasks = 16
|
||||
defaultMaxPendingPeers = 50
|
||||
defaultDialRatio = 3
|
||||
|
||||
// Maximum time allowed for reading a complete message.
|
||||
// This is effectively the amount of time a connection can be idle.
|
||||
@ -70,6 +69,11 @@ type Config struct {
|
||||
// Zero defaults to preset values.
|
||||
MaxPendingPeers int `toml:",omitempty"`
|
||||
|
||||
// DialRatio controls the ratio of inbound to dialed connections.
|
||||
// Example: a DialRatio of 2 allows 1/2 of connections to be dialed.
|
||||
// Setting DialRatio to zero defaults it to 3.
|
||||
DialRatio int `toml:",omitempty"`
|
||||
|
||||
// NoDiscovery can be used to disable the peer discovery mechanism.
|
||||
// Disabling is useful for protocol debugging (manual topology).
|
||||
NoDiscovery bool
|
||||
@ -138,7 +142,7 @@ type Config struct {
|
||||
EnableMsgEvents bool
|
||||
|
||||
// Logger is a custom logger to use with the p2p.Server.
|
||||
Logger log.Logger
|
||||
Logger log.Logger `toml:",omitempty"`
|
||||
}
|
||||
|
||||
// Server manages all peer connections.
|
||||
@ -427,7 +431,6 @@ func (srv *Server) Start() (err error) {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
realaddr = conn.LocalAddr().(*net.UDPAddr)
|
||||
if srv.NAT != nil {
|
||||
if !realaddr.IP.IsLoopback() {
|
||||
@ -447,11 +450,16 @@ func (srv *Server) Start() (err error) {
|
||||
|
||||
// node table
|
||||
if !srv.NoDiscovery {
|
||||
ntab, err := discover.ListenUDP(srv.PrivateKey, conn, realaddr, unhandled, srv.NodeDatabase, srv.NetRestrict)
|
||||
if err != nil {
|
||||
return err
|
||||
cfg := discover.Config{
|
||||
PrivateKey: srv.PrivateKey,
|
||||
AnnounceAddr: realaddr,
|
||||
NodeDBPath: srv.NodeDatabase,
|
||||
NetRestrict: srv.NetRestrict,
|
||||
Bootnodes: srv.BootstrapNodes,
|
||||
Unhandled: unhandled,
|
||||
}
|
||||
if err := ntab.SetFallbackNodes(srv.BootstrapNodes); err != nil {
|
||||
ntab, err := discover.ListenUDP(conn, cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
srv.ntab = ntab
|
||||
@ -476,10 +484,7 @@ func (srv *Server) Start() (err error) {
|
||||
srv.DiscV5 = ntab
|
||||
}
|
||||
|
||||
dynPeers := (srv.MaxPeers + 1) / 2
|
||||
if srv.NoDiscovery {
|
||||
dynPeers = 0
|
||||
}
|
||||
dynPeers := srv.maxDialedConns()
|
||||
dialer := newDialState(srv.StaticNodes, srv.BootstrapNodes, srv.ntab, dynPeers, srv.NetRestrict)
|
||||
|
||||
// handshake
|
||||
@ -536,6 +541,7 @@ func (srv *Server) run(dialstate dialer) {
|
||||
defer srv.loopWG.Done()
|
||||
var (
|
||||
peers = make(map[discover.NodeID]*Peer)
|
||||
inboundCount = 0
|
||||
trusted = make(map[discover.NodeID]bool, len(srv.TrustedNodes))
|
||||
taskdone = make(chan task, maxActiveDialTasks)
|
||||
runningTasks []task
|
||||
@ -621,14 +627,14 @@ running:
|
||||
}
|
||||
// TODO: track in-progress inbound node IDs (pre-Peer) to avoid dialing them.
|
||||
select {
|
||||
case c.cont <- srv.encHandshakeChecks(peers, c):
|
||||
case c.cont <- srv.encHandshakeChecks(peers, inboundCount, c):
|
||||
case <-srv.quit:
|
||||
break running
|
||||
}
|
||||
case c := <-srv.addpeer:
|
||||
// At this point the connection is past the protocol handshake.
|
||||
// Its capabilities are known and the remote identity is verified.
|
||||
err := srv.protoHandshakeChecks(peers, c)
|
||||
err := srv.protoHandshakeChecks(peers, inboundCount, c)
|
||||
if err == nil {
|
||||
// The handshakes are done and it passed all checks.
|
||||
p := newPeer(c, srv.Protocols)
|
||||
@ -639,8 +645,11 @@ running:
|
||||
}
|
||||
name := truncateName(c.name)
|
||||
srv.log.Debug("Adding p2p peer", "name", name, "addr", c.fd.RemoteAddr(), "peers", len(peers)+1)
|
||||
peers[c.id] = p
|
||||
go srv.runPeer(p)
|
||||
peers[c.id] = p
|
||||
if p.Inbound() {
|
||||
inboundCount++
|
||||
}
|
||||
}
|
||||
// The dialer logic relies on the assumption that
|
||||
// dial tasks complete after the peer has been added or
|
||||
@ -655,6 +664,9 @@ running:
|
||||
d := common.PrettyDuration(mclock.Now() - pd.created)
|
||||
pd.log.Debug("Removing p2p peer", "duration", d, "peers", len(peers)-1, "req", pd.requested, "err", pd.err)
|
||||
delete(peers, pd.ID())
|
||||
if pd.Inbound() {
|
||||
inboundCount--
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -681,20 +693,22 @@ running:
|
||||
}
|
||||
}
|
||||
|
||||
func (srv *Server) protoHandshakeChecks(peers map[discover.NodeID]*Peer, c *conn) error {
|
||||
func (srv *Server) protoHandshakeChecks(peers map[discover.NodeID]*Peer, inboundCount int, c *conn) error {
|
||||
// Drop connections with no matching protocols.
|
||||
if len(srv.Protocols) > 0 && countMatchingProtocols(srv.Protocols, c.caps) == 0 {
|
||||
return DiscUselessPeer
|
||||
}
|
||||
// Repeat the encryption handshake checks because the
|
||||
// peer set might have changed between the handshakes.
|
||||
return srv.encHandshakeChecks(peers, c)
|
||||
return srv.encHandshakeChecks(peers, inboundCount, c)
|
||||
}
|
||||
|
||||
func (srv *Server) encHandshakeChecks(peers map[discover.NodeID]*Peer, c *conn) error {
|
||||
func (srv *Server) encHandshakeChecks(peers map[discover.NodeID]*Peer, inboundCount int, c *conn) error {
|
||||
switch {
|
||||
case !c.is(trustedConn|staticDialedConn) && len(peers) >= srv.MaxPeers:
|
||||
return DiscTooManyPeers
|
||||
case !c.is(trustedConn) && c.is(inboundConn) && inboundCount >= srv.maxInboundConns():
|
||||
return DiscTooManyPeers
|
||||
case peers[c.id] != nil:
|
||||
return DiscAlreadyConnected
|
||||
case c.id == srv.Self().ID:
|
||||
@ -704,6 +718,21 @@ func (srv *Server) encHandshakeChecks(peers map[discover.NodeID]*Peer, c *conn)
|
||||
}
|
||||
}
|
||||
|
||||
func (srv *Server) maxInboundConns() int {
|
||||
return srv.MaxPeers - srv.maxDialedConns()
|
||||
}
|
||||
|
||||
func (srv *Server) maxDialedConns() int {
|
||||
if srv.NoDiscovery || srv.NoDial {
|
||||
return 0
|
||||
}
|
||||
r := srv.DialRatio
|
||||
if r == 0 {
|
||||
r = defaultDialRatio
|
||||
}
|
||||
return srv.MaxPeers / r
|
||||
}
|
||||
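A standalone sketch of the same slot arithmetic, using assumed inputs rather than the server's fields, to make the inbound/dialed split concrete:

package main

import "fmt"

// maxDialed mirrors maxDialedConns above: no dialing when discovery and
// dialing are both disabled, otherwise MaxPeers divided by the dial ratio
// (which defaults to 3 when left at zero).
func maxDialed(maxPeers, dialRatio int, noDiscovery, noDial bool) int {
	if noDiscovery || noDial {
		return 0
	}
	if dialRatio == 0 {
		dialRatio = 3
	}
	return maxPeers / dialRatio
}

func main() {
	maxPeers := 25 // assumed value
	dialed := maxDialed(maxPeers, 0, false, false)
	inbound := maxPeers - dialed // what maxInboundConns would report
	fmt.Println(dialed, inbound) // 8 dialed slots, 17 inbound slots
}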
|
||||
type tempError interface {
|
||||
Temporary() bool
|
||||
}
|
||||
@ -714,10 +743,7 @@ func (srv *Server) listenLoop() {
|
||||
defer srv.loopWG.Done()
|
||||
srv.log.Info("RLPx listener up", "self", srv.makeSelf(srv.listener, srv.ntab))
|
||||
|
||||
// This channel acts as a semaphore limiting
|
||||
// active inbound connections that are lingering pre-handshake.
|
||||
// If all slots are taken, no further connections are accepted.
|
||||
tokens := maxAcceptConns
|
||||
tokens := defaultMaxPendingPeers
|
||||
if srv.MaxPendingPeers > 0 {
|
||||
tokens = srv.MaxPendingPeers
|
||||
}
|
||||
@ -758,9 +784,6 @@ func (srv *Server) listenLoop() {
|
||||
|
||||
fd = newMeteredConn(fd, true)
|
||||
srv.log.Trace("Accepted connection", "addr", fd.RemoteAddr())
|
||||
|
||||
// Spawn the handler. It will give the slot back when the connection
|
||||
// has been established.
|
||||
go func() {
|
||||
srv.SetupConn(fd, inboundConn, nil)
|
||||
slots <- struct{}{}
|
||||
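The pre-handshake limit in listenLoop is the usual buffered-channel semaphore. A self-contained sketch of that pattern, with invented names and values standing in for the server's fields:

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	const maxPending = 3 // stand-in for MaxPendingPeers

	// A channel pre-filled with tokens acts as a counting semaphore:
	// take a token before handling a connection, return it when done.
	slots := make(chan struct{}, maxPending)
	for i := 0; i < maxPending; i++ {
		slots <- struct{}{}
	}

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		<-slots // blocks while all pre-handshake slots are in use
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			time.Sleep(10 * time.Millisecond) // pretend handshake work
			fmt.Println("handled connection", id)
			slots <- struct{}{} // give the slot back
		}(i)
	}
	wg.Wait()
}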
|
@ -13,6 +13,7 @@
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package adapters
|
||||
|
||||
type SimStateStore struct {
|
||||
|
67
p2p/testing/peerpool.go
Normal file
@ -0,0 +1,67 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package testing
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
)
|
||||
|
||||
type TestPeer interface {
|
||||
ID() discover.NodeID
|
||||
Drop(error)
|
||||
}
|
||||
|
||||
// TestPeerPool is an example peerPool to demonstrate registration of peer connections
|
||||
type TestPeerPool struct {
|
||||
lock sync.Mutex
|
||||
peers map[discover.NodeID]TestPeer
|
||||
}
|
||||
|
||||
func NewTestPeerPool() *TestPeerPool {
|
||||
return &TestPeerPool{peers: make(map[discover.NodeID]TestPeer)}
|
||||
}
|
||||
|
||||
func (self *TestPeerPool) Add(p TestPeer) {
|
||||
self.lock.Lock()
|
||||
defer self.lock.Unlock()
|
||||
log.Trace(fmt.Sprintf("pp add peer %v", p.ID()))
|
||||
self.peers[p.ID()] = p
|
||||
|
||||
}
|
||||
|
||||
func (self *TestPeerPool) Remove(p TestPeer) {
|
||||
self.lock.Lock()
|
||||
defer self.lock.Unlock()
|
||||
delete(self.peers, p.ID())
|
||||
}
|
||||
|
||||
func (self *TestPeerPool) Has(id discover.NodeID) bool {
|
||||
self.lock.Lock()
|
||||
defer self.lock.Unlock()
|
||||
_, ok := self.peers[id]
|
||||
return ok
|
||||
}
|
||||
|
||||
func (self *TestPeerPool) Get(id discover.NodeID) TestPeer {
|
||||
self.lock.Lock()
|
||||
defer self.lock.Unlock()
|
||||
return self.peers[id]
|
||||
}
|
280
p2p/testing/protocolsession.go
Normal file
@ -0,0 +1,280 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package testing
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
|
||||
)
|
||||
|
||||
var errTimedOut = errors.New("timed out")
|
||||
|
||||
// ProtocolSession is a quasi-simulation of a pivot node running
|
||||
// a service and a number of dummy peers that can send (trigger) or
|
||||
// receive (expect) messages.
|
||||
type ProtocolSession struct {
|
||||
Server *p2p.Server
|
||||
IDs []discover.NodeID
|
||||
adapter *adapters.SimAdapter
|
||||
events chan *p2p.PeerEvent
|
||||
}
|
||||
|
||||
// Exchange is the basic unit of a protocol test.
|
||||
// The triggers and expects in the arrays are run immediately and asynchronously,
|
||||
// so one cannot have multiple expects for the SAME peer with DIFFERENT message types,
|
||||
// because it is unpredictable which expect will receive which message
|
||||
// (with expect #1 and #2, messages might be sent #2 and #1, and both expects will complain about wrong message code).
|
||||
// An exchange is defined on a session.
|
||||
type Exchange struct {
|
||||
Label string
|
||||
Triggers []Trigger
|
||||
Expects []Expect
|
||||
Timeout time.Duration
|
||||
}
|
||||
|
||||
// Trigger is part of the exchange, an incoming message for the pivot node
|
||||
// sent by a peer.
|
||||
type Trigger struct {
|
||||
Msg interface{} // type of message to be sent
|
||||
Code uint64 // code of message is given
|
||||
Peer discover.NodeID // the peer to send the message to
|
||||
Timeout time.Duration // timeout duration for the sending
|
||||
}
|
||||
|
||||
// Expect is part of an exchange, an outgoing message from the pivot node
|
||||
// received by a peer.
|
||||
type Expect struct {
|
||||
Msg interface{} // type of message to expect
|
||||
Code uint64 // code of the message to expect
|
||||
Peer discover.NodeID // the peer that expects the message
|
||||
Timeout time.Duration // timeout duration for receiving
|
||||
}
|
||||
|
||||
// Disconnect represents a disconnect event, used and checked by TestDisconnected
|
||||
type Disconnect struct {
|
||||
Peer discover.NodeID // disconnected peer
|
||||
Error error // disconnect reason
|
||||
}
|
||||
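To make the shape of these types concrete, a hedged sketch of building one exchange (pingMsg, pongMsg and the node ID are placeholders; per the comment above, a single expect per peer avoids the message-ordering ambiguity):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/p2p/discover"
	p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
)

// Placeholder message types, invented for this sketch.
type pingMsg struct{}
type pongMsg struct{}

func main() {
	// peer stands in for one of the session's dummy nodes (e.g. session.IDs[0]).
	peer := discover.NodeID{1}

	exchange := p2ptest.Exchange{
		Label: "ping-pong",
		Triggers: []p2ptest.Trigger{{
			Code: 0,          // message code understood by the protocol under test
			Msg:  &pingMsg{}, // incoming message for the pivot node
			Peer: peer,       // sent by this dummy peer
		}},
		Expects: []p2ptest.Expect{{
			Code: 1,
			Msg:  &pongMsg{}, // reply the pivot node is expected to send back
			Peer: peer,       // received by the same dummy peer
		}},
		// Timeout left zero: testExchange falls back to its 2 second default.
	}
	fmt.Println(exchange.Label)
}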
|
||||
// trigger sends messages from peers
|
||||
func (self *ProtocolSession) trigger(trig Trigger) error {
|
||||
simNode, ok := self.adapter.GetNode(trig.Peer)
|
||||
if !ok {
|
||||
return fmt.Errorf("trigger: peer %v does not exist (1- %v)", trig.Peer, len(self.IDs))
|
||||
}
|
||||
mockNode, ok := simNode.Services()[0].(*mockNode)
|
||||
if !ok {
|
||||
return fmt.Errorf("trigger: peer %v is not a mock", trig.Peer)
|
||||
}
|
||||
|
||||
errc := make(chan error)
|
||||
|
||||
go func() {
|
||||
errc <- mockNode.Trigger(&trig)
|
||||
}()
|
||||
|
||||
t := trig.Timeout
|
||||
if t == time.Duration(0) {
|
||||
t = 1000 * time.Millisecond
|
||||
}
|
||||
select {
|
||||
case err := <-errc:
|
||||
return err
|
||||
case <-time.After(t):
|
||||
return fmt.Errorf("timout expecting %v to send to peer %v", trig.Msg, trig.Peer)
|
||||
}
|
||||
}
|
||||
|
||||
// expect checks an expectation of a message sent out by the pivot node
|
||||
func (self *ProtocolSession) expect(exps []Expect) error {
|
||||
// construct a map of expectations for each node
|
||||
peerExpects := make(map[discover.NodeID][]Expect)
|
||||
for _, exp := range exps {
|
||||
if exp.Msg == nil {
|
||||
return errors.New("no message to expect")
|
||||
}
|
||||
peerExpects[exp.Peer] = append(peerExpects[exp.Peer], exp)
|
||||
}
|
||||
|
||||
// construct a map of mockNodes for each node
|
||||
mockNodes := make(map[discover.NodeID]*mockNode)
|
||||
for nodeID := range peerExpects {
|
||||
simNode, ok := self.adapter.GetNode(nodeID)
|
||||
if !ok {
|
||||
return fmt.Errorf("trigger: peer %v does not exist (1- %v)", nodeID, len(self.IDs))
|
||||
}
|
||||
mockNode, ok := simNode.Services()[0].(*mockNode)
|
||||
if !ok {
|
||||
return fmt.Errorf("trigger: peer %v is not a mock", nodeID)
|
||||
}
|
||||
mockNodes[nodeID] = mockNode
|
||||
}
|
||||
|
||||
// The done channel cancels all created goroutines when the function returns.
|
||||
done := make(chan struct{})
|
||||
defer close(done)
|
||||
// errc catches the first error from the expectation goroutines.
|
||||
errc := make(chan error)
|
||||
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(len(mockNodes))
|
||||
for nodeID, mockNode := range mockNodes {
|
||||
nodeID := nodeID
|
||||
mockNode := mockNode
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
||||
// Sum all Expect timeouts to give the maximum
|
||||
// time for all expectations to finish.
|
||||
// mockNode.Expect checks all received messages against
|
||||
// a list of expected messages, and the timeout for each
|
||||
// of them cannot be checked separately.
|
||||
var t time.Duration
|
||||
for _, exp := range peerExpects[nodeID] {
|
||||
if exp.Timeout == time.Duration(0) {
|
||||
t += 2000 * time.Millisecond
|
||||
} else {
|
||||
t += exp.Timeout
|
||||
}
|
||||
}
|
||||
alarm := time.NewTimer(t)
|
||||
defer alarm.Stop()
|
||||
|
||||
// expectErrc is used to check if the error returned
|
||||
// from mockNode.Expect is not nil and to send it to
|
||||
// errc only in that case.
|
||||
// The done channel is closed when the function returns.
|
||||
expectErrc := make(chan error)
|
||||
go func() {
|
||||
select {
|
||||
case expectErrc <- mockNode.Expect(peerExpects[nodeID]...):
|
||||
case <-done:
|
||||
case <-alarm.C:
|
||||
}
|
||||
}()
|
||||
|
||||
select {
|
||||
case err := <-expectErrc:
|
||||
if err != nil {
|
||||
select {
|
||||
case errc <- err:
|
||||
case <-done:
|
||||
case <-alarm.C:
|
||||
errc <- errTimedOut
|
||||
}
|
||||
}
|
||||
case <-done:
|
||||
case <-alarm.C:
|
||||
errc <- errTimedOut
|
||||
}
|
||||
|
||||
}()
|
||||
}
|
||||
|
||||
go func() {
|
||||
wg.Wait()
|
||||
// close errc when all goroutines finish so that a nil error is returned from errc
|
||||
close(errc)
|
||||
}()
|
||||
|
||||
return <-errc
|
||||
}
|
||||
|
||||
// TestExchanges tests a series of exchanges against the session
|
||||
func (self *ProtocolSession) TestExchanges(exchanges ...Exchange) error {
|
||||
for i, e := range exchanges {
|
||||
if err := self.testExchange(e); err != nil {
|
||||
return fmt.Errorf("exchange #%d %q: %v", i, e.Label, err)
|
||||
}
|
||||
log.Trace(fmt.Sprintf("exchange #%d %q: run successfully", i, e.Label))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// testExchange tests a single Exchange.
|
||||
// Default timeout value is 2 seconds.
|
||||
func (self *ProtocolSession) testExchange(e Exchange) error {
|
||||
errc := make(chan error)
|
||||
done := make(chan struct{})
|
||||
defer close(done)
|
||||
|
||||
go func() {
|
||||
for _, trig := range e.Triggers {
|
||||
err := self.trigger(trig)
|
||||
if err != nil {
|
||||
errc <- err
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
select {
|
||||
case errc <- self.expect(e.Expects):
|
||||
case <-done:
|
||||
}
|
||||
}()
|
||||
|
||||
// time out globally or finish when all expectations are satisfied
|
||||
t := e.Timeout
|
||||
if t == 0 {
|
||||
t = 2000 * time.Millisecond
|
||||
}
|
||||
alarm := time.NewTimer(t)
|
||||
select {
|
||||
case err := <-errc:
|
||||
return err
|
||||
case <-alarm.C:
|
||||
return errTimedOut
|
||||
}
|
||||
}
|
||||
|
||||
// TestDisconnected tests the disconnections given as arguments.
|
||||
// The disconnect structs describe what disconnect error is expected on which peer.
|
||||
func (self *ProtocolSession) TestDisconnected(disconnects ...*Disconnect) error {
|
||||
expects := make(map[discover.NodeID]error)
|
||||
for _, disconnect := range disconnects {
|
||||
expects[disconnect.Peer] = disconnect.Error
|
||||
}
|
||||
|
||||
timeout := time.After(time.Second)
|
||||
for len(expects) > 0 {
|
||||
select {
|
||||
case event := <-self.events:
|
||||
if event.Type != p2p.PeerEventTypeDrop {
|
||||
continue
|
||||
}
|
||||
expectErr, ok := expects[event.Peer]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
if !(expectErr == nil && event.Error == "" || expectErr != nil && expectErr.Error() == event.Error) {
|
||||
return fmt.Errorf("unexpected error on peer %v. expected '%v', got '%v'", event.Peer, expectErr, event.Error)
|
||||
}
|
||||
delete(expects, event.Peer)
|
||||
case <-timeout:
|
||||
return fmt.Errorf("timed out waiting for peers to disconnect")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
Some files were not shown because too many files have changed in this diff.